1. [Cloud] Openstack - Caracal Deployment Overview
2. [Cloud] Openstack - 1-1. Controller Node (Preprocess, Environment, Keystone, Glance)
3. [Cloud] Openstack - 1-2. Controller Node (Placement, Nova, Neutron)
4. [Cloud] Openstack - 1-3. Controller Node (Cinder, Swift) ←
5. [Cloud] Openstack - 1-4. Controller Node (Horizon)
6. [Cloud] Openstack - 2. Compute Node
7. [Cloud] Openstack - 3. Block Node
8. [Cloud] Openstack - 4. Horizon Dashboard Console Improvements
Cinder (Block Storage)
mysql
MariaDB [(none)]> CREATE DATABASE cinder;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';
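An optional sanity check, assuming the same CINDER_DBPASS placeholder, is to log in with the new account:
mysql -u cinder -pCINDER_DBPASS cinder -e "SHOW TABLES;"
# an empty result is expected here; the tables appear only after cinder-manage db sync below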
openstack user create --domain default --password-prompt cinder
User Password: CINDER_PASS
Repeat User Password: CINDER_PASS
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 9d7e33de3e1a498390353819bc7d245d |
| name | cinder |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
openstack role add --project service --user cinder admin
openstack service create --name cinderv3 \
--description "OpenStack Block Storage" volumev3
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | ab3bbbef780845a1a283490d281e7fda |
| name | cinderv3 |
| type | volumev3 |
+-------------+----------------------------------+
openstack endpoint create --region RegionOne \
volumev3 public http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 03fa2c90153546c295bf30ca86b1344b |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ab3bbbef780845a1a283490d281e7fda |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
openstack endpoint create --region RegionOne \
volumev3 internal http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 94f684395d1b41068c70e4ecb11364b2 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ab3bbbef780845a1a283490d281e7fda |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
openstack endpoint create --region RegionOne \
volumev3 admin http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 4511c28a0f9840c78bacb25f10f62c98 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ab3bbbef780845a1a283490d281e7fda |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
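With all three endpoints created, a quick way to double-check them (assuming admin credentials are sourced in the shell):
openstack endpoint list --service volumev3
# should list the public, internal, and admin interfaces for cinderv3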
apt install cinder-api cinder-scheduler -y
vi /etc/cinder/cinder.conf
[database]
# ...
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 192.168.2.10
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
su -s /bin/sh -c "cinder-manage db sync" cinder
vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
service nova-api restart
service cinder-scheduler restart
service apache2 restart
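If the configuration is correct, the scheduler should now register itself; one way to verify (with admin credentials sourced):
openstack volume service list
# cinder-scheduler on the controller should show Status 'enabled' and State 'up'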
Swift (Object Storage)
Prerequisites
1. To create the Identity service credentials, complete these steps
openstack user create --domain default --password-prompt swift
User Password: SWIFT_PASS
Repeat User Password: SWIFT_PASS
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | d535e5cbd2b74ac7bfb97db9cced3ed6 |
| name | swift |
+-----------+----------------------------------+
openstack role add --project service --user swift admin
openstack service create --name swift \
--description "OpenStack Object Storage" object-store
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Object Storage |
| enabled | True |
| id | 75ef509da2c340499d454ae96a2c5c34 |
| name | swift |
| type | object-store |
+-------------+----------------------------------+
2. Create the Object Storage service API endpoints
openstack endpoint create --region RegionOne object-store public http://controller:8080/v1/AUTH_%\(tenant_id\)s
openstack endpoint create --region RegionOne object-store internal http://controller:8080/v1/AUTH_%\(tenant_id\)s
openstack endpoint create --region RegionOne object-store admin http://controller:8080/v1
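As with Cinder, the new endpoints can be sanity-checked (admin credentials assumed):
openstack endpoint list --service object-store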
Install and configure components
apt-get install swift swift-proxy python3-swiftclient python3-keystoneclient python3-keystonemiddleware memcached
- Edit the /etc/swift/proxy-server.conf file and complete the following actions
[DEFAULT]
bind_port = 8080
workers = auto
user = swift
log_facility = LOG_LOCAL1
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:cache]
use = egg:swift#memcache
memcache_servers = 127.0.0.1:11211
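Note that this pipeline omits Keystone token validation, so the proxy does not actually verify X-Auth-Token. The upstream install guide additionally places authtoken and keystoneauth between cache and proxy-server in the pipeline; a sketch of those sections, reusing the SWIFT_PASS from above, would look like:
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
password = SWIFT_PASS
delay_auth_decision = True
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin,user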
Create and distribute initial rings
Create account ring
1. Change to the /etc/swift directory
cd /etc/swift
2. Create the base account.builder file (the arguments are partition power 10, i.e. 2^10 = 1024 partitions; 1 replica; and min_part_hours = 1)
swift-ring-builder account.builder create 10 1 1
3. Add each storage node to the ring
swift-ring-builder account.builder \
add --region 1 --zone 1 --ip 192.168.2.30 --port 6202 \
--device sdb2 --weight 100
4. Verify the ring contents
swift-ring-builder account.builder
account.builder, build version 1, id 340742094cae4f9bbbf2c2128a93f303
1024 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file account.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.2.30:6202 192.168.2.30:6202 sdb2 100.00 0 -100.00
5. Rebalance the ring
swift-ring-builder account.builder rebalance
Create container ring
1. Change to the /etc/swift directory
cd /etc/swift
2. Create the base container.builder file
swift-ring-builder container.builder create 10 1 1
3. Add each storage node to the ring
swift-ring-builder container.builder \
add --region 1 --zone 1 --ip 192.168.2.30 --port 6201 \
--device sdb2 --weight 100
4. Verify the ring contents
swift-ring-builder container.builder
container.builder, build version 1, id b15fe3f75cd84c26a8a26054882aafe1
1024 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file container.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.2.30:6201 192.168.2.30:6201 sdb2 100.00 0 -100.00
5. Rebalance the ring
swift-ring-builder container.builder rebalance
Create object ring
1. Change to the /etc/swift directory
cd /etc/swift
2. Create the base object.builder file
swift-ring-builder object.builder create 10 1 1
3. Add each storage node to the ring
swift-ring-builder object.builder \
add --region 1 --zone 1 --ip 192.168.2.30 --port 6200 \
--device sdb2 --weight 100
4. Verify the ring contents
swift-ring-builder object.builder
object.builder, build version 1, id 683b8a1e53e94599b9f6c9fc01fb6ce5
1024 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file object.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.2.30:6200 192.168.2.30:6200 sdb2 100.00 0 -100.00
5. Rebalance the ring
swift-ring-builder object.builder rebalance
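Before distributing the rings, it is worth confirming that all three .ring.gz files were written and that the builders pass validation (validate is a standard swift-ring-builder subcommand):
ls /etc/swift/*.ring.gz
swift-ring-builder account.builder validate
swift-ring-builder container.builder validate
swift-ring-builder object.builder validate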
Finalize installation for Ubuntu and Debian
(Reference: "Finalize installation" in the Swift documentation on docs.openstack.org)
Install openssh-server on the block node before proceeding.
scp /etc/swift/*.ring.gz block@192.168.2.30:/home/block
--- On the block node
cd ~block
cp account.ring.gz container.ring.gz object.ring.gz /etc/swift
cd /etc/swift
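A quick check on the block node that all three rings landed in place:
ls -l /etc/swift/*.ring.gz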
1. Edit the /etc/swift/swift.conf file and complete the following actions
[swift-hash]
# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
# hashing algorithm when determining data placement in the cluster.
# These values should remain secret and MUST NOT change
# once a cluster has been deployed.
# Use only printable chars (python -c "import string; print(string.printable)")
swift_hash_path_suffix = HASH_PATH_SUFFIX
swift_hash_path_prefix = HASH_PATH_PREFIX
# Storage policies are defined here and determine various characteristics
# about how objects are stored and treated. More documentation can be found at
# https://docs.openstack.org/swift/latest/overview_policies.html.
# Client requests specify a policy on a per container basis using the policy
# name. Internally the policy name is mapped to the policy index specified in
# the policy's section header in this config file. Policy names are
# case-insensitive and, to avoid confusion with indexes names, should not be
# numbers.
#
# The policy with index 0 is always used for legacy containers and can be given
# a name for use in metadata however the ring file name will always be
# 'object.ring.gz' for backwards compatibility. If no policies are defined a
# policy with index 0 will be automatically created for backwards compatibility
# and given the name Policy-0. A default policy is used when creating new
# containers when no policy is specified in the request. If no other policies
# are defined the policy with index 0 will be declared the default. If
# multiple policies are defined you must define a policy with index 0 and you
# must specify a default. It is recommended you always define a section for
# storage-policy:0.
#
# A 'policy_type' argument is also supported but is not mandatory. Default
# policy type 'replication' is used when 'policy_type' is unspecified.
#
# A 'diskfile_module' optional argument lets you specify an alternate backend
# object storage plug-in architecture. The default is
# "egg:swift#replication.fs", or "egg:swift#erasure_coding.fs", depending on
# the policy type.
#
# Aliases for the storage policy name may be defined, but are not required.
#
[storage-policy:0]
name = Policy-0
default = yes
#policy_type = replication
#diskfile_module = egg:swift#replication.fs
#aliases = yellow, orange
# The following section would declare a policy called 'silver', the number of
# replicas will be determined by how the ring is built. In this example the
# 'silver' policy could have a lower or higher # of replicas than the
# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
# may only specify one storage policy section as the default. If you changed
# this section to specify 'silver' as the default, when a client created a new
# container w/o a policy specified, it will get the 'silver' policy because
# this config has specified it as the default. However if a legacy container
# (one created with a pre-policy version of swift) is accessed, it is known
# implicitly to be assigned to the policy with index 0 as opposed to the
# current default. Note that even without specifying any aliases, a policy
# always has at least the default name stored in aliases because this field is
# used to contain all human readable names for a storage policy.
#
#[storage-policy:1]
#name = silver
#policy_type = replication
#diskfile_module = egg:swift#replication.fs
# The following declares a storage policy of type 'erasure_coding' which uses
# Erasure Coding for data reliability. Please refer to Swift documentation for
# details on how the 'erasure_coding' storage policy is implemented.
#
# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
# operations. Please refer to Swift documentation for details on how to
# install PyECLib.
#
# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and
# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and
# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the
# list of EC backends supported by PyECLib. The ring configured for the
# storage policy must have its "replica" count configured to
# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is
# validated when services start. 'ec_object_segment_size' is the amount of
# data that will be buffered up before feeding a segment into the
# encoder/decoder. More information about these configuration options and
# supported 'ec_type' schemes is available in the Swift documentation. See
# https://docs.openstack.org/swift/latest/overview_erasure_code.html
# for more information on how to configure EC policies.
#
# The example 'deepfreeze10-4' policy defined below is a _sample_
# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
# fragments. 'ec_type' defines the Erasure Coding scheme.
# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example
# below.
#
#[storage-policy:2]
#name = deepfreeze10-4
#aliases = df10-4
#policy_type = erasure_coding
#diskfile_module = egg:swift#erasure_coding.fs
#ec_type = liberasurecode_rs_vand
#ec_num_data_fragments = 10
#ec_num_parity_fragments = 4
#ec_object_segment_size = 1048576
#
# Duplicated EC fragments is proof-of-concept experimental support to enable
# Global Erasure Coding policies with multiple regions acting as independent
# failure domains. Do not change the default except in development/testing.
#ec_duplication_factor = 1
# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster. These constraints are automatically
# published by the proxy server in responses to /info requests.
[swift-constraints]
# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail. It is STRONGLY recommended to leave this value at
# the default (5 * 2**30 + 2).
#max_file_size = 5368709122
# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.
#max_meta_name_length = 128
# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value
#max_meta_value_length = 256
# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object
#max_meta_count = 90
# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)
#max_meta_overall_size = 4096
# max_header_size is the max number of bytes in the utf8 encoding of each
# header. Using 8192 as default because eventlet use 8192 as max size of
# header line. This value may need to be increased when using identity
# v3 API tokens including more than 7 catalog entries.
# See also include_service_catalog in proxy-server.conf-sample
# (documented at https://docs.openstack.org/swift/latest/overview_auth.html)
#max_header_size = 8192
# By default the maximum number of allowed headers depends on the number of max
# allowed metadata settings plus a default value of 36 for swift internally
# generated headers and regular http headers. If for some reason this is not
# enough (custom middleware for example) it can be increased with the
# extra_header_count constraint.
#extra_header_count = 0
# max_object_name_length is the max number of bytes in the utf8 encoding
# of an object name
#max_object_name_length = 1024
# container_listing_limit is the default (and max) number of items
# returned for a container listing request
#container_listing_limit = 10000
# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000
# max_account_name_length is the max number of bytes in the utf8 encoding
# of an account name
#max_account_name_length = 256
# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name
#max_container_name_length = 256
# By default all REST API calls should use "v1" or "v1.0" as the version string,
# for example "/v1/account". This can be manually overridden to make this
# backward-compatible, in case a different version string has been used before.
# Use a comma-separated list in case of multiple allowed versions, for example
# valid_api_versions = v0,v1,v2
# This is only enforced for account, container and object requests. The allowed
# api versions are by default excluded from /info.
# valid_api_versions = v1,v1.0
# The prefix used for hidden auto-created accounts, for example accounts in
# which shard containers are created. It defaults to '.'; don't change it.
# auto_create_account_prefix = .
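The HASH_PATH_SUFFIX and HASH_PATH_PREFIX placeholders must be replaced with unique secret values before starting the services; one common way to generate them (any random-string generator works):
openssl rand -hex 10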
- Verification (the admin project ID listed below is reused in the curl URL of the test script)
openstack project list
+----------------------------------+---------+
| ID | Name |
+----------------------------------+---------+
| 7f0dd86e4b734e92bc632d01af40fdb3 | service |
| 920dbacdacd74254b2fa8691c0437504 | admin |
+----------------------------------+---------+
vi /etc/swift/swift_test.sh
# Set the OpenStack environment variables
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=service
export OS_USERNAME=swift
export OS_PASSWORD=SWIFT_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# Issue an authentication token
AUTH_TOKEN=$(openstack token issue -f value -c id)
echo "Auth Token: $AUTH_TOKEN"
# Access the Swift service
curl -i -H "X-Auth-Token: $AUTH_TOKEN" http://controller:8080/v1/AUTH_920dbacdacd74254b2fa8691c0437504
chmod +x /etc/swift/swift_test.sh
- Edit /etc/memcached.conf
-vv   # enable this option for verbose logging
- Restart the services
service memcached restart
service swift-proxy restart
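At this point the proxy should be answering on port 8080. The healthcheck filter configured earlier gives a cheap smoke test, after which the test script can be run:
curl -i http://controller:8080/healthcheck
# expect HTTP 200 with body 'OK'
/etc/swift/swift_test.sh
# a 2xx response from the final curl (e.g. 204 No Content for an empty account) means the proxy is serving requests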
- Create a test container
swift post mycontainer   # after this, running ./swift_test.sh shows mycontainer in the account listing
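To confirm the container exists, the standard swift client can be used with the same OS_* variables exported in the current shell:
swift stat mycontainer
swift list
# mycontainer should appear in the listing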