openstack

openstack - Ceph storage configuration

sysman 2021. 2. 17. 16:50

 

host name            ip address    OS version   role
controller.park.com  x.x.100.110   CentOS 7.3   controller node
compute.park.com     x.x.100.111   CentOS 7.3   compute node
network.park.com     x.x.100.112   CentOS 7.3   ceph3 node (/dev/sdd 10G)
ceph1.park.com       x.x.100.113   CentOS 7.3   ceph1 node (/dev/sdb 10G)
ceph2.park.com       x.x.100.114   CentOS 7.3   ceph2 node (/dev/sdb 10G)

 

Pre-installation work for the Ceph cluster

######### controller, compute, network, ceph1, ceph2: disable the firewall ##################

#setenforce 0

#systemctl disable firewalld

#systemctl stop firewalld
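
If you would rather keep firewalld running instead of disabling it, this is a minimal sketch of the ports Ceph uses (6789/tcp for the monitor, 6800-7300/tcp for OSDs/MGR); run it on each Ceph node:

#firewall-cmd --permanent --add-port=6789/tcp --add-port=6800-7300/tcp

#firewall-cmd --reload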

 

######### controller, compute, network, ceph1, ceph2: hostname configuration ##################

# cat /etc/hosts

x.x.100.110 controller.park.com     controller

x.x.100.111 compute.park.com        compute

x.x.100.112 network.park.com        network

x.x.100.113 ceph1.park.com          ceph1

x.x.100.114 ceph2.park.com          ceph2

 

#hostnamectl set-hostname X.park.com   // set according to each node's name
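
Since each host needs a different name, one way to do it in a single loop from any node (a sketch assuming root SSH access; it will prompt for passwords until keys are exchanged below):

#for h in controller compute network ceph1 ceph2; do ssh root@$h hostnamectl set-hostname $h.park.com; done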

 

######### controller, compute, network, ceph1, ceph2: NTP configuration ##################

# yum install chrony -y

# vi /etc/chrony.conf

server time.bora.net iburst

allow x.x.100.0/24

 

#systemctl start chronyd

#systemctl enable chronyd

 

#chronyc sources

#chronyc tracking

#timedatectl

 

######### controller, compute, network, ceph1, ceph2: ceph account setup ##################

Configure this on every node.

# useradd ceph

#passwd ceph

new password : ceph

retype password : ceph

 

Configure on ceph1:

#vi /etc/sudoers.d/ceph

Defaults:ceph !requiretty

ceph ALL = (root) NOPASSWD:ALL

#chmod 440 /etc/sudoers.d/ceph

#scp /etc/sudoers.d/ceph root@ceph2:/etc/sudoers.d/   // copy to every node
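
A quick loop sketch to push the sudoers file to every node at once (assumes root SSH access to each host):

#for h in ceph2 network compute controller; do scp /etc/sudoers.d/ceph root@$h:/etc/sudoers.d/; done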

 

######### ceph1: SSH account setup ##################

#su - ceph

$ssh-keygen

Press Enter for the remaining prompts.

 

With this config in place, you can connect by typing just the host alias.

$vi .ssh/config

Host ceph1

 Hostname ceph1.park.com

 User ceph

 

Host ceph2

 Hostname ceph2.park.com

 User ceph

 

Host network

 Hostname network.park.com

 User ceph

 

Host controller

 Hostname controller.park.com

 User ceph

 

Host compute

 Hostname compute.park.com

 User ceph

 

$chmod 644 .ssh/config

$ssh-copy-id ceph2                   // repeat for every node
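
To apply it to every node in one go, a loop sketch using the Host aliases defined above:

$for h in ceph1 ceph2 network controller compute; do ssh-copy-id $h; done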

 

Building the Ceph storage cluster

######### ceph1: Ceph installation ##################

#yum install epel-release yum-plugin-priorities

 

Check with rpm -qa | grep librados2 (and librbd1) that the versions already installed on each node match the Ceph release you are about to install; below is the release that matched my environment.

# yum install https://download.ceph.com/rpm-luminous/el7/noarch/ceph-release-1-1.el7.noarch.rpm

# yum install ceph-deploy

# yum -y install python-setuptools

#rpm -qa | grep ceph

ceph-deploy-~~~

ceph-release~~~

 

#su - ceph

$mkdir cluster ; cd cluster

 

Install the Ceph packages

$ceph-deploy new ceph1

 

$ls

$vi ceph.conf

[global]
fsid = 2171ffd6-b239-420c-bf5e-bda8f9a22a9b
mon_initial_members = ceph1
mon_host = 192.168.100.113
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2        // store each object twice (2 replicas)

 

$ceph-deploy install --release luminous ceph1 ceph2 network compute controller

 

Activate the monitor

$ceph-deploy mon create-initial

$ls

 


######### ceph1: OSD setup ##################

$ceph-deploy disk list ceph1 ceph2 network

 

Clear existing data and define the disk type

$ceph-deploy disk zap ceph1 /dev/sdb

$ceph-deploy disk zap ceph2 /dev/sdb

$ceph-deploy disk zap network /dev/sdd

 

Create and activate the Ceph OSDs

$ceph-deploy osd create --data /dev/sdb ceph1

$ceph-deploy osd create --data /dev/sdb ceph2

$ceph-deploy osd create --data /dev/sdd network

 

Set up the manager host

$ceph-deploy mgr create ceph1

 

Distribute ceph.client.admin.keyring

$ceph-deploy admin ceph1 ceph2 network compute controller

 

$sudo chmod 644 /etc/ceph/ceph.client.admin.keyring   // run on every node
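
As with the sudoers file earlier, a loop sketch to adjust the keyring permissions on every node from ceph1 (assumes the ceph user can sudo on each host, as configured above):

$for h in ceph1 ceph2 network compute controller; do ssh $h sudo chmod 644 /etc/ceph/ceph.client.admin.keyring; done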

 

$ceph health

HEALTH_OK

$ceph -s

cluster:
    id:     2171ffd6-b239-420c-bf5e-bda8f9a22a9b
    health: HEALTH_WARN
            application not enabled on 3 pool(s)

  services:
    mon: 1 daemons, quorum ceph1
    mgr: ceph1(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   3 pools, 320 pgs
    objects: 151 objects, 208MiB
    usage:   3.17GiB used, 26.8GiB / 30.0GiB avail
    pgs:     320 active+clean
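
The HEALTH_WARN above ("application not enabled on 3 pool(s)") can be cleared once the pools from the following sections exist by tagging each one with the rbd application; a minimal sketch using those pool names:

$ceph osd pool application enable images rbd

$ceph osd pool application enable volumes rbd

$ceph osd pool application enable vms rbd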

 

 

Using Ceph as the Glance backend storage

######### ceph1: Ceph admin setup ##################

$ceph osd pool create images 128  // 128 is the PG count

$ceph osd pool ls

images
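
Because osd pool default size = 2 was set in ceph.conf, the new pool should report two replicas; a quick sanity check (not one of the original steps):

$ceph osd pool get images size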

 

Create the Glance keyring (both sides must have matching keyrings to communicate)

$sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.images.keyring

$ceph auth get client.glance

$cat /etc/ceph/ceph.client.images.keyring

$scp /etc/ceph/ceph.client.images.keyring root@controller:/etc/ceph/

 

######### controller: Ceph Glance configuration ##################

#chown glance. /etc/ceph/ceph.client.images.keyring

#chmod 640 /etc/ceph/ceph.client.images.keyring

 

Add the following:

#vi /etc/ceph/ceph.conf

[client.glance]

keyring = /etc/ceph/ceph.client.images.keyring

 

#vi /etc/glance/glance-api.conf

 

[glance_store]

stores = file,http,rbd

default_store = rbd

rbd_store_chunk_size = 8

rbd_store_pool = images

rbd_store_user = glance

rbd_store_ceph_conf = /etc/ceph/ceph.conf

filesystem_store_datadir = /var/lib/glance/images/

 

#systemctl restart openstack-glance-api

 

######### Ceph Glance configuration check ##################

#openstack image create "cirros2" --file cirros-0.5.1-x86_64-disk.img --disk-format qcow2 --container-format bare --public

 

#openstack image list

 

[ceph@ceph1 cluster]$ rbd -p images ls -l
NAME                                         SIZE PARENT FMT PROT LOCK
35245ade-6adb-49b2-8844-b5ef580f8bd7      12.1MiB          2
35245ade-6adb-49b2-8844-b5ef580f8bd7@snap 12.1MiB          2 yes
[ceph@ceph1 cluster]$

[ceph@ceph1 cluster]$ rbd info images/35245ade-6adb-49b2-8844-b5ef580f8bd7
rbd image '35245ade-6adb-49b2-8844-b5ef580f8bd7':
        size 12.1MiB in 2 objects
        order 23 (8MiB objects)
        block_name_prefix: rbd_data.1024312dceaf
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        flags:
        create_timestamp: Wed Feb 17 11:32:47 2021

 

Using Ceph as the Cinder backend storage

######### ceph1: Ceph admin setup ##################

$ ceph osd pool create volumes 128

$ceph osd pool ls

images

volumes

 

$sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images' -o /etc/ceph/ceph.client.volumes.keyring

$ceph auth get client.cinder

$cat /etc/ceph/ceph.client.volumes.keyring

$scp /etc/ceph/ceph.client.volumes.keyring root@network:/etc/ceph/

 

######### network: Ceph Cinder configuration ##################

#chown cinder. /etc/ceph/ceph.client.volumes.keyring

#chmod 640 /etc/ceph/ceph.client.volumes.keyring

 

Add the following:

#vi /etc/ceph/ceph.conf

[client.cinder]

keyring = /etc/ceph/ceph.client.volumes.keyring

 

#uuidgen

csjkdlf-dsj0231y380-32510
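
This UUID is reused in two places: rbd_secret_uuid in cinder.conf below and the libvirt secret defined on the compute node later, so the values must match. A small sketch to keep it handy (the variable name is just illustrative):

#UUID=$(uuidgen) ; echo $UUID     // paste this value into cinder.conf and ceph.xml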

 

 

#vi /etc/cinder/cinder.conf

[DEFAULT]
enabled_backends = lvm,ceph
glance_api_version = 2

 

[ceph]

volume_driver = cinder.volume.drivers.rbd.RBDDriver

rbd_cluster_name = ceph

rbd_pool = volumes

rbd_user = cinder

rbd_ceph_conf = /etc/ceph/ceph.conf

rbd_flatten_volume_from_snapshot = false

rbd_secret_uuid = csjkdlf-dsj0231y380-32510

rbd_max_clone_depth = 5

rbd_store_chunk_size = 4

rados_connect_timeout = -1

 

#systemctl restart openstack-cinder-volume

#openstack volume service list

######### controller: Ceph volume configuration check ##################

#openstack volume create --size 1 ceph-vol1

Check the volume:

#openstack volume list

Check the volume ID:

#rbd -p volumes ls

volume-dsfsdjfl-2332325151

Detailed info:

#rbd -p volumes info volume-dsfsdjfl-2332325151

 

Check capacity:

#ceph df

 

 

Using Ceph as the Nova backend storage

######### ceph1: Ceph admin setup ##################

$ ceph osd pool create vms 128

$ceph osd pool ls

images

volumes

vms

 

$sudo ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms, allow rwx pool=volumes, allow rx pool=images' -o /etc/ceph/ceph.client.compute.keyring

$ceph auth get client.nova

$cat /etc/ceph/ceph.client.compute.keyring

$scp /etc/ceph/ceph.client.compute.keyring root@compute:/etc/ceph/

 

######### compute: Ceph Nova configuration ##################

#chown nova. /etc/ceph/ceph.client.compute.keyring

#chmod 640 /etc/ceph/ceph.client.compute.keyring

 

Add the following:

#vi /etc/ceph/ceph.conf

[client.nova]

keyring = /etc/ceph/ceph.client.compute.keyring

 

#uuidgen

401347031gt103470

 

#vi /etc/nova/nova.conf

[libvirt]

images_type = rbd

images_rbd_pool = vms

images_rbd_ceph_conf = /etc/ceph/ceph.conf

rbd_user = nova

rbd_secret_uuid = 401347031gt103470

 

#systemctl restart openstack-nova-compute

#openstack compute service list

 

######### ceph1: secret key setup ##################

On ceph1:

$ceph auth get-key client.cinder | ssh root@compute "tee client.cinder.key"

$ceph auth get-key client.nova | ssh root@compute "tee client.nova.key"

 

######### compute: secret key setup ##################

 

Using the Cinder UUID (from uuidgen above):

#vi ceph.xml

<secret ephemeral="no" private="no">

<uuid>csjkdlf-dsj0231y380-32510</uuid>

<usage type="ceph">

<name>client.cinder secret</name>

</usage>

</secret>

 

#virsh secret-define --file ceph.xml

#virsh secret-set-value --secret csjkdlf-dsj0231y380-32510 --base64 $(cat client.cinder.key)

#virsh secret-list
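
Optionally, read the secret back to confirm the key was stored; it should print the same base64 string as client.cinder.key:

#virsh secret-get-value csjkdlf-dsj0231y380-32510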

 

# rm client.cinder.key ceph.xml

 

Using the Nova UUID (from uuidgen above):

#vi ceph2.xml

<secret ephemeral="no" private="no">

<uuid>401347031gt103470</uuid>

<usage type="ceph">

<name>client.nova secret</name>

</usage>

</secret>

 

#virsh secret-define --file ceph2.xml

#virsh secret-set-value --secret 401347031gt103470 --base64 $(cat client.nova.key)

#virsh secret-list

 

# rm client.nova.key ceph2.xml

 

######### controller: Ceph check ##################

#openstack server list

#openstack network list

#openstack image list

#openstack server create --flavor m1.tiny --image cirros2 --security-group provider --nic net-id=1315~~2519 cirros2

 

#openstack server list

#openstack floating ip create ext_net

#openstack server add floating ip cirros2 x.x.200.202

#openstack floating ip show x.x.113.202

#openstack volume list

#openstack server add volume cirros2 ceph-vol1

#openstack volume list

 

[root@compute ~]# rbd -p vms ls -l
NAME                                      SIZE PARENT FMT PROT LOCK
f4ef944f-aa3d-4665-a50f-e65a16676bd1_disk 1GiB          2      excl
[root@compute ~]#

[root@compute ~]# rbd -p vms info f4ef944f-aa3d-4665-a50f-e65a16676bd1_disk
rbd image 'f4ef944f-aa3d-4665-a50f-e65a16676bd1_disk':
        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.131f6b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        flags:
        create_timestamp: Wed Feb 17 14:30:18 2021
[root@compute ~]#

[root@compute ~]# ceph df
GLOBAL:
    SIZE        AVAIL       RAW USED     %RAW USED
    30.0GiB     26.8GiB      3.17GiB         10.57
POOLS:
    NAME        ID     USED        %USED     MAX AVAIL     OBJECTS
    images      1      12.1MiB      0.09       12.6GiB           8
    volumes     2         133B         0       12.6GiB           5
    vms         3       196MiB      1.49       12.6GiB         138
[root@compute ~]#

[root@network neutron]# ls /var/lib/ceph/osd/ceph-2/

activate.monmap  bluefs     fsid     kv_backend  mkfs_done  ready                type

block            ceph_fsid  keyring  magic       osd_key    require_osd_release  whoami

[root@network neutron]#

 

 

 

 

##################### problem symptoms ######################

Error seen when running ceph-deploy install:

[network][WARNIN]                librados2 = 1:10.2.9-0.el7
[network][WARNIN]            Available: 1:librados2-10.2.10-0.el7.x86_64 (Ceph)
[network][WARNIN]                librados2 = 1:10.2.10-0.el7
[network][WARNIN]            Available: 2:librados2-10.2.11-0.el7.x86_64 (Ceph)
[network][WARNIN]                librados2 = 2:10.2.11-0.el7
[network][WARNIN] Error: Package: 2:ceph-common-10.2.11-0.el7.x86_64 (Ceph)
[network][WARNIN]            Requires: librbd1 = 2:10.2.11-0.el7
[network][WARNIN]            Installed: 2:librbd1-12.2.11-0.el7.x86_64 (@centos-ceph-luminous)
[network][WARNIN]                librbd1 = 2:12.2.11-0.el7
[network][WARNIN]            Available: 1:librbd1-10.1.0-0.el7.x86_64 (Ceph)
[network][WARNIN]                librbd1 = 1:10.1.0-0.el7
[network][WARNIN]            Available: 1:librbd1-10.1.1-0.el7.x86_64 (Ceph)
[network][WARNIN]                librbd1 = 1:10.1.1-0.el7
[network][WARNIN]            Available: 1:librbd1-10.1.2-0.el7.x86_64 (Ceph)

 

The error is caused by a version mismatch.

Solution

Go to the address below and find a release version that matches the packages already installed on the nodes.

https://download.ceph.com/
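
A rough sketch of how to check what is already installed and repoint the repo before retrying (the ceph.repo path is the standard one; adjust the release to whatever matches your packages):

#rpm -qa | egrep 'librados2|librbd1'

#vi /etc/yum.repos.d/ceph.repo     // point baseurl at the matching release, e.g. rpm-luminous

#yum clean all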

 

 

 

 

Reference notes

https://download.ceph.com/rpm-luminous/el7/x86_64/

Also edit yum.repos.d/ceph.repo to point to luminous.

[ceph@ceph1 cluster]$ ceph-deploy install --release luminous ceph1 ceph2 network

 

https://docs.ceph.com/en/octopus/rados/deployment/ceph-deploy-osd/#list-disks (page 602)

https://www.kangtaeho.com/37  OSD setup (page 602)

*https://bobocomi.tistory.com/2  OSD setup

https://download.ceph.com/rpm-luminous/el7/noarch/ceph-release-1-1.el7.noarch.rpm

 

yum -y install python-setuptools