OpenStack 발표자료 (From Kubernetes to OpenStack)
OpenStack 발표자료 (From Kubernetes to OpenStack)
## OpenStack Foundation 사용자 등록
## OpenStack CLI 를 사용할 때 현재 어떤 프로젝트와 사용자인지를 알려주는 Prompt 만들기
## 오픈스택 사용자를 위한 프롬프트 설정 (project:user) 로 표시됨
$ vi ~/.bashrc
openstack_user() {
# Print "(project:user)" for the currently loaded OpenStack credentials,
# for embedding in PS1.  Reads the OS_* variables directly instead of
# grepping `env`: the old pipeline depended on the (unspecified) order in
# which `env` lists variables, so the prompt could come out reversed.
# Prints nothing when no credentials are loaded.
if [ -n "${OS_PROJECT_NAME:-}" ] && [ -n "${OS_USERNAME:-}" ]; then
printf '(%s:%s)' "$OS_PROJECT_NAME" "$OS_USERNAME"
fi
}
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]$(openstack_user)\$ '
$ . demo/demo-openrc
(demo:demo)$ openstack server list
1. Network A -> Network A
PREROUTING(nat:dnat) -> INPUT(filter) -> OUTPUT(nat:dnat) -> OUTPUT(filter) ->POSTROUTING(nat:snat)
2. Network A -> Network B
PREROUTING(nat:dnat) -> FORWARD(filter) -> POSTROUTING(nat:snat)
3. Nova Instance 생성 후 iptables nat
PREROUTING ACCEPT
nova-network-PREROUTING
-> VM DNAT 변환
nova-compute-PREROUTING
nova-api-metadat-PREROUTING
INPUT ACCEPT
OUTPUT ACCEPT
nova-network-OUTPUT
-> VM DNAT 변환
nova-compute-OUTPUT
nova-api-metadat-OUTPUT
POSTROUTING ACCEPT
nova-network-POSTROUTING
nova-compute-POSTROUTING
nova-api-metadat-POSTROUTING
nova-postrouting-bottom
nova-network-snat
nova-network-float-snat
-> VM SNAT 변환
-> Host SNAT 변환
nova-compute-snat
nova-compute-float-snat
nova-api-metadat-snat
nova-api-metadat-float-snat
4. Nova Instance 생성 후 iptables filter
INPUT ACCEPT
nova-compute-INPUT
nova-network-INPUT
- dhcp 열기 (bridge 단위)
nova-api-metadat-INPUT
- nova metadata api 포트 8775 승인
FORWARD ACCEPT
nova-filter-top
nova-compute-local
- nova-compute-inst-732 (인스턴스별 생성)
nova-compute-provider
- Security rules 입력
nova-compute-sg-fallback
- 모든 패킷 drop
nova-network-local
nova-api-metadat-local
nova-compute-FORWARD
nova-network-FORWARD
- bridge 별 in/out 패킷 승인
nova-api-metadat-FORWARD
OUTPUT ACCEPT
nova-filter-top
nova-compute-local
- nova-compute-inst-732 (인스턴스별 생성)
nova-compute-provider
- Security rules 입력
nova-compute-sg-fallback
- 모든 패킷 drop
nova-network-local
nova-api-metadat-local
nova-compute-OUTPUT
nova-network-OUTPUT
nova-api-metadat-OUTPUT
[ Controller Install ]
1. controller node install (nova, mysql, rabbitmq, keystone, glance, cinder, horizon)
$ sudo apt-get install nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient
$ sudo apt-get install mysql-server-5.5
$ sudo apt-get install rabbitmq-server
$ sudo apt-get install keystone python-keystoneclient
$ sudo apt-get install glance python-glanceclient
$ sudo apt-get install cinder-api cinder-scheduler cinder-volume
$ apt-get install apache2 memcached libapache2-mod-wsgi openstack-dashboard
2. database configuration (nova, glance, cinder, keystone)
$ sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
$ sudo vi /etc/mysql/my.cnf
[mysqld]
# 추가
skip-host-cache
skip-name-resolve
$ sudo service mysql restart
$ mysql -u root -p
mysql> CREATE DATABASE nova;
mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
mysql> CREATE DATABASE glance;
mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
mysql> CREATE DATABASE cinder;
mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';
mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';
mysql> CREATE DATABASE keystone;
mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
sudo vi /etc/hosts.allow
ALL:192.168.0.0/255.255.0.0
mysqld:ALL
3. keystone setting
$ sudo rm /var/lib/keystone/keystone.db
$ sudo vi /etc/keystone/keystone.conf
connection = mysql://keystone:KEYSTONE_DBPASS@localhost/keystone
token_format = UUID
$ sudo keystone-manage db_sync
$ sudo service keystone restart
$ vi keystone_basic.sh
#!/bin/sh
#
# Keystone basic configuration
# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh
# Modified by Bilel Msekni / Institut Telecom
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
HOST_IP=192.168.75.131
ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin_pass}
SERVICE_PASSWORD=${SERVICE_PASSWORD:-service_pass}
export SERVICE_TOKEN="ADMIN"
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
# Run the given command and extract the value from its " id " row of the
# tabular CLI output (column 4 of a "| id | <value> |" line).
# The original used `echo ` + backticks around the pipeline and left $@
# unquoted; "$@" preserves each argument exactly and the awk output is
# already a clean single value, so the extra echo/substitution is dropped.
get_id () {
"$@" | awk '/ id / { print $4 }'
}
# Tenants
ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
# Users
ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)
# Roles
ADMIN_ROLE=$(get_id keystone role-create --name=admin)
KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
# Add Roles to Users in Tenants
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT
# The Member role is used by Horizon and Swift
MEMBER_ROLE=$(get_id keystone role-create --name=Member)
# Configure service users/roles
NOVA_USER=$(get_id keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE
GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE
QUANTUM_USER=$(get_id keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE
CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
$ vi keystone_endpoints_basic.sh
#!/bin/sh
#
# Keystone basic Endpoints
# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh
# Modified by Bilel Msekni / Institut Telecom
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
# Host address
HOST_IP=192.168.75.131
EXT_HOST_IP=192.168.75.131
VOLUME_HOST_IP=192.168.75.131
VOLUME_EXT_HOST_IP=192.168.75.131
NETWORK_HOST_IP=192.168.75.131
NETWORK_EXT_HOST_IP=192.168.75.131
# MySQL definitions
MYSQL_USER=keystone
MYSQL_DATABASE=keystone
MYSQL_HOST=$HOST_IP
MYSQL_PASSWORD=KEYSTONE_DBPASS
# Keystone definitions
KEYSTONE_REGION=RegionOne
export SERVICE_TOKEN=ADMIN
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"
# Parse command-line options, overriding the MySQL/Keystone defaults
# configured above.  The leading ':' in the optstring enables getopts'
# silent error reporting: without it getopts prints its own diagnostics,
# never sets opt to ':', and the "missing argument" branch below is
# unreachable dead code.
while getopts ":u:D:p:m:K:R:E:T:vh" opt; do
case $opt in
u)
MYSQL_USER=$OPTARG
;;
D)
MYSQL_DATABASE=$OPTARG
;;
p)
MYSQL_PASSWORD=$OPTARG
;;
m)
MYSQL_HOST=$OPTARG
;;
K)
MASTER=$OPTARG
;;
R)
KEYSTONE_REGION=$OPTARG
;;
E)
export SERVICE_ENDPOINT=$OPTARG
;;
T)
export SERVICE_TOKEN=$OPTARG
;;
v)
# Verbose mode: trace every command from here on.
set -x
;;
h)
cat <<EOF
Usage: $0 [-m mysql_hostname] [-u mysql_username] [-D mysql_database] [-p mysql_password]
[-K keystone_master ] [ -R keystone_region ] [ -E keystone_endpoint_url ]
[ -T keystone_token ]
Add -v for verbose mode, -h to display this message.
EOF
exit 0
;;
\?)
echo "Unknown option -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument" >&2
exit 1
;;
esac
done
# Verify that every required setting was supplied, either via flags or via
# the environment.  Each missing value is reported individually so the
# user sees all problems at once; the script only bails out afterwards.
[ -n "$KEYSTONE_REGION" ] || {
echo "Keystone region not set. Please set with -R option or set KEYSTONE_REGION variable." >&2
missing_args="true"
}
[ -n "$SERVICE_TOKEN" ] || {
echo "Keystone service token not set. Please set with -T option or set SERVICE_TOKEN variable." >&2
missing_args="true"
}
[ -n "$SERVICE_ENDPOINT" ] || {
echo "Keystone service endpoint not set. Please set with -E option or set SERVICE_ENDPOINT variable." >&2
missing_args="true"
}
[ -n "$MYSQL_PASSWORD" ] || {
echo "MySQL password not set. Please set with -p option or set MYSQL_PASSWORD variable." >&2
missing_args="true"
}
# Abort once if any of the checks above flagged a missing setting.
if [ -n "$missing_args" ]; then
exit 1
fi
keystone service-create --name nova --type compute --description 'OpenStack Compute Service'
keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'
keystone service-create --name glance --type image --description 'OpenStack Image Service'
keystone service-create --name keystone --type identity --description 'OpenStack Identity'
keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'
keystone service-create --name quantum --type network --description 'OpenStack Networking service'
# Register the public/admin/internal endpoint URLs for one service.
#   $1 - service type (compute, volume, image, identity, ec2, network)
#   $2 - keystone service id to attach the endpoint to
# NOTE: the '$(tenant_id)s' fragments are deliberately single-quoted so
# the shell passes them through literally; keystone substitutes the tenant
# id per request.  Do not "fix" the quoting.
# NOTE(review): there is no 'object-store' branch although the
# registration loop below iterates over it — such types fall through the
# case and are silently skipped; confirm this is intended.
create_endpoint () {
case $1 in
compute)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'
;;
volume)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$VOLUME_EXT_HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s'
;;
image)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9292/v2' --adminurl 'http://'"$HOST_IP"':9292/v2' --internalurl 'http://'"$HOST_IP"':9292/v2'
;;
identity)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'
;;
ec2)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'
;;
network)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$NETWORK_EXT_HOST_IP"':9696/' --adminurl 'http://'"$NETWORK_HOST_IP"':9696/' --internalurl 'http://'"$NETWORK_HOST_IP"':9696/'
;;
esac
}
# Look up each service's id in the keystone database and register its
# endpoints.  Uses $() instead of backticks, and quotes "$i"/"$id" so an
# empty lookup result cannot collapse the create_endpoint argument list.
# The service type is interpolated into the SQL directly, as before; the
# values come from the fixed list below, never from user input.
for i in compute volume image object-store identity ec2 network; do
id=$(mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='$i';") || exit 1
create_endpoint "$i" "$id"
done
$ vi admin.rc
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"
$ keystone tenant-create --name DEV --enabled true
$ keystone user-create --name dev_admin --tenant 5e795212d0804ad89234d9a1ac30c8ca --pass adminPass --enabled true
$ keystone user-create --name dev_user01 --tenant 5e795212d0804ad89234d9a1ac30c8ca --pass userPass --enabled true
# Admin role 과 dev_admin 을 연결
$ keystone user-role-add --user c207c127ba7c46d2bf18f6c39ac4ff78 --role 19f87df854914a1a903972f70d7d631a --tenant 5e795212d0804ad89234d9a1ac30c8ca
# Member role 과 dev_user01 을 연결
$ keystone user-role-add --user 908c6c5691374d6a95b64fea0e1615ce --role b13ffb470d1040d298e08cf9f5a6003a --tenant 5e795212d0804ad89234d9a1ac30c8ca
$ vi dev_admin.rc
export OS_USERNAME=dev_admin
export OS_PASSWORD=adminPass
export OS_TENANT_NAME=DEV
export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"
$ vi dev_user.rc
export OS_USERNAME=dev_user01
export OS_PASSWORD=userPass
export OS_TENANT_NAME=DEV
export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"
4. nova setting
$ sudo vi /etc/nova/nova.conf
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
debug=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
enabled_apis=ec2,osapi_compute,metadata
cinder_catalog_info=volume:cinder:adminURL
use_network_dns_servers=True
metadata_host=192.168.75.131
metadata_listen=0.0.0.0
metadata_listen_port=8775
metadata_manager=nova.api.manager.MetadataManager
metadata_port=8775
vncserver_proxyclient_address=192.168.230.131
vncserver_listen=0.0.0.0
vnc_enabled=true
xvpvncproxy_base_url=http://192.168.230.131:6081/console
novncproxy_base_url=http://192.168.230.131:6080/vnc_auto.html
remove_unused_base_images=False
image_create_to_qcow2 = True
api_rate_limit=True
#rpc setting
rpc_backend = rabbit
rabbit_host = 192.168.230.131
#network setting
network_api_class = nova.network.api.API
security_group_api = nova
# Network settings
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
network_manager=nova.network.manager.VlanManager
network_api_class=nova.network.api.API
dhcp_lease_time=600
vlan_start=1001
fixed_range=10.0.0.0/16
allow_same_net_traffic=False
multi_host=True
send_arp_for_ha=True
#share_dhcp_address=True
force_dhcp_release=True
flat_interface = eth1
public_interface=eth0
#auth setting
use_deprecated_auth = false
auth_strategy = keystone
#image setting
glance_api_services = 192.168.75.131:9292
image_service = nova.image.glance.GlanceImageService
glance_host = 192.168.230.131
[database]
connection = mysql://nova:NOVA_DBPASS@localhost/nova
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
$ sudo nova-manage db sync
$ sudo service nova-api restart
$ sudo service nova-cert restart
$ sudo service nova-consoleauth restart
$ sudo service nova-scheduler restart
$ sudo service nova-conductor restart
$ sudo service nova-novncproxy restart
5. glance setting
$ sudo vi /etc/glance/glance-api.conf
# 아래 코멘트 처리
qpid, swift_store, s3_store, sheepdog_store
rabbit_host = 192.168.230.131
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
[database]
connection = mysql://glance:GLANCE_DBPASS@192.168.230.131/glance
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
[paste_deploy]
flavor=keystone
$ sudo vi /etc/glance/glance-registry.conf
[database]
connection = mysql://glance:GLANCE_DBPASS@192.168.230.131/glance
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
[paste_deploy]
flavor=keystone
$ mysql -u root -p
mysql> use glance;
mysql> alter table migrate_version convert to character set utf8 collate utf8_unicode_ci;
mysql> flush privileges;
$ sudo glance-manage db_sync
$ sudo service glance-api restart
$ sudo service glance-registry restart
$ glance image-create --name ubuntu-14.04-cloudimg --disk-format qcow2 --container-format bare --owner e07a35f02d9e4281b8336d9112faed51 --file ubuntu-14.04-server-cloudimg-amd64-disk1.img --is-public True --progress
$ wget --no-check-certificate https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
$ glance image-create --name cirros-0.3.0 --disk-format qcow2 --container-format bare --owner e07a35f02d9e4281b8336d9112faed51 --file cirros-0.3.0-x86_64-disk.img --is-public True --progress
6. cinder setting
$ sudo cinder-manage db sync
$ sudo vi /etc/cinder/cinder.conf
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-sfpoc-%s
volume_group = cinder-volumes
verbose = True
debug=True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
default_availability_zone=LH_ZONE
storage_availability_zone=LH_ZONE
rpc_backend = cinder.openstack.common.rpc.impl_kombu
rabbit_host = 192.168.75.131
rabbit_port = 5672
glance_host=192.168.230.131
glance_port=9292
glance_api_servers=$glance_host:$glance_port
default_volume_type=LOW_END
# multi backend
enabled_backends=LEFTHAND,SOLIDFIRE
[LEFTHAND]
volume_name_template = volume-sfpoc-%s
volume_group = cinder-volumes
volume_driver=cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver
volume_backend_name=ISCSI_LH
san_ip=192.168.230.141
san_login=admin
san_password=admin_pass
san_clustername=CLUSTER-LEFTHAND
san_ssh_port=16022
[SOLIDFIRE]
volume_name_template = volume-sfpoc-%s
volume_group = cinder-volumes
verbose = True
volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver
volume_backend_name=ISCSI_SF
san_ip=192.168.230.151
san_login=admin
san_password=admin_pass
[database]
connection=mysql://cinder:cinderPass@192.168.75.131/cinder
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
$ sudo cinder-manage db sync
$ sudo service cinder-api restart
$ sudo service cinder-volume restart
$ sudo service cinder-scheduler restart
7. LeftHand Cluster 정보 보기
$ ssh -p 16022 user@192.168.230.140
CLIQ> getclusterinfo searchdepth=1 verbose=0
CLIQ> getserverinfo servername=ubuntu
CLIQ> getvolumeinfo volumename=volume-sfpoc-9d36737a-d332-4613-bce2-32465904a6fc
8. multi backend 세팅
$ cinder type-create LOW_END
$ cinder type-key LOW_END set volume_backend_name=ISCSI_LH
$ cinder type-create HIGH_END
$ cinder type-key HIGH_END set volume_backend_name=ISCSI_SF
# 1G High-end 볼륨 생성
$ cinder create --display-name high-test-01 --volume-type HIGH_END 1
9. backend qos 세팅
$ cinder type-create IOPS_3000
$ cinder type-key IOPS_3000 set volume_backend_name=ISCSI_SF
$ cinder qos-create QOS_IOPS_3000 consumer="back-end" minIOPS=3000 maxIOPS=3000 burstIOPS=3000
$ cinder qos-associate 1e9694b8-eca4-4ce7-b476-d1637535aaa2 9c241c66-30fd-442b-b7a1-79b4f1892919
$ cinder qos-get-association 1e9694b8-eca4-4ce7-b476-d1637535aaa2
[ Compute Node Install ]
1. compute node install (nova-compute, nova-network, nova-api-metadata)
$ sudo apt-get install nova-compute-kvm nova-network nova-api-metadata
[ 기본 설정 ]
1. network setting
$ nova network-create --fixed-range-v4 10.0.0.0/24 --vlan 1001 --gateway 10.0.0.1 --bridge br1001 --bridge-interface eth0 --multi-host T --dns1 8.8.8.8 --dns2 8.8.4.4 --project-id 5e795212d0804ad89234d9a1ac30c8ca dev_network
2. fixed ip reserve
$ nova fixed-ip-reserve 10.0.0.3
$ nova fixed-ip-reserve 10.0.0.4
$ nova fixed-ip-reserve 10.0.0.5
3. floating ip create
$ nova floating-ip-bulk-create 192.168.75.128/25 --interface eth0
4. secgroup 생성
$ nova secgroup-create connect 'icmp and ssh'
$ nova secgroup-add-rule connect icmp -1 -1 0.0.0.0/0
$ nova secgroup-add-rule connect tcp 22 22 0.0.0.0/0
5. keypair 생성
$ nova keypair-add stephen >> stephen.pem
6. pem 파일을 다른 호스트에 복사
$ scp -P 22 dev_admin.pem stack@192.168.230.132:~/creds/.
$ chmod 600 dev_admin.pem
7. nova.conf 를 다른 멀티호스트에 복사
$ for i in `seq 132 134`; do scp nova.conf stack@192.168.230.$i:~/creds/.; done
8. zone 설정
$ nova aggregate-create POC LH_ZONE
$ nova aggregate-add-host POC ubuntu
9. VM 생성
$ nova boot test01 --flavor 1 --image 4399bba0-17a4-43ef-8fdd-4edd9c2afe74 --key_name dev_admin --security_group connect
# boot on volume 및 attach volume 을 동시에 실행
$ nova boot [name] --flavor [flavorid]
--block-device id=[imageid],source=image,dest=volume,size=10,bootindex=0,shutdown=remove
--block-device id=[volumeid],source=volume,dest=volume,size=100,bootindex=1
10. VM 접속
$ ssh -i dev_admin.pem cirros@10.0.0.6
$ ssh -i dev_admin.pem ubuntu@10.0.0.6
[ VMware 관련 설정 ]
1. cinder.conf
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = %s
volume_group = cinder-volumes
verbose = True
debug=True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
default_availability_zone=VMWARE_ZONE
storage_availability_zone=VMWARE_ZONE
rpc_backend = cinder.openstack.common.rpc.impl_kombu
rabbit_host = 192.168.75.131
rabbit_port = 5672
glance_host=192.168.75.131
glance_port=9292
glance_api_servers=$glance_host:$glance_port
default_volume_type=VMWARE_TYPE
# multi backend
enabled_backends=VMWARE_DRIVER
[VMWARE_DRIVER]
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver
volume_backend_name=VMWARE
vmware_host_ip = 192.168.75.131
vmware_host_password = VMWARE_PASSWORD
vmware_host_username = root
[database]
connection=mysql://cinder:cinderPass@192.168.75.131/cinder
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
2. multi backend 세팅
$ cinder type-create VMWARE_TYPE
$ cinder type-key VMWARE_TYPE set volume_backend_name=VMWARE
# 1G High-end 볼륨 생성
$ cinder create --display-name test-01 --volume-type VMWARE_TYPE 1
3. nova.conf
$ sudo vi /etc/nova/nova.conf
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
# libvirt_use_virtio_for_bridges=True
# connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
debug=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
enabled_apis=ec2,osapi_compute,metadata
cinder_catalog_info=volume:cinder:adminURL
use_network_dns_servers=True
metadata_host=192.168.75.131
metadata_listen=0.0.0.0
metadata_listen_port=8775
metadata_manager=nova.api.manager.MetadataManager
metadata_port=8775
vncserver_proxyclient_address=192.168.230.131
vncserver_listen=0.0.0.0
vnc_enabled=true
xvpvncproxy_base_url=http://192.168.230.131:6081/console
novncproxy_base_url=http://192.168.230.131:6080/vnc_auto.html
compute_driver = vmwareapi.VMwareVCDriver
remove_unused_base_images=False
image_create_to_qcow2 = True
api_rate_limit=True
#rpc setting
rpc_backend = rabbit
rabbit_host = 192.168.230.131
#network setting
network_api_class = nova.network.api.API
security_group_api = nova
# Network settings
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
network_manager=nova.network.manager.VlanManager
network_api_class=nova.network.api.API
dhcp_lease_time=600
vlan_start=1001
fixed_range=10.0.0.0/16
allow_same_net_traffic=False
multi_host=True
send_arp_for_ha=True
#share_dhcp_address=True
force_dhcp_release=True
flat_interface = eth0
public_interface=eth0
#auth setting
use_deprecated_auth = false
auth_strategy = keystone
#image setting
glance_api_services = 192.168.75.131:9292
image_service = nova.image.glance.GlanceImageService
glance_host = 192.168.230.131
[vmware]
host_ip = 192.168.75.131
host_username = root
host_password = VMWARE_PASSWORD
cluster_name = cluster1
use_linked_clone = False
[database]
connection = mysql://nova:NOVA_DBPASS@localhost/nova
[keystone_authtoken]
auth_uri = http://192.168.75.131:5000
auth_host = 192.168.75.131
auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = admin
admin_password = admin_pass
4. nova-compute.conf
#[DEFAULT]
#compute_driver=libvirt.LibvirtDriver
#[libvirt]
#virt_type=kvm
5. zone 설정
$ nova aggregate-create VMWARE VMWARE_ZONE
$ nova aggregate-add-host VMWARE controller
6. image 등록
[ slitaz linux ]
$ wget http://partnerweb.vmware.com/programs/vmdkimage/trend-tinyvm1-flat.vmdk
$ glance image-create --name [vmware]trend-static-thin --file trend-tinyvm1-flat.vmdk --is-public=True --container-format=bare --disk-format=vmdk --property vmware_disktype="thin" --property vmware_adaptertype="ide"
[ slitaz linux 접속 및 dhcp 변경]
vmware / vmware 접속 후 root 권한 획득 root / root
# vi /etc/network.conf
DHCP="yes"
STATIC="no"
[ cirros ]
$ wget http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
$ qemu-img convert -f qcow2 -O vmdk cirros-0.3.3-x86_64-disk.img cirros-0.3.3-x86_64-disk.vmdk
$ glance image-create --name [vmware]cirros-0.3.3 --disk-format vmdk --container-format bare --file cirros-0.3.3-x86_64-disk.vmdk --property vmware-disktype="sparse" --property hw_vif_model="VirtualVmxnet" --property vmware_adaptertype="ide" --is-public True --progress
7. vm -> image 저장
1. ESXi 호스트 접속
2. vm위치로 이동
# cd /vmfs/volumes/datastore1/6c516279-c83f-43ec-a8d4-bec540604280
3. thin copy
# vmkfstools -i 6c516279-c83f-43ec-a8d4-bec540604280.vmdk -d thin .
./vmware_temp/trend-tinyvm1-dhcp-thin.vmdk
4. 다른 host 에서 scp 로 가져옴
$ scp root@192.168.75.182:/vmfs/volumes/542cf526-bef9f829-2f02-000c29fef6ec/vmware_temp/trend-tinyvm1-dhcp-thin-flat.vmdk .
8. nova boot
$ nova hypervisor-list
$ nova boot test01 --flavor 1 --image 6d9745dc-0fc9-4802-b21d-329004353406 --key_name stephen --availability-zone "VMWARE_ZONE::domain-c12(cluster1)"
1. compute host 간의 libvirt 버전이 동일해야 한다.
2. "libvirtd -d -l" 옵션으로 떠 있어야 한다.
# vi /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
# vi /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"
# vi /etc/default/libvirt-bin
libvirtd_opts=" -d -l"
sudo service libvirt-bin restart
3. nova.conf 의 "send_arp_for_ha" flag가 True로 셋팅되어야 함
# vi /etc/nova/nova.conf
send_arp_for_ha=True
#force_config_drive = always
block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
/dev/loop0: [2049]:1733858 (/opt/stack/data/swift/drives/images/swift.img)
/dev/loop1: [2049]:1733859 (/opt/stack/data/cinder-volumes-default-backing-file)
/dev/loop2: [2049]:1733860 (/opt/stack/data/cinder-volumes-lvmdriver-1-backing-file)
[ Mac vmware 에 설치한 Ubuntu 에 vt-x 활성화하기 위해 vmx 파일 수정]
vhv.enable = "TRUE"
[ ssh server 설치 ]
sudo apt-get install -y openssh-server
[ 구조 설명 ]
Cloud Controller
- hostname : controller
- eth0 : 192.168.75.131
- eth1 : 192.168.230.131
- 설치 모듈 : mysql, rabbitMQ, keystone, glance, nova-api,
cinder-api, cinder-scheduler, cinder-volume, open-iscsi, iscsitarget
quantum-server
Network
- hostname : network
- eth0 : 192.168.75.132
- eth1 : 192.168.230.132
- eth2 :
- eth3 : 192.168.75.133
- 설치 모듈 : openvswitch-switch openvswitch-datapath-dkms
quantum-plugin-openvswitch-agent dnsmasq quantum-dhcp-agent quantum-l3-agent
Compute
- hostname : compute
- eth0 : 192.168.75.134
- eth1 : 192.168.230.134
- eth2 :
- 설치 모듈 : openvswitch-switch openvswitch-datapath-dkms
quantum-plugin-openvswitch-agent, nova-compute-kvm, open-iscsi, iscsitarget
[ network 설정 ]
eth0 : public 망 (NAT) 192.168.75.0/24
eth1 : private host 망 Custom(VMnet2) 192.168.230.0/24
eth2 : vm private 망 10.0.0.0/24
eth3 : vm Quantum public 망(NAT) 192.168.75.0/26
[ hostname 변경 ]
vi /etc/hosts
192.168.230.131 controller
192.168.230.132 network
192.168.230.134 compute
vi /etc/hostname
controller
hostname -F /etc/hostname
새로운 터미널로 확인
[ eth0 eth1 설정 ]
vi /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback
# Host Public 망
auto eth0
iface eth0 inet static
address 192.168.75.131
netmask 255.255.255.0
gateway 192.168.75.2
dns-nameservers 8.8.8.8 8.8.4.4
# Host Private 망
auto eth1
iface eth1 inet static
address 192.168.230.131
netmask 255.255.255.0
service networking restart
[ vmware 에 설치한 Ubuntu 에서 가상화를 지원하는지 확인 ]
egrep '(vmx|svm)' --color=always /proc/cpuinfo
[ nova 설치 매뉴얼 ]
https://github.com/mseknibilel/OpenStack-Grizzly-Install-Guide/blob/master/OpenStack_Grizzly_Install_Guide.rst
[ nova 소스 위치 ]
nova link source = /usr/lib/python2.7/dist-packages/nova
nova original source = /usr/share/pyshared/nova
################## 모든 node 공통 설치하기 #####################
[ root 패스워드 세팅 ]
sudo su -
passwd
[ repository upgrade ]
apt-get install -y ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list
apt-get update
apt-get upgrade
apt-get dist-upgrade
[ screen vim 설치 ]
sudo apt-get install -y screen vim
[ .screenrc ]
vbell off
autodetach on
startup_message off
defscrollback 1000
attrcolor b ".I"
termcap xterm 'Co#256:AB=\E[48;5;%dm:AF=\E[38;5;%dm'
defbce "on"
#term screen-256color
## apps I want to auto-launch
#screen -t irssi irssi
#screen -t mutt mutt
## statusline, customized. (should be one-line)
hardstatus alwayslastline '%{gk}[ %{G}%H %{g}][%= %{wk}%?%-Lw%?%{=b kR}[%{W}%n%f %t%?(%u)%?%{=b kR}]%{= kw}%?%+Lw%?%?%= %{g}][%{Y}%l%{g}]%{=b C}[ %D %m/%d %C%a ]%{W}'
[ .vimrc ]
syntax on
set nocompatible
set number
set backspace=indent,eol,start
set tabstop=4
set shiftwidth=4
set autoindent
set visualbell
set laststatus=2
set statusline=%h%F%m%r%=[%l:%c(%p%%)]
set hlsearch
set background=dark
set expandtab
set tags=./tags,./TAGS,tags,TAGS,/usr/share/pyshared/nova/tags
[ remove dmidecode ]
apt-get purge dmidecode
apt-get autoremove
kill -9 [dmidecode process]
[ root 일 때 nova 계정이 없을 경우 유저 및 권한 설정 ]
adduser nova
visudo
nova ALL=(ALL:ALL) NOPASSWD:ALL
[ ntp 설치 ]
apt-get install -y ntp
vi /etc/ntp.conf
#server 0.ubuntu.pool.ntp.org
#server 1.ubuntu.pool.ntp.org
#server 2.ubuntu.pool.ntp.org
#server 3.ubuntu.pool.ntp.org
server time.bora.net
service ntp restart
# 한국 시간 세팅 및 최초 시간 맞추기
ntpdate -u time.bora.net
ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime
[ mysql client 설치 ]
apt-get install -y python-mysqldb mysql-client-5.5
[ KVM 설치 및 확인 ]
apt-get install -y cpu-checker
apt-get install -y kvm libvirt-bin pm-utils
kvm-ok
# kvm 이 load 되어 있는지 확인하기
lsmod | grep kvm
# 서버 reboot 시에 kvm 자동 load 추가
vi /etc/modules
kvm
kvm_intel
vi /etc/libvirt/qemu.conf
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet","/dev/net/tun"
]
# delete default virtual bridge
virsh net-destroy default
virsh net-undefine default
# enable live migration
vi /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
vi /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"
vi /etc/default/libvirt-bin
libvirtd_opts="-d -l"
service dbus restart
service libvirt-bin restart
[ bridge 설치 ]
apt-get install -y vlan bridge-utils
[ IP_Forwarding 설정 ]
sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
sysctl net.ipv4.ip_forward=1
################## Cloud Controller 설치하기 #####################
[ ntp 세팅 ]
vi /etc/ntp.conf
server time.bora.net
service ntp restart
[ network 세팅 ]
vi /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback
# Host Public 망
auto eth0
iface eth0 inet static
address 192.168.75.131
netmask 255.255.255.0
gateway 192.168.75.2
dns-nameservers 8.8.8.8 8.8.4.4
# Host Private 망
auto eth1
iface eth1 inet static
address 192.168.230.131
netmask 255.255.255.0
service networking restart
[ hostname 변경 ]
vi /etc/hosts
192.168.230.131 controller
192.168.230.132 network
192.168.230.134 compute
vi /etc/hostname
controller
hostname -F /etc/hostname
[ mysql db 설치 ]
apt-get install -y python-mysqldb mysql-server   # root password prompt : 임시 패스워드
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
service mysql restart
[ rabbitmq server install ]
apt-get install -y rabbitmq-server
# user 변환
sudo su - nova
[ Database 세팅 ]
mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY '임시 패스워드';
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY '임시 패스워드';
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY '임시 패스워드';
CREATE DATABASE quantum;
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'controller' IDENTIFIED BY '임시 패스워드';
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '임시 패스워드';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'controller' IDENTIFIED BY '임시 패스워드';
# grant 가 안될 때
use mysql;
UPDATE user SET
Select_priv = 'Y',
Insert_priv = 'Y',
Update_priv = 'Y',
Delete_priv = 'Y',
Create_priv = 'Y',
Drop_priv = 'Y',
Reload_priv = 'Y',
Shutdown_priv = 'Y',
Process_priv = 'Y',
File_priv = 'Y',
Grant_priv = 'Y',
References_priv = 'Y',
Index_priv = 'Y',
Alter_priv = 'Y',
Show_db_priv = 'Y',
Super_priv = 'Y',
Create_tmp_table_priv = 'Y',
Lock_tables_priv = 'Y',
Execute_priv = 'Y',
Repl_slave_priv = 'Y',
Repl_client_priv = 'Y',
Create_view_priv = 'Y',
Show_view_priv = 'Y',
Create_routine_priv = 'Y',
Alter_routine_priv = 'Y',
Create_user_priv = 'Y',
Event_priv = 'Y',
Trigger_priv = 'Y',
Create_tablespace_priv = 'Y'
WHERE user IN ('keystone', 'glance', 'nova', 'quantum', 'cinder');
[ keystone 설치 ]
sudo apt-get install -y keystone
sudo service keystone status
sudo rm /var/lib/keystone/keystone.db
sudo vi /etc/keystone/keystone.conf
connection = mysql://keystone:임시 패스워드@controller/keystone
token_format = UUID
sudo service keystone restart
sudo keystone-manage db_sync
[ keystone 세팅 ]
vi keystone_basic.sh
#!/bin/sh
#
# Keystone basic configuration
# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh
# Modified by Bilel Msekni / Institut Telecom
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
# Controller address on the management (private) network.
HOST_IP=192.168.230.131
# Passwords may be overridden from the environment; these are the defaults.
ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin_pass}
SERVICE_PASSWORD=${SERVICE_PASSWORD:-service_pass}
# Bootstrap admin token and endpoint used by the keystone CLI before any
# real users exist.
# NOTE(review): presumably "ADMIN" matches the admin_token configured in
# /etc/keystone/keystone.conf - verify before running.
export SERVICE_TOKEN="ADMIN"
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"
# Tenant that will hold the OpenStack service accounts (nova, glance, ...).
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
get_id () {
  # Run the supplied keystone command ("$@") and print the resource id
  # from its table-style output: the row whose second column is "id",
  # fourth whitespace-separated field.
  # "$@" is quoted so arguments containing spaces survive word-splitting;
  # the original unquoted $@ wrapped in echo/backticks re-split them and
  # added a useless subshell.
  "$@" | awk '/ id / { print $4 }'
}
# Tenants
ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
# Users
# Cloud administrator account; password comes from ADMIN_PASSWORD above.
ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)
# Roles
ADMIN_ROLE=$(get_id keystone role-create --name=admin)
KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
# Add Roles to Users in Tenants
# Grant the admin user the generic admin role plus both keystone-specific
# administrative roles, all scoped to the admin tenant.
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT
# The Member role is used by Horizon and Swift
MEMBER_ROLE=$(get_id keystone role-create --name=Member)
# Configure service users/roles
# One account per OpenStack service (nova, glance, quantum, cinder), each
# created inside the service tenant and given the admin role there so the
# services can validate tokens against keystone.
NOVA_USER=$(get_id keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE
GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE
QUANTUM_USER=$(get_id keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE
CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
vi keystone_endpoints_basic.sh
#!/bin/sh
#
# Keystone basic Endpoints
# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh
# Modified by Bilel Msekni / Institut Telecom
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
# Host address
# *_HOST_IP     = management (private) network, used for admin/internal URLs.
# *_EXT_HOST_IP = public network, used for public URLs.
HOST_IP=192.168.230.131
EXT_HOST_IP=192.168.75.131
VOLUME_HOST_IP=192.168.230.131
VOLUME_EXT_HOST_IP=192.168.75.131
NETWORK_HOST_IP=192.168.230.132
NETWORK_EXT_HOST_IP=192.168.75.133
# MySQL definitions
# Credentials for reading service ids straight out of the keystone DB.
MYSQL_USER=keystone
MYSQL_DATABASE=keystone
MYSQL_HOST=$HOST_IP
# NOTE(review): this placeholder ("임시 패스워드") contains a space; as
# written the shell would parse it as the command "패스워드" with
# MYSQL_PASSWORD=임시 in its environment. The real password substituted
# here must be a single word or the value must be quoted.
MYSQL_PASSWORD=임시 패스워드
# Keystone definitions
KEYSTONE_REGION=RegionOne
# Bootstrap admin token/endpoint for the keystone CLI (overridable via
# the -T / -E options parsed below).
export SERVICE_TOKEN=ADMIN
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"
# Parse command-line overrides for the defaults defined above.
# The leading ':' in the optstring enables getopts silent-error mode, so
# the ':' (missing argument) and '\?' (unknown option) branches below are
# actually reachable; without it getopts prints its own diagnostics,
# always yields '?', and the ':' branch is dead code.
while getopts ":u:D:p:m:K:R:E:T:vh" opt; do
  case $opt in
  u)
    MYSQL_USER=$OPTARG
    ;;
  D)
    MYSQL_DATABASE=$OPTARG
    ;;
  p)
    MYSQL_PASSWORD=$OPTARG
    ;;
  m)
    MYSQL_HOST=$OPTARG
    ;;
  K)
    # NOTE(review): MASTER is assigned but never read later in this
    # script - kept for compatibility with the original sample script.
    MASTER=$OPTARG
    ;;
  R)
    KEYSTONE_REGION=$OPTARG
    ;;
  E)
    export SERVICE_ENDPOINT=$OPTARG
    ;;
  T)
    export SERVICE_TOKEN=$OPTARG
    ;;
  v)
    # Verbose mode: trace every command executed.
    set -x
    ;;
  h)
    cat <<EOF
Usage: $0 [-m mysql_hostname] [-u mysql_username] [-D mysql_database] [-p mysql_password]
[-K keystone_master ] [ -R keystone_region ] [ -E keystone_endpoint_url ]
[ -T keystone_token ]
Add -v for verbose mode, -h to display this message.
EOF
    exit 0
    ;;
  \?)
    echo "Unknown option -$OPTARG" >&2
    exit 1
    ;;
  :)
    echo "Option -$OPTARG requires an argument" >&2
    exit 1
    ;;
  esac
done
# Verify every required setting before touching keystone. Each missing
# value prints its own diagnostic to stderr; the script exits only after
# all checks have run, so the user sees the complete list at once.
require_set() {
  # $1 = value to test, $2 = message printed when it is empty/unset.
  if [ -z "$1" ]; then
    echo "$2" >&2
    missing_args="true"
  fi
}
require_set "$KEYSTONE_REGION" "Keystone region not set. Please set with -R option or set KEYSTONE_REGION variable."
require_set "$SERVICE_TOKEN" "Keystone service token not set. Please set with -T option or set SERVICE_TOKEN variable."
require_set "$SERVICE_ENDPOINT" "Keystone service endpoint not set. Please set with -E option or set SERVICE_ENDPOINT variable."
require_set "$MYSQL_PASSWORD" "MySQL password not set. Please set with -p option or set MYSQL_PASSWORD variable."
if [ -n "$missing_args" ]; then
  exit 1
fi
# Register the service catalog entries; their generated ids are looked up
# from the database and wired to endpoints by the loop further below.
keystone service-create --name nova --type compute --description 'OpenStack Compute Service'
keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'
keystone service-create --name glance --type image --description 'OpenStack Image Service'
keystone service-create --name keystone --type identity --description 'OpenStack Identity'
keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'
keystone service-create --name quantum --type network --description 'OpenStack Networking service'
create_endpoint () {
# Create the public/admin/internal endpoints for service id $2, selected
# by service type $1. Public URLs use the external addresses; admin and
# internal URLs use the management-network addresses.
# The '$(tenant_id)s' strings are deliberately single-quoted: keystone
# stores them literally and substitutes the tenant id itself at request
# time - they must NOT be expanded by the shell.
case $1 in
compute)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'
;;
volume)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$VOLUME_EXT_HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s'
;;
image)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9292/v2' --adminurl 'http://'"$HOST_IP"':9292/v2' --internalurl 'http://'"$HOST_IP"':9292/v2'
;;
identity)
# Admin API listens on 35357, public/internal API on 5000.
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'
;;
ec2)
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'
;;
network)
# Quantum runs on the dedicated network node, hence the NETWORK_* hosts.
keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$NETWORK_EXT_HOST_IP"':9696/' --adminurl 'http://'"$NETWORK_HOST_IP"':9696/' --internalurl 'http://'"$NETWORK_HOST_IP"':9696/'
;;
esac
}
# Look up each service's id directly from the keystone database and
# register its endpoints.
# NOTE: no object-store (swift) service is created above, so its query
# returns an empty id; skip such services instead of invoking
# create_endpoint with a blank argument as the original did.
for i in compute volume image object-store identity ec2 network; do
  id=$(mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='$i';") || exit 1
  if [ -n "$id" ]; then
    create_endpoint "$i" "$id"
  fi
done
# keystone 접근 어드민
vi creds
unset http_proxy
unset https_proxy
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL="http://controller:5000/v2.0/"
source creds
keystone user-list
[ Glance 설치 ]
sudo apt-get install -y glance
sudo rm /var/lib/glance/glance.sqlite
sudo service glance-api status
sudo service glance-registry status
sudo vi /etc/glance/glance-api-paste.ini
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
delay_auth_decision = true
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = service_pass
sudo vi /etc/glance/glance-registry-paste.ini
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = service_pass
sudo vi /etc/glance/glance-api.conf
sql_connection = mysql://glance:임시 패스워드@192.168.230.141/glance
enable_v1_api = True
enable_v2_api = True
[paste_deploy]
flavor=keystone
sudo vi /etc/glance/glance-registry.conf
sql_connection = mysql://glance:임시 패스워드@192.168.230.141/glance
[paste_deploy]
flavor=keystone
sudo glance-manage db_sync
sudo service glance-registry restart
sudo service glance-api restart
[ Image 등록 ]
mkdir images
cd images
wget --no-check-certificate https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
glance image-create --name cirros --is-public true --container-format bare --disk-format qcow2 < cirros-0.3.0-x86_64-disk.img
glance image-list
[ Nova-api, scheduler 설치 ]
sudo apt-get install -y nova-api nova-scheduler nova-cert novnc nova-consoleauth nova-novncproxy nova-doc nova-conductor
mysql -uroot -p임시 패스워드 -e 'CREATE DATABASE nova;'
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '임시 패스워드';"
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '임시 패스워드';"
sudo vi /etc/nova/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = service_pass
signing_dir = /tmp/keystone-signing-nova
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
auth_version = v2.0
sudo vi /etc/nova/nova.conf
[DEFAULT]
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
verbose=True
api_paste_config=/etc/nova/api-paste.ini
compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
rabbit_host=192.168.230.141
nova_url=http://192.168.230.141:8774/v1.1/
sql_connection=mysql://nova:imsi00@192.168.230.141/nova
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
# Auth
use_deprecated_auth=false
auth_strategy=keystone
# Imaging service
glance_api_servers=192.168.230.141:9292
image_service=nova.image.glance.GlanceImageService
# Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://192.168.75.141:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=192.168.230.141
vncserver_listen=0.0.0.0
# Network settings
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.230.143:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=service_pass
quantum_admin_auth_url=http://192.168.230.141:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
#Metadata
service_quantum_metadata_proxy = True
quantum_metadata_proxy_shared_secret = helloOpenStack
metadata_host = 192.168.230.141
metadata_listen = 127.0.0.1
metadata_listen_port = 8775
# Compute #
compute_driver=libvirt.LibvirtDriver
# Cinder #
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900
sudo nova-manage db sync
# restart nova services
cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done
# check nova services
nova-manage service list
[ Horizon 설치 ]
sudo apt-get install -y openstack-dashboard memcached
# ubuntu 테마 삭제
sudo apt-get purge openstack-dashboard-ubuntu-theme
# apache and mecached reload
sudo service apache2 restart
sudo service memcached restart
# browser 접속 url
http://192.168.75.141/horizon/
################## Cinder 설치하기 #####################
[ ntp 세팅 ]
sudo vi /etc/ntp.conf
server 192.168.230.141
sudo service ntp restart
[ network 세팅 ]
[ hostname 변경 ]
[ Cinder 설치 ]
sudo apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms
sudo sed -i 's/false/true/g' /etc/default/iscsitarget
sudo vi /etc/iscsi/iscsid.conf
node.startup = automatic
sudo service iscsitarget start
sudo service open-iscsi start
mysql -uroot -p임시 패스워드 -e 'CREATE DATABASE cinder;'
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '임시 패스워드';"
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '임시 패스워드';"
sudo vi /etc/cinder/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 192.168.75.141
service_port = 5000
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = service_pass
sudo vi /etc/cinder/cinder.conf
[DEFAULT]
rootwrap_config=/etc/cinder/rootwrap.conf
sql_connection = mysql://cinder:임시 패스워드@192.168.230.141/cinder
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper=ietadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
rabbit_host = 192.168.230.141
sudo cinder-manage db sync
[ cinder volume 생성 ]
dd if=/dev/zero of=cinder-volumes bs=1 count=0 seek=10G
sudo losetup /dev/loop2 cinder-volumes
sudo fdisk /dev/loop2
1. sudo fdisk -l
2. sudo fdisk /dev/sdb
3. Press 'n' to create a new disk partition,
4. Press 'p' to create a primary disk partition,
5. Press '1' to denote it as 1st disk partition,
6. Either press ENTER twice to accept the default of 1st and last cylinder – to convert the remainder of hard disk to a single disk partition
-OR- press ENTER once to accept the default of the 1st, and then choose how big you want the partition to be by specifying +size{K,M,G}
e.g. +5G or +6700M.
7. Press 't', then select the new partition you made.
8. Enter '8e' to change your new partition's type to 8e, i.e. Linux LVM partition type.
9. Press 'p' to display the hard disk partition setup. Please take note that the first partition is denoted as /dev/sda1 in Linux.
10. Press 'w' to write the partition table and exit fdisk upon completion.
sudo pvcreate /dev/loop2
sudo vgcreate cinder-volumes /dev/loop2
# 서버 reboot 시에도 자동으로 설정
sudo vi /etc/init.d/cinder-setup-backing-file
losetup /dev/loop2 /home/nova/cinder-volumes
exit 0
sudo chmod 755 /etc/init.d/cinder-setup-backing-file
sudo ln -s /etc/init.d/cinder-setup-backing-file /etc/rc2.d/S10cinder-setup-backing-file
# restart cinder services
cd /etc/init.d/; for i in $( ls cinder-* ); do sudo service $i restart; done
# verify cinder services
cd /etc/init.d/; for i in $( ls cinder-* ); do sudo service $i status; done
################## Quantum Server 설치하기 #####################
[ ntp 세팅 ]
sudo vi /etc/ntp.conf
server 192.168.230.141
sudo service ntp restart
[ network 세팅 ]
[ hostname 변경 ]
[ quantum server 설치 ]
sudo apt-get install -y quantum-server
sudo rm -rf /var/lib/quantum/ovs.sqlite
mysql -uroot -p임시 패스워드 -e 'CREATE DATABASE quantum;'
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '임시 패스워드';"
mysql -uroot -p임시 패스워드 -e "GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '임시 패스워드';"
sudo vi /etc/quantum/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
sudo vi /etc/quantum/quantum.conf
rabbit_host = 192.168.230.141
sudo service quantum-server restart
sudo service quantum-server status
################## Quantum Network 설치하기 #####################
[ ntp 세팅 ]
sudo vi /etc/ntp.conf
server 192.168.230.141
sudo service ntp restart
[ eth2 vm 용 public 망 추가 - Quantum public network 로 사용 ]
sudo vi /etc/network/interfaces
auto lo
iface lo inet loopback
# host public 망
auto eth0
iface eth0 inet static
address 192.168.75.144
netmask 255.255.255.0
gateway 192.168.75.2
dns-nameservers 8.8.8.8 8.8.4.4
# vm private 망, host private 망
auto eth1
iface eth1 inet static
address 192.168.230.144
netmask 255.255.255.0
# vm public 망
auto eth2
iface eth2 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down
sudo service networking restart
[ hostname 변경 ]
[ openVSwitch 설치 ]
sudo apt-get install -y openvswitch-switch openvswitch-datapath-dkms
# bridge 생성
sudo ovs-vsctl add-br br-int
sudo ovs-vsctl add-br br-ex
[ Quantum openVSwitch agent, dnsmasq, dhcp agent, L3 agent, metadata agent 설치 ]
sudo apt-get install -y quantum-plugin-openvswitch-agent dnsmasq quantum-dhcp-agent quantum-l3-agent quantum-metadata-agent
sudo vi /etc/quantum/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
sudo vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:임시 패스워드@192.168.230.141/quantum
[OVS]
tenant_network_type = gre
enable_tunneling = True
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 192.168.230.144
sudo vi /etc/quantum/l3_agent.ini
# 맨 아랫줄에 추가
auth_url = http://192.168.230.141:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
sudo vi /etc/quantum/metadata_agent.ini
auth_url = http://192.168.230.141:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
nova_metadata_ip = 192.168.230.141
nova_metadata_port = 8775
metadata_proxy_shared_secret = helloOpenStack
sudo vi /etc/quantum/quantum.conf
rabbit_host = 192.168.230.141
# restart Quantum services
cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done
# br-ex 와 public 망과 연결
sudo ovs-vsctl add-port br-ex eth2
################## Compute 설치하기 #####################
[ ntp 세팅 ]
sudo vi /etc/ntp.conf
server 192.168.230.141
sudo service ntp restart
[ network 세팅 ]
[ hostname 변경 ]
[ openVSwitch 설치 ]
sudo apt-get install -y openvswitch-switch openvswitch-datapath-dkms
# bridge 생성
sudo ovs-vsctl add-br br-int
[ Quantum openVSwitch agent 설치 ]
sudo apt-get install -y quantum-plugin-openvswitch-agent
sudo vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:imsi00@192.168.230.141/quantum
[OVS]
tenant_network_type = gre
enable_tunneling = True
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 192.168.230.145
sudo vi /etc/quantum/quantum.conf
rabbit_host = 192.168.230.141
[keystone_authtoken] ----> ? 필요한 세팅인가?
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
signing_dir = /var/lib/quantum/keystone-signing
# quantum openVSwitch agent restart
sudo service quantum-plugin-openvswitch-agent restart
[ Nova 설치 ]
sudo apt-get install -y nova-compute-kvm open-iscsi
sudo vi /etc/nova/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = 192.168.230.141
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = service_pass
signing_dir = /tmp/keystone-signing-nova
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
auth_version = v2.0
sudo vi /etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
libvirt_ovs_bridge=br-int
libvirt_vif_type=ethernet
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
libvirt_use_virtio_for_bridges=True
sudo vi /etc/nova/nova.conf
[DEFAULT]
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
verbose=True
api_paste_config=/etc/nova/api-paste.ini
compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
rabbit_host=192.168.230.141
nova_url=http://192.168.230.141:8774/v1.1/
sql_connection=mysql://nova:imsi00@192.168.230.141/nova
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
# Auth
use_deprecated_auth=false
auth_strategy=keystone
# Imaging service
glance_api_servers=192.168.230.141:9292
image_service=nova.image.glance.GlanceImageService
# Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://192.168.75.141:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=192.168.230.141
vncserver_listen=0.0.0.0
# Network settings
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.230.141:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=service_pass
quantum_admin_auth_url=http://192.168.230.141:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
#Metadata
service_quantum_metadata_proxy = True
quantum_metadata_proxy_shared_secret = helloOpenStack
metadata_host = 192.168.230.141
metadata_listen = 127.0.0.1
metadata_listen_port = 8775
# Compute #
compute_driver=libvirt.LibvirtDriver
# Cinder #
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900
# restart nova service
cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done
# nova service status
nova-manage service list
[ Nova 명령어 실행 ]
# admin 권한으로 실행
source creds
# tenant, user 생성
keystone tenant-create --name myproject
keystone role-list
keystone user-create --name=myuser --pass=임시 패스워드 --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --email=myuser@domain.com
keystone user-role-add --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --user-id 29736a14d7d4471fa50ca04da38d89b1 --role-id 022cd675521b45ffb94693e7cab07db7
# Network 생성
quantum net-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 net_myproject
quantum net-list
# Network 에 internal private subnet 생성
quantum subnet-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --name net_myproject_internal net_myproject 10.0.0.0/24
# Router 생성
quantum router-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 net_myproject_router
# L3 agent 를 Router 와 연결
quantum l3-agent-router-add 829f424b-0879-4fee-a373-84c0f0bcbb9b net_myproject_router
# Router 를 Subnet 에 연결
quantum router-interface-add f3e2c02e-2146-4388-b415-c95d45f4f3a3 99189c7b-50cd-4353-9358-2dd74efbb762
# restart quantum services
cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done
# 환경설정파일 생성
vi myproject
export OS_TENANT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=임시 패스워드
export OS_AUTH_URL="http://192.168.230.141:5000/v2.0/"
# project 권한으로 진행
source myproject
nova image-list
nova secgroup-list
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
ssh-keygen
nova keypair-add --pub_key ~/.ssh/id_rsa.pub mykey
nova keypair-list
nova flavor-list
nova boot test01 --flavor 1 --image 5c4c2339-55bd-4e9b-86cb-23694e3b9b17 --key_name mykey --security_group default
nova floating-ip-list
nova floating-ip-create
nova add-floating-ip 80eb7545-258e-4f26-a842-c1993cb03ae5 192.168.75.225
nova remove-floating-ip 80eb7545-258e-4f26-a842-c1993cb03ae5 192.168.75.225
nova floating-ip-delete 192.168.75.225
nova volume-list
nova volume-create --display_name ebs01 1
nova volume-attach 80eb7545-258e-4f26-a842-c1993cb03ae5 c209e2f1-5ff7-496c-8928-d57487d86c6f /dev/vdb
nova volume-detach 80eb7545-258e-4f26-a842-c1993cb03ae5 a078f20a-62c6-432c-8fa2-7cfd9950a64f
nova volume-delete a078f20a-62c6-432c-8fa2-7cfd9950a64f
# 접속 후 ext4 로 format 및 mount
mke2fs -t ext4 /dev/vdb
mount /dev/vdb /test
[ vnc console 접속 ]
nova get-vnc-console 80eb7545-258e-4f26-a842-c1993cb03ae5 novnc