
[ Controller Install ]


1. controller node install (nova, mysql, rabbitmq, keystone, glance, cinder, horizon)

$ sudo apt-get install nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient


$ sudo apt-get install mysql-server-5.5


$ sudo apt-get install rabbitmq-server


$ sudo apt-get install keystone python-keystoneclient


$ sudo apt-get install glance python-glanceclient


$ sudo apt-get install cinder-api cinder-scheduler cinder-volume


$ sudo apt-get install apache2 memcached libapache2-mod-wsgi openstack-dashboard


2. database configuration (nova, glance, cinder, keystone)

$ sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf

$ sudo vi /etc/mysql/my.cnf

[mysqld] 

# add the following

skip-host-cache 
skip-name-resolve 


$ sudo service mysql restart


$ mysql -u root -p

mysql> CREATE DATABASE nova;

mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';

mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';


mysql> CREATE DATABASE glance;

mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';

mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';


mysql> CREATE DATABASE cinder;

mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';

mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';


mysql> CREATE DATABASE keystone;

mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \

           IDENTIFIED BY 'KEYSTONE_DBPASS';

mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \

           IDENTIFIED BY 'KEYSTONE_DBPASS';


sudo vi /etc/hosts.allow

ALL:192.168.0.0/255.255.0.0

mysqld:ALL
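The *_DBPASS values above are placeholders; replace them with real passwords. A quick sanity check that the bind-address change, the grants and the hosts.allow entry all line up (a sketch using the controller IP from this guide):

$ mysql -u keystone -pKEYSTONE_DBPASS -h 192.168.75.131 keystone -e "SHOW TABLES;"
$ mysql -u nova -pNOVA_DBPASS -h 192.168.75.131 nova -e "SHOW TABLES;"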


3. keystone setting

$ sudo rm /var/lib/keystone/keystone.db

$ sudo vi /etc/keystone/keystone.conf

connection = mysql://keystone:KEYSTONE_DBPASS@localhost/keystone

token_format = UUID


$ sudo keystone-manage db_sync

$ sudo service keystone restart
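The two helper scripts below authenticate with the static service token ADMIN. That only works if the same value is set as admin_token in /etc/keystone/keystone.conf, so check it before running them (and restart keystone again if you change it); adjust the value to whatever token you actually use:

$ sudo vi /etc/keystone/keystone.conf

admin_token = ADMIN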


$ vi keystone_basic.sh

#!/bin/sh

#

# Keystone basic configuration 


# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh


# Modified by Bilel Msekni / Institut Telecom

#

# Support: openstack@lists.launchpad.net

# License: Apache Software License (ASL) 2.0

#

HOST_IP=192.168.75.131

ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin_pass}

SERVICE_PASSWORD=${SERVICE_PASSWORD:-service_pass}

export SERVICE_TOKEN="ADMIN"

export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"

SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}


get_id () {

    echo `$@ | awk '/ id / { print $4 }'`

}


# Tenants

ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)

SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)



# Users

ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)



# Roles

ADMIN_ROLE=$(get_id keystone role-create --name=admin)

KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)

KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)


# Add Roles to Users in Tenants

keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT

keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT

keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT


# The Member role is used by Horizon and Swift

MEMBER_ROLE=$(get_id keystone role-create --name=Member)


# Configure service users/roles

NOVA_USER=$(get_id keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE


GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE


QUANTUM_USER=$(get_id keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE


CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
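HOST_IP, the passwords and the e-mail addresses in this script are environment-specific; adjust them before running. A typical run and sanity check might look like this (a sketch that reuses the same SERVICE_TOKEN / SERVICE_ENDPOINT variables the script itself exports):

$ chmod +x keystone_basic.sh
$ ./keystone_basic.sh
$ export SERVICE_TOKEN=ADMIN
$ export SERVICE_ENDPOINT="http://192.168.75.131:35357/v2.0"
$ keystone tenant-list
$ keystone user-list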


$ vi keystone_endpoints_basic.sh

#!/bin/sh

#

# Keystone basic Endpoints


# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh


# Modified by Bilel Msekni / Institut Telecom

#

# Support: openstack@lists.launchpad.net

# License: Apache Software License (ASL) 2.0

#


# Host address

HOST_IP=192.168.75.131

EXT_HOST_IP=192.168.75.131

VOLUME_HOST_IP=192.168.75.131

VOLUME_EXT_HOST_IP=192.168.75.131

NETWORK_HOST_IP=192.168.75.131

NETWORK_EXT_HOST_IP=192.168.75.131


# MySQL definitions

MYSQL_USER=keystone

MYSQL_DATABASE=keystone

MYSQL_HOST=$HOST_IP

MYSQL_PASSWORD=KEYSTONE_DBPASS


# Keystone definitions

KEYSTONE_REGION=RegionOne

export SERVICE_TOKEN=ADMIN

export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"


while getopts "u:D:p:m:K:R:E:T:vh" opt; do

  case $opt in

    u)

      MYSQL_USER=$OPTARG

      ;;

    D)

      MYSQL_DATABASE=$OPTARG

      ;;

    p)

      MYSQL_PASSWORD=$OPTARG

      ;;

    m)

      MYSQL_HOST=$OPTARG

      ;;

    K)

      MASTER=$OPTARG

      ;;

    R)

      KEYSTONE_REGION=$OPTARG

      ;;

    E)

      export SERVICE_ENDPOINT=$OPTARG

      ;;

    T)

      export SERVICE_TOKEN=$OPTARG

      ;;

    v)

      set -x

      ;;

    h)

      cat <<EOF

Usage: $0 [-m mysql_hostname] [-u mysql_username] [-D mysql_database] [-p mysql_password]

       [-K keystone_master ] [ -R keystone_region ] [ -E keystone_endpoint_url ] 

       [ -T keystone_token ]

          

Add -v for verbose mode, -h to display this message.

EOF

      exit 0

      ;;

    \?)

      echo "Unknown option -$OPTARG" >&2

      exit 1

      ;;

    :)

      echo "Option -$OPTARG requires an argument" >&2

      exit 1

      ;;

  esac

done  


if [ -z "$KEYSTONE_REGION" ]; then

  echo "Keystone region not set. Please set with -R option or set KEYSTONE_REGION variable." >&2

  missing_args="true"

fi


if [ -z "$SERVICE_TOKEN" ]; then

  echo "Keystone service token not set. Please set with -T option or set SERVICE_TOKEN variable." >&2

  missing_args="true"

fi


if [ -z "$SERVICE_ENDPOINT" ]; then

  echo "Keystone service endpoint not set. Please set with -E option or set SERVICE_ENDPOINT variable." >&2

  missing_args="true"

fi


if [ -z "$MYSQL_PASSWORD" ]; then

  echo "MySQL password not set. Please set with -p option or set MYSQL_PASSWORD variable." >&2

  missing_args="true"

fi


if [ -n "$missing_args" ]; then

  exit 1

fi

 

keystone service-create --name nova --type compute --description 'OpenStack Compute Service'

keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'

keystone service-create --name glance --type image --description 'OpenStack Image Service'

keystone service-create --name keystone --type identity --description 'OpenStack Identity'

keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'

keystone service-create --name quantum --type network --description 'OpenStack Networking service'


create_endpoint () {

  case $1 in

    compute)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'

    ;;

    volume)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$VOLUME_EXT_HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s'

    ;;

    image)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9292/v2' --adminurl 'http://'"$HOST_IP"':9292/v2' --internalurl 'http://'"$HOST_IP"':9292/v2'

    ;;

    identity)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'

    ;;

    ec2)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'

    ;;

    network)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$NETWORK_EXT_HOST_IP"':9696/' --adminurl 'http://'"$NETWORK_HOST_IP"':9696/' --internalurl 'http://'"$NETWORK_HOST_IP"':9696/'

    ;;

  esac

}


for i in compute volume image object-store identity ec2 network; do

  id=`mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='"$i"';"` || exit 1

  create_endpoint $i $id

done
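The endpoint script is run the same way; its defaults already match this environment, and individual values can be overridden through the getopts flags defined above. A sketch:

$ chmod +x keystone_endpoints_basic.sh
$ ./keystone_endpoints_basic.sh -T ADMIN -R RegionOne -p KEYSTONE_DBPASS
$ keystone service-list
$ keystone endpoint-list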


$ vi admin.rc

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"
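With the endpoints in place, switch from the static service token to normal password authentication; a quick check (a sketch):

$ unset SERVICE_TOKEN SERVICE_ENDPOINT
$ source admin.rc
$ keystone tenant-list
$ keystone user-list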


$ keystone tenant-create --name DEV --enabled true

$ keystone user-create --name dev_admin --tenant 5e795212d0804ad89234d9a1ac30c8ca --pass adminPass --enabled true

$ keystone user-create --name dev_user01 --tenant 5e795212d0804ad89234d9a1ac30c8ca --pass userPass --enabled true


# Link the admin role to dev_admin

$ keystone user-role-add --user c207c127ba7c46d2bf18f6c39ac4ff78 --role 19f87df854914a1a903972f70d7d631a --tenant 5e795212d0804ad89234d9a1ac30c8ca


# Link the Member role to dev_user01

$ keystone user-role-add --user 908c6c5691374d6a95b64fea0e1615ce --role b13ffb470d1040d298e08cf9f5a6003a --tenant 5e795212d0804ad89234d9a1ac30c8ca
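The tenant, user and role IDs above (5e795212..., c207c127..., and so on) are specific to this deployment. They can be looked up instead of hard-coded, for example (a sketch):

$ keystone tenant-list | grep DEV
$ keystone user-list | grep dev_
$ keystone role-list | grep -E 'admin|Member'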



$ vi dev_admin.rc

export OS_USERNAME=dev_admin

export OS_PASSWORD=adminPass

export OS_TENANT_NAME=DEV

export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"


$ vi dev_user.rc

export OS_USERNAME=dev_user01

export OS_PASSWORD=userPass

export OS_TENANT_NAME=DEV

export OS_AUTH_URL="http://192.168.75.131:5000/v2.0/"


4. nova setting

$ sudo vi /etc/nova/nova.conf


dhcpbridge_flagfile=/etc/nova/nova.conf 

dhcpbridge=/usr/bin/nova-dhcpbridge 

logdir=/var/log/nova 

state_path=/var/lib/nova 

lock_path=/var/lock/nova 

force_dhcp_release=True 

libvirt_use_virtio_for_bridges=True 

connection_type=libvirt 

root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf 

verbose=True 

debug=True 

ec2_private_dns_show_ip=True 

api_paste_config=/etc/nova/api-paste.ini 

enabled_apis=ec2,osapi_compute,metadata 

cinder_catalog_info=volume:cinder:adminURL

use_network_dns_servers=True

metadata_host=192.168.75.131

metadata_listen=0.0.0.0

metadata_listen_port=8775

metadata_manager=nova.api.manager.MetadataManager

metadata_port=8775

vncserver_proxyclient_address=192.168.230.131

vncserver_listen=0.0.0.0

vnc_enabled=true

xvpvncproxy_base_url=http://192.168.230.131:6081/console

novncproxy_base_url=http://192.168.230.131:6080/vnc_auto.html

remove_unused_base_images=False

image_create_to_qcow2 = True

api_rate_limit=True


#rpc setting 

rpc_backend = rabbit 

rabbit_host = 192.168.230.131


#network setting 

network_api_class = nova.network.api.API 

security_group_api = nova


# Network settings 

dhcpbridge_flagfile=/etc/nova/nova.conf 

dhcpbridge=/usr/bin/nova-dhcpbridge 

network_manager=nova.network.manager.VlanManager 

network_api_class=nova.network.api.API 

dhcp_lease_time=600 

vlan_start=1001 

fixed_range=10.0.0.0/16 

allow_same_net_traffic=False 

multi_host=True 

send_arp_for_ha=True 

#share_dhcp_address=True 

force_dhcp_release=True 

flat_interface = eth1

public_interface=eth0


#auth setting 

use_deprecated_auth = false

auth_strategy = keystone


#image setting 

glance_api_servers = 192.168.75.131:9292

image_service = nova.image.glance.GlanceImageService 

glance_host = 192.168.230.131


[database] 

connection = mysql://nova:NOVA_DBPASS@localhost/nova

 

[keystone_authtoken] 

auth_uri = http://192.168.75.131:5000 

auth_host = 192.168.75.131 

auth_port = 35357

auth_protocol = http 

admin_tenant_name = admin 

admin_user = admin 

admin_password = admin_pass


$ sudo nova-manage db sync

$ sudo service nova-api restart

$ sudo service nova-cert restart

$ sudo service nova-consoleauth restart

$ sudo service nova-scheduler restart

$ sudo service nova-conductor restart

$ sudo service nova-novncproxy restart
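After the restarts, every controller-side nova service should report as up. Either of these can be used to check (a sketch; nova service-list needs the admin.rc credentials loaded):

$ sudo nova-manage service list
$ nova service-list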


5. glance setting

$ sudo vi /etc/glance/glance-api.conf


# Comment out the following unused entries:

qpid, swift_store, s3_store, sheepdog_store


rabbit_host = 192.168.230.131

rabbit_port = 5672 

rabbit_use_ssl = false 

rabbit_virtual_host = / 

rabbit_notification_exchange = glance

rabbit_notification_topic = notifications 

rabbit_durable_queues = False

 

[database]

connection = mysql://glance:GLANCE_DBPASS@192.168.230.131/glance

 

[keystone_authtoken] 

auth_uri = http://192.168.75.131:5000 

auth_host = 192.168.75.131 

auth_port = 35357 

auth_protocol = http 

admin_tenant_name = admin 

admin_user = admin

admin_password = admin_pass


[paste_deploy]

flavor=keystone


$ sudo vi /etc/glance/glance-registry.conf


[database]

connection = mysql://glance:GLANCE_DBPASS@192.168.230.131/glance

 

[keystone_authtoken] 

auth_uri = http://192.168.75.131:5000 

auth_host = 192.168.75.131

auth_port = 35357

auth_protocol = http 

admin_tenant_name = admin 

admin_user = admin

admin_password = admin_pass


[paste_deploy]

flavor=keystone


$ mysql -u root -p

mysql> use glance;

mysql> alter table migrate_version convert to character set utf8 collate utf8_unicode_ci;

mysql> flush privileges;


$ sudo glance-manage db_sync

$ sudo service glance-api restart

$ sudo service glance-registry restart


$ glance image-create --name ubuntu-14.04-cloudimg --disk-format qcow2 --container-format bare --owner e07a35f02d9e4281b8336d9112faed51 --file ubuntu-14.04-server-cloudimg-amd64-disk1.img --is-public True --progress


$ wget --no-check-certificate https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img

$ glance image-create --name cirros-0.3.0 --disk-format qcow2 --container-format bare --owner e07a35f02d9e4281b8336d9112faed51 --file cirros-0.3.0-x86_64-disk.img --is-public True --progress
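The --owner value (e07a35f0...) is the tenant ID that will own the image and can be taken from keystone tenant-list. The uploads can then be verified with (a sketch):

$ glance image-list
$ nova image-list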


6. cinder setting

$ sudo cinder-manage db sync

$ sudo vi /etc/cinder/cinder.conf


[DEFAULT]

rootwrap_config = /etc/cinder/rootwrap.conf

api_paste_config = /etc/cinder/api-paste.ini

iscsi_helper = tgtadm

volume_name_template = volume-sfpoc-%s

volume_group = cinder-volumes

verbose = True

debug=True

auth_strategy = keystone

state_path = /var/lib/cinder

lock_path = /var/lock/cinder

volumes_dir = /var/lib/cinder/volumes


default_availability_zone=LH_ZONE

storage_availability_zone=LH_ZONE


rpc_backend = cinder.openstack.common.rpc.impl_kombu

rabbit_host = 192.168.75.131

rabbit_port = 5672


glance_host=192.168.230.131

glance_port=9292

glance_api_servers=$glance_host:$glance_port


default_volume_type=LOW_END


# multi backend

enabled_backends=LEFTHAND,SOLIDFIRE

[LEFTHAND]

volume_name_template = volume-sfpoc-%s

volume_group = cinder-volumes

volume_driver=cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver

volume_backend_name=ISCSI_LH

san_ip=192.168.230.141

san_login=admin

san_password=admin_pass

san_clustername=CLUSTER-LEFTHAND

san_ssh_port=16022


[SOLIDFIRE]

volume_name_template = volume-sfpoc-%s

volume_group = cinder-volumes

verbose = True

volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver

volume_backend_name=ISCSI_SF

san_ip=192.168.230.151

san_login=admin

san_password=admin_pass



[database]

connection=mysql://cinder:cinderPass@192.168.75.131/cinder


[keystone_authtoken]

auth_uri = http://192.168.75.131:5000

auth_host = 192.168.75.131

auth_port = 35357

auth_protocol = http

admin_tenant_name = admin

admin_user = admin

admin_password = admin_pass


$ sudo cinder-manage db sync

$ sudo service cinder-api restart

$ sudo service cinder-volume restart

$ sudo service cinder-scheduler restart
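A quick check that the scheduler and both volume backends registered correctly (a sketch; needs the admin credentials loaded):

$ cinder service-list
$ cinder list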


7. View LeftHand cluster information

$ ssh -p 16022 user@192.168.230.140

CLIQ> getclusterinfo searchdepth=1 verbose=0

CLIQ> getserverinfo servername=ubuntu

CLIQ> getvolumeinfo volumename=volume-sfpoc-9d36737a-d332-4613-bce2-32465904a6fc


8. multi-backend setup

$ cinder type-create LOW_END

$ cinder type-key LOW_END set volume_backend_name=ISCSI_LH

$ cinder type-create HIGH_END

$ cinder type-key HIGH_END set volume_backend_name=ISCSI_SF


# Create a 1 GB high-end volume

$ cinder create --display-name high-test-01 --volume-type HIGH_END 1


9. backend QoS setup

$ cinder type-create IOPS_3000

$ cinder type-key IOPS_3000 set volume_backend_name=ISCSI_SF

$ cinder qos-create QOS_IOPS_3000 consumer="back-end" minIOPS=3000 maxIOPS=3000 burstIOPS=3000

$ cinder qos-associate 1e9694b8-eca4-4ce7-b476-d1637535aaa2 9c241c66-30fd-442b-b7a1-79b4f1892919

$ cinder qos-get-association 1e9694b8-eca4-4ce7-b476-d1637535aaa2
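In cinder qos-associate the first UUID is the QoS spec and the second is the volume type. Both can be looked up rather than typed from memory (a sketch):

$ cinder qos-list
$ cinder type-list
$ cinder extra-specs-list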



[ Compute Node Install ]


1. compute node install (nova-compute, nova-network, nova-api-metadata)

$ sudo apt-get install nova-compute-kvm nova-network nova-api-metadata
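The compute node reuses the controller's nova.conf (copied over in step 7 of the basic configuration below). Once the file is in place, the node-side services need a restart, roughly (a sketch assuming the standard Ubuntu service names):

$ sudo service nova-compute restart
$ sudo service nova-network restart
$ sudo service nova-api-metadata restart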





[ Basic Configuration ]


1. network setting

$ nova network-create --fixed-range-v4 10.0.0.0/24 --vlan 1001 --gateway 10.0.0.1 --bridge br1001 --bridge-interface eth0 --multi-host T --dns1 8.8.8.8 --dns2 8.8.4.4 --project-id 5e795212d0804ad89234d9a1ac30c8ca dev_network


2. fixed ip reserve

$ nova fixed-ip-reserve 10.0.0.3

$ nova fixed-ip-reserve 10.0.0.4

$ nova fixed-ip-reserve 10.0.0.5


3. floating ip create

$ nova floating-ip-bulk-create 192.168.75.128/25 --interface eth0
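The registered pool can be confirmed from the controller (a sketch):

$ nova floating-ip-bulk-list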


4. secgroup create

$ nova secgroup-create connect 'icmp and ssh'

$ nova secgroup-add-rule connect icmp -1 -1 0.0.0.0/0

$ nova secgroup-add-rule connect tcp 22 22 0.0.0.0/0


5. keypair create

$ nova keypair-add stephen >> stephen.pem


6. copy the pem file to another host

$ scp -P 22 dev_admin.pem stack@192.168.230.132:~/creds/.

$ chmod 600 dev_admin.pem


7. copy nova.conf to the other multi-host nodes

$ for i in `seq 132 134`; do scp nova.conf stack@192.168.230.$i:~/creds/.; done


8. zone setting

$ nova aggregate-create POC LH_ZONE

$ nova aggregate-add-host POC ubuntu


9. VM create

$ nova boot test01 --flavor 1 --image 4399bba0-17a4-43ef-8fdd-4edd9c2afe74 --key_name dev_admin --security_group connect


# Boot from a volume and attach another volume at the same time

$ nova boot [name] --flavor [flavorid] \
  --block-device id=[imageid],source=image,dest=volume,size=10,bootindex=0,shutdown=remove \
  --block-device id=[volumeid],source=volume,dest=volume,size=100,bootindex=1
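Whichever form is used, the instance state and its fixed IP can be checked before logging in (a sketch):

$ nova list
$ nova show test01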


10. VM access

$ ssh -i dev_admin.pem cirros@10.0.0.6

$ ssh -i dev_admin.pem ubuntu@10.0.0.6




[ VMware-related Configuration ]


1. cinder.conf

[DEFAULT]

rootwrap_config = /etc/cinder/rootwrap.conf

api_paste_config = /etc/cinder/api-paste.ini

iscsi_helper = tgtadm

volume_name_template = %s

volume_group = cinder-volumes

verbose = True

debug=True

auth_strategy = keystone

state_path = /var/lib/cinder

lock_path = /var/lock/cinder

volumes_dir = /var/lib/cinder/volumes


default_availability_zone=VMWARE_ZONE

storage_availability_zone=VMWARE_ZONE


rpc_backend = cinder.openstack.common.rpc.impl_kombu

rabbit_host = 192.168.75.131

rabbit_port = 5672


glance_host=192.168.75.131

glance_port=9292

glance_api_servers=$glance_host:$glance_port


default_volume_type=VMWARE_TYPE


# multi backend

enabled_backends=VMWARE_DRIVER


[VMWARE_DRIVER]

volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver

volume_backend_name=VMWARE

vmware_host_ip = 192.168.75.131

vmware_host_password = VMWARE_PASSWORD

vmware_host_username = root


[database]

connection=mysql://cinder:cinderPass@192.168.75.131/cinder


[keystone_authtoken]

auth_uri = http://192.168.75.131:5000

auth_host = 192.168.75.131

auth_port = 35357

auth_protocol = http

admin_tenant_name = admin

admin_user = admin

admin_password = admin_pass


2. multi-backend setup

$ cinder type-create VMWARE_TYPE

$ cinder type-key VMWARE_TYPE set volume_backend_name=VMWARE


# Create a 1 GB volume with the VMware type

$ cinder create --display-name test-01 --volume-type VMWARE_TYPE 1


3. nova.conf 

$ sudo vi /etc/nova/nova.conf


dhcpbridge_flagfile=/etc/nova/nova.conf 

dhcpbridge=/usr/bin/nova-dhcpbridge 

logdir=/var/log/nova 

state_path=/var/lib/nova 

lock_path=/var/lock/nova 

force_dhcp_release=True 

# libvirt_use_virtio_for_bridges=True 

# connection_type=libvirt 

root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf 

verbose=True 

debug=True 

ec2_private_dns_show_ip=True 

api_paste_config=/etc/nova/api-paste.ini 

enabled_apis=ec2,osapi_compute,metadata 

cinder_catalog_info=volume:cinder:adminURL

use_network_dns_servers=True

metadata_host=192.168.75.131

metadata_listen=0.0.0.0

metadata_listen_port=8775

metadata_manager=nova.api.manager.MetadataManager

metadata_port=8775

vncserver_proxyclient_address=192.168.230.131

vncserver_listen=0.0.0.0

vnc_enabled=true

xvpvncproxy_base_url=http://192.168.230.131:6081/console

novncproxy_base_url=http://192.168.230.131:6080/vnc_auto.html

compute_driver = vmwareapi.VMwareVCDriver

remove_unused_base_images=False

image_create_to_qcow2 = True

api_rate_limit=True


#rpc setting 

rpc_backend = rabbit 

rabbit_host = 192.168.230.131


#network setting 

network_api_class = nova.network.api.API 

security_group_api = nova


# Network settings 

dhcpbridge_flagfile=/etc/nova/nova.conf 

dhcpbridge=/usr/bin/nova-dhcpbridge 

network_manager=nova.network.manager.VlanManager 

network_api_class=nova.network.api.API 

dhcp_lease_time=600 

vlan_start=1001 

fixed_range=10.0.0.0/16 

allow_same_net_traffic=False 

multi_host=True 

send_arp_for_ha=True 

#share_dhcp_address=True 

force_dhcp_release=True 

flat_interface = eth0

public_interface=eth0


#auth setting 

use_deprecated_auth = false

auth_strategy = keystone


#image setting 

glance_api_servers = 192.168.75.131:9292

image_service = nova.image.glance.GlanceImageService 

glance_host = 192.168.230.131


[vmware]

host_ip = 192.168.75.131

host_username = root

host_password = VMWARE_PASSWORD

cluster_name = cluster1

use_linked_clone = False


[database] 

connection = mysql://nova:NOVA_DBPASS@localhost/nova

 

[keystone_authtoken] 

auth_uri = http://192.168.75.131:5000 

auth_host = 192.168.75.131 

auth_port = 35357

auth_protocol = http 

admin_tenant_name = admin 

admin_user = admin 

admin_password = admin_pass


4. nova-compute.conf

#[DEFAULT]

#compute_driver=libvirt.LibvirtDriver

#[libvirt]

#virt_type=kvm


5. zone setting

$ nova aggregate-create VMWARE VMWARE_ZONE

$ nova aggregate-add-host VMWARE controller


6. image registration

[ slitaz linux ]

$ wget http://partnerweb.vmware.com/programs/vmdkimage/trend-tinyvm1-flat.vmdk

$ glance image-create --name "[vmware]trend-static-thin" --file trend-tinyvm1-flat.vmdk --is-public=True --container-format=bare --disk-format=vmdk --property vmware_disktype="thin" --property vmware_adaptertype="ide"


[ SliTaz Linux access and DHCP change ]

Log in with vmware / vmware, then gain root privileges with root / root


# vi /etc/network.conf

DHCP="yes"

STATIC="no"


[ cirros ]

$ wget http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img

$ qemu-img convert -f qcow2 -O vmdk cirros-0.3.3-x86_64-disk.img cirros-0.3.3-x86_64-disk.vmdk

$ glance image-create --name "[vmware]cirros-0.3.3" --disk-format vmdk --container-format bare --file cirros-0.3.3-x86_64-disk.vmdk --property vmware_disktype="sparse" --property hw_vif_model="VirtualVmxnet" --property vmware_adaptertype="ide" --is-public True --progress


7. save a VM as an image

1. connect to the ESXi host

2. move to the VM directory

# cd /vmfs/volumes/datastore1/6c516279-c83f-43ec-a8d4-bec540604280

3. thin copy

# vmkfstools -i 6c516279-c83f-43ec-a8d4-bec540604280.vmdk -d thin ./vmware_temp/trend-tinyvm1-dhcp-thin.vmdk

4. copy the file from the other host with scp

$ scp root@192.168.75.182:/vmfs/volumes/542cf526-bef9f829-2f02-000c29fef6ec/vmware_temp/trend-tinyvm1-dhcp-thin-flat.vmdk .


8. nova boot

$ nova hypervisor-list

$ nova boot test01 --flavor 1 --image 6d9745dc-0fc9-4802-b21d-329004353406 --key_name stephen --availability-zone "VMWARE_ZONE::domain-c12(cluster1)"









