Installing Icehouse@Ubuntu14.04 (4)

This part configures the compute node. The compute node is a virtual machine with 2 GB of RAM that uses nested virtualization; lscpu shows that the CPU's VMX/VT-x flag has been passed through into the VM.

Database preparation

Create the database that nova needs with the following commands:

root@JunoController:~# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 35
Server version: 5.5.41-MariaDB-1ubuntu0.14.04.1 (Ubuntu)

Copyright (c) 2000, 2014, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'xxxx';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'xxxx';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> quit
Bye
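
Optionally, you can confirm the grants work by logging in as the new nova user from the controller (it should connect without errors):

root@JunoController:~# mysql -h 10.17.17.211 -u nova -p nova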

The nova user

Create the nova user:

root@JunoController:~# source ~/openstack/admin-openrc.sh
root@JunoController:~# keystone user-create --name nova --pass xxxx
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |                                  |
| enabled  |               True               |
|    id    | 845c22d1a781458a8b28ba54534b73dd |
|   name   |               nova               |
| username |               nova               |
+----------+----------------------------------+

Assign nova to the service tenant and grant it the admin role:

root@JunoController:~# keystone user-role-add --user nova --tenant service --role admin

Register the nova service in keystone:

root@JunoController:~# keystone service-create --name nova --type compute --description "OpenStack Compute"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |        OpenStack Compute         |
|   enabled   |               True               |
|      id     | 8733caba0b9742a39ee9ac53ad4d8e27 |
|     name    |               nova               |
|     type    |             compute              |
+-------------+----------------------------------+

Register the nova endpoint in keystone:

root@JunoController:~# keystone endpoint-create --service-id $(keystone service-list | awk '/ compute / {print $2}') --publicurl http://10.17.17.211:8774/v2/%\(tenant_id\)s --internalurl http://10.17.17.211:8774/v2/%\(tenant_id\)s --adminurl http://10.17.17.211:8774/v2/%\(tenant_id\)s --region regionOne
+-------------+-------------------------------------------+
|   Property  |                   Value                   |
+-------------+-------------------------------------------+
|   adminurl  | http://10.17.17.211:8774/v2/%(tenant_id)s |
|      id     |      d16c91bfacf2474ebee36314535a146f     |
| internalurl | http://10.17.17.211:8774/v2/%(tenant_id)s |
|  publicurl  | http://10.17.17.211:8774/v2/%(tenant_id)s |
|    region   |                 regionOne                 |
|  service_id |      8733caba0b9742a39ee9ac53ad4d8e27     |
+-------------+-------------------------------------------+
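
As a quick sanity check (optional; the ids will differ in your deployment), list what keystone now knows about:

root@JunoController:~# keystone service-list
root@JunoController:~# keystone endpoint-list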

Compute service installation

Controller node configuration

On the controller node, install the following packages:

root@JunoController:~# apt-get -y install nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient

Edit the nova configuration file:

# vim /etc/nova/nova.conf
[database]
connection = mysql://nova:xxxxx@10.17.17.211/nova

[DEFAULT]
....
rpc_backend = rabbit
rabbit_host = 10.17.17.211
rabbit_password = xxxxxx
my_ip=10.17.17.211
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.17.17.211
auth_strategy = keystone 

[keystone_authtoken]
auth_uri = http://10.17.17.211:5000
auth_host = 10.17.17.211
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = xxxx
[glance]
host=10.17.17.211

Remove the default SQLite database:

root@JunoController:~# rm /var/lib/nova/nova.sqlite 

Populate the nova database:

root@JunoController:~# su -s /bin/sh -c "nova-manage db sync" nova

Restart the services, then use nova to check which images are available:

root@JunoController:~# service nova-api restart
root@JunoController:~# service nova-cert restart
root@JunoController:~#  service nova-consoleauth restart
root@JunoController:~# service nova-scheduler restart
root@JunoController:~# service nova-conductor restart
root@JunoController:~# service nova-novncproxy restart
root@JunoController:~# nova image-list
+--------------------------------------+---------------------+--------+--------+
| ID                                   | Name                | Status | Server |
+--------------------------------------+---------------------+--------+--------+
| 68f14900-8b25-4329-ad56-8fbd497c6812 | cirros-0.3.3-x86_64 | ACTIVE |        |
+--------------------------------------+---------------------+--------+--------+

Compute node installation

Install the following packages:

root@JunoCompute:~#  apt-get -y install nova-compute sysfsutils

Configure:

root@JunoCompute:~# vim /etc/nova/nova.conf 
[DEFAULT]
......
auth_strategy = keystone
rpc_backend = rabbit
rabbit_host = 10.17.17.211
rabbit_password = xxxx
my_ip = 10.17.17.213
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.17.17.213
novncproxy_base_url = http://10.17.17.211:6080/vnc_auto.html
glance_host = 10.17.17.211

[keystone_authtoken]
auth_uri = http://10.17.17.211:5000
auth_host = 10.17.17.211
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = xxxx

[database]
#The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:xxxx@10.17.17.211/nova
[glance]
host=10.17.17.211

Check whether the CPU supports hardware acceleration:

root@JunoCompute:~# egrep -c '(vmx|svm)' /proc/cpuinfo
2

If hardware acceleration is supported, configure nova-compute.conf as follows:

root@JunoCompute:~# cat /etc/nova/nova-compute.conf
[libvirt]
virt_type=kvm
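
If the egrep count above had been 0, the CPU does not support hardware acceleration and libvirt should use plain QEMU instead of KVM. A minimal sketch of that fallback configuration:

root@JunoCompute:~# cat /etc/nova/nova-compute.conf
[libvirt]
virt_type=qemu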

Remove the unneeded nova.sqlite file:

root@JunoCompute:~# rm -f /var/lib/nova/nova.sqlite 

Restart the nova service:

root@JunoCompute:~# service nova-compute restart

Verification

On the controller node, list all of the services:

root@JunoController:~# nova service-list
+------------------+----------------+----------+---------+-------+----------------------------+-----------------+
| Binary           | Host           | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+------------------+----------------+----------+---------+-------+----------------------------+-----------------+
| nova-cert        | JunoController | internal | enabled | up    | 2015-04-13T10:16:10.000000 | -               |
| nova-consoleauth | JunoController | internal | enabled | up    | 2015-04-13T10:16:12.000000 | -               |
| nova-scheduler   | JunoController | internal | enabled | up    | 2015-04-13T10:16:05.000000 | -               |
| nova-conductor   | JunoController | internal | enabled | up    | 2015-04-13T10:16:08.000000 | -               |
| nova-compute     | JunoCompute    | nova     | enabled | up    | 2015-04-13T10:16:09.000000 | -               |
+------------------+----------------+----------+---------+-------+----------------------------+-----------------+

The compute node is now configured. Next we can configure the network node; once that is done, we will be able to launch virtual machines.

Installing Icehouse@Ubuntu14.04 (5)

Neutron Database

Follow these steps to create the database:

root@JunoController:~# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 58
Server version: 5.5.41-MariaDB-1ubuntu0.14.04.1 (Ubuntu)

Copyright (c) 2000, 2014, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'xxxxx'
    -> ;
Query OK, 0 rows affected (0.01 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'xxxxx';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.01 sec)

MariaDB [(none)]> quit
Bye

Keystone items

Create the user:

root@JunoController:~# source ~/openstack/admin-openrc.sh
root@JunoController:~# keystone user-create --name neutron --pass xxxxx
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |                                  |
| enabled  |               True               |
|    id    | a4cbae42a2164c6e9a4c05c3f6835782 |
|   name   |             neutron              |
| username |             neutron              |
+----------+----------------------------------+

Assign the neutron user to the service tenant with the admin role:

root@JunoController:~# keystone user-role-add --user neutron --tenant service --role admin

Create the service:

root@JunoController:~# keystone service-create --name neutron --type network --description "OpenStack Networking"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |       OpenStack Networking       |
|   enabled   |               True               |
|      id     | 1142b316e4e04061bb676b73d0cf6f68 |
|     name    |             neutron              |
|     type    |             network              |
+-------------+----------------------------------+

Create the service endpoint:

root@JunoController:~# keystone endpoint-create --service-id $(keystone service-list | awk '/ network / {print $2}') --publicurl http://10.17.17.211:9696 --adminurl http://10.17.17.211:9696 --internalurl http://10.17.17.211:9696 --region regionOne
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
|   adminurl  |     http://10.17.17.211:9696     |
|      id     | 77bb946d42dc4d099875ecc377510937 |
| internalurl |     http://10.17.17.211:9696     |
|  publicurl  |     http://10.17.17.211:9696     |
|    region   |            regionOne             |
|  service_id | 1142b316e4e04061bb676b73d0cf6f68 |
+-------------+----------------------------------+

Install the components

Install on the controller node:

root@JunoController:~#  apt-get -y install neutron-server neutron-plugin-ml2 python-neutronclient

Get the id of the service tenant:

root@JunoController:~# source ~/openstack/admin-openrc.sh
root@JunoController:~# keystone tenant-get service
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |          Service Tenant          |
|   enabled   |               True               |
|      id     | 4b22bf4e6a68419aa91da6e0ffaca2dc |
|     name    |             service              |
+-------------+----------------------------------+
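
If you prefer to capture this id in a shell variable instead of copying it by hand (a convenience sketch, not part of the original steps), something like the following works:

root@JunoController:~# SERVICE_TENANT_ID=$(keystone tenant-list | awk '/ service / {print $2}')
root@JunoController:~# echo $SERVICE_TENANT_ID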

Edit the neutron configuration file as follows:

root@JunoController:~# vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = neutron.openstack.common.rpc.impl_kombu
rabbit_host = 10.17.17.211
rabbit_password = xxxxx

notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.17.17.211:8774/v2
nova_admin_username = nova
nova_admin_tenant_id = 4b22bf4e6a68419aa91da6e0ffaca2dc
nova_admin_password = xxxxx
nova_admin_auth_url = http://10.17.17.211:35357/v2.0

core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True

auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://10.17.17.211:5000
auth_host = 10.17.17.211
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = xxxxx
signing_dir = $state_path/keystone-signing
[database]
connection = mysql://neutron:xxxxx@10.17.17.211/neutron

On the controller node, edit the ML2 (Modular Layer 2) plugin configuration:

root@JunoController:~# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch

[ml2_type_gre]
tunnel_id_ranges = 1:1000


[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True

Configure the Compute service to use Neutron for networking:

# vim /etc/nova/nova.conf
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://10.17.17.211:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = xxxxx
neutron_admin_auth_url = http://10.17.17.211:35357/v2.0
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron


Restart the services:

# service nova-api restart
# service nova-scheduler restart
# service nova-conductor restart

Restart the networking service:

# service neutron-server restart

Verify the configuration with the following command:

root@JunoController:~# neutron ext-list
+-----------------------+-----------------------------------------------+
| alias                 | name                                          |
+-----------------------+-----------------------------------------------+
| security-group        | security-group                                |
| l3_agent_scheduler    | L3 Agent Scheduler                            |
| ext-gw-mode           | Neutron L3 Configurable external gateway mode |
| binding               | Port Binding                                  |
| provider              | Provider Network                              |
| agent                 | agent                                         |
| quotas                | Quota management support                      |
| dhcp_agent_scheduler  | DHCP Agent Scheduler                          |
| multi-provider        | Multi Provider Network                        |
| external-net          | Neutron external network                      |
| router                | Neutron L3 Router                             |
| allowed-address-pairs | Allowed Address Pairs                         |
| extra_dhcp_opt        | Neutron Extra DHCP opts                       |
| extraroute            | Neutron Extra Route                           |
+-----------------------+-----------------------------------------------+

### Network node configuration

Enable the following kernel networking options:

root@JunoNetwork:~# vim /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1

Apply the changes (note that some errors occur here):

root@JunoNetwork:~# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-arptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
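
The three errors appear because the kernel bridge module is not loaded yet, so the net.bridge.* keys do not exist. One way to make them available immediately (an assumption based on Ubuntu 14.04's 3.13 kernel, where these sysctls are provided by the bridge module) is to load the module and re-apply:

root@JunoNetwork:~# modprobe bridge
root@JunoNetwork:~# sysctl -p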

Install the networking components:

root@JunoNetwork:~# apt-get install neutron-plugin-ml2 neutron-plugin-openvswitch-agent neutron-l3-agent neutron-dhcp-agent

Configure the common components:

# vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = neutron.openstack.common.rpc.impl_kombu
rabbit_host = 10.17.17.211
rabbit_password = xxxxx
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://10.17.17.211:5000
auth_host = 10.17.17.211
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = xxxxx

Edit the L3 agent:

# vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
verbose = True

Edit the DHCP agent:

# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True

Configure DHCP (lower the instance MTU to 1454 via DHCP option 26, to leave room for the GRE overhead):

root@JunoNetwork:~# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
...
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
root@JunoNetwork:~# vim /etc/neutron/dnsmasq-neutron.conf
dhcp-option-force=26,1454
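
For the running dnsmasq instances to pick up the new MTU option, the DHCP agent has to be restarted; a reasonable extra step once the agent is installed and configured:

root@JunoNetwork:~# service neutron-dhcp-agent restart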

Configure the metadata agent:

root@JunoNetwork:~# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
auth_url = http://10.17.17.211:5000/v2.0
auth_region = regionOne
admin_tenant_name = service
admin_user = neutron
admin_password = xxxxx
nova_metadata_ip = 10.17.17.211
metadata_proxy_shared_secret = xxxxx

Back on the controller node, edit:

# vim /etc/nova/nova.conf
[DEFAULT]
...
service_neutron_metadata_proxy = true
neutron_metadata_proxy_shared_secret = xxxxx

Restart the Compute API service:

# service nova-api restart

Configure ML2 on the network node:

root@JunoNetwork:~# vim /etc/neutron/plugins/ml2/ml2_conf.ini 
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ovs]
local_ip = 10.19.19.212
tunnel_type = gre
enable_tunneling = True
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True

Restart the Open vSwitch service:

root@JunoNetwork:~# service openvswitch-switch restart
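
The neutron agents on the network node also need to re-read their configuration; restarting them explicitly (service names as packaged on Ubuntu 14.04) is a sensible extra step:

root@JunoNetwork:~# service neutron-plugin-openvswitch-agent restart
root@JunoNetwork:~# service neutron-l3-agent restart
root@JunoNetwork:~# service neutron-dhcp-agent restart
root@JunoNetwork:~# service neutron-metadata-agent restart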

Add the external bridge configuration:

root@JunoNetwork:~# ovs-vsctl add-br br-ex
root@JunoNetwork:~# cat /etc/network/interfaces
auto eth2
iface eth2 inet manual

iface br-ex inet static
address 10.22.22.212
netmask 255.255.255.0
gateway 10.22.22.1
bridge_ports eth2
bridge_stp off
auto br-ex

Add the bridge port and reboot the machine:

root@JunoNetwork:~# ovs-vsctl add-port br-ex eth2
root@JunoNetwork:~# reboot
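
After the reboot you can confirm that the bridge and its port survived (a quick check; the exact output depends on your OVS version):

root@JunoNetwork:~# ovs-vsctl show
root@JunoNetwork:~# ovs-vsctl list-ports br-ex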

### Compute node configuration

Change the sysctl configuration:

root@JunoCompute:~# vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
root@JunoCompute:~# sysctl -p 
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

Install the following packages:

# apt-get install neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent openvswitch-datapath-dkms

Configure the common networking components on the compute node:

root@JunoCompute:~# vim /etc/neutron/neutron.conf
    [DEFAULT]
    auth_strategy = keystone
    rpc_backend = neutron.openstack.common.rpc.impl_kombu
    rabbit_host = controller
    rabbit_password = xxxx
    core_plugin = ml2
    service_plugins = router
    allow_overlapping_ips = True
    verbose = True
    [keystone_authtoken]
    auth_uri = http://10.17.17.211:5000
    auth_host = 10.17.17.211
    auth_port = 35357
    auth_protocol = http
    admin_tenant_name = service
    admin_user = neutron
    admin_password = xxxx
    signing_dir = $state_path/keystone-signing
    [database]
root@JunoCompute:~# vim /etc/neutron/plugins/ml2/ml2_conf.ini 
    [DEFAULT]
    ...
    core_plugin = ml2
    service_plugins = router
    allow_overlapping_ips = True
    [ml2]
    ...
    type_drivers = gre
    tenant_network_types = gre
    mechanism_drivers = openvswitch
    [ml2_type_gre]
    ...
    tunnel_id_ranges = 1:1000
    [ovs]
    ...
    local_ip = 10.19.19.213
    tunnel_type = gre
    enable_tunneling = True
    [securitygroup]
    ...
    firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
    enable_security_group = True
root@JunoCompute:~# service openvswitch-switch restart
root@JunoCompute:~# service nova-compute restart
root@JunoCompute:~# service neutron-plugin-openvswitch-agent restart

Next, configure nova on the compute node so that it uses neutron as its network manager.

root@JunoCompute:~# vim /etc/nova/nova.conf 
[DEFAULT]
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://10.17.17.211:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = xxxx
neutron_admin_auth_url = http://10.17.17.211:35357/v2.0
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron

After making these changes, restart the services on the compute node:

root@JunoCompute:~# service nova-compute restart
nova-compute stop/waiting
nova-compute start/running, process 2266
root@JunoCompute:~# service neutron-plugin-openvswitch-agent restart
stop: Unknown instance: 
neutron-plugin-openvswitch-agent start/running, process 2303

Configure networking on the network node, since we need br-ex as the interface to the external network.
Configure the network as follows:

# ovs-vsctl add-br br-ex
# ovs-vsctl add-port br-ex eth2
# cat /etc/network/interfaces
# 
auto eth2
iface eth2 inet manual

iface br-ex inet static
address 10.22.22.212
netmask 255.255.255.0
gateway 10.22.22.1
bridge_ports eth2
bridge_stp off
auto br-ex
# reboot

Create the external network ext-net:

root@JunoController:~# neutron net-create ext-net --shared --router:external=True
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | d879d5b1-f16e-4e28-beda-eb2b433e1f39 |
| name                      | ext-net                              |
| provider:network_type     | gre                                  |
| provider:physical_network |                                      |
| provider:segmentation_id  | 1                                    |
| router:external           | True                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | ea1f0a6b15dc4796958f087c38756ed1     |
+---------------------------+--------------------------------------+

Create the external subnet:

root@JunoController:~# neutron subnet-create ext-net --name ext-subnet --allocation-pool start=10.22.22.10,end=10.22.22.50 --disable-dhcp --gateway 10.22.22.1 10.22.22.0/24
Created a new subnet:
+------------------+------------------------------------------------+
| Field            | Value                                          |
+------------------+------------------------------------------------+
| allocation_pools | {"start": "10.22.22.10", "end": "10.22.22.50"} |
| cidr             | 10.22.22.0/24                                  |
| dns_nameservers  |                                                |
| enable_dhcp      | False                                          |
| gateway_ip       | 10.22.22.1                                     |
| host_routes      |                                                |
| id               | 3c7e2224-0979-4eb6-b95f-16401ecbfef0           |
| ip_version       | 4                                              |
| name             | ext-subnet                                     |
| network_id       | d879d5b1-f16e-4e28-beda-eb2b433e1f39           |
| tenant_id        | ea1f0a6b15dc4796958f087c38756ed1               |
+------------------+------------------------------------------------+


Now switch to the demo tenant credentials and create the tenant network and subnet:

root@JunoController:~# source openstack/demo-openrc.sh 
root@JunoController:~# neutron net-create demo-net
Created a new network:
+----------------+--------------------------------------+
| Field          | Value                                |
+----------------+--------------------------------------+
| admin_state_up | True                                 |
| id             | 01c966ce-88cf-43a2-a7b7-2ebf6d6b6d60 |
| name           | demo-net                             |
| shared         | False                                |
| status         | ACTIVE                               |
| subnets        |                                      |
| tenant_id      | 2ac9cae777014d3d94458f521b013e94     |
+----------------+--------------------------------------+
root@JunoController:~# neutron subnet-create demo-net --name demo-subnet --gateway 10.44.44.1 10.44.44.0/24
Created a new subnet:
+------------------+------------------------------------------------+
| Field            | Value                                          |
+------------------+------------------------------------------------+
| allocation_pools | {"start": "10.44.44.2", "end": "10.44.44.254"} |
| cidr             | 10.44.44.0/24                                  |
| dns_nameservers  |                                                |
| enable_dhcp      | True                                           |
| gateway_ip       | 10.44.44.1                                     |
| host_routes      |                                                |
| id               | c6181123-f729-4ad2-bddc-93cfc761d0e1           |
| ip_version       | 4                                              |
| name             | demo-subnet                                    |
| network_id       | 01c966ce-88cf-43a2-a7b7-2ebf6d6b6d60           |
| tenant_id        | 2ac9cae777014d3d94458f521b013e94               |
+------------------+------------------------------------------------+

Create a router, attach the tenant subnet to it, and set ext-net as its gateway:

root@JunoController:~# neutron router-create demo-router
Created a new router:
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| admin_state_up        | True                                 |
| external_gateway_info |                                      |
| id                    | e5a010ba-371c-43d2-b3fb-a30e0dc5302b |
| name                  | demo-router                          |
| status                | ACTIVE                               |
| tenant_id             | 2ac9cae777014d3d94458f521b013e94     |
+-----------------------+--------------------------------------+

root@JunoController:~# neutron router-interface-add demo-router demo-subnet
Added interface c862f772-a1ef-4401-9a3b-2bdf5444e41b to router demo-router.

root@JunoController:~# neutron router-gateway-set demo-router ext-net
Set gateway for router demo-router
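
On the network node, the L3 agent implements this router inside a network namespace whose name embeds the router id. A quick way to inspect it (<router-id> is a placeholder for the id shown by router-create above):

root@JunoNetwork:~# ip netns | grep qrouter
root@JunoNetwork:~# ip netns exec qrouter-<router-id> ip addr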

To verify, ping the router's external address from the external network (the router takes 10.22.22.10, the first address of the allocation pool); if it replies, the network we created works. In the run below the network node's br-ex address 10.22.22.212 is pinged, which likewise confirms external connectivity:

[root:~]# ping 10.22.22.212                                                                                                                    
PING 10.22.22.212 (10.22.22.212) 56(84) bytes of data.                                                                                         
64 bytes from 10.22.22.212: icmp_seq=1 ttl=64 time=0.152 ms                                                                                    
64 bytes from 10.22.22.212: icmp_seq=2 ttl=64 time=0.136 ms    

Check the agent status:

root@JunoController:~# neutron agent-list
+--------------------------------------+--------------------+-------------+-------+----------------+
| id                                   | agent_type         | host        | alive | admin_state_up |
+--------------------------------------+--------------------+-------------+-------+----------------+
| 0b7191e1-ecd2-4808-b87a-f616d0a3bc7b | Metadata agent     | JunoNetwork | :-)   | True           |
| 34511134-8392-44a9-a889-0ff03d85a995 | Open vSwitch agent | JunoCompute | :-)   | True           |
| 474065d1-a50a-4d11-89d3-37c7a88e449c | DHCP agent         | JunoNetwork | :-)   | True           |
| 5569c590-df83-4ee1-a073-15c908ef8d20 | L3 agent           | JunoNetwork | :-)   | True           |
| a22c6e2a-7af0-4404-9e5b-46996b370672 | Open vSwitch agent | JunoNetwork | :-)   | True           |
+--------------------------------------+--------------------+-------------+-------+----------------+

Only once the Open vSwitch agent on the compute node shows up here can we say that the network configuration succeeded.

Deploy OpenContrail On CentOS With Docker As Hypervisor

Reference:
https://software.intel.com/en-us/blogs/2014/12/28/experimenting-with-openstack-sahara-on-docker-containers
I want to enable Docker as the hypervisor, since it greatly saves resources and lets us benefit from Docker's rich image ecosystem. The steps follow:

Preparation

First create the image file via:

# qemu-img create -f qcow2 CentOSOpenContrail.qcow2 100G
Formatting 'CentOSOpenContrail.qcow2', fmt=qcow2 size=107374182400 encryption=off cluster_size=65536 
[root:/home/juju/img/CentOSOpenContrail]# pwd
/home/juju/img/CentOSOpenContrail

Then create a KVM-based virtual machine with 8 GB of memory and 4 cores, copying the host CPU configuration.

Installation

After installation, update the installed software via:

$ mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup   
$ wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo 
$ sudo yum makecache
$ sudo yum update -y
$ sudo reboot

Install following packages:

# wget https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
# rpm -ivh rdo-release-juno-1.noarch.rpm 
# yum install -y https://rdo.fedorapeople.org/rdo-release.rpm
# yum install openstack-packstack

Now you could use packstack for installing the packages:

# packstack --gen-answer-file=/root/answer.txt
# packstack --answer-file=/root/answer.txt

Install openstack-sahara with the following commands; python-tox provides tox, which is used to generate sahara's configuration files:

# yum install openstack-sahara 
# yum install python-tox

Create the username and password for sahara to use:

[root@10-17-17-183 etc]# mysql
MariaDB [(none)]> create user 'sahara'@'localhost' identified by 'saharapass';
Query OK, 0 rows affected (0.07 sec)
MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| cinder             |
| glance             |
| keystone           |
| mysql              |
| neutron            |
| nova               |
| performance_schema |
| test               |
+--------------------+
9 rows in set (0.00 sec)

MariaDB [(none)]> use mysql
MariaDB [mysql]> show tables;
MariaDB [mysql]> select * from user;
+-----------+----------------+-----------------------------------

How do you find all of the database password information that packstack configured?

$ cd /etc/
$ grep -i "sql://" ./ -r

Create a database named 'saharaDB' and grant it to the user sahara:

MariaDB [(none)]> create database saharaDB;
MariaDB [(none)]> grant all on saharaDB.* to 'sahara'@'localhost';
Query OK, 0 rows affected (0.02 sec)

MariaDB [(none)]> quit;
[root@10-17-17-183 etc]# mysql -h 127.0.0.1 -u sahara -p saharaDB
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 1481
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2014, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [saharaDB]> 

So the connection string for sahara to use looks like this:

# cat /etc/sahara/sahara.conf
connection = mysql://sahara:saharapass@127.0.0.1/saharaDB
use_neutron=true

# sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
INFO  [alembic.migration] Context impl MySQLImpl.
INFO  [alembic.migration] Will assume non-transactional DDL.
INFO  [alembic.migration] Running upgrade None -> 001, Icehouse release
INFO  [alembic.migration] Running upgrade 001 -> 002, placeholder
INFO  [alembic.migration] Running upgrade 002 -> 003, placeholder
INFO  [alembic.migration] Running upgrade 003 -> 004, placeholder
INFO  [alembic.migration] Running upgrade 004 -> 005, placeholder
INFO  [alembic.migration] Running upgrade 005 -> 006, placeholder
INFO  [alembic.migration] Running upgrade 006 -> 007, convert clusters.status_description to LongText
INFO  [alembic.migration] Running upgrade 007 -> 008, add security_groups field to node groups
INFO  [alembic.migration] Running upgrade 008 -> 009, add rollback info to cluster
INFO  [alembic.migration] Running upgrade 009 -> 010, add auto_security_groups flag to node group
INFO  [alembic.migration] Running upgrade 010 -> 011, add Sahara settings info to cluster

Register the service and specify the endpoint:

[root@10-17-17-183 etc]# cat ./nagios/keystonerc_admin                                                                                         
export OS_USERNAME=admin                                                                                                                       
export OS_TENANT_NAME=admin                                                                                                                    
export OS_PASSWORD=5d4e62e79d314477                                                                                                            
export OS_AUTH_URL=http://10.17.17.183:35357/v2.0/ 
[root@10-17-17-183 etc]# pwd                                                                
/etc  
# keystone service-create --name sahara --type data_processing --description "Data processing service"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |     Data processing service      |
|   enabled   |               True               |
|      id     | 5f711f0d42754b349931349bd0c325d1 |
|     name    |              sahara              |
|     type    |         data_processing          |
+-------------+----------------------------------+
[root@10-17-17-183 ~]# keystone endpoint-create --service-id $(keystone service-list | awk '/ sahara / {print $2}') --publicurl http://127.0.0.1:8386/v1.1/%\(tenant_id\)s --internalurl http://127.0.0.1:8386/v1.1/%\(tenant_id\)s --adminurl http://127.0.0.1:8386/v1.1/%\(tenant_id\)s --region regionOne
+-------------+------------------------------------------+
|   Property  |                  Value                   |
+-------------+------------------------------------------+
|   adminurl  | http://127.0.0.1:8386/v1.1/%(tenant_id)s |
|      id     |     b2115bab8bd743f589b1ed68eef69b4c     |
| internalurl | http://127.0.0.1:8386/v1.1/%(tenant_id)s |
|  publicurl  | http://127.0.0.1:8386/v1.1/%(tenant_id)s |
|    region   |                regionOne                 |
|  service_id |     5f711f0d42754b349931349bd0c325d1     |
+-------------+------------------------------------------+
[root@10-17-17-183 ~]# systemctl start openstack-sahara-all
[root@10-17-17-183 ~]# systemctl enable openstack-sahara-all
ln -s '/usr/lib/systemd/system/openstack-sahara-all.service' '/etc/systemd/system/multi-user.target.wants/openstack-sahara-all.service'
[root@10-17-17-183 ~]# systemctl status openstack-sahara-all.service

More detailed info could be fetched from:
http://docs.openstack.org/juno/install-guide/install/apt/content/sahara-install.html
Now log in to http://10.17.17.183 to reach the dashboard. The password is the OS_PASSWORD value shown above.
Install docker:

$  yum install docker
Or
$  wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O docker
$  docker --version
[root@10-17-17-183 ~]#  usermod -G dockerroot nova
[root@10-17-17-183 ~]# service openstack-nova-compute restart
Redirecting to /bin/systemctl restart  openstack-nova-compute.service
# systemctl restart docker
# systemctl enable docker

Install nova-docker support:

$  yum install python-pip
$  pip install -e git+https://github.com/stackforge/nova-docker#egg=novadocker
$  cd src/novadocker/
$  python setup.py install
$ vim /etc/nova/nova.conf
[DEFAULT]
compute_driver = novadocker.virt.docker.DockerDriver

Edit nova's rootwrap filters as follows:

[root@10-17-17-183 nova]# mkdir rootwrap.d
[root@10-17-17-183 nova]# cd rootwrap.d/
[root@10-17-17-183 rootwrap.d]# touch docker.filters
[root@10-17-17-183 rootwrap.d]# vim docker.filters 
[Filters]
# nova/virt/docker/driver.py:'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root
[root@10-17-17-183 rootwrap.d]# vim /etc/nova/rootwrap.conf 
[root@10-17-17-183 rootwrap.d]# pwd
/etc/nova/rootwrap.d

Edit glance-api.conf to allow the docker container format:

[root@10-17-17-183 glance]# vim ./glance-api.conf 
[DEFAULT]
container_formats = ami,ari,aki,bare,ovf,docker
[root@10-17-17-183 glance]# pwd
/etc/glance
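
For the new container_formats value to take effect, the glance API service has to be restarted; on this RDO/CentOS host that would be:

[root@10-17-17-183 glance]# systemctl restart openstack-glance-api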

Create the Ubuntu container image with the following commands:

$ docker pull ubuntu
$ docker save ubuntu | glance image-create --is-public=True --container-format=docker --disk-format=raw --name ubuntu_container
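
To confirm the image registered and to try booting a container from it (m1.small is an assumption; use any flavor that exists in your deployment, and add --nic net-id=<net-id> if you have more than one network):

$ glance image-list | grep ubuntu_container
$ nova boot --image ubuntu_container --flavor m1.small test-container
$ nova list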

Build qemu with glusterfs support

Following is the build procedure.

$ sudo apt-get build-dep qemu
$ sudo apt-get install libvde-dev libvdeplug2-dev libcap-ng-dev libattr1-dev
$ wget http://wiki.qemu-project.org/download/qemu-2.0.2.tar.bz2
$ tar xjvf qemu-2.0.2.tar.bz2
$ cd qemu-2.0.2/
$ mkdir -p bin/debug/native
$ cd bin/debug/native
$ sudo apt-get install libjpeg-turbo8-dev
$ sudo apt-get install glusterfs-common
$ ../../../configure --enable-sdl --audio-drv-list=alsa,oss --enable-curses --enable-vnc-jpeg --enable-curl --enable-fdt --enable-kvm --enable-tcg-interpreter --enable-system --enable-user \
    --enable-linux-user --enable-guest-base --enable-pie --enable-uuid --enable-vde --enable-linux-aio --enable-cap-ng --enable-attr --enable-docs --enable-vhost-net --enable-rbd \
    --enable-guest-agent --enable-glusterfs --target-list=x86_64-softmmu,i386-softmmu
$ make -j2
$ ./qemu-img -h

Before the build, the stock qemu-img reports:

$ qemu-img -h
Supported formats: vvfat vpc vmdk vhdx vdi sheepdog sheepdog sheepdog rbd raw host_cdrom host_floppy host_device file qed qcow2 qcow parallels nbd nbd nbd dmg tftp ftps ftp https http cow cloop bochs blkverify blkdebug

After the build, the new qemu-img reports:

Supported formats: vvfat vpc vmdk vhdx vdi sheepdog sheepdog sheepdog rbd raw host_cdrom host_floppy host_device file qed qcow2 qcow parallels nbd nbd nbd gluster gluster gluster gluster dmg tftp ftps ftp https http cow cloop bochs blkverify blkdebug

Whole Process For Deploying Contrail

Following are the steps, enjoy them:

# First bootstrap the environment.   
juju bootstrap --metadata-source ~/.juju/metadata --upload-tools -v --show-log --constraints="mem=3G"

#####################################################
# Machine 0 holds 10 services. 3G mem, trusty
# Memory: 3G
# Service: 10
#####################################################


# Since machine 0 is ready for use, deploy services to this node with the following commands:
# juju-gui is for monitoring the status of the components
# 1. juju-gui
juju deploy --to 0 --repository=/home/Trusty/charms/ local:trusty/juju-gui
# 2. rabbitmq-server
juju deploy --to lxc:0 --repository=/home/Trusty/charms/ local:trusty/rabbitmq-server
# 3. mysql
juju deploy --to lxc:0 --repository=/home/Trusty/charms/ local:trusty/mysql
# 4. keystone
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/keystone
# 5. openstack-dashboard
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/openstack-dashboard

# 6. nova-cloud-controller
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/nova-cloud-controller --config=/home/Trusty/Code/deploy/nova-cloud-controller-config.yaml 
# 7. glance to lxc:0
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/glance
# 8. contrail-configuration to lxc:0
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/contrail-configuration
# 9. contrail-control to lxc:0
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/contrail-control
# 10. neutron-api to lxc:0
juju deploy --to lxc:0 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/neutron-api --config=/home/Trusty/Code/deploy/neutron-api.yaml

# Now add the first node for nova-compute.   
juju set-constraints "mem=900M"
#####################################################
# Machine 1 holds the nova-compute service; 2G with nested CPU, trusty
# Memory: 2G
# Service: 1
#####################################################

# Add a machine by name; it has 2048M of memory and nested CPU support for KVM
juju add-machine MaasOpenContrail4
# Machine 1 will be added to the Juju environment; deploy nova-compute onto this node
juju deploy --to 1 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/nova-compute

#####################################################
# Machine 2 holds the neutron-gateway service; 1G, trusty
# Memory: 1G
# Service: 1
#####################################################
# Add neutron-gateway to the 1G machine. It may fail; if so, remove the machine with `juju destroy-machine 2 --force` and add it again.
juju add-machine MaasOpenContrail7
juju deploy --to 4 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/quantum-gateway neutron-gateway

#####################################################
# Machine 3 holds the cassandra and zookeeper services; 4G, precise.
# Memory: 4G
# Service: 2
#####################################################
# First we should change the default deployed system version to precise. 
juju set-environment default-series=precise
# Add new machine, whose memory is 4G.
juju add-machine MaasOpenContrail5
# Deploy cassandra to 4G Node
juju deploy --to 5 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:precise/cassandra --config=/home/Trusty/Code/deploy/cassandra.yaml
# Also use this machine to deploy an LXC-based service, zookeeper, which is also based on precise.
juju deploy --to lxc:5 --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:precise/zookeeper


# Well, set the default environment from precise to trusty
juju set-environment default-series=trusty

# Finally deploy  neutron-contrail
juju deploy --repository=/home/Trusty/Code/deployOpenContrail/contrail-deployer/src/charms/ local:trusty/neutron-contrail



#####################################################
#####################################################
# The deployment is finished; now add the relations
#####################################################
#####################################################
juju add-relation keystone mysql
juju add-relation nova-cloud-controller mysql
juju add-relation nova-cloud-controller rabbitmq-server
juju add-relation nova-cloud-controller glance
juju add-relation nova-cloud-controller keystone
juju add-relation neutron-gateway mysql
juju add-relation neutron-gateway:amqp rabbitmq-server:amqp
juju add-relation neutron-gateway nova-cloud-controller
juju add-relation nova-compute:shared-db mysql:shared-db
juju add-relation nova-compute:amqp rabbitmq-server:amqp
juju add-relation nova-compute glance
juju add-relation nova-compute nova-cloud-controller
juju add-relation glance mysql
juju add-relation glance keystone
juju add-relation openstack-dashboard keystone
juju add-relation neutron-api mysql
juju add-relation neutron-api rabbitmq-server
juju add-relation neutron-api nova-cloud-controller
juju add-relation neutron-api:identity-service keystone:identity-service
juju add-relation neutron-api:identity-admin keystone:identity-admin
juju add-relation contrail-configuration:cassandra cassandra:database
juju add-relation contrail-configuration zookeeper
juju add-relation contrail-configuration rabbitmq-server
juju add-relation contrail-configuration keystone
juju add-relation contrail-configuration neutron-gateway
juju add-relation neutron-api contrail-configuration
juju add-relation contrail-control contrail-configuration
juju add-relation nova-compute neutron-contrail
juju add-relation neutron-contrail contrail-control
juju add-relation neutron-contrail neutron-gateway
juju add-relation neutron-contrail contrail-configuration
juju add-relation neutron-contrail keystone


# Change the OpenStack admin password:
juju set keystone admin-password="helloworld"
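
# Finally, watch the environment converge; all units should eventually report "started".
juju status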