Thursday, October 09, 2014

Forwarding packets from br-int to br-ex on the Neutron Node

[root@controller ~(keystone_admin)]# neutron router-list
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| id                                   | name    | external_gateway_info                                                       |
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| 30b921ba-869c-4026-9010-f6554a82e37e | router2 | {"network_id": "022905d0-cfeb-4d5b-ae51-37c93080e7ae", "enable_snat": true} |
| db10bdfd-f0c6-4c30-8a08-30ceb132a979 | router3 | {"network_id": "022905d0-cfeb-4d5b-ae51-37c93080e7ae", "enable_snat": true} |
+--------------------------------------+---------+-----------------------------------------------------------------------------+

1. [root@controller ~(keystone_admin)]# ip netns exec qrouter-db10bdfd-f0c6-4c30-8a08-30ceb132a979 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-a9e0c15a-b6: flags=67<UP,BROADCAST,RUNNING>  mtu 1500
        inet 192.168.1.154  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::f816:3eff:fe61:7982  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:61:79:82  txqueuelen 0  (Ethernet)
        RX packets 794857  bytes 1090221979 (1.0 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 459453  bytes 38942381 (37.1 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-12b39034-b3: flags=67<UP,BROADCAST,RUNNING>  mtu 1500
        inet 20.0.0.1  netmask 255.255.255.0  broadcast 20.0.0.255
        inet6 fe80::f816:3eff:feed:976b  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:ed:97:6b  txqueuelen 0  (Ethernet)
        RX packets 460529  bytes 39050698 (37.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 788075  bytes 1089807974 (1.0 GiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

2. [root@controller ~(keystone_admin)]# ip netns exec qrouter-db10bdfd-f0c6-4c30-8a08-30ceb132a979 route -n

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 qg-a9e0c15a-b6
20.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 qr-12b39034-b3
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 qg-a9e0c15a-b6



3. [root@controller ~(keystone_admin)]# ovs-vsctl show
ba7b93f5-f364-40ac-94ee-8c3266c14282
    Bridge br-tun
        Port "gre-c0a80089"
            Interface "gre-c0a80089"
                type: gre
                options: {in_key=flow, local_ip="192.168.0.127", out_key=flow, remote_ip="192.168.0.137"}
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-412ba0b5-e8"
            Interface "qg-412ba0b5-e8"
                type: internal
        Port "enp2s0"
            Interface "enp2s0"
        Port "qg-a9e0c15a-b6"
            Interface "qg-a9e0c15a-b6"
                type: internal
    Bridge br-int
        fail_mode: secure                                      
        Port br-int                                                   
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-635a3c01-f9"
            tag: 2
            Interface "qr-635a3c01-f9"
                type: internal
        Port "tap0e08184f-89"
            tag: 1
            Interface "tap0e08184f-89"
                type: internal
        Port "tapa8c5a401-51"
            tag: 2
            Interface "tapa8c5a401-51"
                type: internal
        Port "qr-12b39034-b3"
            tag: 1
            Interface "qr-12b39034-b3"
                type: internal
    ovs_version: "2.0.0"



Thursday, October 02, 2014

RDO Two Node Cluster (Controller+Compute) IceHouse Neutron ML2&OVS&GRE Cluster on Fedora 20

Finally I've designed an answer file that creates ml2_conf.ini -> /etc/neutron/plugins/ml2/ml2_conf.ini, while plugin.ini -> /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini has been created manually with exactly the same content as ml2_conf.ini, following http://kashyapc.fedorapeople.org/virt/openstack/rdo/IceHouse-Nova-Neutron-ML2-GRE-OVS.txt
A similar file has been created on the Compute Node.
metadata_agent.ini is the same on the Controller and Compute Nodes.
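
One way to reproduce that manual step (an assumption about how the copy was done; the resulting symlink layout matches the `ls -l` listing shown further below):

# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
# ln -sf /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini /etc/neutron/plugin.ini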

Two boxes have been set up, each one having 2 NICs (p37p1, p4p1), for the
Controller && Compute Nodes setup. Before running
`packstack --answer-file=TwoNodeML2&OVS&GRE.txt`, SELINUX was set to permissive on both nodes. Both p4p1 interfaces were assigned IPs supporting the GRE tunnel (192.168.0.127, 192.168.0.137) between the Controller and Compute Nodes. The firewalld and NetworkManager services are disabled (after packstack completion); the IPv4 firewall with iptables and the network service are enabled and running. Packstack is bound to the public IP of interface p37p1, 192.168.1.127; the Compute Node is at 192.168.1.137 (view the answer file).
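
A minimal sketch of that node preparation, assuming Fedora 20 nodes with the iptables-services package installed:

# setenforce 0 ; sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
# systemctl stop firewalld NetworkManager ; systemctl disable firewalld NetworkManager
# chkconfig network on ; service network start
# systemctl enable iptables.service ; systemctl start iptables.service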

Setup configuration

- Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin && GRE )
- Compute node: Nova (nova-compute), Neutron (openvswitch-agent)


icehouse1.localdomain   -  Controller (192.168.1.127)
icehouse2.localdomain   -  Compute   (192.168.1.137)


********************************
Metadata access verification
********************************

[root@icehouse1 ~(keystone_admin)]# iptables-save | grep 8775
-A INPUT -p tcp -m multiport --dports 8773,8774,8775 -m comment --comment "001 novaapi incoming" -j ACCEPT
-A nova-api-INPUT -d 192.168.1.127/32 -p tcp -m tcp --dport 8775 -j ACCEPT

[root@icehouse1 ~(keystone_admin)]# netstat -antp | grep 8775
tcp        0      0 0.0.0.0:8775            0.0.0.0:*               LISTEN      1181/python        

[root@icehouse1 ~(keystone_admin)]# ps -ef| grep 1181
nova      1181     1  0 06:30 ?        00:00:25 /usr/bin/python /usr/bin/nova-api
nova      3478  1181  0 06:31 ?        00:00:00 /usr/bin/python /usr/bin/nova-api
nova      3479  1181  0 06:31 ?        00:00:00 /usr/bin/python /usr/bin/nova-api
nova      3524  1181  0 06:31 ?        00:00:04 /usr/bin/python /usr/bin/nova-api
nova      3525  1181  0 06:31 ?        00:00:04 /usr/bin/python /usr/bin/nova-api
nova      3549  1181  0 06:31 ?        00:00:00 /usr/bin/python /usr/bin/nova-api
nova      3555  1181  0 06:31 ?        00:00:00 /usr/bin/python /usr/bin/nova-api
root     11803  4686  0 07:48 pts/0    00:00:00 grep --color=auto 1181

[root@icehouse1 ~(keystone_admin)]# ip netns
qdhcp-8b22b262-c9c1-4138-8092-0581195f0889
qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b

[root@icehouse1 ~(keystone_admin)]# ip netns exec qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b iptables -S -t nat | grep 169.254

-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697

[root@icehouse1 ~(keystone_admin)]# ip netns exec qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b netstat -antp

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 0.0.0.0:9697            0.0.0.0:*               LISTEN      3821/python        

[root@icehouse1 ~(keystone_admin)]# ps -ef| grep 3821
root      3821     1  0 06:31 ?        00:00:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/ecf9ee4e-b92c-4a5b-a884-d753a184764b.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=ecf9ee4e-b92c-4a5b-a884-d753a184764b --state_path=/var/lib/neutron --metadata_port=9697 --verbose --log-file=neutron-ns-metadata-proxy-ecf9ee4e-b92c-4a5b-a884-d753a184764b.log --log-dir=/var/log/neutron
root     11908  4686  0 07:50 pts/0    00:00:00 grep --color=auto 3821
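
With the PREROUTING redirect and the neutron-ns-metadata-proxy in place, any guest on the tenant network should be able to reach the metadata service (a quick check from inside an instance; instance-id is one of the standard metadata keys):

$ curl http://169.254.169.254/latest/meta-data/instance-id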



***********************************************
Status of nova && neutron services after install
***********************************************

[root@icehouse1 ~(keystone_admin)]# neutron agent-list
+--------------------------------------+--------------------+-----------------------+-------+----------------+
| id                                   | agent_type         | host                  | alive | admin_state_up |
+--------------------------------------+--------------------+-----------------------+-------+----------------+
| 43fa28fb-46fa-4030-9f25-5da92847754f | Open vSwitch agent | icehouse2.localdomain | :-)   | True           |
| 471ab637-49eb-424b-b63e-3d03539150ac | Open vSwitch agent | icehouse1.localdomain | :-)   | True           |
| 495056c8-bb69-4bb4-b954-2398f49dd57a | Metadata agent     | icehouse1.localdomain | :-)   | True           |
| 76eb528d-2673-4ac2-936f-70157d46c566 | L3 agent           | icehouse1.localdomain | :-)   | True           |
| 8f1b4d6b-81df-4903-8a35-df9250143a8b | DHCP agent         | icehouse1.localdomain | :-)   | True           |
+--------------------------------------+--------------------+-----------------------+-------+----------------+

[root@icehouse1 ~(keystone_admin)]# nova-manage service list
Binary           Host                                 Zone             Status     State Updated_At
nova-consoleauth icehouse1.localdomain                internal         enabled    :-)   2014-06-14 17:44:56
nova-scheduler   icehouse1.localdomain                internal         enabled    :-)   2014-06-14 17:44:56
nova-conductor   icehouse1.localdomain                internal         enabled    :-)   2014-06-14 17:44:47
nova-cert        icehouse1.localdomain                internal         enabled    :-)   2014-06-14 17:44:46
nova-compute     icehouse2.localdomain                nova             enabled    :-)   2014-06-14 17:44:47

**********************************
Analysing the type of the created networks :-
**********************************

 [root@icehouse1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+----------+-----------------------------------------------------+
| id                                   | name     | subnets                                             |
+--------------------------------------+----------+-----------------------------------------------------+
| 50c6a3e8-9f12-4c21-8c9e-e87c41a712e2 | private  | 89b8f109-c849-4d04-8448-796eb780506e 40.0.0.0/24    |
| a4b6ba82-4df5-4b90-b5b4-dbd4e549894b | demo_net | 9f22f2cc-9d79-4f78-9ca2-0b5a330a097f 10.0.0.0/24    |
| 49be3ea7-e4b2-461b-8bba-80123c851ff0 | public   | 31b8c99c-87b6-480c-a8a8-bdc0ae6ffa1e 192.168.1.0/24 |
| 4291d703-2dea-45fa-9bc0-f09c71ec549e | private1 | a46d978b-f64e-4bf9-ae39-7e97d85e65f0 60.0.0.0/24    |
+--------------------------------------+----------+-----------------------------------------------------+

[root@icehouse1 ~(keystone_admin)]# neutron net-show 50c6a3e8-9f12-4c21-8c9e-e87c41a712e2
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | 50c6a3e8-9f12-4c21-8c9e-e87c41a712e2 |
| name                      | private                              |
| provider:network_type     | gre                                  |
| provider:physical_network |                                      |
| provider:segmentation_id  | 2                                    |
| router:external           | False                                |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   | 89b8f109-c849-4d04-8448-796eb780506e |
| tenant_id                 | 182ff3c10ff2483584f5b091eebb2885     |
+---------------------------+--------------------------------------+

[root@icehouse1 ~(keystone_admin)]# neutron net-show a4b6ba82-4df5-4b90-b5b4-dbd4e549894b
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | a4b6ba82-4df5-4b90-b5b4-dbd4e549894b |
| name                      | demo_net                             |
| provider:network_type     | gre                                  |
| provider:physical_network |                                      |
| provider:segmentation_id  | 3                                    |
| router:external           | False                                |
| shared                    | False                                |
| status                    | ACTIVE                               |
| subnets                   | 9f22f2cc-9d79-4f78-9ca2-0b5a330a097f |
| tenant_id                 | eaab1d7aa9c54560ac63a76268c34bf6     |
+---------------------------+--------------------------------------+


******************************************************
Routing tables on Controller && Compute Nodes
******************************************************

[root@icehouse1 ~(keystone_admin)]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 br-ex
169.254.0.0     0.0.0.0         255.255.0.0     U     1003   0        0 p37p1
169.254.0.0     0.0.0.0         255.255.0.0     U     1004   0        0 p4p1
169.254.0.0     0.0.0.0         255.255.0.0     U     1018   0        0 br-ex
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 p4p1
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 br-ex

[root@icehouse1 ~(keystone_admin)]# ssh 192.168.1.137
Last login: Thu Oct  2 16:10:58 2014
[root@icehouse2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 p37p1
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 p37p1
169.254.0.0     0.0.0.0         255.255.0.0     U     1003   0        0 p4p1
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 p4p1
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 p37p1
192.168.122.0   0.0.0.0         255.255.255.0   U     0      0        0 virbr0


****************************************
Neutron database status after install
****************************************


[root@icehouse1 ~(keystone_admin)]# mysql -u root -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 1588
Server version: 5.5.36-MariaDB-wsrep MariaDB Server, wsrep_25.9.r3961

Copyright (c) 2000, 2014, Oracle, Monty Program Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases ;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| cinder             |
| glance             |
| keystone           |
| mysql              |
| neutron            |
| nova               |
| performance_schema |
| test               |
+--------------------+
9 rows in set (0.00 sec)

MariaDB [(none)]> use neutron ;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [neutron]> show tables ;
+------------------------------+
| Tables_in_neutron            |
+------------------------------+
| agents                       |
| alembic_version              |
| allowedaddresspairs          |
| arista_provisioned_nets      |
| arista_provisioned_tenants   |
| arista_provisioned_vms       |
| cisco_ml2_credentials        |
| cisco_ml2_nexusport_bindings |
| consistencyhashes            |
| dnsnameservers               |
| externalnetworks             |
| extradhcpopts                |
| floatingips                  |
| ipallocationpools            |
| ipallocations                |
| ipavailabilityranges         |
| ml2_brocadenetworks          |
| ml2_brocadeports             |
| ml2_flat_allocations         |
| ml2_gre_allocations          |
| ml2_gre_endpoints            |
| ml2_network_segments         |
| ml2_port_bindings            |
| ml2_vlan_allocations         |
| ml2_vxlan_allocations        |
| ml2_vxlan_endpoints          |
| networkdhcpagentbindings     |
| networks                     |
| ports                        |
| quotas                       |
| routerl3agentbindings        |
| routerroutes                 |
| routers                      |
| securitygroupportbindings    |
| securitygrouprules           |
| securitygroups               |
| servicedefinitions           |
| servicetypes                 |
| subnetroutes                 |
| subnets                      |
+------------------------------+
40 rows in set (0.00 sec)


*******************************************************************************
The system is completely functional; however, packstack picked up several
undesired GRE endpoints that show up in `ovs-vsctl show` reports.
Remove the unneeded endpoints by deleting one record from the
ml2_gre_endpoints table in the neutron database :-
*******************************************************************************


MariaDB [neutron]> select * from ml2_gre_endpoints ;
+---------------+
| ip_address    |
+---------------+
| 192.168.1.137 |
| 192.168.0.127 |
| 192.168.0.137 |
+---------------+
3 rows in set (0.00 sec)

MariaDB [neutron]> delete from ml2_gre_endpoints where ip_address='192.168.1.137' ;
Query OK, 1 row affected (0.01 sec)

MariaDB [neutron]> select * from ml2_gre_endpoints ;
+---------------+
| ip_address    |
+---------------+
| 192.168.0.127 |
| 192.168.0.137 |
+---------------+
2 rows in set (0.00 sec)

MariaDB [neutron]> quit
 

Restart the neutron-openvswitch-agent service on both nodes :-

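On Fedora 20 the agent runs under systemd, so on each node this is:

# systemctl restart neutron-openvswitch-agent.service
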

 [root@icehouse1 neutron(keystone_admin)]# ls -l
total 72
-rw-r--r--. 1 root root      193 Sep 30 17:08 api-paste.ini
-rw-r-----. 1 root neutron  3901 Sep 30 19:19 dhcp_agent.ini
-rw-r--r--. 1 root root       86 Sep 30 19:20 dnsmasq.conf
-rw-r-----. 1 root neutron   208 Sep 30 17:08 fwaas_driver.ini
-rw-r-----. 1 root neutron  3431 Sep 30 17:08 l3_agent.ini
-rw-r-----. 1 root neutron  1400 Aug  8 02:56 lbaas_agent.ini
-rw-r-----. 1 root neutron  1863 Sep 30 17:08 metadata_agent.ini
lrwxrwxrwx. 1 root root       37 Sep 30 18:41 ml2_conf.ini -> /etc/neutron/plugins/ml2/ml2_conf.ini
-rw-r-----. 1 root neutron 19187 Sep 30 17:08 neutron.conf
lrwxrwxrwx. 1 root root       55 Sep 30 18:40 plugin.ini -> /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
-rw-r--r--. 1 root root      211 Sep 30 17:30 plugin.out
drwxr-xr-x. 4 root root     4096 Sep 30 17:08 plugins
-rw-r-----. 1 root neutron  6148 Aug  8 02:56 policy.json
-rw-r--r--. 1 root root       79 Aug 11 15:27 release
-rw-r--r--. 1 root root     1216 Aug  8 02:56 rootwrap.conf

[root@icehouse1 neutron(keystone_admin)]# cat ml2_conf.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[ovs]
local_ip = 192.168.0.127
[agent]
tunnel_types = gre
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf

[root@icehouse1 neutron(keystone_admin)]# cat plugin.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[ovs]
local_ip = 192.168.0.127
[agent]
tunnel_types = gre
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf




On Controller:-

 [root@icehouse1 ~(keystone_admin)]# ovs-vsctl show
50a2dcb7-9502-4c08-b175-563eec368db9
    Bridge br-int
        Port "qr-19f312c1-cb"
            tag: 1
            Interface "qr-19f312c1-cb"
                type: internal
        Port br-int
            Interface br-int
                type: internal
        Port "tap707ec6ff-71"
            tag: 1
            Interface "tap707ec6ff-71"
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
    Bridge br-tun
        Port "gre-c0a80189"
            Interface "gre-c0a80189"
                type: gre
                options: {in_key=flow, local_ip="192.168.0.127", out_key=flow, remote_ip="192.168.0.137"}
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-908c1363-66"
            Interface "qg-908c1363-66"
                type: internal
        Port "p37p1"
            Interface "p37p1"
    ovs_version: "2.1.2"

On Compute:-

[root@icehouse1 ~(keystone_admin)]# ssh 192.168.1.137
Last login: Sat Jun 14 12:47:57 2014
[root@icehouse2 ~]# ovs-vsctl show
bd17e782-fc1b-4c75-8a9a-0bd11ca90dbc
    Bridge br-int
        Port "qvo1e52ffe0-c9"
            tag: 1
            Interface "qvo1e52ffe0-c9"
        Port "qvo897b91ae-71"
            tag: 1
            Interface "qvo897b91ae-71"
        Port "qvo67962cf3-c8"
            tag: 1
            Interface "qvo67962cf3-c8"
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qvo7e0bdbb7-4e"
            tag: 1
            Interface "qvo7e0bdbb7-4e"
    Bridge br-tun
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port br-tun
            Interface br-tun
                type: internal
        Port "gre-c0a8017f"
            Interface "gre-c0a8017f"
                type: gre
                options: {in_key=flow, local_ip="192.168.0.137", out_key=flow, remote_ip="192.168.0.127"}
    ovs_version: "2.1.2"


 [root@icehouse1 ~(keystone_admin)]# ovs-ofctl show br-tun && ovs-ofctl dump-flows br-tun

OFPT_FEATURES_REPLY (xid=0x2): dpid:00001ecc77fbb64c
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: OUTPUT SET_VLAN_VID SET_VLAN_PCP STRIP_VLAN SET_DL_SRC SET_DL_DST SET_NW_SRC SET_NW_DST SET_NW_TOS SET_TP_SRC SET_TP_DST ENQUEUE
 1(patch-int): addr:0a:f9:4e:af:fe:c6
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 2(gre-c0a80089): addr:32:c5:59:d7:4c:8b
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 LOCAL(br-tun): addr:1e:cc:77:fb:b6:4c
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=15350.220s, table=0, n_packets=0, n_bytes=0, idle_age=15350, priority=0 actions=drop
 cookie=0x0, duration=15350.290s, table=0, n_packets=712066, n_bytes=983698886, idle_age=62, priority=1,in_port=1 actions=resubmit(,1)
 cookie=0x0, duration=13862.653s, table=0, n_packets=428887, n_bytes=34296128, idle_age=63, priority=1,in_port=2 actions=resubmit(,2)
 cookie=0x0, duration=15350.131s, table=1, n_packets=712019, n_bytes=983695552, idle_age=62, priority=1,dl_dst=00:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,20)
 cookie=0x0, duration=15350.025s, table=1, n_packets=47, n_bytes=3334, idle_age=9071, priority=1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,21)
 cookie=0x0, duration=15349.909s, table=2, n_packets=0, n_bytes=0, idle_age=15349, priority=0 actions=drop
 cookie=0x0, duration=13583.119s, table=2, n_packets=360519, n_bytes=28901782, idle_age=9071, priority=1,tun_id=0x4 actions=mod_vlan_vid:3,resubmit(,10)
 cookie=0x0, duration=15346.715s, table=2, n_packets=68542, n_bytes=5413601, idle_age=63, priority=1,tun_id=0x3 actions=mod_vlan_vid:1,resubmit(,10)
 cookie=0x0, duration=15345.408s, table=2, n_packets=0, n_bytes=0, idle_age=15345, priority=1,tun_id=0x2 actions=mod_vlan_vid:2,resubmit(,10)
 cookie=0x0, duration=15349.797s, table=3, n_packets=0, n_bytes=0, idle_age=15349, priority=0 actions=drop
 cookie=0x0, duration=15349.663s, table=10, n_packets=429061, n_bytes=34315383, idle_age=63, priority=1 actions=learn(table=20,hard_timeout=300,priority=1,NXM_OF_VLAN_TCI[0..11],NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],load:0->NXM_OF_VLAN_TCI[],load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],output:NXM_OF_IN_PORT[]),output:1
 cookie=0x0, duration=15349.575s, table=20, n_packets=2, n_bytes=204, idle_age=752, priority=0 actions=resubmit(,21)
 cookie=0x0, duration=752.794s, table=20, n_packets=25787, n_bytes=34340181, hard_timeout=300, idle_age=62, hard_age=62, priority=1,vlan_tci=0x0001/0x0fff,dl_dst=fa:16:3e:00:5e:64 actions=load:0->NXM_OF_VLAN_TCI[],load:0x3->NXM_NX_TUN_ID[],output:2
 cookie=0x0, duration=15349.503s, table=21, n_packets=28, n_bytes=2084, idle_age=13454, priority=0 actions=drop
 cookie=0x0, duration=13583.174s, table=21, n_packets=10, n_bytes=656, idle_age=9071, dl_vlan=3 actions=strip_vlan,set_tunnel:0x4,output:2
 cookie=0x0, duration=15345.489s, table=21, n_packets=3, n_bytes=210, idle_age=15337, hard_age=13862, dl_vlan=2 actions=strip_vlan,set_tunnel:0x2,output:2
 cookie=0x0, duration=15346.806s, table=21, n_packets=7, n_bytes=498, idle_age=752, hard_age=13862, dl_vlan=1 actions=strip_vlan,set_tunnel:0x3,output:2


Sample configuration files below :-

[root@icehouse1 neutron(keystone_admin)]# cat neutron.conf
[DEFAULT]
verbose = True
debug = False
use_syslog = False
log_dir =/var/log/neutron
bind_host = 0.0.0.0
bind_port = 9696
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
base_mac = fa:16:3e:00:00:00
mac_generation_retries = 16
dhcp_lease_duration = 86400
allow_bulk = True
allow_pagination = False
allow_sorting = False
allow_overlapping_ips = True
rpc_backend = neutron.openstack.common.rpc.impl_kombu
control_exchange = neutron
rabbit_host = 192.168.0.127
rabbit_password = guest
rabbit_port = 5672
rabbit_hosts = 192.168.1.127:5672
rabbit_userid = guest
rabbit_virtual_host = /
rabbit_ha_queues = False
agent_down_time = 75
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
dhcp_agents_per_network = 1
api_workers = 0
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://192.168.1.127:8774/v2
nova_region_name =RegionOne
nova_admin_username =nova
nova_admin_tenant_id =f4e7985ae16d4fac9166b41c394614af
nova_admin_password =aaf8cf4c60224150
nova_admin_auth_url =http://192.168.1.127:35357/v2.0
send_events_interval = 2
[quotas]
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = 30
[keystone_authtoken]
auth_host = 192.168.1.127
auth_port = 35357
auth_protocol = http
admin_tenant_name = services
admin_user = neutron
admin_password = 5f11f559abc94440
auth_uri=http://192.168.1.127:5000/
[database]
connection = mysql://neutron:0302dcfeb69e439f@192.168.1.127/neutron
max_retries = 10
retry_interval = 10
idle_timeout = 3600
[service_providers]
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default

[root@icehouse1 neutron(keystone_admin)]# cat plugin.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[ovs]
local_ip = 192.168.0.127
[agent]
tunnel_types = gre
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf

Friday, August 29, 2014

Setup QCOW2 standard CentOS 7 cloud image to work with 2 VLANs on IceHouse ML2&OVS&GRE System

Notice that the same schema works for any F20 or Ubuntu QCOW2 cloud image via a qemu-nbd mount, increasing the number of NIC interface files to 2, 3, ...
The approach suggested down here is universal: any cinder volume built up on the
updated glance image (2-NIC ready) will be 2-NIC ready as well.

*********************************************
Update qcow2 image for 2 NIC interfaces
*********************************************
[root@icehouse1 Downloads]# modprobe nbd max_part=63
[root@icehouse1 Downloads]# qemu-nbd -c /dev/nbd0 CentOS-7-x86_64-GenericCloud-20140826_02.qcow2
[root@icehouse1 Downloads]# mount /dev/nbd0p1 /mnt/image
[root@icehouse1 Downloads]# chroot /mnt/image
[root@icehouse1 /]# cd /etc/sysconfig/network-*
[root@icehouse1 network-scripts]# ls
ifcfg-eth0   ifdown-ipv6    ifdown-Team      ifup-eth    ifup-post      ifup-tunnel
ifcfg-lo     ifdown-isdn    ifdown-TeamPort  ifup-ippp   ifup-ppp       ifup-wireless
ifdown       ifdown-post    ifdown-tunnel    ifup-ipv6   ifup-routes    init.ipv6-global
ifdown-bnep  ifdown-ppp     ifup             ifup-isdn   ifup-sit       network-functions
ifdown-eth   ifdown-routes  ifup-aliases     ifup-plip   ifup-Team      network-functions-ipv6
ifdown-ippp  ifdown-sit     ifup-bnep        ifup-plusb  ifup-TeamPort
[root@icehouse1 network-scripts]# cp ifcfg-eth0 ifcfg-eth1
[root@icehouse1 network-scripts]# vi ifcfg-eth1
[root@icehouse1 network-scripts]# cat ifcfg-eth1
DEVICE="eth1"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
USERCTL="yes"
PEERDNS="yes"
IPV6INIT="no"
PERSISTENT_DHCLIENT="1"
[root@icehouse1 network-scripts]# exit
exit
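
Before uploading the updated image, unmount it and disconnect the nbd device (a cleanup step omitted from the session above):

[root@icehouse1 Downloads]# umount /mnt/image
[root@icehouse1 Downloads]# qemu-nbd -d /dev/nbd0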
****************************
Libguestfs  tools
****************************

sudo yum install libguestfs-tools      # Fedora/RHEL/CentOS
sudo apt-get install libguestfs-tools  # Debian/Ubuntu


[boris@icehouse1 Downloads]$  guestfish --rw -a trusty-server-cloudimg-amd64-disk1.img

Welcome to guestfish, the guest filesystem shell for
editing virtual machine filesystems and disk images.

Type: 'help' for help on commands
      'man' to read the manual
      'quit' to quit the shell

> run
> list-filesystems
/dev/sda1: ext4
> mount /dev/sda1 /
> ls /etc/network/interfaces.d
eth0.cfg
> cp  /etc/network/interfaces.d/eth0.cfg /etc/network/interfaces.d/eth1.cfg
> edit /etc/network/interfaces.d/eth1.cfg
> ls  /etc/network/interfaces.d/
eth0.cfg
eth1.cfg
> cat /etc/network/interfaces.d/eth1.cfg
# The primary network interface
auto eth1
iface eth1 inet dhcp

> cat /etc/network/interfaces.d/eth0.cfg
# The primary network interface
auto eth0
iface eth0 inet dhcp


[boris@icehouse1 Downloads]$  guestfish --rw -a  Fedora-x86_64-20-20140407-sda.qcow2

Welcome to guestfish, the guest filesystem shell for
editing virtual machine filesystems and disk images.

Type: 'help' for help on commands
      'man' to read the manual
      'quit' to quit the shell

> run
 100% ⟦▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒⟧ --:--
> list-filesystems
/dev/sda1: ext4
> mount /dev/sda1 /
> cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth1
> edit  /etc/sysconfig/network-scripts/ifcfg-eth1
> cat  /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE="eth1"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"

> cat  /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE="eth0"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"

> exit


*********************************************************
Upload the image to glance and launch a VM from this image
*********************************************************
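
The screenshots originally shown here covered the upload and launch; a minimal CLI sketch of the same steps (the image name, flavor, and net-id placeholders are assumptions):

[root@icehouse1 ~(keystone_admin)]# glance image-create --name CentOS7TwoNIC \
   --disk-format qcow2 --container-format bare --is-public True \
   --file CentOS-7-x86_64-GenericCloud-20140826_02.qcow2
[root@icehouse1 ~(keystone_admin)]# nova boot --flavor m1.small --image CentOS7TwoNIC \
   --nic net-id=<demonet-id> --nic net-id=<private-id> centos07twonic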

[root@icehouse1 Downloads]# ssh centos@192.168.1.204
The authenticity of host '192.168.1.204 (192.168.1.204)' can't be established.
ECDSA key fingerprint is 46:54:d3:46:e3:d1:e0:a8:57:af:a8:22:f6:3a:ed:ea.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.1.204' (ECDSA) to the list of known hosts.
centos@192.168.1.204's password:
Last login: Sat Aug 30 06:00:10 2014
[centos@centos07twonic ~]$ sudo su
[root@centos07twonic centos]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1454
        inet 10.0.0.19  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::f816:3eff:fe9a:59f8  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:9a:59:f8  txqueuelen 1000  (Ethernet)
        RX packets 255  bytes 32133 (31.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 314  bytes 33467 (32.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1454
        inet 40.0.0.67  netmask 255.255.255.0  broadcast 40.0.0.255
        inet6 fe80::f816:3eff:fe6c:3c8d  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:6c:3c:8d  txqueuelen 1000  (Ethernet)
        RX packets 27  bytes 2762 (2.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 31  bytes 4869 (4.7 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 12  bytes 976 (976.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 976 (976.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


[root@icehouse1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+---------+-----------------------------------------------------+
| id                                   | name    | subnets                                             |
+--------------------------------------+---------+-----------------------------------------------------+
| 8b22b262-c9c1-4138-8092-0581195f0889 | private | 06530f5a-31af-4a14-a40f-808ee2e9e3ad 40.0.0.0/24    |
| 140d25a4-0d98-4424-a35a-2a985b2f0a17 | demonet | f2e318f8-05c6-4dda-8e8e-07f7a8f2c91a 10.0.0.0/24    |
| 295a5bba-c219-407f-830d-911cd2214349 | public  | c8421c61-7d85-4cf8-a5c8-03c05982bff9 192.168.1.0/24 |
+--------------------------------------+---------+-----------------------------------------------------+
 

[root@icehouse1 ~(keystone_admin)]# ip netns
qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b
qrouter-4135e351-9ae4-4e89-9b23-7b131b2c4e6c
qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17
qdhcp-8b22b262-c9c1-4138-8092-0581195f0889
 

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889 ssh centos@40.0.0.67
 

The authenticity of host '40.0.0.67 (40.0.0.67)' can't be established.
ECDSA key fingerprint is 46:54:d3:46:e3:d1:e0:a8:57:af:a8:22:f6:3a:ed:ea.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '40.0.0.67' (ECDSA) to the list of known hosts.
centos@40.0.0.67's password:
Last login: Sat Aug 30 15:20:36 2014 from 10.0.0.11
 

[centos@centos07twonic ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
10.0.0.19


[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh   centos@10.0.0.19
centos@10.0.0.19's password:
Last login: Sat Aug 30 15:19:04 2014 from 10.0.0.11
[centos@centos07twonic ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
10.0.0.19






Same procedure done against the Fedora 20 cloud image :-

[root@icehouse1 Downloads(keystone_admin)]# modprobe nbd max_part=63
[root@icehouse1 Downloads(keystone_admin)]# qemu-nbd -c /dev/nbd0 Fedora-x86_64-20-20140407-sda.qcow2
[root@icehouse1 Downloads(keystone_admin)]# mount /dev/nbd0p1 /mnt/image
[root@icehouse1 Downloads(keystone_admin)]# chroot /mnt/image

[root@icehouse1 /(keystone_admin)]# ls -l
total 76
lrwxrwxrwx.  1 root root     7 Apr  8 02:28 bin -> usr/bin
dr-xr-xr-x.  4 root root  4096 Apr  8 02:29 boot
drwxr-xr-x.  4 root root  4096 Apr  8 02:29 dev
drwxr-xr-x. 63 root root  4096 Apr  8 02:30 etc
drwxr-xr-x.  2 root root  4096 Aug  7  2013 home
lrwxrwxrwx.  1 root root     7 Apr  8 02:28 lib -> usr/lib
lrwxrwxrwx.  1 root root     9 Apr  8 02:28 lib64 -> usr/lib64
drwx------.  2 root root 16384 Apr  8 02:27 lost+found
drwxr-xr-x.  2 root root  4096 Aug  7  2013 media
drwxr-xr-x.  2 root root  4096 Aug  7  2013 mnt
drwxr-xr-x.  2 root root  4096 Aug  7  2013 opt
drwxrwxr-x.  2 root root  4096 Apr  8 02:27 proc
dr-xr-x---.  2 root root  4096 Apr  8 02:29 root
drwxr-xr-x.  8 root root  4096 Apr  8 02:29 run
lrwxrwxrwx.  1 root root     8 Apr  8 02:28 sbin -> usr/sbin
drwxr-xr-x.  2 root root  4096 Aug  7  2013 srv
drwxrwxr-x.  2 root root  4096 Apr  8 02:27 sys
drwxrwxrwt.  2 root root  4096 Aug 30 07:39 tmp
drwxr-xr-x. 12 root root  4096 Apr  8 02:28 usr
drwxr-xr-x. 18 root root  4096 Apr  8 02:28 var

[root@icehouse1 /(keystone_admin)]# cd /etc/sysconfig/network-*
[root@icehouse1 network-scripts(keystone_admin)]# ls
ifcfg-eth0   ifdown-ipv6    ifdown-tunnel  ifup-ipv6   ifup-ppp          network-functions
ifcfg-lo     ifdown-isdn    ifup           ifup-ipx    ifup-routes       network-functions-ipv6
ifdown       ifdown-post    ifup-aliases   ifup-isdn   ifup-sit
ifdown-bnep  ifdown-ppp     ifup-bnep      ifup-plip   ifup-tunnel
ifdown-eth   ifdown-routes  ifup-eth       ifup-plusb  ifup-wireless
ifdown-ippp  ifdown-sit     ifup-ippp      ifup-post   init.ipv6-global
[root@icehouse1 network-scripts(keystone_admin)]# cp ifcfg-eth0 ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth0
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# exit

[root@icehouse1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+---------+-----------------------------------------------------+
| id                                   | name    | subnets                                             |
+--------------------------------------+---------+-----------------------------------------------------+
| 8b22b262-c9c1-4138-8092-0581195f0889 | private | 06530f5a-31af-4a14-a40f-808ee2e9e3ad 40.0.0.0/24    |
| 140d25a4-0d98-4424-a35a-2a985b2f0a17 | demonet | f2e318f8-05c6-4dda-8e8e-07f7a8f2c91a 10.0.0.0/24    |
| 295a5bba-c219-407f-830d-911cd2214349 | public  | c8421c61-7d85-4cf8-a5c8-03c05982bff9 192.168.1.0/24 |
+--------------------------------------+---------+-----------------------------------------------------+

[root@icehouse1 ~(keystone_admin)]# ip netns
qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b
qrouter-4135e351-9ae4-4e89-9b23-7b131b2c4e6c
qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17
qdhcp-8b22b262-c9c1-4138-8092-0581195f0889

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889 ssh -i oskey45.pem fedora@40.0.0.71
Last login: Sat Aug 30 12:34:35 2014 from 40.0.0.11
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.71
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/public-ipv4
192.168.1.205
[fedora@vf20twonicrxc ~]$ exit
logout
Connection to 40.0.0.71 closed.

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh -i oskey45.pem fedora@10.0.0.23
Last login: Sat Aug 30 12:35:11 2014 from 40.0.0.11

[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.71
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/public-ipv4
192.168.1.205
[fedora@vf20twonicrxc ~]$ exit
logout
Connection to 10.0.0.23 closed.

*************************************
Ubuntu 14.04 guest snapshots
*************************************

[root@icehouse1 Downloads]# ssh -i oskey45.pem ubuntu@192.168.1.203
Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)

 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 14:34:00 UTC 2014

  System load:  0.01              Processes:           74
  Usage of /:   17.0% of 6.86GB   Users logged in:     1
  Memory usage: 4%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


  Last login: Sat Aug 30 14:34:00 2014

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh  -i oskey45.pem ubuntu@10.0.0.24
Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)


 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 15:24:45 UTC 2014

  System load:  0.21              Processes:           76
  Usage of /:   19.3% of 6.86GB   Users logged in:     0
  Memory usage: 2%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


Last login: Sat Aug 30 15:24:44 2014


ubuntu@ubuntutwonicrsq:~$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.72
logout
Connection to 10.0.0.24 closed.


[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889  ssh  -i oskey45.pem ubuntu@40.0.0.72

Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)
 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 16:24:45 UTC 2014

  System load:  0.0               Processes:           88
  Usage of /:   19.7% of 6.86GB   Users logged in:     1
  Memory usage: 6%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


Last login: Sat Aug 30 16:24:46 2014 from 10.0.0.11
ubuntu@ubuntutwonicrsq:~$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.72


Assigning a floating IP to an instance with two NICs :-

Only one of the two available ports allows assigning a properly working floating IP.
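
A sketch of picking the right port (the IDs are placeholders):

[root@icehouse1 ~(keystone_admin)]# neutron port-list --device-id <instance-id>
[root@icehouse1 ~(keystone_admin)]# neutron floatingip-create public
[root@icehouse1 ~(keystone_admin)]# neutron floatingip-associate <floatingip-id> <port-id>

Typically it is the port on the network carrying the instance's default route (eth0) that yields the working floating IP.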

Thursday, August 28, 2014

Setup CentOS 7 cloud instance on IceHouse Neutron ML2&OVS&GRE System

   CentOS 7.0 qcow2 image for glance is available now at
http://openstack.redhat.com/Image_resources
   Regardless of dhcp-option 26,1454 being set up in the system, the current image boots with MTU 1500. The workaround for now is to launch the instance with no ssh keypair, using the post-installation script :

#cloud-config
password: mysecret
chpasswd: { expire: False }
ssh_pwauth: True

Detecting the new default cloud user name :-

modprobe nbd max_part=63
qemu-nbd -c /dev/nbd0 image.qcow2
mkdir -p /mnt/image
mount /dev/nbd0p1 /mnt/image
cat /mnt/image/etc/cloud/cloud.cfg | tail -20
 - ssh-authkey-fingerprints
 - keys-to-console
 - phone-home
 - final-message

system_info:
  default_user:
   name: centos  <= new default name
   lock_passwd: true
   gecos: Cloud User
   groups: [wheel, adm, systemd-journal]
   sudo: ["ALL=(ALL) NOPASSWD:ALL"]
   shell: /bin/bash
   distro: rhel
   paths:
   cloud_dir: /var/lib/cloud
   templates_dir: /etc/cloud/templates
   ssh_svcname: sshd

Then log in to the VNC console with the given password and run
 
# ifconfig eth0 mtu 1454 up

Setting stays stable between reboots.
 



 Setup Gnome Desktop on VM

# yum -y groupinstall "GNOME Desktop"
$ echo "exec /usr/bin/gnome-session" >> ~/.xinitrc
# ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target

 

Thursday, August 14, 2014

Setup Gluster 3.5.2 on Two Node Controller&Compute Neutron ML2&&VXLAN&&OVS CentOS 7 Cluster

    This post is an update of the previous one -  RDO Setup Two Real Node (Controller+Compute) IceHouse Neutron ML2&OVS&VXLAN Cluster
on CentOS 7, http://bderzhavets.blogspot.com/2014/07/rdo-setup-two-real-node_29.html. It is focused on the Gluster 3.5.2 implementation, including tuning the /etc/sysconfig/iptables files on the CentOS 7 Controller and Compute Nodes.
    It covers copying the ssh key from the master node to the compute node, step-by-step verification of gluster volume replica 2 functionality, and switching the RDO IceHouse cinder services to work with the created gluster volume for storing instances' bootable cinder volumes, for a performance improvement. Of course, creating gluster bricks under "/" is not recommended; there should be a separate "xfs" mount point for the gluster bricks on each node, as sketched below.
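
A minimal brick-preparation sketch, assuming a spare partition /dev/sdc1 on each node (the device name is hypothetical; /GLSD/Volumes matches the brick path used below):

# mkfs.xfs -i size=512 /dev/sdc1
# mkdir -p /GLSD/Volumes
# echo '/dev/sdc1 /GLSD/Volumes xfs defaults 0 0' >> /etc/fstab
# mount -a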


- Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin and VXLAN tunneling )
- Compute node: Nova (nova-compute), Neutron (openvswitch-agent)

icehouse1.localdomain   -  Controller (192.168.1.127)
icehouse2.localdomain   -  Compute   (192.168.1.137)

Download from http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.2/EPEL.repo/epel-7/SRPMS/
glusterfs-3.5.2-1.el7.src.rpm

$ rpm -iv glusterfs-3.5.2-1.el7.src.rpm

$ sudo yum install bison flex gcc automake libtool ncurses-devel readline-devel libxml2-devel openssl-devel libaio-devel lvm2-devel glib2-devel libattr-devel libibverbs-devel librdmacm-devel fuse-devel

$ rpmbuild -bb glusterfs.spec
. . . . . . . . . . . . . . . . . . . . . . .

Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/noarch/glusterfs-resource-agents-3.5.2-1.el7.centos.noarch.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-regression-tests-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-debuginfo-3.5.2-1.el7.centos.x86_64.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.Sigc7l
+ umask 022
+ cd /home/boris/rpmbuild/BUILD
+ cd glusterfs-3.5.2
+ rm -rf /home/boris/rpmbuild/BUILDROOT/glusterfs-3.5.2-1.el7.centos.x86_64
+ exit 0

[boris@icehouse1 x86_64]$ cat install
sudo yum install glusterfs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm

$ sudo service glusterd start

1. First step is tuning /etc/sysconfig/iptables for IPv4 iptables firewall (service firewalld should be disabled) :-

Update /etc/sysconfig/iptables on both nodes:-

-A INPUT -p tcp -m multiport --dport 24007:24047 -j ACCEPT
-A INPUT -p tcp --dport 111 -j ACCEPT
-A INPUT -p udp --dport 111 -j ACCEPT
-A INPUT -p tcp -m multiport --dport 38465:38485 -j ACCEPT

Comment out the lines below, ignoring the file's note that manual customization is not recommended :-

# -A FORWARD -j REJECT --reject-with icmp-host-prohibited
# -A INPUT -j REJECT --reject-with icmp-host-prohibited

 Restart the iptables service on both nodes :-
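
On CentOS 7 with the iptables-services package this is:

# systemctl restart iptables.service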

2. Second step:-


On icehouse1, run the following commands :

# ssh-keygen (Hit Enter to accept all of the defaults)
# ssh-copy-id -i ~/.ssh/id_rsa.pub  root@icehouse2

On both nodes run :-

# ./install
# service glusterd start

On icehouse1

# gluster peer probe icehouse2.localdomain

Should return "success"

[root@icehouse1 ~(keystone_admin)]# gluster peer status
Number of Peers: 1

Hostname: icehouse2.localdomain
Uuid: 3ca6490b-c44a-4601-ac13-51fec99e9caf
State: Peer in Cluster (Connected)

[root@icehouse1 ~(keystone_admin)]# gluster volume info

Volume Name: cinder-volumes09
Type: Replicate
Volume ID: 83b645a0-532e-46df-93e2-ed1f95f081cd
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: icehouse1.localdomain:/GLSD/Volumes
Brick2: icehouse2.localdomain:/GLSD/Volumes
Options Reconfigured:
auth.allow: 192.168.1.*

[root@icehouse1 ~(keystone_admin)]# gluster volume status
Status of volume: cinder-volumes09
Gluster process                                 Port    Online  Pid
------------------------------------------------------------------------------
Brick icehouse1.localdomain:/GLSD/Volumes       49152   Y       5453
Brick icehouse2.localdomain:/GLSD/Volumes       49152   Y       3009
NFS Server on localhost                         2049    Y       5458
Self-heal Daemon on localhost                   N/A     Y       5462
NFS Server on icehouse2.localdomain             2049    Y       3965
Self-heal Daemon on icehouse2.localdomain       N/A     Y       3964

Task Status of Volume cinder-volumes09
------------------------------------------------------------------------------
There are no active volume tasks


[root@icehouse1 ~(keystone_admin)]# ssh 192.168.1.137
Last login: Thu Aug 14 17:53:41 2014
[root@icehouse2 ~]# gluster peer status
Number of Peers: 1

Hostname: 192.168.1.127
Uuid: 051e7528-8c2b-46e1-abb6-6d84b2f2e45b
State: Peer in Cluster (Connected)


*************************************************************************
On Controller (192.168.1.127) and on Compute (192.168.1.137)
*************************************************************************

Verify ports availability:-

[root@icehouse1 ~(keystone_admin)]# netstat -lntp | grep gluster
tcp        0      0 0.0.0.0:49152           0.0.0.0:*               LISTEN      5453/glusterfsd
tcp        0      0 0.0.0.0:2049            0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38465           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38466           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38468           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38469           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      2667/glusterd
tcp        0      0 0.0.0.0:978             0.0.0.0:*               LISTEN      5458/glusterfs

************************************
Switching Cinder to Gluster volume
************************************

# gluster volume create cinder-volumes09  replica 2 icehouse1.localdomain:/GLSD/Volumes   icehouse2.localdomain:/GLSD/Volumes  force

# gluster volume start cinder-volumes09

# gluster volume set cinder-volumes09  auth.allow 192.168.1.*


# openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver

# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_shares_config /etc/cinder/shares.conf

# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_mount_point_base /var/lib/cinder/volumes

# vi /etc/cinder/shares.conf
    192.168.1.127:/cinder-volumes09

:wq

Make sure all thin LVM-backed cinder volumes have been deleted (check via `cinder list`); if not, delete them all.
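
A hypothetical cleanup loop (review the `cinder list` output first; detach any in-use volumes before deleting):

[root@icehouse1 ~(keystone_admin)]# for v in $(cinder list --all-tenants | awk '/available/ {print $2}') ; do cinder delete $v ; done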

[root@icehouse1 ~(keystone_admin)]$ for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done

 It should add a row to the `df -h` output :

[root@icehouse1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root        147G   15G  132G  10% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G   13M  3.9G   1% /dev/shm
tmpfs                            3.9G   18M  3.9G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sdb6                        477M  191M  257M  43% /boot
192.168.1.127:/cinder-volumes09  147G   18G  130G  12% /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
tmpfs                            3.9G   18M  3.9G   1% /run/netns



[root@icehouse1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root        147G   17G  131G  12% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G   19M  3.9G   1% /dev/shm
tmpfs                            3.9G   42M  3.8G   2% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sdb6                        477M  191M  257M  43% /boot
192.168.1.127:/cinder-volumes09  147G   18G  129G  13% /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
tmpfs                            3.9G   42M  3.8G   2% /run/netns

[root@icehouse1 ~(keystone_admin)]# ls -l /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
total 5739092
-rw-rw-rw-. 1 root root 5368709120 Aug 14 21:58 volume-2f20aefb-b1ab-4b3f-bb23-10a1cbe9b946
-rw-rw-rw-. 1 root root 5368709120 Aug 14 22:06 volume-d8b0d31c-6f3a-44a1-86a4-bc4575697c29

[root@icehouse1 ~(keystone_admin)]# cinder list --all-tenants
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |  Display Name | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
| 2f20aefb-b1ab-4b3f-bb23-10a1cbe9b946 | in-use | UbuntuLVG0814 |  5   |     None    |   true   | ead0fe1b-923a-4a12-978c-ad33b9ea245c |
| d8b0d31c-6f3a-44a1-86a4-bc4575697c29 | in-use |  VF20VLG0814  |  5   |     None    |   true   | 7343807e-5bd1-4c7f-8b4a-e5efb1ce8c2e |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+