Thursday, November 20, 2014

OVS Setup on Juno network node


[root@juno1 ~(keystone_admin)]# ip netns exec qrouter-1cf08ea2-959f-4206-b2f1-a9b4708399c1 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 12  bytes 1008 (1008.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 1008 (1008.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

qg-7b037650-10: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.173  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::f816:3eff:fee5:de97  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:e5:de:97  txqueuelen 0  (Ethernet)
        RX packets 45149  bytes 46211483 (44.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 21438  bytes 4059759 (3.8 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

qr-17ddee14-9f: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 50.0.0.1  netmask 255.255.255.0  broadcast 50.0.0.255
        inet6 fe80::f816:3eff:fe6f:a8e7  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:6f:a8:e7  txqueuelen 0  (Ethernet)
        RX packets 30107  bytes 4574015 (4.3 MiB)
        RX errors 0  dropped 5  overruns 0  frame 0
        TX packets 38725  bytes 44984619 (42.9 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0


[root@juno1 ~(keystone_admin)]# ip netns exec qdhcp-45577666-657d-4f75-a3ab-9bc232f15203 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 16270  bytes 781242 (762.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 16270  bytes 781242 (762.9 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

tap7a12f9b0-a4: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 50.0.0.11  netmask 255.255.255.0  broadcast 50.0.0.255
        inet6 fe80::f816:3eff:fe29:fef1  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:29:fe:f1  txqueuelen 0  (Ethernet)
        RX packets 4664  bytes 267057 (260.7 KiB)
        RX errors 0  dropped 5  overruns 0  frame 0
        TX packets 21948  bytes 1352385 (1.2 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
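
The two namespaces probed above can be enumerated first; a minimal sketch (the UUIDs are the ones from this setup):

# ip netns list
# ip netns exec qrouter-1cf08ea2-959f-4206-b2f1-a9b4708399c1 ip route
# ip netns exec qdhcp-45577666-657d-4f75-a3ab-9bc232f15203 ip addr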
 


################################
Getting ifconfig
################################

[root@juno1 ~(keystone_admin)]# ifconfig
br-ex: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.127  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::222:15ff:fe63:e4e2  prefixlen 64  scopeid 0x20<link>
        ether 00:22:15:63:e4:e2  txqueuelen 0  (Ethernet)
        RX packets 3411331  bytes 548241709 (522.8 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3171333  bytes 1172191351 (1.0 GiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

enp2s0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet6 fe80::222:15ff:fe63:e4e2  prefixlen 64  scopeid 0x20<link>
        ether 00:22:15:63:e4:e2  txqueuelen 1000  (Ethernet)
        RX packets 3448839  bytes 593192446 (565.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3192798  bytes 1176251503 (1.0 GiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
        device interrupt 17

enp5s1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.0.127  netmask 255.255.255.0  broadcast 192.168.0.255
        inet6 fe80::2e0:53ff:fe13:174c  prefixlen 64  scopeid 0x20<link>
        ether 00:e0:53:13:17:4c  txqueuelen 1000  (Ethernet)
        RX packets 22472  bytes 5191240 (4.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 59792  bytes 48604605 (46.3 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 5627133  bytes 1136824718 (1.0 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 5627133  bytes 1136824718 (1.0 GiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
        ether 52:54:00:30:a6:39  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

################################
Now verifying OVS configuration:-
################################

[root@juno1 ~(keystone_admin)]# ovs-vsctl show
f2113bd0-c4ca-4c4b-af16-928ff03e53da
    Bridge br-int
        fail_mode: secure
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
        Port "tap3f570ba8-a1"
            tag: 2
            Interface "tap3f570ba8-a1"
                type: internal
        Port "tapba3a2dd7-73"
            tag: 3
            Interface "tapba3a2dd7-73"
                type: internal
        Port "qr-00d5c709-9a"
            tag: 3
            Interface "qr-00d5c709-9a"
                type: internal
        Port "tap7a12f9b0-a4"   <=====  port of br-int ( tap-interface of qdhcp-
            tag: 1                                                                          -namespce )
            Interface "tap7a12f9b0-a4"
                type: internal
        Port "tapb593041a-c7"
            tag: 4095
            Interface "tapb593041a-c7"
                type: internal
        Port "qr-17ddee14-9f"    <====== port of br-int
            tag: 1
            Interface "qr-17ddee14-9f"
                type: internal
        Port "qr-5bbf9169-4b"
            tag: 4
            Interface "qr-5bbf9169-4b"
                type: internal
        Port br-int
            Interface br-int
                type: internal
    Bridge br-tun
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port "vxlan-c0a80089"
            Interface "vxlan-c0a80089"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="192.168.0.127", out_key=flow, remote_ip="192.168.0.137"}
        Port br-tun
            Interface br-tun
                type: internal
    Bridge br-ex
        Port "qg-d3e929c6-ba"
            Interface "qg-d3e929c6-ba"
                type: internal
        Port phy-br-ex
            Interface phy-br-ex
                type: patch
                options: {peer=int-br-ex}
        Port "qg-7b037650-10"   <====== port of br-ex
            Interface "qg-7b037650-10"
                type: internal
        Port "enp2s0" <=========== port of br-ex
            Interface "enp2s0"
        Port "qg-fd2baf63-9e"
            Interface "qg-fd2baf63-9e"
                type: internal
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-38b0f41d-21"
            Interface "qg-38b0f41d-21"
                type: internal
    ovs_version: "2.1.3"
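
The same topology can be cross-checked bridge by bridge; a short sketch using standard OVS CLI calls (names taken from the output above):

# ovs-vsctl list-ports br-int
# ovs-vsctl list-ports br-ex
# ovs-vsctl list interface vxlan-c0a80089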



Tuesday, November 11, 2014

Tuning RDO Juno CentOS 7 TwoNode Gluster 3.5.2 Cluster for Qemu integration with libgfapi to work seamlessly

This post focuses on tuning a replica 2 gluster volume when building an RDO Juno Gluster cluster on CentOS 7. The steps undertaken come from the Gluster 3.5.2 Release Notes (http://blog.nixpanic.net/2014_07_01_archive.html) and make the integration of Qemu (1.5.3) && libgfapi really work.

- Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin and GRE tunneling )
- Compute node: Nova (nova-compute), Neutron (openvswitch-agent)

juno1.localdomain   -  Controller (192.168.1.127)
juno2.localdomain   -  Compute   (192.168.1.137)

Download from http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.2/EPEL.repo/epel-7/SRPMS/
glusterfs-3.5.2-1.el7.src.rpm

$ rpm -iv glusterfs-3.5.2-1.el7.src.rpm

$ sudo yum install bison flex gcc automake libtool ncurses-devel readline-devel libxml2-devel openssl-devel libaio-devel lvm2-devel glib2-devel libattr-devel libibverbs-devel librdmacm-devel fuse-devel

$ rpmbuild -bb glusterfs.spec
. . . . . . . . . . . . . . . . . . . . . . .

Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/noarch/glusterfs-resource-agents-3.5.2-1.el7.centos.noarch.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-regression-tests-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-debuginfo-3.5.2-1.el7.centos.x86_64.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.Sigc7l
+ umask 022
+ cd /home/boris/rpmbuild/BUILD
+ cd glusterfs-3.5.2
+ rm -rf /home/boris/rpmbuild/BUILDROOT/glusterfs-3.5.2-1.el7.centos.x86_64
+ exit 0

[boris@juno1 x86_64]$ cat install
sudo yum install glusterfs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm

$ sudo service glusterd start

1. The first step is tuning /etc/sysconfig/iptables for the IPv4 iptables firewall (service firewalld should be disabled):-

Update /etc/sysconfig/iptables on both nodes:-

-A INPUT -p tcp -m multiport --dport 24007:24047 -j ACCEPT
-A INPUT -p tcp --dport 111 -j ACCEPT
-A INPUT -p udp --dport 111 -j ACCEPT
-A INPUT -p tcp -m multiport --dport 38465:38485 -j ACCEPT

Comment out the lines below, ignoring the instruction in the file:

# -A FORWARD -j REJECT --reject-with icmp-host-prohibited
# -A INPUT -j REJECT --reject-with icmp-host-prohibited

Then restart the iptables service on both nodes.
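
On CentOS 7 the classic iptables service comes from the iptables-services package; assuming that package is installed (firewalld stays disabled), a sketch of the restart and a quick rule check:

# systemctl enable iptables
# service iptables restart
# iptables -L INPUT -n | grep -E '24007|38465|111'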

2. Second step:-


On juno1, run the following commands :

# ssh-keygen (Hit Enter to accept all of the defaults)
# ssh-copy-id -i ~/.ssh/id_rsa.pub  root@juno2

On both nodes run :-

# ./install
# service glusterd start

On juno1

# gluster peer probe juno2.localdomain

Should return "success"

[root@juno1 ~(keystone_admin)]# gluster peer status
Number of Peers: 1

Hostname: juno2.localdomain
Uuid: 3ca6490b-c44a-4601-ac13-51fec99e9caf
State: Peer in Cluster (Connected)

[root@juno1 ~(keystone_admin)]# ssh 192.168.1.137
Last login: Thu Aug 14 17:53:41 2014
[root@juno2 ~]# gluster peer status
Number of Peers: 1

Hostname: 192.168.1.127
Uuid: 051e7528-8c2b-46e1-abb6-6d84b2f2e45b
State: Peer in Cluster (Connected)


*************************************************************************
On Controller (192.168.1.127) and on Compute (192.168.1.137)
*************************************************************************

Verify ports availability:-

[root@juno1 ~(keystone_admin)]# netstat -lntp | grep gluster
tcp        0      0 0.0.0.0:49152           0.0.0.0:*               LISTEN      5453/glusterfsd
tcp        0      0 0.0.0.0:2049            0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38465           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38466           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38468           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:38469           0.0.0.0:*               LISTEN      5458/glusterfs
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      2667/glusterd
tcp        0      0 0.0.0.0:978             0.0.0.0:*               LISTEN      5458/glusterfs

************************************
Switching Cinder to Gluster volume
************************************

# gluster volume create cinder-volumes57 \
replica 2 juno1.localdomain:/data5/data-volumes   juno2.localdomain:/data5/data-volumes 

# gluster volume start cinder-volumes57

# gluster volume set cinder-volumes57  auth.allow 192.168.1.*

The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

1. First step

       gluster volume set cinder-volumes57 server.allow-insecure on

2. Restarting is required
   
    gluster volume stop cinder-volumes57
    gluster volume start cinder-volumes57

3. Edit /etc/glusterfs/glusterd.vol   to have a line :
    
     option rpc-auth-allow-insecure on

4. Restart glusterd is required :

     service glusterd restart
  

nova.conf (on the Compute Node) should have the entry:-

qemu_allowed_storage_drivers = gluster
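
A minimal way to apply this entry, assuming crudini is available (it ships with openstack-utils in RDO):

# crudini --set /etc/nova/nova.conf DEFAULT qemu_allowed_storage_drivers gluster
# systemctl restart openstack-nova-compute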

[root@juno1 ~]# gluster volume info
Volume Name: cinder-volumes57
Type: Replicate
Volume ID: c1f2e1d2-0b11-426e-af3d-7af0d1d24d5e
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: juno1.localdomain:/data5/data-volumes
Brick2: juno2.localdomain:/data5/data-volumes
Options Reconfigured:
auth.allow: 192.168.1.*
server.allow-insecure: on

[root@juno1 ~]# gluster volume status
Status of volume: cinder-volumes57
Gluster process                        Port    Online    Pid
------------------------------------------------------------------------------
Brick juno1.localdomain:/data5/data-volumes        49152    Y    3346
Brick juno2.localdomain:/data5/data-volumes        49152    Y    3113
NFS Server on localhost                    2049    Y    3380
Self-heal Daemon on localhost                N/A    Y    3387
NFS Server on juno2.localdomain                2049    Y    3911
Self-heal Daemon on juno2.localdomain            N/A    Y    3916

Task Status of Volume cinder-volumes57
------------------------------------------------------------------------------
There are no active volume tasks


##############################
Create entries  in /etc/cinder/cinder.conf
############################## 

enabled_backends=gluster

[gluster]
volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver
glusterfs_shares_config = /etc/cinder/shares.conf
glusterfs_mount_point_base = /var/lib/cinder/volumes
volume_backend_name=GLUSTER


# vi /etc/cinder/shares.conf
    192.168.1.127:/cinder-volumes57
:wq


[root@juno1 ~(keystone_admin)]# cinder type-create gluster
+--------------------------------------+---------+
|                  ID                  |   Name  |
+--------------------------------------+---------+
| 29917269-d73f-4c28-b295-59bfbda5d044 | gluster |
+--------------------------------------+---------+

[root@juno1 ~(keystone_admin)]# cinder type-key gluster  set volume_backend_name=GLUSTER

The next step is restarting the cinder services:-

[root@juno1 ~(keystone_demo)]# for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done

[root@juno1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root00      147G   43G  105G  29% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G  152K  3.9G   1% /dev/shm
tmpfs                            3.9G   26M  3.8G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/loop0                       1.9G  6.0M  1.7G   1% /srv/node/swift_loopback
/dev/sda3                        477M  146M  302M  33% /boot
/dev/mapper/centos01-data5        98G   15G   83G  16% /data5
192.168.1.127:/cinder-volumes57   98G   15G   83G  16% /var/lib/cinder/volumes/8478b56ad61cf67ab9839fb0a5296965
tmpfs                            3.9G   26M  3.8G   1% /run/netns


###################################################
How to verify implementation success: boot a nova instance
(with instance-id, say, 00000049) based on a cinder volume.
###################################################

On the Compute Node, grep /var/log/libvirt/qemu/instance-00000049.log for
a "gluster" entry. You are supposed to find a string like the one below

# cd /var/log/libvirt/qemu
[root@juno2 qemu]# cat instance-00000049.log | grep gluster
LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin QEMU_AUDIO_DRV=none /usr/libexec/qemu-kvm -name instance-00000049 -S -machine pc-i440fx-rhel7.0.0,accel=kvm,usb=off -cpu Penryn,+osxsave,+xsave,+pdcm,+xtpr,+tm2,+est,+smx,+vmx,+ds_cpl,+monitor,+dtes64,+pbe,+tm,+ht,+ss,+acpi,+ds,+vme -m 2048 -realtime mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid 92151b16-c7b4-48d1-b49f-1e310e005c80 -smbios type=1,manufacturer=Fedora Project,product=OpenStack Nova,version=2014.2-2.el7.centos,serial=5dff0de4-c27d-453d-85b4-b2d9af514fcd,uuid=92151b16-c7b4-48d1-b49f-1e310e005c80 -no-user-config -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/instance-00000049.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc,driftfix=slew -no-kvm-pit-reinjection -no-hpet -no-shutdown -boot strict=on -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -drive file=gluster://192.168.1.127:24007/cinder-volumes57/volume-179b9782-d2b7-4891-ba89-5198b71c6188,if=none,id=drive-virtio-disk0,format=raw,serial=179b9782-d2b7-4891-ba89-5198b71c6188,cache=none -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -netdev tap,fd=26,id=hostnet0,vhost=on,vhostfd=27 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=fa:16:3e:8b:9f:6c,bus=pci.0,addr=0x3 -chardev file,id=charserial0,path=/var/lib/nova/instances/92151b16-c7b4-48d1-b49f-1e310e005c80/console.log -device isa-serial,chardev=charserial0,id=serial0 -chardev pty,id=charserial1 -device isa-serial,chardev=charserial1,id=serial1 -device usb-tablet,id=input0 -vnc 0.0.0.0:0 -k en-us -vga cirrus -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5


At the same time, issue the following commands on the Controller:-

[root@juno1 ~(keystone_boris)]# cinder list
+--------------------------------------+--------+-----------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |   Display Name  | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+-----------------+------+-------------+----------+--------------------------------------+
| 179b9782-d2b7-4891-ba89-5198b71c6188 | in-use | Win2012GLSVOL01 |  20  |   gluster   |   true   | 92151b16-c7b4-48d1-b49f-1e310e005c80 |
| ca0694ae-7e8d-4c84-aad8-3f178416dec6 | in-use |  VF20LVG520711  |  7   |     lvms    |   true   | 51a20959-0a0c-4ef6-81ec-2edeab6e3588 |
+--------------------------------------+--------+-----------------+------+-------------+----------+--------------------------------------+

[root@juno1 ~(keystone_boris)]# nova list
+--------------------------------------+--------------+-----------+------------+-------------+----------------------------------------+
| ID                                   | Name         | Status    | Task State | Power State | Networks                               |
+--------------------------------------+--------------+-----------+------------+-------------+----------------------------------------+
| 51a20959-0a0c-4ef6-81ec-2edeab6e3588 | VF20RX520711 | SUSPENDED | -          | Shutdown    | private_boris=50.0.0.12, 192.168.1.175 |
| 92151b16-c7b4-48d1-b49f-1e310e005c80 | Win2012SRV05 | SUSPENDED | -          | Shutdown    | private_boris=50.0.0.25, 192.168.1.179 |
+--------------------------------------+--------------+-----------+------------+-------------+----------------------------------------+

[root@juno1 ~(keystone_boris)]# nova show 92151b16-c7b4-48d1-b49f-1e310e005c80 | grep 179b9782-d2b7-4891-ba89-5198b71c6188
| os-extended-volumes:volumes_attached | [{"id": "179b9782-d2b7-4891-ba89-5198b71c6188"}]         |



##############################################
Another way to verify - run on the Compute Node:-
##############################################

[root@juno1 ~(keystone_boris)]# ssh 192.168.1.137
Last login: Tue Nov 11 17:12:04 2014 from juno1.localdomain

[root@juno2 ~]# . keystonerc_boris

[root@juno2 ~(keystone_boris)]# nova list
+--------------------------------------+----------------+-----------+------------+-------------+----------------------------------------+
| ID                                   | Name           | Status    | Task State | Power State | Networks                               |
+--------------------------------------+----------------+-----------+------------+-------------+----------------------------------------+
| 57640068-3ab7-466a-8eae-cf132359b233 | UbuntuUTRX1211 | ACTIVE    | -          | Running     | private_boris=50.0.0.26, 192.168.1.174 |
| 51a20959-0a0c-4ef6-81ec-2edeab6e3588 | VF20RX520711   | SUSPENDED | -          | Shutdown    | private_boris=50.0.0.12, 192.168.1.175 |
| 92151b16-c7b4-48d1-b49f-1e310e005c80 | Win2012SRV05   | SUSPENDED | -          | Shutdown    | private_boris=50.0.0.25, 192.168.1.179 |
+--------------------------------------+----------------+-----------+------------+-------------+----------------------------------------+

[root@juno2 ~(keystone_boris)]# virsh dumpxml 57640068-3ab7-466a-8eae-cf132359b233 | grep -E 'source (file|protocol)'

  <source protocol='gluster' name='cinder-volumes57/volume-bf448475-50c8-4491-92aa-77d36666f296'>

[root@juno2 ~(keystone_boris)]# nova show 57640068-3ab7-466a-8eae-cf132359b233 | grep bf448475-50c8-4491-92aa-77d36666f296
| os-extended-volumes:volumes_attached | [{"id": "bf448475-50c8-4491-92aa-77d36666f296"}]         |



Saturday, November 08, 2014

LVMiSCSI cinder backend for RDO Juno on CentOS 7

This post follows up http://lxer.com/module/newswire/view/207415/index.html
RDO Juno has been installed on the Controller and Compute nodes via packstack as described in the link at lxer.com. The iSCSI target (server) implementation on CentOS 7 differs significantly from CentOS 6.5 and is based on the CLI utility targetcli and the service "target".
   With Enterprise Linux 7, both Red Hat and CentOS, there is a big change in the management of iSCSI targets: the software runs as part of the standard systemd structure. Consequently, there are significant changes in the multi-backend cinder architecture of RDO Juno running on CentOS 7 or Fedora 21 utilizing LVM-based iSCSI targets.

  Create the following entries in /etc/cinder/cinder.conf on the Controller (which, in the case of a two-node cluster, works as the Storage node as well).

The first entry goes in the [DEFAULT] section

#######################
enabled_backends=lvm51,lvm52
#######################

At the bottom of file

[lvm51]
iscsi_helper=lioadm
volume_group=cinder-volumes51
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
iscsi_ip_address=192.168.1.127
volume_backend_name=LVM_iSCSI51


[lvm52]
iscsi_helper=lioadm
volume_group=cinder-volumes52
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
iscsi_ip_address=192.168.1.127
volume_backend_name=LVM_iSCSI52
 

VGs cinder-volumes52 and cinder-volumes51 are created on /dev/sda6 and /dev/sdb1 respectively:-

# pvcreate /dev/sda6
# pvcreate /dev/sdb1
# vgcreate cinder-volumes52 /dev/sda6
# vgcreate cinder-volumes51  /dev/sdb1
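
Before wiring them into cinder, the new volume groups can be sanity-checked:

# vgs cinder-volumes51 cinder-volumes52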

Then issue :-

[root@juno1 ~(keystone_admin)]# cinder type-create lvms
+--------------------------------------+------+
|                  ID                  | Name |
+--------------------------------------+------+
| 64414f3a-7770-4958-b422-8db0c3e2f433 | lvms |
+--------------------------------------+------+


[root@juno1 ~(keystone_admin)]# cinder type-create lvmz
+--------------------------------------+---------+
|                  ID                  |   Name  |
+--------------------------------------+---------+
| 29917269-d73f-4c28-b295-59bfbda5d044 |   lvmz  |
+--------------------------------------+---------+

[root@juno1 ~(keystone_admin)]# cinder type-list
+--------------------------------------+---------+
|                  ID                  |   Name  |
+--------------------------------------+---------+
| 29917269-d73f-4c28-b295-59bfbda5d044 |  lvmz   |
| 64414f3a-7770-4958-b422-8db0c3e2f433 |  lvms   |
+--------------------------------------+---------+


[root@juno1 ~(keystone_admin)]# cinder type-key lvmz set volume_backend_name=LVM_iSCSI51

[root@juno1 ~(keystone_admin)]# cinder type-key lvms set volume_backend_name=LVM_iSCSI52
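
As with the Gluster backend elsewhere in this series, the cinder services must be restarted to pick up the new backends and volume types:

# for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done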



Then enable and start the target service:-

   [root@juno1 ~(keystone_admin)]#  systemctl enable target
   [root@juno1 ~(keystone_admin)]#  systemctl start target

[root@juno1 ~(keystone_admin)]# service target status
Redirecting to /bin/systemctl status  target.service
target.service - Restore LIO kernel target configuration
   Loaded: loaded (/usr/lib/systemd/system/target.service; enabled)
   Active: active (exited) since Wed 2014-11-05 13:23:09 MSK; 44min ago
  Process: 1611 ExecStart=/usr/bin/targetctl restore (code=exited, status=0/SUCCESS)
 Main PID: 1611 (code=exited, status=0/SUCCESS)
   CGroup: /system.slice/target.service


Nov 05 13:23:07 juno1.localdomain systemd[1]: Starting Restore LIO kernel target configuration...
Nov 05 13:23:09 juno1.localdomain systemd[1]: Started Restore LIO kernel target configuration.

Now all changes done by creating cinder volumes of types lvms and lvmz (via
the dashboard's volume-create dialog with its volume-type dropdown, or via the
cinder CLI) will persist in the targetcli> ls output between reboots.
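
The restored LIO layout can be inspected at any time:

# targetcli ls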

[root@juno1 ~(keystone_boris)]# cinder list
+--------------------------------------+--------+------------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |   Display Name   | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+------------------+------+-------------+----------+--------------------------------------+
| 3a4f6878-530a-4a28-87bb-92ee256f63ea | in-use | UbuntuUTLV510851 |  5   |     lvmz    |   true   | efb1762e-6782-4895-bf2b-564f14105b5b |
| 51528876-405d-4a15-abc2-61ad72fc7d7e | in-use |   CentOS7LVG51   |  10  |     lvmz    |   true   | ba3e87fa-ee81-42fc-baed-c59ca6c8a100 |
| ca0694ae-7e8d-4c84-aad8-3f178416dec6 | in-use |  VF20LVG520711   |  7   |     lvms    |   true   | 51a20959-0a0c-4ef6-81ec-2edeab6e3588 |
| dc9e31f0-b27f-4400-a666-688365126f67 | in-use | UbuntuUTLV520711 |  7   |     lvms    |   true   | 1fe7d2c3-58ae-4ee8-8f5f-baf334195a59 |
+--------------------------------------+--------+------------------+------+-------------+----------+--------------------------------------+


Compare the volume IDs above with the targetcli> ls output.

[Snapshot: lvms and lvmz volumes attached to the corresponding nova
instances, utilizing the LVMiSCSI cinder backend.]

On the Compute Node, the iscsiadm output will look as follows:-

[root@juno2 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.1.127
192.168.1.127:3260,1 iqn.2010-10.org.openstack:volume-3a4f6878-530a-4a28-87bb-92ee256f63ea
192.168.1.127:3260,1 iqn.2010-10.org.openstack:volume-ca0694ae-7e8d-4c84-aad8-3f178416dec6
192.168.1.127:3260,1 iqn.2010-10.org.openstack:volume-dc9e31f0-b27f-4400-a666-688365126f67
192.168.1.127:3260,1 iqn.2010-10.org.openstack:volume-51528876-405d-4a15-abc2-61ad72fc7d7e
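
Nova/cinder perform the iSCSI login automatically during volume attach; a manual login against one of the targets above would look like this sketch (normally unnecessary):

# iscsiadm -m node -T iqn.2010-10.org.openstack:volume-3a4f6878-530a-4a28-87bb-92ee256f63ea -p 192.168.1.127:3260 --login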


References
1.  https://www.centos.org/forums/viewtopic.php?f=47&t=48591

  


                

   



Saturday, November 01, 2014

RDO Juno Set up Two Real Node (Controller+Compute) Gluster 3.5.2 Cluster ML2&OVS&VXLAN on CentOS 7

The post below follows up http://cloudssky.com/en/blog/RDO-OpenStack-Juno-ML2-VXLAN-2-Node-Deployment-On-CentOS-7-With-Packstack/; however, the answer file
provided here allows creating the Controller && Compute Node in a single run.
Based on the RDO Juno release as of 10/27/2014, it doesn't require creating OVS bridge br-ex and OVS port enp2s0 on the Compute Node. It also doesn't install the nova-compute service on the Controller. Gluster 3.5.2 setup is also performed in a way which differs from the similar procedure on the IceHouse && Havana RDO releases.

I also have to note that, in regard to LVMiSCSI cinder backend support on CentOS 7, the post http://theurbanpenguin.com/wp/?p=3403 is misleading. The name
of the service making changes done in targetcli persistent between reboots is "target", not "targetd".

To set up iSCSI targets (server) on CentOS 7 (i.e., activate LIO kernel support) you have to issue:-

# systemctl enable target
# systemctl start target
# systemctl status target -l
target.service - Restore LIO kernel target configuration
   Loaded: loaded (/usr/lib/systemd/system/target.service; enabled)
   Active: active (exited) since Sat 2014-11-08 14:45:06 MSK; 3h 26min ago
  Process: 1661 ExecStart=/usr/bin/targetctl restore (code=exited, status=0/SUCCESS)
 Main PID: 1661 (code=exited, status=0/SUCCESS)
   CGroup: /system.slice/target.service

Nov 01 14:45:06 juno1.localdomain systemd[1]: Started Restore LIO kernel target configuration.

 
Two boxes have been set up, each one having 2 NICs (enp2s0, enp5s1), for the
Controller && Compute Node setup. Before running
`packstack --answer-file=TwoNodeVXLAN.txt`, SELINUX was set to permissive on both nodes. Both enp5s1 interfaces were assigned IPs and set to support the VXLAN tunnel (192.168.0.127, 192.168.0.137). Services firewalld and NetworkManager are disabled; the IPv4 firewall with iptables and the network service are enabled and running. Packstack is bound to the public IP of interface enp2s0, 192.168.1.127; the Compute Node is 192.168.1.137 (view the answer file).
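
A sketch of that pre-packstack preparation, run on both nodes (assuming stock CentOS 7 with the iptables-services package):

# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
# systemctl stop firewalld NetworkManager
# systemctl disable firewalld NetworkManager
# systemctl enable network iptables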

Setup configuration

- Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin && VXLAN )
- Compute node: Nova (nova-compute), Neutron (openvswitch-agent)


juno1.localdomain   -  Controller (192.168.1.127)
juno2.localdomain   -  Compute   (192.168.1.137)

 

Answer File :-

[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.168.1.127
CONFIG_COMPUTE_HOSTS=192.168.1.137
CONFIG_NETWORK_HOSTS=192.168.1.127
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.168.1.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.168.1.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.168.1.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
CONFIG_KEYSTONE_SERVICE_NAME=keystone
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=20G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=enp5s1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=enp2s0
CONFIG_NOVA_NETWORK_PRIVIF=enp5s1
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
CONFIG_NEUTRON_OVS_TUNNEL_IF=enp5s1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789

CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.168.1.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4
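
With the answer file saved, the whole two-node deployment is driven by a single run from the Controller:

# packstack --answer-file=TwoNodeVXLAN.txt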

Updates on the Controller only:-
[root@juno1 network-scripts(keystone_admin)]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.168.1.127"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="192.168.1.255"
GATEWAY="192.168.1.1"
NM_CONTROLLED="no"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
ONBOOT="yes"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"

[root@juno1 network-scripts(keystone_admin)]# cat ifcfg-enp2s0
DEVICE="enp2s0"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no
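
After writing both files, restart the network service so br-ex comes up with the static IP (do it from the console; connectivity over enp2s0 drops briefly):

# systemctl restart network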

Setup Gluster Backend for cinder in Juno

*************************************************************************
Update /etc/cinder/cinder.conf to activate the Gluster 3.5.2 backend
*************************************************************************

Gluster 3.5.2 cluster installed per  http://bderzhavets.blogspot.com/2014/08/setup-gluster-352-on-two-node.html

Just one more note (view http://blog.nixpanic.net/2014_07_01_archive.html):

The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:
1. gluster volume set cinder-volumes57  server.allow-insecure on
2. Restarting is required
    
    gluster volume stop cinder-volumes57
    gluster volume start cinder-volumes57

3. Edit /etc/glusterfs/glusterd.vol  to have a line :
   
     option rpc-auth-allow-insecure on

4. Restart glusterd is required :

     service glusterd restart 
   

nova.conf (on the Compute Node) should have the entry:-

    qemu_allowed_storage_drivers = gluster



In the [DEFAULT] section:

enabled_backends=gluster,lvm52

[gluster]
volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver
glusterfs_shares_config = /etc/cinder/shares.conf
glusterfs_mount_point_base = /var/lib/cinder/volumes
volume_backend_name=GLUSTER

[lvm52]
iscsi_helper=lioadm
volume_group=cinder-volumes52
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
iscsi_ip_address=192.168.1.127
volume_backend_name=LVM_iSCSI52

Now follow  http://giuliofidente.com/2013/06/openstack-cinder-configure-multiple-backends.html   :-

[root@juno1 ~(keystone_admin)]# cinder type-create lvms
+--------------------------------------+------+
|                  ID                  | Name |
+--------------------------------------+------+
| 64414f3a-7770-4958-b422-8db0c3e2f433 | lvms |
+--------------------------------------+------+

[root@juno1 ~(keystone_admin)]# cinder type-create gluster
+--------------------------------------+---------+
|                  ID                  |   Name  |
+--------------------------------------+---------+
| 29917269-d73f-4c28-b295-59bfbda5d044 | gluster |
+--------------------------------------+---------+

[root@juno1 ~(keystone_admin)]# cinder type-list
+--------------------------------------+---------+
|                  ID                  |   Name  |
+--------------------------------------+---------+
| 29917269-d73f-4c28-b295-59bfbda5d044 | gluster |
| 64414f3a-7770-4958-b422-8db0c3e2f433 |   lvms  |
+--------------------------------------+---------+

[root@juno1 ~(keystone_admin)]# cinder type-key lvms set volume_backend_name=LVM_iSCSI52
 

[root@juno1 ~(keystone_admin)]# cinder type-key gluster  set volume_backend_name=GLUSTER

The next step is restarting the cinder services:-

[root@juno1 ~(keystone_demo)]# for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done



[root@juno1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root00      147G   17G  130G  12% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G   96K  3.9G   1% /dev/shm
tmpfs                            3.9G  9.1M  3.9G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/loop0                       1.9G  6.0M  1.7G   1% /srv/node/swift_loopback
/dev/sda3                        477M  146M  302M  33% /boot
/dev/mapper/centos01-data5        98G  1.4G   97G   2% /data5
192.168.1.127:/cinder-volumes57   98G  1.4G   97G   2% /var/lib/cinder/volumes/8478b56ad61cf67ab9839fb0a5296965
tmpfs                            3.9G  9.1M  3.9G   1% /run/netns


[root@juno1 ~(keystone_demo)]# gluster volume info

Volume Name: cinder-volumes57
Type: Replicate
Volume ID: c1f2e1d2-0b11-426e-af3d-7af0d1d24d5e
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: juno1.localdomain:/data5/data-volumes
Brick2: juno2.localdomain:/data5/data-volumes
Options Reconfigured:
auth.allow: 192.168.1.*

[root@juno1 ~(keystone_demo)]# gluster volume status
Status of volume: cinder-volumes57
Gluster process                        Port    Online    Pid
------------------------------------------------------------------------------
Brick juno1.localdomain:/data5/data-volumes        49152    Y    3806
Brick juno2.localdomain:/data5/data-volumes        49152    Y    3047
NFS Server on localhost                    2049    Y    4146
Self-heal Daemon on localhost                N/A    Y    4141
NFS Server on juno2.localdomain                2049    Y    3881
Self-heal Daemon on juno2.localdomain            N/A    Y    3877

Task Status of Volume cinder-volumes57
------------------------------------------------------------------------------
There are no active volume tasks


**********************************************
Creating cinder volume of gluster type:-
**********************************************


[root@juno1 ~(keystone_demo)]# cinder create --volume_type gluster --image-id d83a6fec-ce82-411c-aa11-04cbb34bf2a2 --display_name UbuntuGLS1029 5


[root@juno1 ~(keystone_demo)]# cinder list
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |  Display Name | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
| ca7ac946-3c4e-4544-ba3a-8cd085d5882b | in-use | UbuntuGLS1029 |  5   |   gluster   |   true   | cdb57658-795a-4a6e-82c9-67bf24acd498 |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+

[root@juno1 ~(keystone_demo)]# nova list
+--------------------------------------+-------------+-----------+------------+-------------+-----------------------------------+
| ID                                   | Name        | Status    | Task State | Power State | Networks                          |
+--------------------------------------+-------------+-----------+------------+-------------+-----------------------------------+
| 5c366eb9-8830-4432-b9bb-06239ae83d8a | CentOS7RS01 | SUSPENDED | -          | Shutdown    | demo_net=40.0.0.25, 192.168.1.161 |
| cdb57658-795a-4a6e-82c9-67bf24acd498 | UbuntuGLS01 | ACTIVE    | -          | Shutdown    | demo_net=40.0.0.22, 192.168.1.157 |
| 39d5312c-e661-4f9f-82ab-db528a7cdc9a | UbuntuRXS52 | ACTIVE    | -          | Running     | demo_net=40.0.0.32, 192.168.1.165 |
| 16911bfa-cf8b-44b7-b46e-8a54c9b3db69 | VF20GLR01   | ACTIVE    | -          | Running     | demo_net=40.0.0.23, 192.168.1.159 |
+--------------------------------------+-------------+-----------+------------+-------------+-----------------------------------+



 
 

Get detailed information about the server ID:-

[root@juno1 ~(keystone_demo)]# nova show 16911bfa-cf8b-44b7-b46e-8a54c9b3db69
+--------------------------------------+----------------------------------------------------------+
| Property                             | Value                                                    |
+--------------------------------------+----------------------------------------------------------+
| OS-DCF:diskConfig                    | AUTO                                                     |
| OS-EXT-AZ:availability_zone          | nova                                                     |
| OS-EXT-STS:power_state               | 1                                                        |
| OS-EXT-STS:task_state                | -                                                        |
| OS-EXT-STS:vm_state                  | active                                                   |
| OS-SRV-USG:launched_at               | 2014-11-01T22:20:12.000000                               |
| OS-SRV-USG:terminated_at             | -                                                        |
| accessIPv4                           |                                                          |
| accessIPv6                           |                                                          |
| config_drive                         |                                                          |
| created                              | 2014-11-01T22:20:04Z                                     |
| demo_net network                     | 40.0.0.23, 192.168.1.159                                 |
| flavor                               | m1.small (2)                                             |
| hostId                               | 2e37cbf1f1145a0eaad46d35cbc8f4df3b579bbaf0404855511732a9 |
| id                                   | 16911bfa-cf8b-44b7-b46e-8a54c9b3db69                     |
| image                                | Attempt to boot from volume - no image supplied          |
| key_name                             | oskey45                                                  |
| metadata                             | {}                                                       |
| name                                 | VF20GLR01                                                |
| os-extended-volumes:volumes_attached | [{"id": "6ff40c2b-c363-42da-8988-5425eca0eea3"}]         |
| progress                             | 0                                                        |
| security_groups                      | default                                                  |
| status                               | ACTIVE                                                   |
| tenant_id                            | b302ecfaf76740189fca446e2e4a9a6e                         |
| updated                              | 2014-11-03T09:29:25Z                                     |
| user_id                              | ad7db1242c7e41ee88bc813873c85da3                         |
+--------------------------------------+----------------------------------------------------------+

*******************************
Gluster cinder-volumes list :-
*******************************

[root@juno1 ~(keystone_demo)]# cinder show 6ff40c2b-c363-42da-8988-5425eca0eea3 | grep volume_type
volume_type | gluster

[root@juno1 data-volumes(keystone_demo)]# cinder list

+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |  Display Name | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
| 6ff40c2b-c363-42da-8988-5425eca0eea3 | in-use |  VF20VLG0211  |  7   |   gluster   |   true   | 16911bfa-cf8b-44b7-b46e-8a54c9b3db69 |
| 8ade9f17-163d-48ca-bea5-bc9c6ea99b17 | in-use |  UbuntuLVS52  |  5   |     lvms    |   true   | 39d5312c-e661-4f9f-82ab-db528a7cdc9a |
| ca7ac946-3c4e-4544-ba3a-8cd085d5882b | in-use | UbuntuGLS1029 |  5   |   gluster   |   true   | cdb57658-795a-4a6e-82c9-67bf24acd498 |
| d8f77604-f984-4e98-81cc-971003d3fb54 | in-use |   CentOS7VLG  |  10  |   gluster   |   true   | 5c366eb9-8830-4432-b9bb-06239ae83d8a |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+


[root@juno1 data-volumes(keystone_demo)]# ls -la
total 7219560
drwxrwxr-x.   3 root cinder        4096 Nov  3 19:29 .
drwxr-xr-x.   3 root root            25 Nov  1 19:17 ..
drw-------. 252 root root          4096 Nov  3 19:21 .glusterfs
-rw-rw-rw-.   2 qemu qemu    7516192768 Nov  3 19:06 volume-6ff40c2b-c363-42da-8988-5425eca0eea3
-rw-rw-rw-.   2 qemu qemu    5368709120 Nov  3 19:21 volume-ca7ac946-3c4e-4544-ba3a-8cd085d5882b
-rw-rw-rw-.   2 root root   10737418240 Nov  2 10:57 volume-d8f77604-f984-4e98-81cc-971003d3fb54





     To activate the LVMiSCSI cinder backend, enable and start the target service:

        systemctl enable target
        systemctl start target

[root@juno1 ~(keystone_admin)]# service target status
Redirecting to /bin/systemctl status  target.service
target.service - Restore LIO kernel target configuration
   Loaded: loaded (/usr/lib/systemd/system/target.service; enabled)
   Active: active (exited) since Wed 2014-11-05 13:23:09 MSK; 44min ago
  Process: 1611 ExecStart=/usr/bin/targetctl restore (code=exited, status=0/SUCCESS)
 Main PID: 1611 (code=exited, status=0/SUCCESS)
   CGroup: /system.slice/target.service

Nov 05 13:23:07 juno1.localdomain systemd[1]: Starting Restore LIO kernel target configuration...
Nov 05 13:23:09 juno1.localdomain systemd[1]: Started Restore LIO kernel target configuration.


  
  
  [root@juno1 ~(keystone_admin)]# vgs
  VG               #PV #LV #SN Attr   VSize   VFree
  centos             1   3   0 wz--n- 252.03g     0
  centos00           1   3   0 wz--n- 208.09g     0
  centos01           1   3   0 wz--n- 252.03g     0
  cinder-volumes51   1   1   0 wz--n-  42.29g 37.29g
  cinder-volumes52   1   2   0 wz--n-  97.65g 82.65g
  fedora             1   2   0 wz--n- 203.20g     0

References

1. http://cloudssky.com/en/blog/RDO-OpenStack-Juno-ML2-VXLAN-2-Node-Deployment-On-CentOS-7-With-Packstack
2. http://www.ksovi.eu/2014/09/red-hat-7-centos-7-iscsi-target-with-lio.html

Thursday, October 09, 2014

Forwarding packet from br-int to br-ex on Neutron Node

[root@controller ~(keystone_admin)]# neutron router-list
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| id                                   | name    | external_gateway_info                                                       |
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| 30b921ba-869c-4026-9010-f6554a82e37e | router2 | {"network_id": "022905d0-cfeb-4d5b-ae51-37c93080e7ae", "enable_snat": true} |
| db10bdfd-f0c6-4c30-8a08-30ceb132a979 | router3 | {"network_id": "022905d0-cfeb-4d5b-ae51-37c93080e7ae", "enable_snat": true} |
+--------------------------------------+---------+-----------------------------------------------------------------------------+

1. [root@controller ~(keystone_admin)]# ip netns exec qrouter-db10bdfd-f0c6-4c30-8a08-30ceb132a979 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

qg-a9e0c15a-b6: flags=67<UP,BROADCAST,RUNNING>  mtu 1500
        inet 192.168.1.154  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::f816:3eff:fe61:7982  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:61:79:82  txqueuelen 0  (Ethernet)
        RX packets 794857  bytes 1090221979 (1.0 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 459453  bytes 38942381 (37.1 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

qr-12b39034-b3: flags=67<UP,BROADCAST,RUNNING>  mtu 1500
        inet 20.0.0.1  netmask 255.255.255.0  broadcast 20.0.0.255
        inet6 fe80::f816:3eff:feed:976b  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:ed:97:6b  txqueuelen 0  (Ethernet)
        RX packets 460529  bytes 39050698 (37.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 788075  bytes 1089807974 (1.0 GiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0




2. [root@controller ~(keystone_admin)]# ip netns exec qrouter-db10bdfd-f0c6-4c30-8a08-30ceb132a979 route -n

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 qg-a9e0c15a-b6
20.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 qr-12b39034-b3
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 qg-a9e0c15a-b6



3. [root@controller ~(keystone_admin)]# ovs-vsctl show
ba7b93f5-f364-40ac-94ee-8c3266c14282
    Bridge br-tun
        Port "gre-c0a80089"
            Interface "gre-c0a80089"
                type: gre
                options: {in_key=flow, local_ip="192.168.0.127", out_key=flow, remote_ip="192.168.0.137"}
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-412ba0b5-e8"
            Interface "qg-412ba0b5-e8"
                type: internal
        Port "enp2s0"
            Interface "enp2s0"
        Port "qg-a9e0c15a-b6"
            Interface "qg-a9e0c15a-b6"
                type: internal
    Bridge br-int
        fail_mode: secure
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-635a3c01-f9"
            tag: 2
            Interface "qr-635a3c01-f9"
                type: internal
        Port "tap0e08184f-89"
            tag: 1
            Interface "tap0e08184f-89"
                type: internal
        Port "tapa8c5a401-51"
            tag: 2
            Interface "tapa8c5a401-51"
                type: internal
        Port "qr-12b39034-b3"
            tag: 1
            Interface "qr-12b39034-b3"
                type: internal
    ovs_version: "2.0.0"