This page will contain all information for the OpenDaylight + OpenStack setup.
All nodes should have the hostname and FQDN of every other host in /etc/hosts, and their own FQDN in /etc/hostname!
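A sketch (hostnames from this setup; the management addresses are illustrative placeholders):
# /etc/hosts -- identical on every node
10.10.11.2  devcontroller.localdomain devcontroller
10.10.11.3  devcompute.localdomain    devcompute
10.10.11.5  devnetwork.localdomain    devnetwork
# /etc/hostname -- each node's own FQDN, e.g. on the controller:
devcontroller.localdomain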
Ref:
Download OpenDaylight
distribution-karaf-0.2.1-Helium-SR1:
wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.1-Helium-SR1/distribution-karaf-0.2.1-Helium-SR1.tar.gz
wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
Enable OF13
vim distribution-karaf-0.2.1-Helium-SR1/etc/custom.properties
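The property to flip there should be ovsdb.of.version (shipped commented out in the Helium custom.properties, if I recall correctly):
# etc/custom.properties
ovsdb.of.version=1.3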
Start Karaf and install features (after configuring OpenStack; see below):
feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core
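To confirm the features came up, from the Karaf console (a sketch):
feature:list -i | grep ovsdb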
neutron net-list
# ext-net does not need dhcp-agent!
neutron dhcp-agent-list-hosting-net vx-net
neutron dhcp-agent-network-remove <agent UUID from previous command> vx-net
neutron router-list
neutron router-port-list tenant-rtr
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                               |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------+
| 6e81e033-d85c-4b88-9f3b-b8024749f170 |      | fa:16:3e:85:15:7a | {"subnet_id": "1d757265-2bf5-497e-bf88-6f2dae844875", "ip_address": "192.168.201.201"} | <-- EXTERNAL = GATEWAY to EXT-NET
| 85ca0c57-934e-4028-820c-037ee1634ae6 |      | fa:16:3e:85:9c:82 | {"subnet_id": "2a16439d-bfbd-45b5-bd54-01d4bda5b172", "ip_address": "10.8.8.1"}        | <-- TENANT = INTERFACE (LAN port on router)
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------+
neutron router-interface-delete vx-rtr <subnet_id>
neutron router-gateway-clear vx-rtr   # clears the external gateway (the 172.x address)
neutron router-delete vx-rtr
neutron subnet-list
neutron subnet-show <id|name>
neutron subnet-delete private-subnet
neutron net-list
neutron net-show private
neutron net-delete private
# not needed
keystone tenant-list
keystone tenant-delete demo
neutron subnet-delete public-subnet
neutron net-delete public
sudo systemctl stop neutron-server
sudo systemctl stop neutron-openvswitch-agent
sudo systemctl disable neutron-openvswitch-agent
# Stops, cleans and restarts openvswitch; logs are captured.
sudo systemctl stop openvswitch
sudo rm -rf /var/log/openvswitch/*
sudo rm -rf /etc/openvswitch/conf.db
sudo systemctl start openvswitch
#
sudo crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight
sudo crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
cat <<EOT | sudo tee -a /etc/neutron/plugins/ml2/ml2_conf.ini > /dev/null
[ml2_odl]
password = admin
username = admin
url = http://192.168.120.1:8080/controller/nb/v2/neutron
EOT
# DB server is the same as in neutron.conf
cat <<EOT | sudo tee -a /etc/neutron/plugins/ml2/ml2_conf.ini > /dev/null
[database]
sql_connection = mysql://neutronUser:neutronPass@MGMT_IP/neutron_ml2
EOT
sudo mysql -e "drop database if exists neutron_ml2;"
sudo mysql -e "create database neutron_ml2 character set utf8;"
sudo mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
#sudo neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
sudo neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade juno
sudo systemctl start neutron-server
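A quick sanity check of the resulting ml2_conf.ini, using the same crudini as above (a sketch):
sudo crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers
sudo crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types
sudo crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl url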
Must be done on Network & Compute
sudo systemctl stop neutron-openvswitch-agent
sudo systemctl disable neutron-openvswitch-agent
# Stops, cleans and restarts openvswitch; logs are captured.
sudo systemctl stop openvswitch
sudo rm -rf /var/log/openvswitch/*
sudo rm -rf /etc/openvswitch/conf.db
sudo systemctl start openvswitch
sudo ovs-vsctl show
This next script will attempt to clean up any namespaces, ports or bridges still hanging around. Run sudo ovs-vsctl show first to determine whether this is even needed.
#!/bin/bash
for ns in `ip netns`
do
  sudo ip netns del $ns
done
# Strip the qvb/qbr/qvo/tap interfaces left behind by nova/neutron.
for qvb in `ifconfig -a | grep qvb | cut -d' ' -f1`
do
  sudo ip link set $qvb down
  sudo ip link delete $qvb
done
for qbr in `ifconfig -a | grep qbr | cut -d' ' -f1`
do
  sudo ip link set $qbr down
  sudo ip link delete $qbr
done
for qvo in `ifconfig -a | grep qvo | cut -d' ' -f1`
do
  sudo ovs-vsctl --if-exists del-port br-int $qvo
done
for tap in `ifconfig -a | grep tap | cut -d' ' -f1`
do
  tap="${tap%?}"   # strip the trailing ':' from ifconfig output
  sudo ip link set $tap down
  sudo ovs-vsctl --if-exists del-port br-int $tap
done
for i in `sudo ovs-vsctl show | grep Bridge | awk '{print $2}'` ; do
if [[ $i == *br-eth1* ]]; then
sudo ovs-vsctl --if-exists del-br 'br-eth1'
else
sudo ovs-vsctl --if-exists del-br $i
fi
done
for i in `ip addr | grep tap | awk '{print $2}'`; do
tap="${i%?}"
echo "tap=$tap"
sudo ip link del dev $tap
done
for i in phy-br-eth1 int-br-eth1; do
ip -o link show dev $i &> /dev/null
if [ $? -eq 0 ]; then
sudo ip link del dev $i
fi
done
for iface in br-ex br-int br-tun; do
sudo ovs-dpctl del-if ovs-system $iface
done
echo "Delete vxlan_xxx if present"
for iface in `sudo ovs-dpctl show | awk 'match($0, /[Pp]ort\s+[[:digit:]]+\s*\:\s*(.+).+\(vxlan/, m) { print m[1]; }'` ; do
echo ${iface} ; sudo ovs-dpctl del-if ovs-system ${iface}
done
sudo ovs-dpctl show
JAVA_HOME set?
SR2 uses basic authentication, avoiding the problem with PAZ.
Make sure OF13 is enabled:
vim distribution-karaf-0.2.2-Helium-SR2/etc/custom.properties
Start karaf…
cd distribution*
./bin/karaf
…and install features:
feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core
Enable karaf log:
log:set debug org.opendaylight.ovsdb.openstack.netvirt.impl.NeutronL3Adapter
log:set TRACE org.opendaylight.controller.networkconfig.neutron
log:set debug org.opendaylight.ovsdb
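To follow the output, tail the Karaf log (default location, relative to the distribution directory):
tail -f data/log/karaf.log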
cd opendaylight/
./run.sh -Xmx1024m -XX:MaxPermSize=1024m -virt ovsdb
With OVSDB, stop simpleforwarding
lb | grep simple
ss | grep simple
  142|Active |  4|samples.simpleforwarding (0.4.2.SNAPSHOT)
stop 142
To see processing of Neutron events related to L3, run this from the controller prompt:
setLogLevel org.opendaylight.ovsdb.openstack.netvirt.impl.NeutronL3Adapter debug
The nodes need to be configured to use the OpenDaylight controller. Recall that in this setup the OpenDaylight controller is running on the host at 10.10.11.4. Change the value below if you have a different address. Run the script below on both nodes.
#!/bin/bash
ens4=$(ip -o addr show dev ens4 | grep -w inet | awk '{print $4}' | sed -e 's/\/.*//g')
ovs-vsctl set-manager tcp:192.168.120.1:6640
read ovstbl <<< $(ovs-vsctl get Open_vSwitch . _uuid)
#ovs-vsctl set Open_vSwitch $ovstbl other_config:bridge_mappings=physnet1:eth1,physnet3:eth3
ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ens4
ovs-vsctl list Manager
echo
ovs-vsctl list Open_vSwitch
Now open the ODL and OpenStack web UIs:
http://devcontroller.localdomain/dashboard
http://devopendaylight:8181/dlux/index.html#/login
For now, ODL shows only the 3 br-int bridges. Go to OpenStack and create networks! Check neutron.log to see that it communicates with ODL.
neutron net-create ext-net --shared --router:external=True
Internal Server Error (HTTP 500) (Request-ID: req-0f52115e-b912-4870-84f5-7b11bd776f14)
2015-02-06 12:21:32.086 10654 DEBUG neutron.plugins.ml2.drivers.mechanism_odl [req-7f1e68b5-5578-436f-b86d-50f70e3b7ceb None] Sending METHOD (post) URL (http://10.10.11.4:8080/controller/nb/v2/neutron/networks) JSON ({'network': {'name': u'ext-net', 'provider:physical_network': None, 'admin_state_up': True, 'tenant_id': u'c69208fb1b7641d0b63f6437036d42d0', 'provider:network_type': u'gre', 'router:external': True, 'shared': True, 'id': 'fcab3ff1-728b-4a26-b176-8bf1b173ed43', 'provider:segmentation_id': 1L}}) sendjson /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py:295
2015-02-06 12:21:32.087 10654 INFO urllib3.connectionpool [req-7f1e68b5-5578-436f-b86d-50f70e3b7ceb ] Starting new HTTP connection (1): 10.10.11.4
2015-02-06 12:21:32.092 10654 DEBUG urllib3.connectionpool [req-7f1e68b5-5578-436f-b86d-50f70e3b7ceb ] "POST /controller/nb/v2/neutron/networks HTTP/1.1" 406 1095 _make_request /usr/lib/python2.7/site-packages/urllib3/connectionpool.py:357
2015-02-06 12:21:32.093 10654 ERROR neutron.plugins.ml2.managers [req-7f1e68b5-5578-436f-b86d-50f70e3b7ceb None] Mechanism driver 'opendaylight' failed in create_network_postcommit
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers Traceback (most recent call last):
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/managers.py", line 291, in _call_on_drivers
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers getattr(driver.obj, method_name)(context)
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py", line 145, in create_network_postcommit
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers self.synchronize('create', ODL_NETWORKS, context)
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py", line 176, in synchronize
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers self.sync_single_resource(operation, object_type, context)
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py", line 279, in sync_single_resource
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers self.out_of_sync = True
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/openstack/common/excutils.py", line 82, in __exit__
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers six.reraise(self.type_, self.value, self.tb)
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py", line 276, in sync_single_resource
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers self.sendjson(method, urlpath, {object_type[:-1]: resource})
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/mechanism_odl.py", line 299, in sendjson
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers r.raise_for_status()
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers File "/usr/lib/python2.7/site-packages/requests/models.py", line 795, in raise_for_status
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers raise HTTPError(http_error_msg, response=self)
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers HTTPError: 406 Client Error: Not Acceptable
2015-02-06 12:21:32.093 10654 TRACE neutron.plugins.ml2.managers
2015-02-06 12:21:32.094 10654 ERROR neutron.plugins.ml2.plugin [req-7f1e68b5-5578-436f-b86d-50f70e3b7ceb None] mechanism_manager.create_network_postcommit failed, deleting network 'fcab3ff1-728b-4a26-b176-8bf1b173ed43'
Set up a VXLAN tunnel between the two nodes to verify the setup. Use the VNC console of one of the VMs and try to ping the other VM. In the test below the two VMs should get the addresses 10.100.5.2 and 10.100.5.4 if run for the first time.
neutron net-create vx-net --provider:network_type vxlan --provider:segmentation_id 1400
neutron subnet-create vx-net 10.100.5.0/24 --name vx-subnet
neutron router-create vx-rtr
neutron router-interface-add vx-rtr vx-subnet
nova boot --flavor m1.nano --image $(nova image-list | grep 'uec\s' | awk '{print $2}' | tail -1) --nic net-id=$(neutron net-list | grep -w vx-net | awk '{print $2}') vmvx1 --availability_zone=nova:fedora51
nova boot --flavor m1.nano --image $(nova image-list | grep 'cirros\s' | awk '{print $2}' | tail -1) --nic net-id=$(neutron net-list | grep -w vx-net | awk '{print $2}') vmvx2 --availability_zone=nova:fedora52
nova get-vnc-console vmvx1 novnc
nova get-vnc-console vmvx2 novnc
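Optional check that ODL actually programmed the tunnel (a sketch; port names and flow fields depend on the deployment):
sudo ovs-vsctl show | grep -A 2 vxlan
sudo ovs-ofctl -O OpenFlow13 dump-flows br-int | grep tun_id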
Hi Dave, stop ODL and OVS. Remove /etc/openvswitch/conf.db. Remove opendaylight/data/*. Restart OpenDaylight and install the features. When the bundle is active, start OVS and set the local IP and manager on it. -Tim
This works only for the latest OpenStack/Kilo, as the plugins are being split out of the OpenStack tree.
On control and network nodes:
git clone https://github.com/stackforge/networking-odl.git
cd networking-odl
sudo python setup.py install
# stop all running neutron services
systemctl | grep neutron
systemctl stop neutron-dhcp-agent.service neutron-l3-agent.service neutron-lbaas-agent.service neutron-metadata-agent.service neutron-metering-agent.service
Helium-SR2 uses Basic auth, which eliminates the problem caused by the JSESSIONID cookie.
wget https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/ovsdb/distribution.ovsdb/1.2.2-Helium-SR2/distribution.ovsdb-1.2.2-Helium-SR2-osgipackage.zip
sudo systemctl stop openvswitch
sudo rm -rf /var/log/openvswitch/*
sudo rm -rf /etc/openvswitch/conf.db
sudo systemctl start openvswitch
sudo ovs-vsctl show
Solution:
https://ask.opendaylight.org/question/905/error-executing-command-no-feature-named-transaction/
Check that the default policy is ACCEPT for incoming connections. Pay attention to Red Hat's custom rule chain, which has a catch-all REJECT rule as the last rule in the chain.
Look at this: http://wiki.centos.org/HowTos/Network/IPTables#head-724ed81dbcd2b82b5fd3f648142796f3ce60c730
Check if ODL:8080 is reachable.
curl -v 'http://ODL:8080'
telnet ODL 8080
In general:
From your network configuration, I understand that you will use the VTN Manager node as the proxy for the devstack VMs to get the packages.
a. This makes me wonder whether OpenDaylight was able to receive the POST networks API call from devstack, since those also fall under the HTTP requests category.
b. Please check logs/web_access*.txt for the list of commands received by the OpenDaylight controller.
c. Please try to POST a request manually to ODL:8080 from the devstack node to ensure that an HTTP response is received (see the sketch after this list).
d. Please add the ODL IP address to no_proxy to ensure the request is received.
e. Please share the local.conf (control node) via a pastebin link.
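A minimal manual request for step c (a sketch; assumes the admin/admin credentials and the Helium northbound URL configured above):
curl -u admin:admin -H 'Accept: application/json' http://ODL:8080/controller/nb/v2/neutron/networks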
Check that the management, data and control networks are pingable:
Use INSERT instead:
sudo /sbin/iptables -I INPUT --src 10.20.21.2/24 -j ACCEPT
sudo /sbin/iptables -I OUTPUT --src 10.20.21.2/24 -j ACCEPT
iptables rule changes made from the CLI are lost on reboot. However, iptables comes with two useful utilities: iptables-save and iptables-restore.
iptables-save > iptables.dump
iptables-restore < iptables.dump
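To make the rules survive a reboot on Fedora/RHEL (a sketch; assumes the iptables service reads /etc/sysconfig/iptables):
sudo iptables-save | sudo tee /etc/sysconfig/iptables > /dev/null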
ip address, route, netns, neighbor etc.
ifconfig, route and netstat are deprecated; distros have started removing these commands.
iptables useful options: -n -v --line-numbers
ping, host, traceroute, tcpdump, ip neighbor, arp, arping
Protocol decoders: wireshark
ovs-vsctl
show - overview of the Open vSwitch configuration
add-br - add a bridge
ovs-ofctl
dump-flows <br> - examine flow tables
dump-ports <br> - port statistics by port number
show <br> - port number to port name mapping
sudo ovs-ofctl -O OpenFlow13 dump-flows br-int
ovs-appctl
bridge/dump-flows <br> - examine flow tables
fdb/show <br> - list learned MAC/VLAN pairs
Use port mirroring to see traffic processed by a port
Create a virtual ethernet interface:
ip link add type veth
ip link set veth0 up
Add it into the Open vSwitch bridge br-int:
ovs-vsctl add-port br-int veth0
Create the mirror and mirror the packets from eth1, br-int, patch-tun:
ovs-vsctl -- set Bridge br-int mirrors=@m \
  -- --id=@veth0 get Port veth0 \
  -- --id=@eth1 get Port eth1 \
  -- --id=@patch-tun get Port patch-tun \
  -- --id=@br-int get Port br-int \
  -- --id=@m create Mirror name=veth select-src-port=@eth1,@patch-tun,@br-int \
     select-dst-port=@eth1,@patch-tun,@br-int output-port=@veth0
When finished, delete the mirror:
ovs-vsctl clear Bridge br-int mirrors
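To actually see the mirrored traffic, capture on the peer end of the veth pair (ip link add type veth creates veth0 and veth1; a sketch):
sudo ip link set veth1 up
sudo tcpdump -n -e -i veth1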
On Controller:
[root@devcontroller fedora]# systemctl | grep neutr
neutron-dhcp-agent.service      loaded active running  OpenStack Neutron DHCP Agent
neutron-l3-agent.service        loaded active running  OpenStack Neutron Layer 3 Agent
neutron-metadata-agent.service  loaded active running  OpenStack Neutron Metadata Agent
neutron-server.service          loaded active running  OpenStack Neutron Server
[root@devcontroller fedora]# systemctl | grep opens
openstack-ceilometer-alarm-evaluator.service  loaded active running  OpenStack ceilometer alarm evaluation service
openstack-ceilometer-alarm-notifier.service   loaded active running  OpenStack ceilometer alarm notification service
openstack-ceilometer-api.service              loaded active running  OpenStack ceilometer API service
openstack-ceilometer-central.service          loaded active running  OpenStack ceilometer central agent
openstack-ceilometer-collector.service        loaded active running  OpenStack ceilometer collection service
openstack-glance-api.service                  loaded active running  OpenStack Image Service (code-named Glance) API server
openstack-glance-registry.service             loaded active running  OpenStack Image Service (code-named Glance) Registry server
openstack-heat-api-cfn.service                loaded active running  Openstack Heat CFN-compatible API Service
openstack-heat-api.service                    loaded active running  OpenStack Heat API Service
openstack-heat-engine.service                 loaded active running  Openstack Heat Engine Service
openstack-keystone.service                    loaded active running  OpenStack Identity Service (code-named Keystone)
openstack-nova-api.service                    loaded active running  OpenStack Nova API Server
openstack-nova-cert.service                   loaded active running  OpenStack Nova Cert Server
openstack-nova-conductor.service              loaded active running  OpenStack Nova Conductor Server
openstack-nova-consoleauth.service            loaded active running  OpenStack Nova VNC console auth Server
openstack-nova-novncproxy.service             loaded active running  OpenStack Nova NoVNC Proxy Server
openstack-nova-objectstore.service            loaded active running  OpenStack Nova Objectstore Server
openstack-nova-scheduler.service              loaded active running  OpenStack Nova Scheduler Server
On Compute
[root@devcompute fedora]# systemctl | grep neutr
[root@devcompute fedora]# systemctl | grep open
openstack-ceilometer-compute.service  loaded active running  OpenStack ceilometer compute agent
openstack-nova-compute.service        loaded active running  OpenStack Nova Compute Server
openvswitch-nonetwork.service         loaded active exited   Open vSwitch Internal Unit
openvswitch.service                   loaded active exited   Open vSwitch
neutron port-list
neutron port-delete <id>
neutron net-list
neutron dhcp-agent-list-hosting-net vx-net
neutron dhcp-agent-network-remove <agent UUID from previous command> vx-net
neutron router-list
neutron router-port-list vx-rtr
neutron router-interface-delete vx-rtr <subnet_id>
neutron router-gateway-clear vx-rtr   # clears the external gateway (the 172.x address)
neutron router-delete vx-rtr
# if there is an orphaned port
neutron port-update <port-id> --device_owner clear
neutron port-delete <port-id>
neutron router-delete <router-id>
####
neutron subnet-list
neutron subnet-show <id|name>
neutron subnet-delete private-subnet
neutron net-list
neutron net-show private
neutron net-delete private
keystone tenant-list
keystone tenant-delete demo
neutron subnet-delete public-subnet
neutron net-delete public
If the router is attached to any network and has a gateway set, the gateway needs to be cleared first and no ports may be in use; then the router can be deleted.
Check for ports attached to the router. If there are attached ports, clear them first (see the sketch below).
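A sketch of that check and cleanup (router and subnet IDs are placeholders):
neutron router-port-list <router-id>
neutron router-interface-delete <router-id> <subnet-id>   # once per attached subnet
neutron router-gateway-clear <router-id>
neutron router-delete <router-id>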
Check Subnet List
neutron subnet-list
Check details of a subnet
neutron subnet-show <subnet-id>
Delete a Subnet
neutron subnet-delete <subnet-id>
List configured networks:
neutron net-list
Check details of a configured network:
neutron net-show <net-id>
Once all the subnets of a particular network are removed, the network can be deleted:
neutron net-delete <net-id>
In /etc/neutron/l3_agent.ini the external_network_bridge is set to 'brex'. The WAN port of the router connecting to this external network will be assigned an IP from the external network.
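For reference, the relevant setting (a sketch matching this setup's bridge name):
# /etc/neutron/l3_agent.ini
[DEFAULT]
external_network_bridge = brex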
When no --provider: attribute is given, a GRE network is created.
THIS WORKS 100% with the Puppet setup.
net-create: ODL does not understand --shared!
neutron net-create ext-net --shared --router:external=True
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | 5d27cbcb-182e-4a02-8938-829a508f6186 |
| name                      | ext-net                              |
| provider:network_type     | gre                                  | <-------
| provider:physical_network |                                      | <------- HERE
| provider:segmentation_id  | 1                                    | <-------
| router:external           | True                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | c69208fb1b7641d0b63f6437036d42d0     |
+---------------------------+--------------------------------------+
subnet-create:
neutron subnet-create ext-net --name ext-subnet \
  --allocation-pool start=192.168.201.200,end=192.168.201.230 \
  --disable-dhcp --gateway 192.168.201.1 192.168.201.0/24
router-create
neutron router-create ext-router
Set the external GW for the router
neutron router-gateway-set ext-router ext-net
The router gets an IP x.x.x.200 from the external/physical network.
[root@DEVNETWORK fedora]# ip netns
qrouter-4a4778c7-287f-4239-b05e-495250e4988d
ip netns exec qrouter-4a4778c7-287f-4239-b05e-495250e4988d ping 8.8.8.8
PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
64 bytes from 8.8.8.8: icmp_seq=1 ttl=56 time=19.3 ms
64 bytes from 8.8.8.8: icmp_seq=2 ttl=56 time=19.2 ms
Simple network:
neutron net-create demo-net
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | db849cba-9fb8-4096-a63a-f1268805ea4c |
| name                      | demo-net                             |
| provider:network_type     | gre                                  | <------- HERE
| provider:physical_network |                                      | <------- HERE
| provider:segmentation_id  | 2                                    | <------- HERE
| router:external           | False                                |
| shared                    | False                                |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | c69208fb1b7641d0b63f6437036d42d0     |
+---------------------------+--------------------------------------+
neutron subnet-create demo-net --name demo-subnet --gateway 10.8.8.1 10.8.8.0/24
Create tenant router:
neutron router-create demo-router
Attach the router to the demo tenant subnet:
neutron router-interface-add demo-router demo-subnet
Add a GW to ext-net on demo-router:
neutron router-gateway-set demo-router ext-net
In this case the external network is GRE or VLAN. 'provider:network_type' and 'provider:segmentation_id' are used for VXLAN and GRE networks.
E.g.:
neutron net-create admin-net --provider:network_type vxlan --provider:segmentation_id 1400
???:
neutron net-create --provider:physical_network=ph-eth0 --provider:network_type=vlan --provider:segmentation_id=1998 --shared --router:external=true GATEWAY_NET
gre example??:
xxxx
neutron subnet-create admin-net 10.100.5.0/24 --name admin-subnet
neutron router-create admin-router
neutron router-interface-add admin-router admin-subnet
nova boot --flavor m2.tiny --image $(nova image-list | grep 'cirros' | awk '{print $2}' | tail -1) \
  --nic net-id=$(neutron net-list | grep -w admin-net | awk '{print $2}') vmvx1 \
  --availability_zone=nova:fedora51
nova boot --flavor m2.tiny --image $(nova image-list | grep 'cirros' | awk '{print $2}' | tail -1) \
  --nic net-id=$(neutron net-list | grep -w admin-net | awk '{print $2}') vmvx2 \
  --availability_zone=nova:fedora52
nova get-vnc-console vmvx1 novnc
nova get-vnc-console vmvx2 novnc
cat ./myfile.txt
#cloud-config
password: mysecret
chpasswd: { expire: False }
ssh_pwauth: True
http://cloudinit.readthedocs.org/en/latest/topics/examples.html
http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/examples/
Boot VM with:
nova boot --flavor 2 --user-data myfile.txt
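To verify cloud-init picked up the user data, check the instance console log (a sketch; the VM name is an example):
nova console-log vmvx1 | grep -i cloud-init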
wget http://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P /var/kvm/images
glance image-create --name="Ubuntu1404" --is-public=true --disk-format=qcow2 --container-format=bare < /var/kvm/images/ubuntu-14.04-server-cloudimg-amd64-disk1.img
Fedora with cloud-init
http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
glance image-create --name="fedora21_64" --is-public=true --disk-format=qcow2 --container-format=bare < Fedora-Cloud-Base-20141203-21.x86_64.qcow2
nova flavor-create FLAVOR_NAME FLAVOR_ID RAM_IN_MB ROOT_DISK_IN_GB NUMBER_OF_VCPUS
nova flavor-create --is-public true m2.tiny auto 512 10 1  # --rxtx-factor .1
nova flavor-access-add FLAVOR TENANT_ID
nova flavor-delete FLAVOR_ID
[root@devcontroller fedora]# ovs-vsctl show
2b371710-bfa7-4a24-bcea-1d48a7f3bb08
Manager "tcp:10.10.11.4:6640"
is_connected: true
Bridge br-int
Controller "tcp:10.10.11.4:6633"
is_connected: true
fail_mode: secure
Port "gre-10.20.21.3"
Interface "gre-10.20.21.3"
type: gre
options: {key=flow, local_ip="10.20.21.2", remote_ip="10.20.21.3"}
Port "tap0d51e742-5f"
Interface "tap0d51e742-5f"
type: internal
Port br-int
Interface br-int
ovs_version: "2.3.0"
[root@devcontroller fedora]# ip netns
qdhcp-78234c84-7ced-489a-95ae-d08c7394f485
[root@devcontroller fedora]# ip netns exec qdhcp-78234c84-7ced-489a-95ae-d08c7394f485 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
14: tap0d51e742-5f: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
link/ether fa:16:3e:02:68:37 brd ff:ff:ff:ff:ff:ff
inet 10.100.5.3/24 brd 10.100.5.255 scope global tap0d51e742-5f
valid_lft forever preferred_lft forever
inet6 fe80::f816:3eff:fe02:6837/64 scope link
valid_lft forever preferred_lft forever
[root@devcontroller fedora]# ip netns exec qdhcp-78234c84-7ced-489a-95ae-d08c7394f485 ip neigh
10.100.5.2 dev tap0d51e742-5f lladdr fa:16:3e:63:ee:2b STALE
10.100.5.4 dev tap0d51e742-5f lladdr fa:16:3e:5f:3d:77 STALE
[root@devcontroller fedora]# ip netns exec qdhcp-78234c84-7ced-489a-95ae-d08c7394f485 ip ping 10.100.5.4
Object "ping" is unknown, try "ip help".
[root@devcontroller fedora]# ip netns exec qdhcp-78234c84-7ced-489a-95ae-d08c7394f485 ping 10.100.5.4
PING 10.100.5.4 (10.100.5.4) 56(84) bytes of data.
64 bytes from 10.100.5.4: icmp_seq=1 ttl=64 time=1.11 ms
64 bytes from 10.100.5.4: icmp_seq=2 ttl=64 time=1.72 ms
64 bytes from 10.100.5.4: icmp_seq=3 ttl=64 time=0.616 ms
^C
--- 10.100.5.4 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 0.616/1.153/1.727/0.455 ms
[root@devcontroller fedora]#
[root@devcompute fedora]# ovs-vsctl show
4b35b56a-8122-4e59-944e-83f1b5496f79
Manager "tcp:10.10.11.4:6640"
is_connected: true
Bridge br-int
Controller "tcp:10.10.11.4:6633"
is_connected: true
fail_mode: secure
Port "gre-10.20.21.2"
Interface "gre-10.20.21.2"
type: gre
options: {key=flow, local_ip="10.20.21.3", remote_ip="10.20.21.2"}
Port br-int
Interface br-int
Port "tap6fec8afa-a1"
Interface "tap6fec8afa-a1"
Port "tap317f38e1-c5"
Interface "tap317f38e1-c5"
ovs_version: "2.3.0"
[root@devcompute fedora]# ip netns
Remove this file; it should not be present on a fresh installation.
rm -rf /root/.my.cnf
Controller x.x.x.2 + Compute x.x.x.3 + Network x.x.x.5
[root@devnetwork fedora]# ovs-vsctl show
f53ce58d-f504-4e9e-9cc9-dd0a40598a80
Bridge brex
Port brex
Interface brex
type: internal
Port "ens5"
Interface "ens5"
Bridge br-tun
Port "gre-0a141503"
Interface "gre-0a141503"
type: gre
options: {df_default="true", in_key=flow, local_ip="10.20.21.5", out_key=flow, remote_ip="10.20.21.3"}
Port "gre-0a141502"
Interface "gre-0a141502"
type: gre
options: {df_default="true", in_key=flow, local_ip="10.20.21.5", out_key=flow, remote_ip="10.20.21.2"}
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-int
fail_mode: secure
Port br-int
Interface br-int
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
ovs_version: "2.3.1-git3282e51"
[root@devnetwork fedora]# cat /etc/sysconfig/network-scripts/ifcfg-ens5
DEVICE=ens5
DEVICETYPE=ovs
TYPE=OVSPort
OVS_BRIDGE=brex
ONBOOT=yes
BOOTPROTO=none
[root@devnetwork fedora]# cat /etc/sysconfig/network-scripts/ifcfg-brex
ONBOOT=yes
#IPADDR=192.168.201.2
#NETMASK=255.255.255.0
DEVICE=brex
DEVICETYPE=ovs
OVSBOOTPROTO=dhcp
TYPE=OVSBridge
OVSDHCPINTERFACES=ens5
OVS_EXTRA="set bridge brex other-config:hwaddr=de:ad:be:ef:11:09"
After setting up OpenStack with Puppet: if a VM does not get an IP, a restart may help.
See this: http://docs.openstack.org/openstack-ops/content/network_troubleshooting.html
https://www.softwareab.net/wordpress/openstack-debugging-neutron-dhcp/
Look for 'Debugging DHCP Issues with nova-network'.
If dnsmasq failed, on the network node:
killall dnsmasq
systemctl restart neutron-dhcp...
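To watch DHCP traffic while a VM boots (a sketch; run on the network node, the interface may vary):
sudo tcpdump -i any -n port 67 or port 68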
[root@devcompute fedora]# systemctl status libvirtd.service
libvirtd.service - Virtualization daemon
Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled)
Active: active (running) since Wed 2015-02-04 22:31:19 CET; 17s ago
Docs: man:libvirtd(8)
http://libvirt.org
Main PID: 2127 (libvirtd)
CGroup: /system.slice/libvirtd.service
├─1206 /sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf
├─2127 /usr/sbin/libvirtd --listen
└─2262 /usr/bin/pkcheck --action-id org.libvirt.unix.manage --process 2146,69077,162 --allow-user-interaction
Feb 04 22:31:19 devcompute.localdomain systemd[1]: Starting Virtualization daemon...
Feb 04 22:31:19 devcompute.localdomain systemd[1]: Started Virtualization daemon.
Feb 04 22:31:19 devcompute.localdomain libvirtd[2127]: libvirt version: 1.1.3.8, package: 1.fc20 (Fedora Project, 2014-11-16-03:48:03, buildhw-05.phx2.fedoraproject.org)
Feb 04 22:31:19 devcompute.localdomain libvirtd[2127]: Unable to lookup SELinux process context: Invalid argument
Feb 04 22:31:19 devcompute.localdomain dnsmasq[1206]: read /etc/hosts - 6 addresses
Feb 04 22:31:19 devcompute.localdomain dnsmasq[1206]: read /var/lib/libvirt/dnsmasq/default.addnhosts - 0 addresses
Feb 04 22:31:19 devcompute.localdomain dnsmasq-dhcp[1206]: read /var/lib/libvirt/dnsmasq/default.hostsfile
Feb 04 22:31:20 devcompute.localdomain libvirtd[2127]: Failed to open file '/proc/xen/capabilities': No such file or directory
Pass
instead of
Reboot compute node