Fixes for vCPE vG-MUX install script 47/19147/1
author: Eric Multanen <eric.w.multanen@intel.com>
Tue, 17 Oct 2017 08:25:31 +0000 (01:25 -0700)
committer: Eric Multanen <eric.w.multanen@intel.com>
Tue, 17 Oct 2017 08:34:18 +0000 (01:34 -0700)
Addresses the following items:
- Do not setup auto configuration for eth1 and eth3
  These interfaces will be used by vpp so they need to be down
- Fix pci bus numbering for vpp interfaces
- Fixes to properly create the 'save_config' script
- Install linux-image-extra package to get uio_pci_generic
  driver module for the vpp dpdk interfaces
- Fix build of honeycomb so the jvpp-ves module built by vpp
  can be found
- Patch libevel.so code to tweak the VES data produced
  so that sourceId and sourceName come from the openstack
  vnf_id property (instead of vm_uuid and vm_name)

Issue-ID: INT-63
Change-Id: I469c2377df9dd9920a5769628b2b14d043d55d63
Signed-off-by: Eric Multanen <eric.w.multanen@intel.com>
heat/vCPE/vgmux/base_vcpe_vgmux.env
heat/vCPE/vgmux/base_vcpe_vgmux.yaml
vnfs/vCPE/scripts/v_gmux_install.sh
vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch [new file with mode: 0644]

index aad4784..e81afa7 100644 (file)
@@ -32,3 +32,4 @@
   hc2vpp_source_repo_branch: stable/1704
   vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Vpp-Add-VES-agent-for-vG-MUX.patch
   hc2vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Hc2vpp-Add-VES-agent-for-vG-MUX.patch
+  libevel_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch
index 4f12c64..ecdb1b1 100644 (file)
@@ -165,6 +165,10 @@ parameters:
     type: string
     label: Honeycomb Patch URL
     description: URL for Honeycomb patch for vG-MUX
+  libevel_patch_url:
+    type: string
+    label: libevel Patch URL
+    description: URL for libevel patch for vG-MUX
 
 #############
 #           #
@@ -245,6 +249,7 @@ resources:
             __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
             __vpp_patch_url__ : { get_param: vpp_patch_url }
             __hc2vpp_patch_url__ : { get_param: hc2vpp_patch_url }
+            __libevel_patch_url__ : { get_param: libevel_patch_url }
           template: |
             #!/bin/bash
 
@@ -267,6 +272,7 @@ resources:
             echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
             echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
             echo "__hc2vpp_patch_url__" > /opt/config/hc2vpp_patch_url.txt
+            echo "__libevel_patch_url__" > /opt/config/libevel_patch_url.txt
 
             # Download and run install script
             curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gmux_install.sh -o /opt/v_gmux_install.sh
index b52179d..8aa2306 100644 (file)
@@ -10,6 +10,7 @@ VPP_PATCH_URL=$(cat /opt/config/vpp_patch_url.txt)
 HC2VPP_SOURCE_REPO_URL=$(cat /opt/config/hc2vpp_source_repo_url.txt)
 HC2VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/hc2vpp_source_repo_branch.txt)
 HC2VPP_PATCH_URL=$(cat /opt/config/hc2vpp_patch_url.txt)
+LIBEVEL_PATCH_URL=$(cat /opt/config/libevel_patch_url.txt)
 CLOUD_ENV=$(cat /opt/config/cloud_env.txt)
 
 # Convert Network CIDR to Netmask
@@ -31,15 +32,6 @@ then
 
        MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1)
 
-       IP=$(cat /opt/config/bng_mux_net_ipaddr.txt)
-       BITS=$(cat /opt/config/bng_mux_net_cidr.txt | cut -d"/" -f2)
-       NETMASK=$(cdr2mask $BITS)
-       echo "auto eth1" >> /etc/network/interfaces
-       echo "iface eth1 inet static" >> /etc/network/interfaces
-       echo "    address $IP" >> /etc/network/interfaces
-       echo "    netmask $NETMASK" >> /etc/network/interfaces
-       echo "    mtu $MTU" >> /etc/network/interfaces
-
        IP=$(cat /opt/config/oam_ipaddr.txt)
        BITS=$(cat /opt/config/oam_cidr.txt | cut -d"/" -f2)
        NETMASK=$(cdr2mask $BITS)
@@ -49,18 +41,7 @@ then
        echo "    netmask $NETMASK" >> /etc/network/interfaces
        echo "    mtu $MTU" >> /etc/network/interfaces
 
-       IP=$(cat /opt/config/mux_gw_net_ipaddr.txt)
-       BITS=$(cat /opt/config/mux_gw_net_cidr.txt | cut -d"/" -f2)
-       NETMASK=$(cdr2mask $BITS)
-       echo "auto eth3" >> /etc/network/interfaces
-       echo "iface eth3 inet static" >> /etc/network/interfaces
-       echo "    address $IP" >> /etc/network/interfaces
-       echo "    netmask $NETMASK" >> /etc/network/interfaces
-       echo "    mtu $MTU" >> /etc/network/interfaces
-
-       ifup eth1
        ifup eth2
-       ifup eth3
 fi
 
 # Download required dependencies
@@ -71,7 +52,7 @@ apt-get install --allow-unauthenticated -y wget openjdk-8-jdk apt-transport-http
 sleep 1
 
 # Install the tools required for download codes
-apt-get install -y expect git patch make
+apt-get install -y expect git patch make linux-image-extra-`uname -r`
 
 #Download and build the VPP codes
 cd /opt
@@ -91,7 +72,10 @@ expect -c "
 cd /opt
 apt-get install -y libcurl4-openssl-dev
 git clone http://gerrit.onap.org/r/demo
-cd demo/vnfs/VES5.0/evel/evel-library/bldjobs 
+wget -O vCPE-vG-MUX-libevel-fixup.patch ${LIBEVEL_PATCH_URL} 
+cd demo
+patch -p1 < ../vCPE-vG-MUX-libevel-fixup.patch
+cd vnfs/VES5.0/evel/evel-library/bldjobs 
 make
 cp ../libs/x86_64/libevel.so /usr/lib
 ldconfig
@@ -212,11 +196,11 @@ cpu {
 EOF
 
 cat > /etc/vpp/setup.gate << EOF
-set int state GigabitEthernet0/8/0 up
-set int ip address GigabitEthernet0/8/0 10.1.0.20/24
+set int state GigabitEthernet0/4/0 up
+set int ip address GigabitEthernet0/4/0 10.1.0.20/24
 
-set int state GigabitEthernet0/9/0 up
-set int ip address GigabitEthernet0/9/0 10.5.0.20/24
+set int state GigabitEthernet0/6/0 up
+set int ip address GigabitEthernet0/6/0 10.5.0.20/24
 
 create vxlan tunnel src 10.5.0.20 dst 10.5.0.21 vni 100
 EOF
@@ -338,7 +322,13 @@ EOF
 
 cd hc2vpp
 patch -p1 < ../Hc2vpp-Add-VES-agent-for-vG-MUX.patch
-mvn clean install
+p_version_snap=$(cat ves/ves-impl/pom.xml | grep -A 1 "jvpp-ves" | tail -1)
+p_version_snap=$(echo "${p_version_snap%<*}")
+p_version_snap=$(echo "${p_version_snap#*>}")
+p_version=$(echo "${p_version_snap%-*}")
+mkdir -p  ~/.m2/repository/io/fd/vpp/jvpp-ves/${p_version_snap}
+mvn install:install-file -Dfile=/usr/share/java/jvpp-ves-${p_version}.jar -DgroupId=io.fd.vpp -DartifactId=jvpp-ves -Dversion=${p_version_snap} -Dpackaging=jar
+mvn clean install -nsu -DskipTests=true
 l_version=$(cat pom.xml | grep "<version>" | head -1)
 l_version=$(echo "${l_version%<*}")
 l_version=$(echo "${l_version#*>}")
@@ -398,37 +388,37 @@ VPP_SETUP_GATE=/etc/vpp/setup.gate
 #
 write_startup_scripts()
 {
-       local cmd=${2}
-       local is_add=${1}
+       local cmd=\${2}
+       local is_add=\${1}
 
-       if [[ ${is_add} == add ]] ;then
+       if [[ \${is_add} == add ]] ;then
                while read -r line
                do
-                       if [[ ${line} == ${cmd} ]] ;then
+                       if [[ \${line} == \${cmd} ]] ;then
                                return 0
                        fi
-               done < ${VPP_SETUP_GATE}
+               done < \${VPP_SETUP_GATE}
 
-               echo "${cmd}" >> ${VPP_SETUP_GATE}
+               echo "\${cmd}" >> \${VPP_SETUP_GATE}
        else
                while read -r line
                do
-                       if [[ ${line} == ${cmd} ]] ;then
-                               sed -i "/${line}/d" ${VPP_SETUP_GATE}
+                       if [[ \${line} == \${cmd} ]] ;then
+                               sed -i "/\${line}/d" \${VPP_SETUP_GATE}
                                return 0
                        fi
-               done < ${VPP_SETUP_GATE}
+               done < \${VPP_SETUP_GATE}
        fi
 }
 
 # Saves the VES agent configuration to the startup script.
 #
 # Get the current VES agent configuration from the bash command:
-# $vppctl show ves agent
+# \$vppctl show ves agent
 #    Server Addr    Server Port Interval Enabled
 #    127.0.0.1        8080         10    True
 # Set the VES agent configuration with the bash command:
-# $vppctl set ves agent server 127.0.0.1 port 8080 intval 10
+# \$vppctl set ves agent server 127.0.0.1 port 8080 intval 10
 #
 save_ves_config()
 {
@@ -436,23 +426,23 @@ save_ves_config()
        local port=""
        local intval=""
 
-       local ves_config=`vppctl show ves agent | head -2 | tail -1`
-       if [ "${ves_config}" != "" ] ;then
-               server=`echo ${ves_config} | awk '{ print $1 }'`
-               port=`echo ${ves_config} | awk '{ print $2 }'`
-               intval=`echo ${ves_config} | awk '{ print $3 }'`
-               write_startup_scripts add "set ves agent server ${server} port ${port} intval ${intval}"
+       local ves_config=\`vppctl show ves agent | head -2 | tail -1\`
+       if [ "\${ves_config}" != "" ] ;then
+               server=\`echo \${ves_config} | awk '{ print \$1 }'\`
+               port=\`echo \${ves_config} | awk '{ print \$2 }'\`
+               intval=\`echo \${ves_config} | awk '{ print \$3 }'\`
+               write_startup_scripts add "set ves agent server \${server} port \${port} intval \${intval}"
        fi
 }
 
 # Save the VxLAN Tunnel Configuration to the startup script.
 #
 # Get the current VxLAN tunnel configuration with bash command:
-# $vppctl show vxlan tunnel
+# \$vppctl show vxlan tunnel
 #  [0] src 10.3.0.2 dst 10.1.0.20 vni 100 sw_if_index 1 encap_fib_index 0 fib_entry_index 7 decap_next l2
 #  [1] src 10.5.0.20 dst 10.5.0.21 vni 100 sw_if_index 2 encap_fib_index 0 fib_entry_index 8 decap_next l2
 # Set the VxLAN Tunnel with the bash command:
-# $vppctl create vxlan tunnel src 10.3.0.2 dst 10.1.0.20 vni 100
+# \$vppctl create vxlan tunnel src 10.3.0.2 dst 10.1.0.20 vni 100
 # vxlan_tunnel0
 save_vxlan_tunnel()
 {
@@ -462,12 +452,12 @@ save_vxlan_tunnel()
 
        vppctl show vxlan tunnel | while read line
        do
-               if [ "${line}" != "" ] ;then
-                       src=`echo ${line} | awk '{ print $3 }'`
-                       dst=`echo ${line} | awk '{ print $5 }'`
-                       vni=`echo ${line} | awk '{ print $7 }'`
+               if [ "\${line}" != "" ] ;then
+                       src=\`echo \${line} | awk '{ print \$3 }'\`
+                       dst=\`echo \${line} | awk '{ print \$5 }'\`
+                       vni=\`echo \${line} | awk '{ print \$7 }'\`
 
-                       write_startup_scripts add "create vxlan tunnel src ${src} dst ${dst} vni ${vni}"
+                       write_startup_scripts add "create vxlan tunnel src \${src} dst \${dst} vni \${vni}"
                fi
        done
 }
@@ -475,14 +465,14 @@ save_vxlan_tunnel()
 # Save the VxLAN tunnel L2 xconnect configuration to the startup script.
 #
 # Get the Current L2 Address configuration with bash command:
-# $vppctl show int addr
+# \$vppctl show int addr
 # local0 (dn):
 # vxlan_tunnel0 (up):
 #   l2 xconnect vxlan_tunnel1
 # vxlan_tunnel1 (up):
 #   l2 xconnect vxlan_tunnel0
 # Save the VxLAN tunnel L2 xconnect configuration with bash command:
-# $vppctl set interface l2 xconnect vxlan_tunnel0 vxlan_tunnel1
+# \$vppctl set interface l2 xconnect vxlan_tunnel0 vxlan_tunnel1
 #
 save_vxlan_xconnect()
 {
@@ -491,17 +481,17 @@ save_vxlan_xconnect()
 
        vppctl show int addr | while read line
        do
-               if [[ ${line} == vxlan_tunnel* ]] ;then
+               if [[ \${line} == vxlan_tunnel* ]] ;then
                        read next
-                       while [[ ${next} != l2* ]] || [[ ${next} == "" ]]
+                       while [[ \${next} != l2* ]] || [[ \${next} == "" ]]
                        do
-                               line=`echo ${next}`
+                               line=\`echo \${next}\`
                                read next
                        done
-                       if [[ ${next} == l2* ]] ;then
-                               ingress=`echo ${line} | awk '{ print $1 }'`
-                               egress=`echo ${next} | awk '{ print $3 }'`
-                               write_startup_scripts add "set interface l2 xconnect ${ingress} ${egress}"
+                       if [[ \${next} == l2* ]] ;then
+                               ingress=\`echo \${line} | awk '{ print \$1 }'\`
+                               egress=\`echo \${next} | awk '{ print \$3 }'\`
+                               write_startup_scripts add "set interface l2 xconnect \${ingress} \${egress}"
                        fi
                fi
        done
diff --git a/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch b/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch
new file mode 100644 (file)
index 0000000..639a7c6
--- /dev/null
@@ -0,0 +1,120 @@
+diff --git a/vnfs/VES5.0/evel/evel-library/code/evel_library/evel_event.c b/vnfs/VES5.0/evel/evel-library/code/evel_library/evel_event.c
+index ced29b2..892e4b6 100644
+--- a/vnfs/VES5.0/evel/evel-library/code/evel_library/evel_event.c
++++ b/vnfs/VES5.0/evel/evel-library/code/evel_library/evel_event.c
+@@ -166,7 +166,8 @@ void evel_init_header(EVENT_HEADER * const header,const char *const eventname)
+   header->last_epoch_microsec = tv.tv_usec + 1000000 * tv.tv_sec;
+   header->priority = EVEL_PRIORITY_NORMAL;
+   header->reporting_entity_name = strdup(openstack_vm_name());
+-  header->source_name = strdup(openstack_vm_name());
++  /* header->source_name = strdup(openstack_vm_name()); */
++  header->source_name = strdup(openstack_vnf_id()); /* vCPE quick hack */
+   header->sequence = event_sequence;
+   header->start_epoch_microsec = header->last_epoch_microsec;
+   header->major_version = EVEL_HEADER_MAJOR_VERSION;
+@@ -180,7 +181,8 @@ void evel_init_header(EVENT_HEADER * const header,const char *const eventname)
+   evel_init_option_string(&header->nfcnaming_code);
+   evel_init_option_string(&header->nfnaming_code);
+   evel_force_option_string(&header->reporting_entity_id, openstack_vm_uuid());
+-  evel_force_option_string(&header->source_id, openstack_vm_uuid());
++  /* evel_force_option_string(&header->source_id, openstack_vm_uuid()); */
++  evel_force_option_string(&header->source_id, openstack_vnf_id()); /* vCPE quick hack */
+   evel_init_option_intheader(&header->internal_field);
+   EVEL_EXIT();
+@@ -215,7 +217,8 @@ void evel_init_header_nameid(EVENT_HEADER * const header,const char *const event
+   header->last_epoch_microsec = tv.tv_usec + 1000000 * tv.tv_sec;
+   header->priority = EVEL_PRIORITY_NORMAL;
+   header->reporting_entity_name = strdup(openstack_vm_name());
+-  header->source_name = strdup(openstack_vm_name());
++  /* header->source_name = strdup(openstack_vm_name()); */
++  header->source_name = strdup(openstack_vnf_id()); /* vCPE quick hack */
+   header->sequence = event_sequence;
+   header->start_epoch_microsec = header->last_epoch_microsec;
+   header->major_version = EVEL_HEADER_MAJOR_VERSION;
+@@ -229,7 +232,8 @@ void evel_init_header_nameid(EVENT_HEADER * const header,const char *const event
+   evel_init_option_string(&header->nfcnaming_code);
+   evel_init_option_string(&header->nfnaming_code);
+   evel_force_option_string(&header->reporting_entity_id, openstack_vm_uuid());
+-  evel_force_option_string(&header->source_id, openstack_vm_uuid());
++  /* evel_force_option_string(&header->source_id, openstack_vm_uuid()); */
++  evel_force_option_string(&header->source_id, openstack_vnf_id()); /* vCPE quick hack */
+   evel_init_option_intheader(&header->internal_field);
+   EVEL_EXIT();
+diff --git a/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.c b/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.c
+index 11fef1b..d82f282 100644
+--- a/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.c
++++ b/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.c
+@@ -59,6 +59,11 @@ static char vm_uuid[MAX_METADATA_STRING+1] = {0};
+ static char vm_name[MAX_METADATA_STRING+1] = {0};
+ /**************************************************************************//**
++ * ID of the VNF extracted from the OpenStack metadata service.
++ *****************************************************************************/
++static char vnf_id[MAX_METADATA_STRING+1] = {0};
++
++/**************************************************************************//**
+  * How many metadata elements we allow for in the retrieved JSON.
+  *****************************************************************************/
+ static const int MAX_METADATA_TOKENS = 128;
+@@ -289,6 +294,19 @@ EVEL_ERR_CODES openstack_metadata(int verbosity)
+     {
+       EVEL_DEBUG("VM Name: %s", vm_name);
+     }
++    if (json_get_string(rx_chunk.memory,
++                                  tokens,
++                                  json_token_count,
++                                  "vnf_id",
++                                  vnf_id) != EVEL_SUCCESS)
++    {
++      rc = EVEL_BAD_METADATA;
++      EVEL_ERROR("Failed to extract VNF ID from OpenStack metadata");
++    }
++    else
++    {
++      EVEL_DEBUG("VNF Id: %s", vnf_id);
++    }
+   }
+ exit_label:
+@@ -318,6 +336,9 @@ void openstack_metadata_initialize()
+   strncpy(vm_name,
+           "Dummy VM name - No Metadata available",
+           MAX_METADATA_STRING);
++  strncpy(vnf_id,
++          "Dummy VNF ID - No Metadata available",
++          MAX_METADATA_STRING);
+ }
+ /**************************************************************************//**
+@@ -590,3 +611,13 @@ const char *openstack_vm_uuid()
+ {
+   return vm_uuid;
+ }
++
++/**************************************************************************//**
++ * Get the VNF ID provided by the metadata service.
++ *
++ * @returns VNF ID
++ *****************************************************************************/
++const char *openstack_vnf_id()
++{
++  return vnf_id;
++}
+diff --git a/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.h b/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.h
+index 1ee4409..b9ed21c 100644
+--- a/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.h
++++ b/vnfs/VES5.0/evel/evel-library/code/evel_library/metadata.h
+@@ -55,4 +55,11 @@ const char *openstack_vm_name();
+  *****************************************************************************/
+ const char *openstack_vm_uuid();
++/**************************************************************************//**
++ * Get the VNF ID provided by the metadata service.
++ *
++ * @returns VNF ID
++ *****************************************************************************/
++const char *openstack_vnf_id();
++
+ #endif