Fix bugs for infra_workload APIs  (refs/changes/42/84642/1)
author    Xiaohua Zhang <xiaohua.zhang@windriver.com>
Tue, 9 Apr 2019 10:30:44 +0000 (10:30 +0000)
committer Xiaohua Zhang <xiaohua.zhang@windriver.com>
Tue, 9 Apr 2019 10:30:44 +0000 (10:30 +0000)
Change-Id: Ieda8b2f7f68911bc348613cf7b18b40ea800b1e2
Issue-ID: MULTICLOUD-541
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
20 files changed:
fcaps/run.sh
fcaps/stop.sh
lenovo/run.sh
lenovo/stop.sh
ocata/run.sh
ocata/stop.sh
pike/run.sh
pike/stop.sh
share/common/msapi/helper.py
share/newton_base/registration/registration.py
share/newton_base/resource/infra_workload.py
share/newton_base/resource/infra_workload_helper.py
share/starlingx_base/registration/registration.py
share/starlingx_base/resource/infra_workload.py
starlingx/run.sh
starlingx/starlingx/urls.py
starlingx/stop.sh
windriver/run.sh
windriver/stop.sh
windriver/titanium_cloud/urls.py

diff --git a/fcaps/run.sh b/fcaps/run.sh
index 5145002..0c1cd87 100644
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 
 #service rabbitmq-server restart
diff --git a/fcaps/stop.sh b/fcaps/stop.sh
index 56104f9..b09ccb9 100644
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9011' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9011 --module fcaps.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/lenovo/run.sh b/lenovo/run.sh
index 0fa331f..48e26cd 100755
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 
 #nohup python manage.py runserver 0.0.0.0:9010 2>&1 &
diff --git a/lenovo/stop.sh b/lenovo/stop.sh
index f43fab1..87f80ba 100755
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9010' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9010 --module thinkcloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/ocata/run.sh b/ocata/run.sh
index 5d02765..a1f44ed 100755
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 
 
diff --git a/ocata/stop.sh b/ocata/stop.sh
index 1402a97..4e46b2c 100755
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9006' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9006 --module ocata.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/pike/run.sh b/pike/run.sh
index 738ac6c..3822c8a 100755
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 
 #nohup python manage.py runserver 0.0.0.0:9007 2>&1 &
diff --git a/pike/stop.sh b/pike/stop.sh
index 9e433eb..38d101d 100755
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9007' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9007 --module pike.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index b2ff1d6..947966c 100644
@@ -145,18 +145,18 @@ class MultiCloudAAIHelper(object):
             retcode, content, status_code = \
                 restcall.req_to_aai(resource_url, "PUT", content=resource_info)
 
-            self._logger.debug(
-                ("_update_resoure,vimid:%(cloud_owner)s"
-                 "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
-                 "return %(retcode)s, %(content)s, %(status_code)s")
-                % {
-                    "cloud_owner": cloud_owner,
-                    "cloud_region_id": cloud_region_id,
-                    "resoure_id": resoure_id,
-                    "retcode": retcode,
-                    "content": content,
-                    "status_code": status_code,
-                })
+            self._logger.debug(
+                ("_update_resoure,vimid:%(cloud_owner)s"
+                 "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
+                 "return %(retcode)s, %(content)s, %(status_code)s")
+                % {
+                    "cloud_owner": cloud_owner,
+                    "cloud_region_id": cloud_region_id,
+                    "resoure_id": resoure_id,
+                    "retcode": retcode,
+                    "content": content,
+                    "status_code": status_code,
+                })
             return retcode, content
         # unknown cloud owner,region_id
         return (
@@ -197,17 +197,17 @@ class MultiCloudThreadHelper(object):
         # }
         # format of backlog:
         # {"<id value of backlog item>": <backlog item>, ...}
+        self.name = name or "default"
         self.backlog = {}
         # expired backlog items
         self.expired_backlog = {}
         self.lock = threading.Lock()
         self.state_ = 0  # 0: stopped, 1: started
         self.cache_prefix = "bi_"+name+"_"
-        self.cache_expired_prefix = "biex_"+name+"_"
+        self.cache_expired_prefix = "biex_"+self.name+"_"
 
         self.thread = MultiCloudThreadHelper.HelperThread(self)
-        self.thread.start()
-
+        # self.thread.start()
 
     def state(self):
         return self.state_
@@ -217,7 +217,7 @@ class MultiCloudThreadHelper(object):
         if 0 == self.state_:
             self.state_ = 1
             # self.thread = MultiCloudThreadHelper.HelperThread(self)
-            self.thread.start()
+            self.thread.start()
         else:
             pass
         self.lock.release()
@@ -227,9 +227,10 @@ class MultiCloudThreadHelper(object):
 
     def add(self, backlog_item):
         cache_for_query = None
-        if not hasattr(backlog_item, "worker"):
+        if not backlog_item.get("worker", None):
+            logger.warn("Fail to add backlog item: %s" % backlog_item)
             return None
-        if not hasattr(backlog_item, "id"):
+        if not backlog_item.get("id", None):
             backlog_item["id"] = str(uuid.uuid1())
         else:
             cache_for_query = {
@@ -237,7 +238,7 @@ class MultiCloudThreadHelper(object):
                 "status": backlog_item.get("status", None)
             }
 
-        if not hasattr(backlog_item, "repeat"):
+        if not backlog_item.get("repeat", None):
             backlog_item["repeat"] = 0
         backlog_item["timestamp"] = 0
 
@@ -248,8 +249,9 @@ class MultiCloudThreadHelper(object):
                       json.dumps(cache_for_query), 3600 * 24)
 
         self.expired_backlog.pop(backlog_item["id"], None)
-        self.backlog.update(backlog_item["id"], backlog_item)
+        self.backlog[backlog_item["id"]] = backlog_item
         # self.lock.release()
+        logger.debug("Add backlog item: %s" % backlog_item)
         return len(self.backlog)
 
     def get(self, backlog_id):
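
The hasattr()-to-get() changes above matter because backlog items are plain dicts: hasattr() inspects object attributes, never dict keys, so the old checks were always False and add() rejected every item at the "worker" test. A minimal standalone illustration:

    item = {"id": "vim-1", "worker": lambda vimid: 0}

    hasattr(item, "worker")          # False: dicts expose keys, not attributes
    item.get("worker") is not None   # True: the lookup the fix switches to
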
@@ -305,17 +307,19 @@ class MultiCloudThreadHelper(object):
             self.duration = 0
             self.owner = owner
             # debug: dump the callstack to determine the callstack, hence the lcm
-            logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
+            logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
 
         def run(self):
-            logger.debug("Start processing backlogs")
+            logger.debug("Thread %s starts processing backlogs" % self.owner.name)
             nexttimer = 0
             while self.owner.state_ == 1:  # and self.owner.count() > 0:
                 if nexttimer > 1000000:
                     # sleep in case of interval > 1 second
                     time.sleep(nexttimer // 1000000)
                 nexttimer = 30*1000000  # initial interval in us to be updated:30 seconds
-                for backlog_id, item in self.owner.backlog:
+                # logger.debug("self.owner.backlog len: %s" % len(self.owner.backlog))
+                for backlog_id, item in self.owner.backlog.items():
+                    # logger.debug("evaluate backlog item: %s" % item)
                     # check interval for repeatable backlog item
                     now = MultiCloudThreadHelper.get_epoch_now_usecond()
                     repeat_interval = item.get("repeat", 0)
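
The switch to .items() above is likewise a correctness fix: iterating a dict directly yields only its keys, so unpacking each key into (backlog_id, item) raises ValueError for any id longer than two characters. A quick sketch:

    backlog = {"vim-1": {"worker": None}}
    for backlog_id, item in backlog.items():   # yields ("vim-1", {...}) pairs
        pass
    for backlog_id, item in backlog:           # old form unpacks the key string
        pass                                   # ValueError: too many values to unpack
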
@@ -331,10 +335,11 @@ class MultiCloudThreadHelper(object):
                             # not time to run this backlog item yet
                             continue
 
+                    # logger.debug("process backlog item: %s" % backlog_id)
                     worker = item.get("worker", None)
                     payload = item.get("payload", None)
                     try:
-                        item["status"] = worker(payload) or 0
+                        item["status"] = worker(*payload) or 0
                     except Exception as e:
                         item["status"] = e.message
                     cache_item_for_query = {
@@ -364,6 +369,6 @@ class MultiCloudThreadHelper(object):
             # while True:
             #     logger.debug("thread sleep for 5 seconds")
             #     time.sleep(5)  # wait forever, testonly
-            logger.debug("stop processing backlogs")
+            logger.debug("Thread %s stops processing backlogs" % self.owner.name)
             self.owner.state_ = 0
             # end of processing
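
Taken together, these helper changes redefine the backlog contract: items are dicts read with dict.get(), the worker is invoked as worker(*payload) (bound methods carry self implicitly, so payloads no longer repeat it), and the helper thread starts on demand rather than in __init__. A minimal usage sketch, assuming the module is importable as common.msapi.helper given PYTHONPATH=lib/share from the run scripts; the worker and ids are hypothetical:

    from common.msapi import helper

    def audit_worker(vimid, project_idorname):
        # hypothetical worker; return 0 on success, anything else becomes the item status
        return 0

    audit_thread = helper.MultiCloudThreadHelper("azcap")
    audit_thread.add({
        "id": "CloudOwner_RegionOne",
        "worker": audit_worker,                     # looked up via item.get("worker")
        "payload": ("CloudOwner_RegionOne", None),  # unpacked as worker(*payload)
        "repeat": 10 * 1000000,                     # microseconds; 0 means one-shot
    })
    if 0 == audit_thread.state():
        audit_thread.start()                        # no longer auto-started in __init__
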
diff --git a/share/newton_base/registration/registration.py b/share/newton_base/registration/registration.py
index 550c394..a875cd8 100644
@@ -64,8 +64,7 @@ class Registry(APIView):
             backlog_item = {
                 "id": vimid,
                 "worker": self.register_helper.registryV0,
-                "payload": (self.register_helper,
-                            vimid, specified_project_idorname),
+                "payload": (vimid, specified_project_idorname),
                 "repeat": 0,
                 "status": (1,
                            "The registration process waits to"
@@ -120,7 +119,7 @@ class Registry(APIView):
             backlog_item = {
                 "id": vimid,
                 "worker": self.register_helper.unregistryV0,
-                "payload": (self.register_helper, vimid),
+                "payload": (vimid),
                 "repeat": 0,
                 "status": (1, "The registration process waits"
                               " to be scheduled to run")
diff --git a/share/newton_base/resource/infra_workload.py b/share/newton_base/resource/infra_workload.py
index 7bf0795..db216ce 100644
@@ -36,7 +36,7 @@ class InfraWorkload(APIView):
     def __init__(self):
         self._logger = logger
 
-    def post(self, request, vimid=""):
+    def post(self, request, vimid="", requri=""):
         self._logger.info("vimid: %s" % (vimid))
         self._logger.info("data: %s" % (request.data))
         self._logger.debug("META: %s" % request.META)
@@ -529,13 +529,13 @@ class APIv1InfraWorkload(InfraWorkload):
         super(APIv1InfraWorkload, self).__init__()
         # self._logger = logger
 
-    def post(self, request, cloud_owner="", cloud_region_id=""):
+    def post(self, request, cloud_owner="", cloud_region_id="", requri=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).post(request, vimid)
+        return super(APIv1InfraWorkload, self).post(request, vimid, requri)
 
     def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
diff --git a/share/newton_base/resource/infra_workload_helper.py b/share/newton_base/resource/infra_workload_helper.py
index bfc3fc8..ee8291b 100644
@@ -420,9 +420,9 @@ class InfraWorkloadHelper(object):
 
             # get stack status
             service_type = "orchestration"
-            resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+            resource_uri = "/stacks/id=%s" % stack_id if stack_id else "/stacks"
             if stack_name:
-                resource_uri = "/stacks?name=%s" % stack_id if not stack_id else resource_uri
+                resource_uri = "/stacks?name=%s" % stack_name if not stack_id else resource_uri
 
             self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
             retcode, content, os_status = \
@@ -443,3 +443,55 @@ class InfraWorkloadHelper(object):
         except Exception as e:
             self._logger.error(e.message)
             return 12, "GET_FAILED", e.message
+
+
+    def workload_detail(self, vimid, stack_id, nexturi=None, otherinfo=None, project_idorname=None):
+        '''
+        get workload status by either stack id or name
+        :param vimid:
+        :param stack_id:
+        :param nexturi: stacks/<stack id>/<nexturi>
+        :param otherinfo:
+        :return:
+        '''
+        try:
+            # assume the workload_type is heat
+            cloud_owner, regionid = extsys.decode_vim_id(vimid)
+            # should go via multicloud proxy so that the selflink is updated by multicloud
+            retcode, v2_token_resp_json, os_status = \
+                helper.MultiCloudIdentityHelper(
+                    settings.MULTICLOUD_API_V1_PREFIX,
+                    cloud_owner, regionid, "/v2.0/tokens",
+                {"Project": project_idorname})
+
+            if retcode > 0 or not v2_token_resp_json:
+                errmsg = "authenticate fails:%s, %s, %s" % \
+                         (cloud_owner, regionid, v2_token_resp_json)
+                logger.error(errmsg)
+                return retcode, "GET_FAILED", errmsg
+
+            # get stack status
+            service_type = "orchestration"
+            resource_uri = "/stacks/%s" % stack_id
+            if nexturi:
+                resource_uri += "/" + nexturi
+
+            self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+            retcode, content, os_status = \
+                helper.MultiCloudServiceHelper(cloud_owner, regionid,
+                                               v2_token_resp_json,
+                                               service_type, resource_uri,
+                                               None, "GET")
+
+            if retcode > 0 or not content:
+                errmsg = "Stack query %s response: %s" % (resource_uri, content)
+                self._logger.debug(errmsg)
+                return retcode, "GET_FAILED", errmsg
+
+            stack = content.get('stack', {})  # if retcode == 0 and content else []
+            stack_status = stack.get("stack_status", "GET_FAILED")
+
+            return retcode, stack_status, content
+        except Exception as e:
+            self._logger.error(e.message)
+            return 12, "GET_FAILED", e.message
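
Like workload_status() above it, the new workload_detail() returns a (retcode, status, content) triple, but it queries the Heat detail endpoint /stacks/<stack_id> (plus an optional sub-resource via nexturi, e.g. /stacks/<stack_id>/resources) instead of a list filter. A hedged usage sketch, called on an InfraWorkloadHelper instance; the vimid and stack id are hypothetical:

    retcode, status, content = self.workload_detail(
        "CloudOwner_RegionOne",                   # vimid, decoded into cloud owner/region
        "3095aefc-09fb-4bc7-b1f0-f21a304e8864")   # Heat stack id (hypothetical)
    if retcode == 0:
        # status is the stack's stack_status field, e.g. "CREATE_COMPLETE"
        print(status)
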
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 507f0fb..cf0281f 100644
@@ -58,7 +58,7 @@ class APIv0Registry(newton_registration.Registry):
         backlog_item = {
             "id": vimid,
             "worker": worker_self.azcap_audit,
-            "payload": (worker_self, vimid, specified_project_idorname),
+            "payload": (vimid, specified_project_idorname),
             "repeat": 10*1000000,  # repeat every 10 seconds
         }
         gAZCapAuditThread.add(backlog_item)
@@ -88,6 +88,9 @@ class APIv1Registry(newton_registration.Registry):
                           % (cloud_owner, cloud_region_id))
 
         try:
+            # Get the specified tenant id
+            specified_project_idorname = request.META.get("Project", None)
+
             vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
             # vim registration will trigger the start the audit of AZ capacity
@@ -98,7 +101,7 @@ class APIv1Registry(newton_registration.Registry):
             backlog_item = {
                 "id": vimid,
                 "worker": worker_self.azcap_audit,
-                "payload": (worker_self, vimid),
+                "payload": (vimid, specified_project_idorname),
                 "repeat": 5 * 1000000,  # repeat every 5 seconds
             }
             gAZCapAuditThread.add(backlog_item)
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 6b06485..409d74e 100644
@@ -85,7 +85,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 backlog_item = {
                     "id": workloadid,
                     "worker": worker_self.workload_update,
-                    "payload": (worker_self, vimid, workloadid,
+                    "payload": (vimid, workloadid,
                                 request.data, specified_project_idorname),
                     "repeat": 0,  # one time job
                     # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -97,7 +97,9 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 gInfraWorkloadThread.add(backlog_item)
                 if 0 == gInfraWorkloadThread.state():
                     gInfraWorkloadThread.start()
-
+                # progress = worker_self.workload_update(
+                #     vimid, workloadid,
+                #     request.data, specified_project_idorname)
                 # now query the progress
                 backlog_item = gInfraWorkloadThread.get(workloadid)
                 if not backlog_item:
@@ -206,7 +208,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                     settings.AAI_BASE_URL
                 )
                 progress_code, progress_status, progress_msg =\
-                    worker_self.workload_status(
+                    worker_self.workload_detail(
                         vimid, stack_id=workloadid,
                         project_idorname=specified_project_idorname)
 
@@ -277,7 +279,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             backlog_item = {
                 "id": workloadid,
                 "worker": worker_self.workload_delete,
-                "payload": (worker_self, vimid, workloadid, request.data,
+                "payload": (vimid, workloadid, request.data,
                             specified_project_idorname),
                 "repeat": 0,  # one time job
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -335,29 +337,29 @@ class APIv1InfraWorkload(InfraWorkload):
         super(APIv1InfraWorkload, self).__init__()
         # self._logger = logger
 
-    def post(self, request, cloud_owner="", cloud_region_id=""):
+    def post(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).post(request, vimid)
+        return super(APIv1InfraWorkload, self).post(request, vimid, workloadid)
 
-    def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def get(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).get(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).get(request, vimid, workloadid)
 
-    def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def delete(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
 
 
 class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
@@ -503,9 +505,8 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
         # reset to make sure "files" are empty
         template_data["files"] = {}
 
-        template_data["stack_name"] = vf_module_id \
-            if not hasattr(template_data, "stack_name")\
-            else template_data["stack_name"]
+        template_data["stack_name"] =\
+            template_data.get("stack_name", vf_module_id)
 
         # authenticate
         cloud_owner, regionid = extsys.decode_vim_id(vimid)
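
The stack_name rewrite fixes the same hasattr-on-a-dict pattern seen in helper.py: the old condition was always False, so a caller-supplied stack_name was unconditionally clobbered with vf_module_id. With dict.get() the default only applies when the key is genuinely absent:

    template_data = {"stack_name": "my-stack"}
    # old form: hasattr(template_data, "stack_name") is False for any dict,
    # so the caller's name was always overwritten with the vf-module id
    template_data["stack_name"] = template_data.get("stack_name", "vf-module-id")
    # "my-stack" is preserved; the default is used only when the key is missing
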
diff --git a/starlingx/run.sh b/starlingx/run.sh
index a2dc4a7..7b69d16 100755
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 uwsgi --http :9009 --module starlingx.wsgi --master --processes 4
 
diff --git a/starlingx/starlingx/urls.py b/starlingx/starlingx/urls.py
index 5a76b33..ca9ef24 100644
@@ -38,7 +38,7 @@ urlpatterns = [
         r'infra_workload/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
-        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
         include('starlingx.proxy.urls')),
@@ -67,7 +67,7 @@ urlpatterns = [
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
-        r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
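
The quantifier change from * to + keeps the collection route (infra_workload/?$) and the item route from overlapping: with *, an empty workloadid also matched the item pattern, so requests without an id could be dispatched with workloadid="". A quick standalone check with plain re, outside Django; the same fix is applied to the titaniumcloud routes below:

    import re
    item_route = re.compile(r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$')

    item_route.match('infra_workload/abc-123')   # matches, workloadid == "abc-123"
    item_route.match('infra_workload/')          # None: an empty id no longer matches
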
diff --git a/starlingx/stop.sh b/starlingx/stop.sh
index 87d67eb..0b734f3 100755
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9009' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9009 --module starlingx.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/run.sh b/windriver/run.sh
index 74aff7f..650c2c1 100644
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 
 #nohup python manage.py runserver 0.0.0.0:9005 2>&1 &
diff --git a/windriver/stop.sh b/windriver/stop.sh
index ab8a72d..5e7a6ac 100644
@@ -15,4 +15,4 @@
 
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9005' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9005 --module titanium_cloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index 4d118dc..7a9f645 100644
@@ -83,7 +83,7 @@ urlpatterns = [
         r'infra_workload/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
-        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
         include('titanium_cloud.proxy.urls')),
@@ -122,7 +122,7 @@ urlpatterns = [
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
-        r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',