# ============LICENSE_START=======================================================
# ================================================================================
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
# -*- coding: utf-8 -*-
"""
Tests data_format CLI commands
"""
import os
import json

import pytest
from click.testing import CliRunner

from dcae_cli.cli import cli
from dcae_cli.catalog import MockCatalog
# Directory containing this test module; used to locate the mocked_components
# fixture tree referenced by the tests below.
TEST_DIR = os.path.dirname(__file__)
39 with open(path) as file:
40 return json.load(file)
def test_data_format():
    """End-to-end exercise of the `data_format` CLI subcommands
    (add / add --update / list / show / generate) against a mock catalog.

    NOTE(review): the original function header and the ``runner``
    initialization were lost in the garbled source and are reconstructed
    here — confirm the original test name and whether it relied on any
    pytest fixtures. Also restored a closing parenthesis that was missing
    from the second `expected = json.loads(...)` call.
    """
    runner = CliRunner()
    # Shared click context object: a fresh mock catalog (purged per run,
    # image enforcement off) plus a minimal user config.
    obj = {'catalog': MockCatalog(purge_existing=True, db_name='dcae_cli.test.db',
                                  enforce_image=False),
           'config': {'user': 'test-user'}}

    spec_file = os.path.join(TEST_DIR, 'mocked_components', 'model', 'int-class.format.json')
    cmd = "data_format add {:}".format(spec_file).split()

    # succeed the first time
    result = runner.invoke(cli, cmd, obj=obj)
    assert result.exit_code == 0

    # adding a duplicate is an error
    result = runner.invoke(cli, cmd, obj=obj)
    assert result.exit_code == 1
    assert 'exists' in result.output.lower()

    # --update replaces an existing format and succeeds
    cmd = "data_format add --update {:}".format(spec_file).split()
    result = runner.invoke(cli, cmd, obj=obj)
    assert result.exit_code == 0

    # light test of list format command
    cmd = 'data_format list'.split()
    df_spec = _get_spec(spec_file)
    df_name = df_spec['self']['name']
    assert df_name in runner.invoke(cli, cmd, obj=obj).output

    # light test of component info: show must round-trip the stored spec
    cmd = "data_format show {:}".format(df_name).split()
    spec_str = runner.invoke(cli, cmd, obj=obj).output
    assert df_spec == json.loads(spec_str)

    # generate: a directory that does not exist is reported as an error
    bad_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'baddir')
    cmd = "data_format generate --keywords \"name:1.0.2\" {:}".format(bad_dir).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "does not exist" in err_str

    # generate: a directory containing no JSON files is reported as an error
    empty_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'emptydir')
    cmd = "data_format generate --keywords \"name:1.0.2\" {:}".format(empty_dir).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "No JSON files found" in err_str

    # generate: a directory containing malformed JSON is reported as an error
    bad_json = os.path.join(TEST_DIR, 'mocked_components', 'model', 'badjson')
    cmd = "data_format generate --keywords \"name:1.0.2\" {:}".format(bad_json).split()
    err_str = runner.invoke(cli, cmd, obj=obj).output
    assert "Bad JSON file" in err_str

    # generate with --keywords: string properties carry length/pattern keywords
    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
    cmd = "data_format generate --keywords name:1.0.2 {:} ".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "maxLength": 0, \n "minLength": 0, \n "pattern": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n')
    assert actual == expected

    # generate without --keywords: plain string properties only
    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir')
    cmd = "data_format generate name:1.0.2 {:} ".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }, \n "foobar2": {\n "description": "", \n "type": "string"\n }\n }, \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n')
    assert actual == expected

    # generate from a single JSON file rather than a directory
    generate_dir = os.path.join(TEST_DIR, 'mocked_components', 'model', 'generatedir', 'ex1.json')
    cmd = "data_format generate name:1.0.2 {:} ".format(generate_dir).split()
    actual = json.loads(runner.invoke(cli, cmd, obj=obj).output)
    expected = json.loads('{\n "dataformatversion": "1.0.0", \n "jsonschema": {\n "$schema": "http://json-schema.org/draft-04/schema#", \n "additionalproperties": true, \n "description": "", \n "properties": {\n "foobar": {\n "description": "", \n "type": "string"\n }\n }, \n "required": [\n "foobar"\n ], \n "type": "object"\n }, \n "self": {\n "description": "", \n "name": "name", \n "version": "1.0.2"\n }\n}\n')
    assert actual == expected
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main([__file__, ])