diff --git "a/train (1).csv" "b/train (1).csv" new file mode 100644--- /dev/null +++ "b/train (1).csv" @@ -0,0 +1,17825 @@ +Endpoint,Description,Inputs,Output,Test_Code +/audit/rest/delete/{UUID}/,requesting to delete audit without authorization,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_audit_delete_without_authorization(anonymous_exec_api): + """""" + deleting the audits without authorization + """""" + r = anonymous_exec_api.audit_delete(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/audit/rest/delete/{UUID}/,requesting to delete audit using valid existing UUID.Check the user type before performing the operation.,,"{ +""status"" : 204, +""response"" : success +}","def test_audit_delete(run_api, audit_delete): + """""" + deleting the audits + """""" + r = audit_delete + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) + + elif run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) +" +/audit/rest/delete/{UUID}/,"requesting to delete audit using invalid UUID. Check the user type before performing the operation. +","{ +audit_uuid = ""invalid_audit_uuid"" +}","{ +""status"" : 404/403, +""response"" : fail +}","def test_audit_delete_invalid_uuid(run_api): + """""" + deleting the audits with invalid uuid + """""" + r = run_api.audit_delete(""invalid-audit-uuid"", params={}) + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 404) + elif run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) +" +/audit/rest/delete/{UUID}/,requesting to delete audit using invalid tokens,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_audit_delete_with_invalid_token(invalid_exec_api): + """""" + deleting the audits with invalid token + """""" + r = invalid_exec_api.audit_delete(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/audit/rest/detail/{UUID}/,requesting to fetch audit details without authorization,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_audit_details_without_authorization(anonymous_exec_api): + """""" + Audit details without authorization + """""" + r = anonymous_exec_api.audit_details(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/audit/rest/detail/{UUID}/,requesting to fetch audit details using invalid token,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_audit_details_with_invalid_token(invalid_exec_api): + """""" + Audit details with invalid token + """""" + r = invalid_exec_api.audit_details(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/audit/rest/detail/{UUID}/,"requesting to fetch audit details using invalid id.Check the user type before performing the operation. 
+","{ +audit_uuid = ""invalid_audit_uuid"" +}","{ +""status"" : 400 / 404 / 403, +""response"" : Details +}","def test_audit_details_invalid_uuid(run_api): + """""" + Audit details of invalid audit + """""" + r = run_api.audit_details(""invalid-audit-uuid"", params={}) + if run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code in [400, 404] + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) +" +/audit/rest/detail/{UUID}/,"requesting to fetch audit details for existing audit. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200 / 403, +""response"" : Details +}","def test_audit_details(skip_if_manager, run_api, audit_details): + """""" + Audit details + """""" + r = audit_details + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) +" +/audit/rest/detail/{UUID}/,requesting to fetch audit details for existing audit by manager,,,"PARAMETERS = [{""dest_obj"": OBJ_LIB, ""get_object"": True}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_audit_details_manager(skip_if_not_manager, run_api, admin_exec_api, custom_lib_admin_operations, custom_lib_non_admin_operations, audit_details): + """""" + Audit list + """""" + params, r = custom_lib_admin_operations + rjson = r.json() + vm_name = rjson['name'] + r = admin_exec_api.audit_list({""obj_name"": vm_name}) + uuid = r.json()[""results""][0]['uuid'] + result = run_api.audit_details(uuid, params={}) + test_assert.status(result, manager_rights_response(endpoint, manages_user=False)) + + params, r = custom_lib_non_admin_operations + rjson = r.json() + vm_name = rjson['name'] + r = admin_exec_api.audit_list({""obj_name"": vm_name}) + uuid = r.json()[""results""][0]['uuid'] + result = run_api.audit_details(uuid, params={}) + test_assert.status(result, manager_rights_response(endpoint, manages_user=True))" +/audit/rest/list/,requesting to list audits. 
Check the user type before performing the operation.,,"{ +""status"" : 200 / 403, +""response"" : success +}","def test_audit_list(run_api, audit_list): + """""" + Audit list + """""" + template, r = audit_list + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, template, ""audit_list"", ""obj_name"") + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/audit/rest/list/,requesting to list audits without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_audit_list_without_authorization(anonymous_exec_api): + """""" + Audit list without authorization + """""" + r = anonymous_exec_api.audit_list() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/audit/rest/list/,requesting to list audits with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_audit_list_with_invalid_token(invalid_exec_api): + """""" + Audit list with invalid token + """""" + r = invalid_exec_api.audit_list() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/audit/rest/list/,"requesting filtered list of audits. Check the user type before performing the operation. +","{ +servers, +user_filter, +obj_name_filter, +event_filter , +status_filter , +search_filter , +}","{ +""status"" : 403 / 200 +}","@pytest.mark.skip(reason=""cannot validate the audits"") +def test_audit_list_filter(run_api, server_list): + """""" + Audit list by filtering + """""" + servers = [server[""hostname""] for server in server_list.json()[""results""]] + user_filter = {""user"": choice([""colama"", ""manager"", ""vivekt""]), ""page_size"": 10} + obj_name_filter = {""obj_name"": choice(servers), ""page_size"": 10} + event_filter = {""event"": choice([""Build ISO List (TASK)"", ""Sync Share Paths (TASK)"", + ""Island Deploy (API)"", ""Delete Repo Store Files (TASK)""]), ""page_size"": 10} + status_filter = {""status"": choice([""Failure"", ""Success""]), ""page_size"": 10} + search_filter = {""search"": choice([""Refresh"", ""BuildISOList"", ""DeleteRepoStoreFiles"", + ""vivekt"", ""colama"", ""manager"", ""main"", ""mh"", ""mh-2""]), ""page_size"": 10} + filters = [user_filter, obj_name_filter, event_filter, status_filter, search_filter] + for filter in range(len(filters)): + r = run_api.audit_list(params=filters[filter]) + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/config/rest/delete/,"requesting to delete the config values with valid data.Check the user type before performing the operation +",,"{ +""status"" : 403 +}","def test_config_delete(skip_if_admin, run_api, config_delete): + """""" + deleting the config values + """""" + params, r = config_delete + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) +" +/config/rest/delete/,"requesting to delete the config values when provided valid config name which can not be deleted(name='primary_server'). Check the user type before performing the operation. 
+","config_value = { ""name"": ""primary_server"" }","{ + ""status"": 401 / 403 +} +"," +def test_config_delete_non_deletable(run_api): + """""" + deleting the non deletable config values + """""" + config_value = { + ""name"": ""primary_server"", + } + r = run_api.config_delete(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) +" +/config/rest/delete/,requesting to delete the config values when provided valid config name but without authorization,"config_value = { ""name"": ""primary_server"" }","{ + ""status"": 401, + ""message"": ""Authentication credentials were not provided"" +} +","def test_config_delete_without_authorization(anonymous_exec_api): + """""" + deleting the non deletable config values without authorization + """""" + config_value = { + ""name"": ""primary_server"", + } + r = anonymous_exec_api.config_delete(config_value) + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" +" +/config/rest/delete/,"deleting config using invalid name value. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","config_value = { + ""name"": ""123"", + ""value"": ""password"" + }","{ + ""status"": 400, + ""message"":""Config matching query does not exist."" +} +","def test_config_delete_invalid_name(run_api): + """""" + config delete invalid name + """""" + config_value = { + ""name"": ""123"", + ""value"": ""password"" + } + r = run_api.config_delete(config_value) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""Config matching query does not exist."", ""|> Json %s"" % rjson +" +/config/rest/disable_ssl,"disabling the ssl in config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 400, + ""message"" : ""SSL is not enabled. So, can't disable it"" +}","def test_config_disable_ssl(skip_if_admin, run_api): + """""" + config disable ssl + """""" + r = run_api.config_disable_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""SSL is not enabled. So, can't disable it"", ""|> Json %s"" % rjson +" +/config/rest/enable_ssl/,"enabling the ssl in config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 400, + ""message"" : ""Upload SSL certificates, before enabling SSL"" +}","def test_config_enable_ssl(skip_if_admin, run_api): + """""" + config enable ssl + """""" + r = run_api.config_enable_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""Upload SSL certificates, before enabling SSL"", ""|> Json %s"" % rjson +" +/config/rest/get,"fetching the list of config values. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"": 200, + ""response"": success +}","def test_config_get(run_api, config_get): + """""" + fetching the list of config values + """""" + r = config_get + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/get,"fetching the list of config values without authorization. +",,"{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_config_get_without_authorization(anonymous_exec_api): + """""" + Fetching the values of variables stored in db without authorization + """""" + r = anonymous_exec_api.config_get() + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" + +" +/config/rest/get,"fetching the list of config values using invalid token +",,"{ + ""status"" : 401, + ""message"" : ""Invalid"" +}","def test_config_get_with_invalid_token(invalid_exec_api): + """""" + Fetching the values of variables stored in db with invalid token + """""" + r = invalid_exec_api.config_get() + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Invalid token."" +" +/config/rest/get_google_auth_client_id/,getting the Google authorization client_id successfully.,,"{ + ""status"" : 200, + ""response"" : client_id provided +}","def test_get_google_auth_client_id(run_api): + """""" + get google auth client_id + """""" + r = run_api.get_google_auth_client_id() + rjson = r.json() + test_assert.status(r, 200) + assert rjson['name'] == ""client_id"", ""|> Json %s"" % rjson +" +/config/rest/get/,"successfully fetching the list of config values.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"": 200, + ""response"": Information of config list +} +"," +def test_config_get(run_api, config_get): + """""" + fetching the list of config values + """""" + r = config_get + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) + +" +/config/rest/get/,"fetching the list of config values. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 200, + +}","@pytest.mark.parametrize(""name"", CONFIG_GET, indirect=True) +def test_config_get_name(run_api, config_get_name, name): + """""" + Fetching the values of variables stored in db + """""" + params, r = config_get_name + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/get/,"fetching the list of config values without authorization +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_config_get_name_without_authorization(anonymous_exec_api): + """""" + Fetching the values of variables stored in db without authorization + """""" + r = anonymous_exec_api.config_get_name(name=""HELLO"") + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" + + +" +/config/rest/get/,fetching the list of config values without Authorization,,"{ + ""status"": 401, + ""message"": ""Authentication credentials were not provided"" +} +"," +def test_config_get_without_authorization(anonymous_exec_api): + """""" + Fetching the values of variables stored in db without authorization + """""" + r = anonymous_exec_api.config_get() + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" +" +/config/rest/get/,"fetching the list of config values using invalid token +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 401, + ""message"" : ""Invalid token"" +}","def test_config_get_name_with_invalid_token(invalid_exec_api): + """""" + Fetching the values of variables stored in db with invalid token + """""" + r = invalid_exec_api.config_get_name(name=""HELLO"") + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Invalid token."" +" +/config/rest/get/,"fetching the list of config values for invalid name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 400, + +}","def test_config_get_invalid_name(run_api): + """""" + Fetching the values of invalid variable stored in db + """""" + r = run_api.config_get_name(name=""HELLO"") + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/ldap/,"fetching the LDAP details of config. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"": 400, + ""message"": ""No such file or directory: 'colama/ldap.json'"" +}","def test_config_ldap_get(run_api): + """""" + get config ldap + """""" + r = run_api.config_ldap_get() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""[Errno 2] No such file or directory: 'colama/ldap.json'"", ""|> Json %s"" % rjson +" +/config/rest/os-version/,successfully fetching the information of Os Version without Authorization,,"{ + ""status"": 200, + ""response"": Server version +} +","def test_config_osversion_without_authorization(anonymous_exec_api): + """""" + Fetching the information of Os Version without authorization + """""" + r = anonymous_exec_api.config_version() + test_assert.status(r, 200) +" +/config/rest/os-version/,"fetching the information of Os Version with invalid token +",,"{ + ""status"": 401, + ""message"": ""Invalid token "" +}","def test_config_osversion_with_invalid_token(invalid_exec_api): + """""" + Fetching the information of Os Version with invalid token + """""" + r = invalid_exec_api.config_version() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/config/rest/set/,"setting the value of client_id config as ""None"".Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""client_id"", + ""value"": None +}","{ +""status"" : 400, +""response"" : ""FAILURE"" +}","def test_config_set_None_client_id(run_api): + """""" + Set the client_id config value as None + """""" + config_value = { + ""name"": ""client_id"", + ""value"": None + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'Invalid Client_id Value' in res[""error""], res + +" +/config/rest/set/,"setting the None value to secret config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""secret"", + ""value"": None +}","{ +""status"" : 400, +""response"" : 'Invalid secret_key Value' +}","def test_config_None_set_secret(run_api): + """""" + Set the secret-key config value as None + """""" + config_value = { + ""name"": ""secret"", + ""value"": None + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'Invalid secret_key Value' in res[""error""], res + + +" +/config/rest/set/,"setting the invalid name to config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""name"": ""client_id_invalid"" + }","{ +""status"" : 400, +""response"" : 'Not a supported config name' +}","def test_config_set_invalid_client_id(run_api): + """""" + Set the client_id config value, using invalid config key + """""" + config_value = { + ""name"": ""client_id_invalid"" + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'not a supported config name' in res[""error""], res +" +/config/rest/set/,"setting the empty value to secret config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""secret"", + ""value"": """" +}","{ +""status"" : 200, +}","def test_config_set_secret_key(skip_if_admin, run_api): + """""" + Set the secret_key config value + """""" + config_value = { + ""name"": ""secret"", + ""value"": """" + } + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 200) +" +/config/rest/set/,"setting the config without the name parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""value"": ""password"" + }","{ + ""status"": 400, + ""message"":Please provide config name."" +} +","def test_config_set_without_name(run_api): + """""" + config set without name + """""" + config_value = { + ""value"": ""password"" + } + r = run_api.config_set(config_value) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""Please provide config name."", ""|> Json %s"" % rjson + +" +/config/rest/set/,setting the config values when provided with valid data but without Authorization,"config_value = {""name"": ""protected_mode"", + ""value"": ""test"" + } +","{ + ""status"": 401, + ""message"": ""Authentication credentials were not provided"" +} +","def test_config_set_without_authorization(anonymous_exec_api): + """""" + Set the config values without authorization + """""" + config_value = {""name"": ""protected_mode"", + ""value"": ""test"" + } + r = anonymous_exec_api.config_set(config_value) + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" +" +/config/rest/set/,setting the config values when provided with valid data but with invalid token,"config_value = {""name"": ""protected_mode"", + ""value"": ""test"" + } +","{ + ""status"": 401, + ""message"": ""Invalid Token"" +} +","def test_config_set_with_invalid_token(invalid_exec_api): + """""" + Set the config values without authorization + """""" + config_value = {""name"": ""protected_mode"", + ""value"": ""test"" + } + r = invalid_exec_api.config_set(config_value) + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Invalid token."" +" +/config/rest/set/,setting the config values when provided with invalid data.Check the user type before performing the operation.,"config_value = 
{""name"": ""protected_mode"", + ""value"": ""test"" + } +","{ + ""status"": 400 +}","PARAMETERS = [{ + ""name"": ""protected_mode"", + ""value"": ""test""}, { + ""name"": ""Hello"", + ""value"": ""test"" + }, ] + + +@pytest.mark.parametrize(""config_value"", PARAMETERS) +def test_config_set_invalid_data(run_api, config_value): + """""" + Set the config invalid values + """""" + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + +" +/config/rest/set/,"setting the config for non-editable keys. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""ssl_cert"", + ""value"": ""password"" +}","{ + ""status"": 400, + ""message"": ""This can't be used to set ['ssl_cert', 'ssl_key']"" +} +","def test_config_set_non_editable_keys(run_api): + """""" + config set non editable keys + """""" + config_value = { + ""name"": ""ssl_cert"", + ""value"": ""password"" + } + r = run_api.config_set(config_value) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""This can't be used to set ['ssl_cert', 'ssl_key']"", ""|> Json %s"" % rjson + +" +/config/rest/set/,setting the config for client_id,"{ + ""name"": ""client_id"", + ""value"": """" + }","{ + ""status"" :200 +}","def test_config_set_client_id(skip_if_admin, run_api): + """""" + Set the client_id config value + """""" + config_value = { + ""name"": ""client_id"", + ""value"": """" + } + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 200) + + +" +/config/rest/upload_ssl/,"uploading ssl for config without any file. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"": 400, + ""response"": ""No file was submitted"" +}","def test_config_upload_ssl_without_any_file(run_api): + """""" + config upload ssl without any file + """""" + r = run_api.config_upload_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert str(rjson['cert_file']) == ""['No file was submitted.']"", ""|> Json %s"" % rjson + assert str(rjson['key_file']) == ""['No file was submitted.']"", ""|> Json %s"" % rjson +" +/config/rest/version/,requesting to get Version and Build Number of the product without Authorization,,"{ +""status"" : 200 +}","def test_version_config_without_authorization(anonymous_exec_api): + """""" + Fetching the information of Version and Build Number without authorization + """""" + r = anonymous_exec_api.config_version() + test_assert.status(r, 200) +" +/config/rest/version/,requesting to get Version and Build Number of the product,,"{ + ""status"": 200, + ""response"": Version and build number +} +","def test_version_config(config_version): + """""" + Fetching the information of Version and Build Number + """""" + r = config_version + test_assert.status(r, 200) +" +/deploy/rest/add-tags/,successfully adding tags to deployment provided that the number of machines is equal to the number of tags.,"{ +""machine_list"": [machine_id], +""tags_list"": [[tage_name]] +}","{ + ""status"": 201, +}","def test_deploy_add_tags(run_api, deploy_image): + """""" + Add tags when no of machines is equal to number of tags + """""" + x, r = deploy_image + machine_id = r.json()[""uuid""] + tage_name = ""random_tag"" + params = {""machine_list"": [machine_id], ""tags_list"": [[tage_name]]} + response = run_api.deploy_add_tags(params=params) + test_assert.status(response, 201) + machine_details = run_api.deploy_details(machine_id).json() + all_tags = [tags['value'] for tags in machine_details['tags']] + assert tage_name in all_tags, ""|> Json %s"" % all_tags +" +/deploy/rest/add-tags/,adding tags to deployment provided that the number of machines is less than the number of tags.,"{ +""machine_list"": [machine_id], +""tags_list"": [[""test_tag1""], [""test_tag2""]] +}","{ + ""status"": 400, + ""message"": ""Not enough machines to add tags"" +}","def test_deploy_add_tag_with_less_machine(run_api, deploy_image): + """""" + Add tags when no of machines is less than number of tags provided + """""" + x, r = deploy_image + machine_id = r.json()[""uuid""] + params = {""machine_list"": [machine_id], ""tags_list"": [[""test_tag1""], [""test_tag2""]]} + response = run_api.deploy_add_tags(params=params) + test_assert.status(response, 400) + assert response.json()['error'] == 'Not enough machines to add tags to.' 
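    # A minimal sketch (an assumption, not taken from the API docs) of how
    # /deploy/rest/add-tags/ is presumed to pair its two lists -- one tag
    # sub-list applied per machine UUID, index by index:
    #   {""machine_list"": [""<uuid-1>"", ""<uuid-2>""],
    #    ""tags_list"": [[""tag-a""], [""tag-b""]]}
    # With one machine but two tag sub-lists, as in this test, the server is
    # expected to reject the request with 400 'Not enough machines to add tags to.'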
+ +" +/deploy/rest/bulkops/,performing valid bulk operations on machines without Authorization,"{ + deploy_list = [""invalid""] +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_bulkops_without_authorization(anonymous_exec_api): + """""" + when requested with list of valid uuid without authorization + """""" + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, + ""op"": ""start"" + } + depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs by manager who has rights over servers,,"{ +""status"" : 400 / 201 +}","PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_bulkops_delete_by_manager_with_server_rights(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + when requested with machines_list (all valid UUID) by manager who has rights over servers + """""" + # when manager have the rights on the server + deploy_id = custom_lib_non_admin_operations + params = { + ""machine_list"": [deploy_id], + ""op"": ""delete"" + } + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 201) + + # when manager does not have the rights on the server + deploy_id = custom_lib_admin_operations + params = { + ""machine_list"": [deploy_id], + ""op"": ""delete"" + } + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 400) +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs by a manager who does not have rights over the servers,,"{ +""status"" : 400 +}","PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_bulkops_by_manager(skip_if_not_manager, custom_lib_admin_operations, run_api): + """""" + when manager does not manage the user nor the server + """""" + deploy_id = custom_lib_admin_operations + params = { + ""machine_list"": [deploy_id], + ""op"": ""start"" + } + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 400) +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs,"{ + deploy_list = [] +}","{ +""status"" : 400 / 201 +}","def test_deploy_bulkops(run_api, library_add_new_vm): + """""" + Bulk Operations on VM's with success and failed cases + """""" + deploy_list = [] + for i in range(2): + params, r = library_add_new_vm + lib_id = r[""uuid""] + r = run_api.deploy_image(lib_id) + x = r.json() + deploy_id = x[""uuid""] + wait_to_complete(run_api, task=x) + deploy_list.append(deploy_id) + + # [0, 1, 2, 4, 5] + # the above list consists of indexes of ""op"", operatons are 
for failed case and remaining are success + op = [""poweroff"", ""pause"", ""resume"", ""start"", ""delete"", ""start"", ""poweroff"", ""start"", ""pause"", ""resume"", ""poweroff"", ""delete""] + for i in range(len(op)): + deploy = { + ""machine_list"": deploy_list, + ""op"": op[i] + } + r = run_api.deploy_bulkops(deploy) + # failed operation indexes of op for failed case + if i < 3 or i == 4 or i == 5: + test_assert.status(r, 400) + # success operations + else: + test_assert.status(r, 201) +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of all invalid UUIDs,"{ + deploy_list = [""invalid""] +}","{ +""status"" : 400 +}","def test_deploy_bulkops_invalid_UUID(run_api): + """""" + when requested with list of invalid uuid + """""" + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, + ""op"": ""start"" + } + depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 400) + assert ""doesn't exist"" in depl_json[""failure""][0][""error""], depl_json +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with invalid token,"{ + deploy_list = [""invalid""] +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_bulkops_invalid_token(invalid_exec_api): + """""" + when requested with list of valid uuids with invalid token + """""" + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, + ""op"": ""start"" + } + depl_bulkops = invalid_exec_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs but without Authorization,"{ + deploy_list = [""invalid""] +} +deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + }","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_bulkops_without_authorization(anonymous_exec_api): + """""" + Invalid bulk operations without authorization + """""" + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + } + depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs but invalid token,"{ + deploy_list = [""invalid""] +} +deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + }","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_bulkops_invalid_token(invalid_exec_api): + """""" + Invalid bulk operations using invalid token + """""" + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + } + depl_bulkops = invalid_exec_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs,"{ + deploy_list = [] +} +deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + }","{ +""status"" : 400 +}","def 
test_deploy_bulkops_invalid_ops(run_api, deploy_image): + """""" + Bulk Operations on VM's with success and failed cases + """""" + deploy_list = [] + params, r = deploy_image + x = r.json() + deploy_id = x[""UUID""] + deploy_list.append(deploy_id) + deploy = { + ""machine_list"": deploy_list, + ""op"": ""invalid"" + } + depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 400) + assert depl_json[""result""] == 'FAILURE', depl_json + assert 'Unsupported operation' in depl_json[""error""], depl_json +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_crash_without_authorization(anonymous_exec_api): + """""" + crashing a deployed image without authorization + """""" + deploy_id = ""invalid"" + depl_crash = anonymous_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_crash_invalid_token(invalid_exec_api): + """""" + crashing a deployed image for invalid token + """""" + deploy_id = ""invalid"" + depl_crash = invalid_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_crash_invalid_UUID(run_api): + """""" + crashing a deployed image for invalid UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_crash(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by manager who has rights over servers,,,"endpoint = ""deploy_crash"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by manager who do not have rights over 
servers,,,"endpoint = ""deploy_crash"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_no_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by an admin user,,"{ +""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_admin(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by Admin + """""" + # Admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by a non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_non_admin(skip_if_not_non_admin, run_api, custom_lib_admin_operations): + """""" + Crashing a Deployed Image by non-admin + """""" + # Non-admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID,,"{ +""status"" : 201 +}","def test_deploy_crash(deploy_crash): + """""" + Crashing a Deployed Image + """""" + x, r = deploy_crash + test_assert.status(r, 201) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_delete_without_authorization(anonymous_exec_api): + """""" + Deleting the VM without authorization + """""" + deploy_id = ""invalid"" + depl_delete = anonymous_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/delete/{{UUID}}/,deleting the VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_delete_invalid_token(invalid_exec_api): + """""" + Deleting the VM using invalid token + """""" + deploy_id = ""invalid"" + depl_delete = 
invalid_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Deleting the VM by non-Admin + """""" + # Non-admin check for Deleting the Deployed VM created by different user. + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 403) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by manager who has server rights,,,"endpoint = ""deploy_delete"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_delete_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deleting the VM by Manager + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by manager who does not have server rights,,,"endpoint = ""deploy_delete"" +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_delete_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deleting the VM by Manager + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by admin user,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_admin(skip_if_not_admin, 
custom_lib_non_admin_operations, run_api): + """""" + Deleting the VM by Admin + """""" + # Admin check for Deleting the Deployed VM created by different user. + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 201) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data,,"{ +""status"" : 201 +}","def test_deploy_delete(deploy_delete): + """""" + Deleting the VM + """""" + x, r = deploy_delete + test_assert.status(r, 201) +" +/deploy/rest/delete/{{UUID}}/,deleting the VM using invalid UUID,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_delete_invalid_UUID(run_api): + """""" + Deleting the VM using invalid uuid + """""" + deploy_id = ""invalid"" + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/deploy/{{UUID}}/,provide server_list that is not under any group that the user is a part of,,"{ +""status"" : 400, +""message"" : 'You are not a part of the provided Group(s)' +}"," +def test_deploy_image_manager_server_list(skip_if_not_manager, run_api, library_add_new_vm): + """""" + deploy a VM image with provided server list, such that servers doesn't belong to any group, that user is a part of + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + server_list = list(run_api.clm_not_my_servers.keys()) + r = run_api.deploy_image(lib_id, server_list=server_list) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert ""Selected server(s) aren't under any group that you are a part of"" in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added filters,,,"library_count = 10 +prefix_name = ""filter_vmname_dep_list_"" + +@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) +def test_deploy_list_filter(run_api: apiops, lib_filter_kwargs): + """""" + Fetching the list of deployed images by adding filters + """""" + depl_res = [] + templates, res = lib_filter_kwargs + for r in res: + rjson = r.json() + depl_r = run_api.deploy_image(rjson[""UUID""]) + depl_res.append(depl_r) + try: + filter_on_input_result(run_api, library_count, templates, depl_res, prefix_name, run_api.deploy_list) + finally: + depl_UUIDs = [depl.json()[""UUID""] for depl in depl_res] + run_api.deploy_bulkops({""machine_list"": depl_UUIDs, ""op"": ""delete""}) + +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added created and update DateTime Filter,,"{ +""status"" : 400 +}","def test_deploy_filter_timefilter(run_api: apiops, library_add_new_vm): + """""" + Filter on created and update DateTime Filter + """""" + template, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id) + deploy_id = r.json()[""UUID""] + run_api.deploy_start(deploy_id) + r_details = run_api.deploy_details(deploy_id).json() + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_utime = r_details['utime'].replace('T', ' ').replace('Z', '') + str_ctime = r_details['created_on'].replace('T', ' ').replace('Z', '') + datetime_utime = convert_datetime_stringform(r_details['utime']) + datetime_ctime = convert_datetime_stringform(r_details['created_on']) 
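    # `convert_datetime_stringform` is a shared project helper (defined
    # elsewhere, not in this file); from its usage in this test it is assumed
    # to convert an ISO string such as '2023-09-14T17:59:39.173594Z' into a
    # datetime object, and a datetime object back into its string form, so
    # that the timedelta arithmetic below can be re-serialised into the
    # filter params.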
+ + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if machine was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + + assert datetime_ctime < datetime_utime, f""The details of the Deployment is {r_details}"" + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
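    # ........The blocks below exercise the 'created_date_range' choices with
    # ........the same start/end datetimes: 'today' should match, 'yesterday'
    # ........should not, 'week'/'month'/'year' should match, an invalid
    # ........choice must fail with HTTP 400, and whitespace-padded datetimes
    # ........should still match after trimming.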
+ # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'today' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'yesterday' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'week' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the week's last day at 23:59:59.9999999 and the test gets triggered on a new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'month' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the month's last day at 23:59:59.9999999 and the test gets triggered on a new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the year's last day at 23:59:59.9999999 and the test gets triggered on a new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json()
+ # .........When the created_start_date and created_end_date have white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is the same as in detail with tzone IST, but with the microsecond increased by 1 so the filter excludes the record + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is the same as in detail with tzone IST, but with the microsecond decreased by 1 so the filter still matches the record + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when end_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected 1 microsecond more than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected 1 microsecond less than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0
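+ # Quick illustration (added note, not part of the original flow): the IST variants in this test shift the UTC value by hand; 19800 seconds is exactly the '+05:30' suffix appended to the string, assuming the API parses ISO-8601 UTC-offset suffixes + ist_offset = timedelta(hours=5, minutes=30) + assert ist_offset == timedelta(seconds=19800), ist_offset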
+ # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'today' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'yesterday' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'week' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the week's last day at 23:59:59.9999999 IST and the test gets triggered on a new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'month' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the month's last day at 23:59:59.9999999 IST and the test gets triggered on a new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are the same as in the detail and 'created_date_range' is passed as 'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the year's last day at 23:59:59.9999999 IST and the test gets triggered on a new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False)
+ # .........When the created_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date have white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + def handle_trigger_delay_filtering_for_last_op(last_op_start_date, last_op_end_date, last_op_date_range, utc=True): + """""" + Function to handle the corner case where the machine's last operation happened a day before and the test gets triggered on a new day + """""" + if not utc: + last_op_start_date = convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + last_op_end_date = convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": last_op_start_date, ""last_op_end_date"": last_op_end_date, + ""last_op_date_range"": last_op_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, + ""last_op_end_date"": str_utime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected 1 microsecond more than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected 1 microsecond less than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_end_date' alone, when end_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected 1 microsecond more than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1
+ # ........ When the datetime is selected 1 microsecond less than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date' and 'last_op_end_date' when passed blank string + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": """", ""last_op_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'last_op_start_date' and 'last_op_end_date' when last_op_start_date is greater than last_op_end_date + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'today' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'yesterday' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine's last operation happened yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'week' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the week's last day at 23:59:59.9999999 and the test gets triggered on a new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'month' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the month's last day at 23:59:59.9999999 and the test gets triggered on a new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"")
+ # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the year's last day at 23:59:59.9999999 and the test gets triggered on a new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # .........When the last_op_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the last_op_start_date and last_op_end_date have white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + str_utime + "" "", ""last_op_end_date"": "" "" + str_utime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is the same as in detail with tzone IST, but with the microsecond increased by 1 so the filter excludes the record + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is the same as in detail with tzone IST, but with the microsecond decreased by 1 so the filter still matches the record + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_end_date' alone, when end_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected 1 microsecond more than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1
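+ # (note: shifting the stored UTC value by 19800 seconds and appending '+05:30' denotes the same instant as the bare UTC string, so these IST checks are expected to mirror the UTC checks above)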
+ # ........ When the datetime is selected 1 microsecond less than in the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'today' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'yesterday' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine's last operation happened yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'week' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the week's last day at 23:59:59.9999999 IST and the test gets triggered on a new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'month' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the month's last day at 23:59:59.9999999 IST and the test gets triggered on a new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False)
+ # ........When the 'last_op_start_date' and 'last_op_end_date' are the same as in the detail and 'last_op_date_range' is passed as 'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine's last operation happened on the year's last day at 23:59:59.9999999 IST and the test gets triggered on a new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # .........When the last_op_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the last_op_start_date and last_op_end_date have white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", ""last_op_end_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # ........Filter on 'created_start_date', 'created_end_date', 'last_op_start_date', 'last_op_end_date', 'created_date_range' and 'last_op_date_range' + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""last_op_start_date"": str_utime, + ""last_op_end_date"": str_utime, ""created_date_range"": ""today"", ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + + run_api.deploy_stop(deploy_id) + run_api.deploy_image_delete(deploy_id) + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'tags_list' param,"params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]}",,"def test_deploy_fetch_with_tags(deploy_image, run_api): + """""" + Fetch list with tags + """""" + params, r = deploy_image + machine_id = r.json()[""UUID""] + tag = rand_string() + params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]} + run_api.deploy_add_tags(params=params) + res = run_api.deploy_list(params={""tags"": tag}) + assert res.json()[""count""] == 1, ""The error is %s"" % res.json() + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'state' param,"params = {""state"": state}",,"def test_deploy_list_with_machine_state_filter(run_api): + """""" + fetch list with deploy machine state filter + """""" + state = ""stopped"" + params = {""state"": state} + rjson = run_api.deploy_list(params).json() + for machines in rjson['results']: + assert machines['state'] == state, ""Json |> %s"" % machines + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to invalid scope name,"params = {'scope': ""invalid"", 'UUID': machine_id}",,"
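# Note (assumption, drawn from the inline comment in the test below): an unrecognized 'scope' value is not rejected; the API silently falls back to the default 'my' scope, so the owner's single machine is still counted. +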
def test_deploy_list_with_invalid_scope_name(run_api, deploy_image): + """""" + fetch list with invalid scope name + """""" + p, r = deploy_image + machine_id = r.json()['UUID'] + params = {'scope': ""invalid"", 'UUID': machine_id} + rjson = run_api.deploy_list(params).json() # 'my' is the default scope, which gets applied on an invalid scope + assert rjson['count'] == 1, ""The error is %s"" % rjson + + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to 'all',"params = {'scope': ""all"", 'UUID': deploy_id}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_list_with_scope_all(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + fetch list with scope all + """""" + deploy_id = custom_lib_non_admin_operations + params = {'scope': ""all"", 'UUID': deploy_id} + rjson = run_api.deploy_list(params).json() + assert rjson['count'] == 1, ""The error is %s"" % rjson + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'mac' param,"params = {""mac"": mac}",,"def test_deploy_list_fetch_with_mac(run_api): + """""" + Fetch deploy list with 'mac' param + """""" + mac = ""5A:54:00:12:23:34"" + params = {""mac"": mac} + rjson = run_api.deploy_list(params).json() + for machines in rjson['results']: + all_macs = [network['mac'] for network in machines['machine']['hw']['networks']] + assert mac in all_macs, ""Json |> %s"" % machines + + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'iso' param,"cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] + ",,"def test_deploy_fetch_with_iso(run_api): + """""" + Fetch list with 'iso' + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] + params, r = run_api.library_add_new_vm(cdrom=cdrom) + lib_id = r.json()[""UUID""] + response = run_api.deploy_image(lib_id) + machine_id = response.json()[""UUID""] + params = {""iso"": response.json()['machine']['hw']['cdrom'][-1]['iso']} + assert run_api.deploy_list(params).json()['count'] == 1 + run_api.deploy_image_delete(deploy_id=machine_id) + run_api.library_delete(lib_id) + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'hvm_type' param,"params = {""hvm_type"": kvm}",,"def test_deploy_fetch_with_hvm_type(deploy_image, run_api): + """""" + Fetch list with 'hvm_type' + """""" + params, r = deploy_image + rjson = r.json() + kvm = rjson['machine']['hw']['hvm_type'] + params = {""hvm_type"": kvm} + rjson = run_api.deploy_list(params).json() + for machines in rjson['results']: + assert machines['machine']['hw']['hvm_type'] == kvm, ""Json |> %s"" % machines + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'disks__UUID' param,"params = { +""disk_UUID"": valid_existing_disk +}",,"def test_deploy_list_fetch_with_disk_UUID(deploy_image, run_api): + """""" + Fetch deploy list with 'disks_UUID' param + """""" + template, r = deploy_image + rjson = r.json() + params = {""disk_UUID"": rjson['machine']['hw']['disks'][0]['UUID']} + assert run_api.deploy_list(params).json()['count'] == 1 + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'arch' param,"params = {""arch"": arch}",,"def test_deploy_fetch_with_arch(deploy_image, run_api): + """""" + Fetch list with 'arch' + """""" + params, r = deploy_image + rjson = r.json() + mc_id = rjson['UUID'] + arch = rjson['machine']['hw']['arch'] + params = {""arch"": 
arch} + rjson = run_api.deploy_list(params).json() + all_UUID = [mc['UUID'] for mc in rjson['results']] + assert mc_id in all_UUID, ""|> Json %s"" % rjson + for machines in rjson['results']: + assert machines['machine']['hw']['arch'] == arch, ""Json |> %s"" % machines + + " /deploy/rest/deploy/{{UUID}}/,getting the list of deployed image,,"{ +""status"" : 200, +""response"" : machine details +}","@pytest.mark.skip() +def test_deploy_list(deploy_list): + """""" + Fetching the list of deployed images + """""" + r = deploy_list + test_assert.status(r, 200) + " /deploy/rest/deploy/{{UUID}}/,deploying zero virtual machines by setting the 'count' param to zero,"{ +count : 0 +}","{ +""status"" : 400, +""message"" : ""Ensure this value is greater than or equal to 1"" +}","def test_deploy_image_zero_count(run_api, library_add_new_vm): + """""" + deploy a VM image with zero count value + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=0) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Ensure this value is greater than or equal to 1' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying VM with a 'count' parameter set to negative value,"{ +count : -2 +}","{ +""status"" : 400, +""message"" : ""Ensure this value is greater than or equal to 1"" +}"," +def test_deploy_image_negative_count(run_api, library_add_new_vm): + """""" + deploy a VM image with negative count value + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=-2) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Ensure this value is greater than or equal to 1' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying virtual machines with a count parameter set within system limits.,"{ +count : 2 +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_multiple_count(run_api, library_add_new_vm): + """""" + deploy a VM image with multiple count + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=2) + res = r.json() + if 'bulk_job_UUID' in res: + test_assert.status(r, 200) + for deployment in res[""deployments""]: + deploy_id = deployment[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + else: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'Not enough RAM' in res[""error""], res + + " /deploy/rest/deploy/{{UUID}}/,deploying multiple virtual machines with default synchronous behavior.,"{ +deploy_start=True +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_deploy_start_true_multiple_vm(run_api, library_add_new_vm): + """""" + deploy multiple VM's with deploy_start as True + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + server_list = list(run_api.clm_my_servers.values()) + r = run_api.server_details(server_list[0]) + ram = r.json()[""total_ram""] + count = math.ceil(ram / 200) + 1 + r = run_api.deploy_image(lib_id, count=count, deploy_start=True, server_list=[server_list[0]]) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'Not enough RAM' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying an image without Authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def 
test_deploy_image_without_authorization(anonymous_exec_api): + """""" + deploying an image without authorization + """""" + deploy_id = ""invalid"" + depl_image = anonymous_exec_api.deploy_image(deploy_id, wait=False) + depl_json = depl_image.json() + test_assert.status(depl_image, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + " /deploy/rest/deploy/{{UUID}}/,deploying an image with valid data,,"{ +""status"" : 200, +""response"" : image deployed +}","def test_deploy_image_vm_self(deploy_image): + """""" + Deploy image + """""" + template, r = deploy_image + res = r.json() + test_assert.status(res, template, ""deploy_image"") + test_assert.status(r, 200) + " /deploy/rest/deploy/{{UUID}}/,deploying an image with invalid token provided,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_image_invalid_token(invalid_exec_api): + """""" + deploying an image with an invalid token + """""" + deploy_id = ""invalid"" + depl_image = invalid_exec_api.deploy_image(deploy_id, wait=False) + depl_json = depl_image.json() + test_assert.status(depl_image, 401) + assert depl_json[""detail""] == ""Invalid token."" + " /deploy/rest/deploy/{{UUID}}/,deploying an image when invalid UUID is provided,"{ + UUID = ""zxyz"" + +}","{ +""status"" : 404, +""message"" : Machine does not exist +}","def test_deploy_image_invalid_UUID(run_api): + """""" + deploy with invalid UUID; the status code should be 404 + """""" + UUID = ""zxyz"" + ret = run_api.deploy_image(UUID, wait=False) + test_assert.status(ret, 404) + res = ret.json() + assert res[""result""] == 'FAILURE', res + assert 'does not exist' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying an image by an admin user,,"{ +""status"" : 200, +""response"" : image deployed +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_image_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image by Admin + """""" + # Admin check of Starting a deployment created by different user + lib_id = custom_lib_non_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + run_api.deploy_image_delete(deploy_id, params={}) + " /deploy/rest/deploy/{{UUID}}/,deploying an image by a non-admin user,,"{ +""status"" : 403}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_image_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Deploying an Image by Non-admin + """""" + # Non-admin check of Starting a deployment created by different user + lib_id = custom_lib_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, 403) + " /deploy/rest/deploy/{{UUID}}/,deploying an image by a manager,,,"endpoint = ""deploy_add"" + +PARAMETERS = [{""dest_obj"": OBJ_LIB}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_image_vm_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image by manager + """""" + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.deploy_image(lib_id) 
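+ # 'manager_rights_response' (project helper) is assumed to map the endpoint name and the manages_user/manages_server flags to the HTTP status a manager account should receive for that operation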
+ test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + lib_id = custom_lib_non_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with null deployment strategy,"{ +deployment_strategy = None +}","{ +""status"" : 400, +""message"" : ""This field may not be null"" +}"," +def test_deploy_image_with_null_deployment_strategy(run_api, library_add_new_vm): + """""" + deploy a VM with null deployment strategy + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, deployment_strategy=None) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be null' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with invalid deployment strategy,"{ +deployment_strategy=""invalid"" +}","{ + ""status"": 400, + ""message"": ""Not a valid choice"" +}","def test_deploy_image_invalid_deployment_strategy(run_api, library_add_new_vm): + """""" + deploy a VM image with invalid deployment strategy + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, deployment_strategy=""invalid"") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'not a valid choice' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with an empty vnc_password,"{ +vnc_password="""" +}","{ +""status"" : 400, +""message"" : ""This field may not be blank"" +}"," +def test_deploy_image_empty_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using empty vnc_password + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password="""") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be blank' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a vnc_password that exceeds the 8 character limit,"{ +vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"" +}","{ +""status"" : 400, +""message"" : ""Ensure this field has no more than 8 characters"" +}","def test_deploy_image_too_long_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with vnc_password greater than 8 characters + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Ensure this field has no more than 8 characters' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a vnc_password that contains white spaces only,"{ +vnc_password="" "" +}","{ +""status"" : 400, +""message"" : ""This field may not be blank"" +}","def test_deploy_image_whitespace_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with vnc_password having only white spaces + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password="" "") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 
'FAILURE', res + assert 'This field may not be blank' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a vnc_password that contains special characters,"{ +vnc_password=""!@#$%"" +}","{ +""status"" : 200 +}","def test_deploy_image_special_char_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with vnc_password having special characters + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""!@#$%"") + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a vnc_password set to null,"{ +vnc_password= None +}","{ +""status"" : 400, +""message"" : ""This field may not be null"" +}","def test_deploy_image_null_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using null vnc_password + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=None) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be null' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a valid vnc_password,"{ +vnc_password=""password"" +}","{ +""status"" : 200 +}","def test_deploy_image_valid_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using valid vnc_password + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""password"") + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine with a count parameter exceeding system limits,,"{ +""status"" : 400, +""message"" : ""Not enough RAM"" +}","def test_deploy_image_count_exceeding_limits(run_api, library_add_new_vm): + """""" + deploy a VM image with count exceeding the system limits + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + server_list = list(run_api.clm_my_servers.values()) + ram_all = 0 + for server in server_list: + r = run_api.server_details(server) + ram_all += r.json()[""total_ram""] + count = math.ceil((ram_all / 200)) * random.randint(100, 1000) + r = run_api.deploy_image(lib_id, count=count) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Not enough RAM' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'sync = False',"{ +sync=False +}","{ +""status"" : 200 +}","def test_deploy_image_sync_false(run_api, library_add_new_vm): + """""" + deploy a VM image with sync as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, sync=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'deploy_start = False',"{ +deploy_start=False +}","{ +""status"" : 200 +}","def test_deploy_image_deploy_start_false(run_api, library_add_new_vm): + """""" + deploy a VM image with deploy_start as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, deploy_start=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + " /deploy/rest/deploy/{{UUID}}/,deploying a virtual 
machine by providing a group name that the user isn't part of,,"{ +""status"" : 400, +""message"" : ""You are not a part of the provided Group(s)"" +}"," +def test_deploy_image_group_list(run_api, library_add_new_vm): + """""" + deploy a VM image with group_list, which user is not part of + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + group_list = [] + all_groups = list(run_api.clm_my_groups.keys()) + while 1: + name = f""test{random.randint(1, 1000)}"" + if name not in all_groups: + group_list.append(name) + break + + r = run_api.deploy_image(lib_id, group_list=group_list) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'You are not a part of the provided Group(s)' in res[""error""], res + " /deploy/rest/deploy/{{UUID}}/,deploying a single VM with deploy_start set to True,"{ +lib_id, +deploy_start=True, +server_list=[server_list[0]] +}","{ +""status"" : 400, +""message"" : ""'Failed to deploy machine'"" +}","def test_deploy_image_deploy_start_true_one_vm(run_api): + """""" + deploy single VM with deploy_start as True + """""" + server_list = list(run_api.clm_my_servers.values()) + r = run_api.server_details(server_list[0]) + ram = r.json()[""total_ram""] + params, r = run_api.library_add_new_vm(ram=ram) + lib_id = r.json()[""UUID""] + r = run_api.deploy_image(lib_id, deploy_start=True, server_list=[server_list[0]]) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'Failed to deploy machine' in res[""error""], res + run_api.library_delete(lib_id) + " /deploy/rest/deploy/{{UUID}}/,deploying a single virtual machine with default synchronous behavior (sync=false),"{ +sync = False +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_sync_false(run_api, library_add_new_vm): + """""" + deploy a VM image with sync as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, sync=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_details_without_authorization(anonymous_exec_api): + """""" + Getting Deploy details of the VM without authorization + """""" + deploy_id = ""invalid"" + depl_details = anonymous_exec_api.deploy_details(deploy_id) + depl_json = depl_details.json() + test_assert.status(depl_details, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM using valid machine id,,"{ +""status"" : 200 +}","def test_deploy_details(deploy_details): + """""" + Getting Deploy details of the VM + """""" + x, r = deploy_details + test_assert.status(r, 200) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM using invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_details_invalid_token(invalid_exec_api): + """""" + Getting Deploy details of the VM using invalid token + """""" + deploy_id = ""invalid"" + depl_details = invalid_exec_api.deploy_details(deploy_id) + depl_json = depl_details.json() + test_assert.status(depl_details, 401) + assert depl_json[""detail""] == ""Invalid token."" + " /deploy/rest/details/{{UUID}}/,getting deployment details of a 
VM using invalid machine id,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404 +}","def test_deploy_details_invalid_UUID(run_api): + """""" + Getting Deploy details of the VM using invalid id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 404) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Details of the VM by non-Admin + """""" + # Non-admin check for fetching details of a Deployed VM created by different user. + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 403) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who has rights over server,,," +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who does not have rights over server,,,"PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + " /deploy/rest/details/{{UUID}}/,getting deployment details of a VM by Admin user,,"{ +""status"" : 200 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def 
test_deploy_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Admin + """""" + # Admin check for fetching details of a Deployed VM created by different user. + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 200) + " /deploy/rest/mac_addr/{{UUID}}/,getting MAC address without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_mac_addr_without_authorization(anonymous_exec_api): + """""" + fetching the mac address of VM without authorization + """""" + deploy_id = ""invalid"" + depl_mac_addr = anonymous_exec_api.deploy_mac_addr(deploy_id) + depl_json = depl_mac_addr.json() + test_assert.status(depl_mac_addr, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + " /deploy/rest/mac_addr/{{UUID}}/,getting MAC address when valid UUID is provided and machine is not connected to network.,,"{ +""status"" : 400, +""message"" : ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" +}","def test_deploy_mac_addr_stopped_machine(run_api, deploy_image): + """""" + Get Mac Addr when machine is in stopped state + """""" + params, r = deploy_image + machine_id = r.json()[""UUID""] + res = run_api.deploy_mac_addr(deploy_id=machine_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" + " /deploy/rest/mac_addr/{{UUID}}/,getting MAC address when valid UUID is provided and machine is connected to network.,,"{ +""status"" : 200, +""response"" : MAC address of VM +}","def test_deploy_mac_addr(deploy_mac_addr): + """""" + fetching the mac address of VM + """""" + x, r = deploy_mac_addr + test_assert.status(r, 200) + " /deploy/rest/mac_addr/{{UUID}}/,getting MAC address when requested using invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_mac_addr_invalid_token(invalid_exec_api): + """""" + fetching the mac address of VM using invalid token + """""" + deploy_id = ""invalid"" + depl_mac_addr = invalid_exec_api.deploy_mac_addr(deploy_id) + depl_json = depl_mac_addr.json() + test_assert.status(depl_mac_addr, 401) + assert depl_json[""detail""] == ""Invalid token."" + " /deploy/rest/mac_addr/{{UUID}}/,getting MAC address when invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_mac_addr_invalid_UUID(run_api): + """""" + fetching the mac address of VM using invalid machine_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_mac_addr(deploy_id) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson + + " /deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine is in running state,,"{ +""status"" : 201, +""response"" : Machine state should be set to paused +}","def test_deploy_deploy_pause_self(deploy_pause): + """""" + Pausing the VM + """""" + x, r = deploy_pause + test_assert.status(r, 201) + " /deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine is in paused state,,"{ +""status"" : 400, +""message"" : ""Cannot perform pause operation on paused state of a machine"" +}","def test_deploy_deploy_pause_already_paused_vm(deploy_pause, run_api): + 
"""""" + Pausing a vm that is already paused + """""" + x, r = deploy_pause + res = r.json() + deploy_id = res[""UUID""] + response = run_api.deploy_pause(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform pause operation on paused state of a machine"" +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_pause_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Pausing the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who has rights over server,,," +endpoint = ""deploy_pause"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_pause_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who do not have rights over server,,,"endpoint = ""deploy_pause"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_pause_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by admin user,,"{ 
+""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_pause_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/pause/{{UUID}}/,pausing a running VM without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_pause_without_authorization(anonymous_exec_api): + """""" + Pausing the VM without authorization + """""" + deploy_id = ""invalid"" + depl_pause = anonymous_exec_api.deploy_pause(deploy_id, wait=False) + depl_json = depl_pause.json() + test_assert.status(depl_pause, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/pause/{{UUID}}/,pausing a running VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_pause_invalid_token(invalid_exec_api): + """""" + Pausing the VM using invalid tokens + """""" + deploy_id = ""invalid"" + depl_pause = invalid_exec_api.deploy_pause(deploy_id, wait=False) + depl_json = depl_pause.json() + test_assert.status(depl_pause, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/pause/{{UUID}}/,pausing a running VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_pause_invalid_UUID(run_api): + """""" + Pausing the VM with invalid deploy_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_pause(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/reboot/{{UUID}}/,rebooting a VM when it is in running state using valid id,,"{ +""status"" : 201 +}","def test_deploy_deploy_reboot_self(deploy_reboot): + """""" + Rebooting the VM + """""" + r = deploy_reboot + test_assert.status(r, 201) +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_reboot_without_authorization(anonymous_exec_api): + """""" + Rebooting a VM without authorization + """""" + deploy_id = ""invalid"" + depl_reboot = anonymous_exec_api.deploy_reboot(deploy_id, wait=False) + depl_json = depl_reboot.json() + test_assert.status(depl_reboot, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_reboot_invalid_token(invalid_exec_api): + """""" + Rebooting a VM using invalid token + """""" + deploy_id = ""invalid"" + depl_reboot = invalid_exec_api.deploy_reboot(deploy_id, wait=False) + depl_json = depl_reboot.json() + test_assert.status(depl_reboot, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, 
+""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_reboot_invalid_UUID(run_api): + """""" + Rebooting a VM using invalid UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager has rights over the servers,,,"endpoint = ""deploy_reboot"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reboot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager do not have rights over the servers,,,"endpoint = ""deploy_reboot"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reboot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + +" +/deploy/rest/reboot/{{UUID}}/,admin rebooting a VM when it is in running state using valid id,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reboot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = 
custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/reboot/{{UUID}}/,a non-admin user rebooting a VM when it is in running state using valid id,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reboot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Rebooting the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_reset_without_authorization(anonymous_exec_api): + """""" + resetting a VM without authorization + """""" + deploy_id = ""invalid"" + depl_reset = anonymous_exec_api.deploy_reset(deploy_id, wait=False) + depl_json = depl_reset.json() + test_assert.status(depl_reset, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided.""" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who has rights over servers,,,"endpoint = ""deploy_reset"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reset_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who does not have rights over servers,,,"endpoint = ""deploy_reset"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reset_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, 
manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by an admin user,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reset_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by a non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reset_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Resetting the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided,,"{ +""status"" : 201 +}","def test_deploy_deploy_reset_self(deploy_reset): + """""" + Resetting the VM + """""" + r = deploy_reset + test_assert.status(r, 201) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_reset_invalid_token(invalid_exec_api): + """""" + resetting a VM for invalid token + """""" + deploy_id = ""invalid"" + depl_reset = invalid_exec_api.deploy_reset(deploy_id, wait=False) + depl_json = depl_reset.json() + test_assert.status(depl_reset, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_reset_invalid_UUID(run_api): + """""" + resetting a VM for invalid UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_reset(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is provided and machine is in resumed state,,"{ +""status"" : 400, +""message"" : ""Cannot perform resume operation on running state of a machine"" +}","def test_deploy_deploy_resume_already_resumed_vm(deploy_resume, run_api): + """""" + resuming a VM which is already resumed + """""" + x, r = deploy_resume + res = r.json() + deploy_id = res[""UUID""] + response = run_api.deploy_resume(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform resume operation on running state of a machine"" +" +/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is 
provided and machine is in paused state,,"{ +""status"" : 201, +""response"" : Machine state set to running +}","def test_deploy_deploy_resume_self(deploy_resume): + """""" + Resuming the VM + """""" + x, r = deploy_resume + test_assert.status(r, 201) +" +/deploy/rest/resume/{{UUID}}/,resuming a VM by an admin user when valid UUID is provided and machine is in paused state,,"{ +""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_resume_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by Admin + """""" + # Admin check of Resuming a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/resume/{{UUID}}/,resuming a VM by a non-admin user when valid UUID provided and machine is in paused state,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""]}] + + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_resume_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Resuming the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state but manager does not have rights over servers",,,"endpoint = ""deploy_resume"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_resume_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state and manager has rights over servers",,," +endpoint = ""deploy_resume"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_resume_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by manager when have right on 
server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_resume_without_authorization(anonymous_exec_api): + """""" + resuming a paused VM without authorization + """""" + deploy_id = ""invalid"" + depl_resume = anonymous_exec_api.deploy_resume(deploy_id, wait=False) + depl_json = depl_resume.json() + test_assert.status(depl_resume, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_resume_invalid_token(invalid_exec_api): + """""" + resuming a paused VM using invalid token + """""" + deploy_id = ""invalid"" + depl_resume = invalid_exec_api.deploy_resume(deploy_id, wait=False) + depl_json = depl_resume.json() + test_assert.status(depl_resume, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_resume_invalid_UUID(run_api): + """""" + resuming a VM which is paused using invalid deploy_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_resume(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in stopped state,,"{ +""status"" : 400, +""message"" : ""Cannot perform shutdown operation on stopped state of a machine"" +}","def test_deploy_shutdown_already_stopped_vm(run_api, deploy_stop): + """""" + Shutdown the VM that is in stopped state + """""" + x, result = deploy_stop + deploy_id = x[""UUID""] + response = run_api.deploy_shutdown(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform shutdown operation on stopped state of a machine"" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in paused state,,"{ +""status"" : 400, +""message"" : ""Cannot perform shutdown operation on paused state of a machine"" +}","def test_deploy_shutdown_paused_vm(deploy_start, run_api): + """""" + Shutdown the VM which is in pause state + """""" + x, r = deploy_start + deploy_id = x[""UUID""] + run_api.deploy_pause(deploy_id, wait=True) + res = run_api.deploy_shutdown(deploy_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Cannot perform shutdown operation on paused state of a machine. 
Try `STOP` instead."" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_shutdown_invalid_token(invalid_exec_api): + """""" + Shutdown the VM using invalid token + """""" + deploy_id = ""invalid"" + depl_shutdown = invalid_exec_api.deploy_shutdown(deploy_id, wait=False) + depl_json = depl_shutdown.json() + test_assert.status(depl_shutdown, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when no machine exists for the deploy id,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_shutdown_invalid_UUID(run_api): + """""" + Shutdown the VM using id for which machine does not exist + """""" + deploy_id = ""invalid"" + r = run_api.deploy_shutdown(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine that is in running state,,"{ +""status"" : 201, +""response"" : Machine shutdown +}","def test_deploy_shutdown_self(deploy_shutdown): + """""" + Shutdown the VM + """""" + param, r = deploy_shutdown + test_assert.status(r, 201) + +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine by non-admin when valid UUID is provided and machine is in running state ,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_shutdown_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Shutdown the VM by non-admin + """""" + # Non-admin check of shutdown a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of machine by manager when valid UUID is provided, machine is in running state but the manager does not have rights over servers",,,"endpoint = ""deploy_shutdown"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_shutdown_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Shutdown the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of 
machine by manager when valid UUID is provided, machine is in running state and manager has rights over servers",,,"endpoint = ""deploy_shutdown"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_shutdown_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Shutdown the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine without Authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}"," +def test_deploy_shutdown_without_authorization(anonymous_exec_api): + """""" + Shutdown the VM without authorization + """""" + deploy_id = ""invalid"" + depl_shutdown = anonymous_exec_api.deploy_shutdown(deploy_id, wait=False) + depl_json = depl_shutdown.json() + test_assert.status(depl_shutdown, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_snapshot_without_authorization(anonymous_exec_api): + """""" + Snapshot of the machine without authorization + """""" + deploy_id = ""invalid"" + depl_snapshot = anonymous_exec_api.deploy_snapshot(deploy_id, wait=False) + depl_json = depl_snapshot.json() + test_assert.status(depl_snapshot, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine with description,"{ +""description"" : ""description here"" +}","{ +""status"" : 201, +}","def test_deploy_snapshot_machine_with_description(run_api, deploy_image): + """""" + Snapshot of the machine while giving description + """""" + params, r = deploy_image + machine_id = r.json()[""UUID""] + description = ""Random"" + res = run_api.deploy_snapshot(deploy_id=machine_id, description=description) + snapshotted_machine_UUID = res.json()['snapshotted_machine_UUID'] + current_desp = run_api.library_details(UUID=snapshotted_machine_UUID, params={}).json()[""description""] + run_api.library_delete(snapshotted_machine_UUID, {}) + test_assert.status(res, 201) + assert current_desp == description, ""The error is %s"" % res.json() +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine which is part of the island,,"{ +""message"" : ""Snapshot of machine which is part of island is not allowed"" +}","def test_deploy_snapshot_machine_part_of_island(run_api, ideploy_deploy): + """""" + Snapshot of the machine that is part 
of the island + """""" + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_UUID""] + machine_id = run_api.ideploy_details(UUID=deploy_id).json()[""machines""][0][""UUID""] + res = run_api.deploy_snapshot(deploy_id=machine_id) + assert res.json()[""error""] == ""Snapshot of machine which is part of island is not allowed"" +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when valid UUID is provided and machine is in running state,,"{ +""status"" : 400, +""message"" : ""Cannot perform snapshot operation on running state of a machine""","def test_deploy_snapshot_running_vm(run_api, deploy_start): + """""" + Snapshot of the machine which is in running state + """""" + x, result = deploy_start + deploy_id = x[""UUID""] + response = run_api.deploy_snapshot(deploy_id, wait=False) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform snapshot operation on running state of a machine"" +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when requested using invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_snapshot_invalid_token(invalid_exec_api): + """""" + Snapshot of the machine using invalid tokens + """""" + deploy_id = ""invalid"" + depl_snapshot = invalid_exec_api.deploy_snapshot(deploy_id, wait=False) + depl_json = depl_snapshot.json() + test_assert.status(depl_snapshot, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist""","def test_deploy_snapshot_invalid_UUID(run_api): + """""" + Snapshot of the machine using an invalid machine uuid + """""" + deploy_id = ""invalid"" + r = run_api.deploy_snapshot(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine using a valid UUID and machine state is in stopped state,,"{ +""status"" : 201, +""response"" : Snapshot taken +}","def test_deploy_snapshot_self(deploy_snapshot): + """""" + Snapshot the VM + """""" + r = deploy_snapshot + test_assert.status(r, 201)" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by non-admin user using valid UUID and machine state is in stopped state,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_snapshot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Snapshot the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state but the manager do not have rights over the server",,,"endpoint = ""deploy_snapshot"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) 
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_snapshot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Snapshot the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state and the manager has rights over the servers",,,"endpoint = ""deploy_snapshot"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) + +def test_deploy_snapshot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Snapshot the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by Admin user using valid UUID and machine state is in stopped state,,"{ +""status"" : 201, +""response"" : Snapshot taken +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_snapshot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Snapshot the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, 201) + rjson = r.json() + run_api.library_delete(rjson['snapshotted_machine_UUID'], {}) +" +/deploy/rest/start/{{UUID}}/,starting machine deployment when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_start_invalid_token(invalid_exec_api): + """""" + starting machine deployment using invalid token + """""" + deploy_id = ""invalid"" + depl_start = invalid_exec_api.deploy_start(deploy_id, wait=False) + depl_json = depl_start.json() + test_assert.status(depl_start, 401) + assert 
depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/start/{{UUID}}/,starting machine deployment on a machine which is in paused state,,"{ +""status"" : 400, +""message"" : 'Cannot perform start operation on paused state of a machine' +}","def test_deploy_start_paused_vm(deploy_start, run_api): + """""" + starting a machine that is in paused state + """""" + x, r = deploy_start + deploy_id = x[""UUID""] + run_api.deploy_pause(deploy_id, wait=True) + res = run_api.deploy_start(deploy_id) + test_assert.status(res, 400) + assert res.json()[""error""] == 'Cannot perform start operation on paused state of a machine' +" +/deploy/rest/start/{{UUID}}/,starting deployment of machine without Authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_start_without_authorization(anonymous_exec_api): + """""" + starting machine deployment without authorization + """""" + deploy_id = ""invalid"" + depl_start = anonymous_exec_api.deploy_start(deploy_id, wait=False) + depl_json = depl_start.json() + test_assert.status(depl_start, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/start/{{UUID}}/,starting deployment of machine using Valid UUID which is already in a running state,,"{ +""status"" : 400, +""message"" : ""Cannot perform start operation on running state of a machine"" +}","def test_deploy_start_already_running_vm(deploy_start, run_api): + """""" + starting a machine that is already running + """""" + x, r = deploy_start + deploy_id = x[""UUID""] + r = run_api.deploy_start(deploy_id, wait=False) + test_assert.status(r, 400) + rjson = r.json()[""error""] + assert rjson == ""Cannot perform start operation on running state of a machine"" +" +/deploy/rest/start/{{UUID}}/,starting deployment of machine using invalid id for which no machine exists,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_start_invalid_UUID(run_api): + """""" + starting a nonexisting machine using UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_start(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state , where manager has rights over servers",,," +endpoint = ""deploy_start"" + +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_start_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deploying a Image and Starting the VM and then Stopping + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, 
manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state, where manager does not have rights over servers",,," +endpoint = ""deploy_start"" + +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_start_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image and Starting the VM and then Stopping + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.deploy_stop(deploy_id) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/start/{{UUID}}/,starting deployment of machine by admin with a valid UUID and machine is in stopped state,,"{ +""status"" : 201, +""response"" : Machine should go to running state +}"," +endpoint = ""deploy_start"" + +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_start_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image and Starting the VM and then Stopping by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, 201) + run_api.deploy_stop(deploy_id)" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine without authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_stop_without_authorization(anonymous_exec_api): + """""" + stopping machine deployment without authorization + """""" + deploy_id = ""invalid"" + depl_stop = anonymous_exec_api.deploy_stop(deploy_id, wait=False) + depl_json = depl_stop.json() + 
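# The anonymous call should be rejected with 401 before the deploy_id is ever validated +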
test_assert.status(depl_stop, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in stopped state,,"{ +""status"" : 400, +""message"" : ""Cannot perform power off operation on stopped state of a machine"" +}","def test_deploy_stop_already_stopped_vm(run_api, deploy_stop): + """""" + stopping machine deployment when machine in stopped state + """""" + x, result = deploy_stop + deploy_id = x[""UUID""] + response = run_api.deploy_stop(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform power off operation on stopped state of a machine"" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in running state,,"{ +""status"" : 201 +}","def test_deploy_stop_self(deploy_stop): + """""" + stopping machine deployment + """""" + x, r = deploy_stop + test_assert.status(r, 201)" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_stop_invalid_token(invalid_exec_api): + """""" + stopping machine deployment with invalid token + """""" + deploy_id = ""invalid"" + depl_stop = invalid_exec_api.deploy_stop(deploy_id, wait=False) + depl_json = depl_stop.json() + test_assert.status(depl_stop, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when Invalid UUID is provided,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_stop_invalid_UUID(run_api): + """""" + stopping machine deployment using a machine id for which machine does not exist + """""" + + deploy_id = ""invalid"" + r = run_api.deploy_stop(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson + +" +/group/rest/add-server/{id}/,"addition of server when server id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +server_group_list = { + ""servers_list"": [""0""] + } +}","{ +""status"" : 400/404 +}","@pytest.mark.skip(reason=""Skipping this test because it is returning 207 in place of 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_server_add_with_invalid_server_id(run_api, custom_group_admin_operations): + """""" + Add Server in Group with invalid server id + """""" + server_group_list = { + ""servers_list"": [""0""] + } + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + + r = run_api.group_add_server(server_group_list, group_id) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code == 400 or status_code == 404 +" +/group/rest/add-server/{id}/,"addition of server when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +status : 403 / 202 +}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_server_add(run_api, custom_group_admin_operations): + """""" + Add Server in Group + """""" + param, ret = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(ret, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(ret, 202) + +" +/group/rest/add-server/{id}/,"addition of server when both group and server id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","server_group_list = { + ""servers_list"": [""0""] + }","{ +""status"" : 400 +""message"" : ""Group does not exist"" +}","def test_group_server_add_invalid_server_id_and_grp_id(run_api): + """""" + Add Server in Group with invalid server id and invalid group id + """""" + server_group_list = { + ""servers_list"": [""0""] + } + r = run_api.group_add_server(server_group_list, group_id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) + +" +/group/rest/add-server/{id}/,addition of server to group without Authorization ,"server_group_list = { + ""servers_list"": [""0""] + }","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +} +","def test_group_server_add_without_authorization(anonymous_exec_api): + """""" + Add Server in Group without Authorization + """""" + + server_group_list = { + ""servers_list"": [""0""] + } + r = anonymous_exec_api.group_add_server(server_group_list, group_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/group/rest/add-server/{id}/,addition of server to group when requested with invalid token,"server_group_list = { ""servers_list"": [""0""] }","{ +""status"" : 401, +""message"" : ""Invalid token"" +} +"," +def test_group_server_add_with_invalid_token(invalid_exec_api): + """""" + Add Server in Group with invalid token + """""" + server_group_list = { + ""servers_list"": ['0'] + } + r = invalid_exec_api.group_add_server(server_group_list, group_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/group/rest/add-server/{id}/,"addition of server to group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +group_id=0, +}","{ + ""status"": 400, + ""message"": ""Group does not exist"" +}","def test_group_server_add_with_invalid_id(run_api, server_list): + """""" + Add Server in Group with invalid group id + """""" + r = server_list + res = r.json() + list_server = [result['UUID'] for result in res['results']] + server_group_list = { + ""servers_list"": list_server + } + r = run_api.group_add_server(server_group_list, group_id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/group/rest/add-user/{id}/,requesting of user addition to group with invalid token,"{group_id=0, +user_ids=[0] +}","{""status"":401, +""message"": ""Invalid Token"" +}","def test_group_add_user_with_invalid_token(invalid_exec_api): + """""" + Adding user id into group with invalid token + """""" + template, r = invalid_exec_api.group_add_user(group_id=0, user_ids=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' +" +/group/rest/add-user/{id}/,providing valid Group Id and User Id,"{ +""search"" :""vivekt"" +}","{ +""status"" : 202, +""response"" : Accepted","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_add_user_by_manager(skip_if_not_manager, custom_group_admin_operations, run_api): + """""" + When provided user_id and group_id + """""" + template, r = custom_group_admin_operations + group_id = template['group_id'] + user_r = run_api.user_list({'search': 'vivekt'}) + user_id = user_r.json()['results'][0]['id'] + params, result = run_api.group_add_user(group_id, user_ids=[user_id]) + test_assert.status(result, 202) +" +/group/rest/add-user/{id}/,"providing invalid User Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{""user_id"" : 0}","{""status"" : 207 }","PARAMETERS = [{""action"": GROUP_ADD_USER}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_add_user_with_invalid_user_id(run_api, custom_group_admin_operations): + """""" + Adding invalid user id into group + """""" + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + template, r = run_api.group_add_user(group_id, user_ids=[0]) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 207) +" +/group/rest/add-user/{id}/,"providing invalid Group Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{""group_id"" : 0}","{""status"":400, +""message"": ""'Group does not exist'"" +}"," +def test_group_add_user_with_invalid_group_id(run_api, admin_exec_api): + """""" + Adding users into invalid group id + """""" + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + template, r = run_api.group_add_user(group_id=0, user_ids=user_ids) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == 'Group does not exist' + test_assert.status(r, 400) +" +/group/rest/add-user/{id}/,adding user id to group without Authorization ,"{group_id=0, +user_ids=[0] +}","{status"":401, +""message"":'Authentication credentials were not provided.' +}","def test_group_add_user_without_authorization(anonymous_exec_api): + """""" + Adding user id into group without Authorization + """""" + template, r = anonymous_exec_api.group_add_user(group_id=0, user_ids=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' +" +/group/rest/add/,"adding new group. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 201, +}","ef test_add_group(run_api, group_add): + """""" + Adding new Group + """""" + template, r = group_add + result = r.json() + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(template, result, ""group_add"") + test_assert.status(r, 201) + + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/group/rest/add/,adding new group without authorization.,,"{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_add_group_without_authorization(anonymous_exec_api): + """""" + Adding new Group without Authorization + """""" + params, r = anonymous_exec_api.group_add() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" + +" +/group/rest/add/,"adding new group when invalid deployment strategy is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""test_group"", + ""deployment_strategy"": ""invalid"" + }","{ + ""status"" : 400, + ""message"" : ""Invalid deployment_strategy"" +}","def test_add_group_invalid_deployment_strategy(run_api): + """""" + provide invalid deployment_strategy + """""" + group = { + ""name"": ""test_group"", + ""deployment_strategy"": ""invalid"" + } + params, r = run_api.group_add(template=group) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Invalid deployment_strategy"", ""|> Json %s"" % result +" +/group/rest/add/,"adding new group when group name field is missing. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 400, +""message"" : ""Group Name is required and it can not be blank"" +}","def test_add_group_with_group_name_field_missing(run_api): + """""" + Adding new Group with group name field missing + """""" + params, r = run_api.group_add(template={}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" + +" +/group/rest/add/,adding new group using invalid token. ,,"{ + ""status"" : 401, + ""message"" : ""Invalid token"" +}","def test_add_group_with_invalid_token(invalid_exec_api): + """""" + Adding new Group with invalid token + """""" + params, r = invalid_exec_api.group_add() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/group/rest/add/,"adding new group by setting blank group name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": """" +}","{ +""status"" : 400, +""message"" : ""Group Name is required and it can not be blank"" +}"," +def test_add_group_with_blank_group_name(run_api): + """""" + Adding new Group with blank group name + """""" + params, r = run_api.group_add(template={""name"": """"}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" + +" +/group/rest/add/,"adding new group by manager +",,"{ + ""status"" :403 +}","def test_add_group_by_manager(skip_if_not_manager, group_add): + """""" + when group name is provided + """""" + template, r = group_add + test_assert.status(r, 403) +" +/group/rest/bulkdelete/,requesting bulk deletion of groups with invalid token,"{ +group_id_list=[0] +}","{ +""status"":401, +""message"": ""Invalid Token"" +}","def test_group_bulk_delete_with_invalid_token(invalid_exec_api): + """""" + delete group in bulk with invalid token + """""" + r = invalid_exec_api.group_bulk_delete(group_id_list=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' +" +/group/rest/bulkdelete/,"providing valid group ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{""status"": 202/403, +""message"":Accepted}","PARAMETERS = [{""action"": GROUP_BULK_DELETE}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_bulk_delete(run_api, custom_group_admin_operations): + """""" + delete group in bulk + """""" + template, r = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) +" +/group/rest/bulkdelete/,"providing invalid ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +invalid_group_list = [0] +}","{""status"":400/403, +""message"":Bad Request}","@pytest.mark.skip(reason=""Skipping this test because it is returning 207 at the place of 400"") +def test_group_bulk_delete_with_invalid_id(run_api): + """""" + delete groups in bulk with invalid id + """""" + invalid_group_list = [0] + r = run_api.group_bulk_delete(invalid_group_list) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) +" +/group/rest/bulkdelete/,performing bulk delete without Authorization ,"{ +group_id_list = [0] +}","{ +""status"":401, +""message"":'Authentication credentials were not provided.' +}","def test_group_bulk_delete_without_authorization(anonymous_exec_api): + """""" + delete group in bulk without Authorization + """""" + r = anonymous_exec_api.group_bulk_delete(group_id_list=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' +" +/group/rest/delete/{id}/,requesting group deletion with invalid token,"{ +id=0 +}","{ + ""status"": 401, + ""message"": ""Invalid Token"" +}","def test_group_delete_with_invalid_token(invalid_exec_api): + """""" + Delete group with invalid token + """""" + r = invalid_exec_api.group_delete(id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' +" +/group/rest/delete/{id}/,"providing invalid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +id=0 +}","{ +""status"": 400/403, +""message"": ""Group that matches the query does not exist"" +}","def test_group_delete_with_invalid_id(run_api): + """""" + Delete a group with invalid id + """""" + r = run_api.group_delete(id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == 'Group matching query does not exist.' + test_assert.status(r, 400) +" +/group/rest/delete/{id}/,group deletion without authorization ,"{ +id=0 +}","{ + ""status"": 401, + ""message"" : 'Authentication credentials were not provided.' + +}"," +def test_group_delete_without_authorization(anonymous_exec_api): + """""" + Delete group without authorization + """""" + r = anonymous_exec_api.group_delete(id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' +" +/group/rest/delete/{id}/,"deleting group with valid group id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"": 204 +}","PARAMETERS = [{""action"": GROUP_DELETE}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_delete(run_api, custom_group_admin_operations): + """""" + delete a group + """""" + template, r = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) +" +/group/rest/remove-server/{id}/,removing server from group without Authorization ,"{ +group_id = 0 +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +} +","def test_group_remove_server_without_authorization(anonymous_exec_api): + """""" + Remove server from group without authorization + """""" + r = anonymous_exec_api.group_remove_server(group_id=0, params={""servers_list"": ['0']}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/group/rest/remove-server/{id}/,"removing server from group when server id is invalid.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 400/404 +}","@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_server_invalid_server_id(run_api, custom_group_admin_operations): + """""" + Remove server from group when server id is invalid + """""" + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + r = run_api.group_remove_server(group_id, params={""servers_list"": [""0""]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code == 400 or status_code == 404 +" +/group/rest/remove-server/{id}/,removing server from group when requested with invalid token,"{ +group_id = 0 +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +} +","def test_group_remove_server_with_invalid_token(invalid_exec_api): + """""" + Remove server from group with invalid token + """""" + r = invalid_exec_api.group_remove_server(group_id=0, params={""servers_list"": ['0']}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/group/rest/remove-server/{id}/,"removing server from group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""servers_list"": [""0""], + ""group_id"" = 0 + }","{ +""status"" : 400 +""message"" : ""Group does not exist"" +}","def test_group_remove_server_invalid_group_id(run_api, server_list): + """""" + Remove server from group when group id is invalid + """""" + r = server_list + res = r.json() + list_server = [result['UUID'] for result in res['results']] + servers_list = { + ""servers_list"": list_server + } + r = run_api.group_remove_server(group_id=0, params={""servers_list"": servers_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/group/rest/remove-server/{id}/,"removing server from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 403 / 202 +}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] + + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_server(skip_if_manager, run_api, custom_group_admin_operations): + """""" + Remove Server in Group + """""" + params, r = custom_group_admin_operations + group_id = params[""group_id""] + server_list = params[""server_list""] + r = run_api.group_remove_server(group_id, params={""servers_list"": server_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) +" +/group/rest/remove-server/{id}/,"removing server from group when both group and server id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +group_id = 0 +}","{ +""status"" : 400, +""message"" : ""Group does not exist"" +}","def test_group_remove_server_invalid_server_id_and_grp_id(run_api): + """""" + Remove server from group when both server id and group id is invalid + """""" + r = run_api.group_remove_server(group_id=0, params={""servers_list"": [""0""]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/group/rest/remove-user/{id}/,deleting user from group without Authorization ,"{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +} +","def test_group_remove_user_without_authorization(anonymous_exec_api): + """""" + Remove user from group without authorization + """""" + r = anonymous_exec_api.group_remove_user(group_id=0, params={""users_list"": [0]}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/group/rest/remove-user/{id}/,"deleting user from group when user id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 400 / 404 +}","PARAMETERS = [{""action"": GROUP_ADD}] + + +@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_user_with_invalid_user_id(run_api, custom_group_admin_operations): + """""" + Remove user from group when invalid user id is provided + """""" + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + r = run_api.group_remove_user(group_id, params={""users_list"": [0]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + status_code = r.status_code + assert status_code == 400 or status_code == 404 +" +/group/rest/remove-user/{id}/,deleting user from group when requested with invalid token,"{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +} +","def test_group_remove_user_with_invalid_token(invalid_exec_api): + """""" + Remove user from group with invalid token + """""" + r = invalid_exec_api.group_remove_user(group_id=0, params={""users_list"": [0]}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/group/rest/remove-user/{id}/,"deleting user from group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +group_id = 0 +}","{ +""status"" : 400 / 403, +""message"" : ""Group does not exist"" +}","def test_group_remove_user_with_invalid_grp_id(run_api, admin_exec_api): + """""" + Remove user from group when invalid group id is provided + """""" + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + r = run_api.group_remove_user(group_id=0, params={""users_list"": user_ids}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/group/rest/remove-user/{id}/,"deleting user from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +group_id = 0 +}","{ +""status"" : 403 / 202 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_user(skip_if_manager, run_api, custom_group_admin_operations): + """""" + Remove User in Group + """""" + params, r = custom_group_admin_operations + group_id = params[""group_id""] + user_list = params[""users_list""] + r = run_api.group_remove_user(group_id, params={""users_list"": user_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) + +" +/group/rest/remove-user/{id}/,"deleting user from group when both group and user id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 400, +""message"" : ""Group does not exist"" +}","def test_group_remove_user_invalid_grp_and_user_id(run_api): + """""" + Remove user from group when invalid user id and group id are provided + """""" + r = run_api.group_remove_user(group_id=0, params={""users_list"": [0]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of but as admin,,"{ +""status"" : 201, +} +","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Deleting the Deployed Island image by Admin + """""" + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of and not as admin,,"{ +""status"" : 403, +} +","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Deleting the Deployed Island image by non-Admin + """""" + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 403) +" +/ideploy/rest/deploy/{UUID}/,successful deployment of an island,,"{ +""status"" : 200, +} +","def test_ideploy_deploy_self(ideploy_deploy): + """""" + Deploy Island image + """""" + template, r = ideploy_deploy + test_assert.status(r, 200) + +" +/ideploy/rest/deploy/{UUID}/,Select a server for deployment,,"{ +""status"" : 201, +} +","def test_ideploy_deploy_select_server(run_api, ilibrary_add_new_island): + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + deploy_on = list(run_api.clm_my_servers.keys()) + r = run_api.ideploy_deploy(uuid, deploy_on) + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/deploy/{UUID}/,Select a group for deployment,,"{ +""status"" : 201, +} +","def test_ideploy_deploy_select_group(run_api, ilibrary_add_new_island): + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + grp_list = list(run_api.clm_my_groups.keys()) + r = run_api.ideploy_deploy(uuid, group_list=grp_list) + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/deploy/{UUID}/,provide tags in param,"{ + ""tag_list"": ""custom_tags"", + }","{ +""status"" : 200, +} +","def test_ideploy_deploy_with_tags(ilibrary_add_new_island, run_api): + """""" + provide tags in params + """""" + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + tag_name = ""custom_tags"" + params = { + ""tag_list"": [tag_name] + } + r = run_api.ideploy_deploy(uuid, **params) + rjson = r.json() + test_assert.status(r, 200) + isl_details = run_api.ideploy_details(rjson['deploy_uuid']).json() + all_tags = [tag['value'] for tag in isl_details['tags']] + assert tag_name in all_tags, ""|> Json %s"" % rjson + run_api.ideploy_delete(rjson['deploy_uuid']) +" +/ideploy/rest/deploy/{UUID}/,deploying an island without authorization,"{ +deploy_id=""invalid"" +}","{ +""status"":401, 
+""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_deploy_without_authorization(anonymous_exec_api): + """""" + deploy an island without authorization + """""" + deploy_id = ""invalid"" + idepl_deploy = anonymous_exec_api.ideploy_deploy(deploy_id, wait=False) + idepl_json = idepl_deploy.json() + test_assert.status(idepl_deploy, 401) + assert idepl_json[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/deploy/{UUID}/,deploying an island using invalid uuid,"{ +deploy_id=""invalid"" +}","{ +""status"":404, +""message"" : ""failure"" +}","def test_ideploy_deploy_invalid_uuid(run_api): + """""" + deploy with invalid uuid + """""" + deploy_id = ""invalid"" + r = run_api.ideploy_deploy(deploy_id) + test_assert.status(r, 404) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'does not exist' in res[""error""], res" +/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager has the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_deploy_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Deploying an Island Image by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager do not have the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_deploy_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Deploying an Island Image by manager when have no server right + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + lib_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + +" +/ideploy/rest/deploy/{UUID}/,deploying a island machine when requested with invalid token,"{ +deploy_id =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_deploy_invalid_token(invalid_exec_api): + """""" + deploying a island machine when requested with invalid token + """""" + deploy_id = ""invalid"" + idepl_deploy = 
invalid_exec_api.ideploy_deploy(deploy_id, wait=False) + idepl_json = idepl_deploy.json() + test_assert.status(idepl_deploy, 401) + assert idepl_json[""detail""] == ""Invalid token."" +" +/ideploy/rest/deploy/{UUID}/,"deploying an island machine using a valid existing uuid and providing the name param, where the name contains a slash ","{ +name : ""test/island"" +}","{ + ""status"" : 400, + ""message"" : ""Name cannot contain `/`"" +}","def test_ideploy_deploy_name_contains_slash(ilibrary_add_new_island, run_api): + """""" + name contains '/' + """""" + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(uuid, name=""test/island"") + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Name cannot contain `/`"", ""|> Json %s"" % rjson + +" +/ideploy/rest/deploy/{UUID}/,deploy with a name,"{ + ""name"": ""test_island"", +}","{ +""status"" : 201, +} +"," +def test_ideploy_deploy_with_name(run_api, ilibrary_add_new_island): + """""" + island deployment using name parameter + """""" + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(uuid, name=""test_island"") + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + assert ""test_island"" in rjson[""island""][""name""], rjson + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/deploy/{UUID}/,deploy a Public Island with Admin rights but not owner,,"{ +""status"" : 200, +} +","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_deploy_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Deploying an Island Image by Admin + """""" + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + x = r.json() + deploy_id = x[""deploy_uuid""] + test_assert.status(r, 200) + run_api.ideploy_delete(deploy_id) +" +/ideploy/rest/deploy/{UUID}/,deploy a Private Island which you are not an owner of and not as admin,,"{ +""status"" : 403, +} +","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_deploy_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Deploying an Island Image by Non-admin + """""" + lib_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, 403) +" +/ideploy/rest/deploy/{UUID}/,"deploy ""arch"":""aarch64"" type island and provide server which does not support it",,"{ +""status"" : 400, +""message"" : ""Either, Architecture of the selected Servers doesn't support 'aarch64' or the hvm_type of 'kvm' isn't supported"" +} +","def test_ideploy_island_with_aarch64(run_api, server_list_arm): + """""" + deploy ""arch"":""aarch64"" type island but server does not support it + """""" + params, r = run_api.library_add_new_vm(arch='aarch64') + rjson_lib = r.json() + + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island_params = template_add_ilibrary_one_machine(machine=machine) + params, r_isl = run_api.ilibrary_add_new_island(params=island_params) + uuid = r_isl.json()[""uuid""] + server_list = server_list_arm + deploy_on = server_list if server_list else list(run_api.clm_my_servers.keys()) + r = run_api.ideploy_deploy(uuid, deploy_on=deploy_on, name=""test_island"") + if server_list: + test_assert.status(r, 200) 
+ rjson = r.json() + deploy_id = rjson[""deploy_uuid""] + run_api.ideploy_delete(deploy_id) + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Either, Architecture of the selected Servers doesn't support 'aarch64' or the hvm_type of 'kvm' isn't supported"", ""|> Json %s"" % rjson + run_api.ilibrary_delete(uuid) + run_api.library_delete(rjson_lib[""uuid""]) +" +/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine without authorization,"{ +deploy_id =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_details_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + deploy_id = ""invalid"" + r = anonymous_exec_api.ideploy_details(deploy_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] + +" +/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine when requested with invalid token,"{ +deploy_id =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_details_with_invalid_token(invalid_exec_api): + """""" + Invalid Token + """""" + deploy_id = ""invalid"" + r = invalid_exec_api.ideploy_details(deploy_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] + + +" +/ideploy/rest/details/{UUID}/,fetching the deployment details of deployed island machine using invalid uuid ,"{ +deploy_id =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployment of Island with uuid:invalid doesn't exists"" +}","def test_ideploy_details_with_invalid_uuid(run_api): + """""" + Details of Island whose uuid does not exist + """""" + deploy_id = ""invalid"" + r = run_api.ideploy_details(deploy_id) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == ""Deployment of Island with uuid:invalid doesn't exists"", ""The error message is %s"" % rjson['error'] + +" +/ideploy/rest/details/{UUID}/,fetching details of the deployed island machine using valid uuid,,"{ +""status"":200 +}","def test_ideploy_details(ideploy_details): + """""" + Getting Island deploy details + """""" + x, r = ideploy_details + test_assert.status(r, 200) +" +/ideploy/rest/details/{UUID}/,Confirm state transition from Deploying to Stopped,,Working as intended,"def test_ideploy_details_confirm_state_from_deploying_to_stopped(ideploy_details): + """""" + Confirm state transition from Deploying to Stopped + """""" + x, r = ideploy_details + rjson = r.json() + assert rjson['state'] == ""stopped"", 'The error is %s' % rjson['state'] + +" +/ideploy/rest/details/{UUID}/,Confirm network segments that were deployed with the Island,,Attached as intended,"def test_ideploy_details_confirm_network_segments(ideploy_deploy, run_api): + """""" + Confirm network segments that were deployed with the Island + """""" + params, r = ideploy_deploy + network_segments = params['network_segments']['add'] + rjson = r.json() + deploy_id = rjson[""deploy_uuid""] + res = run_api.ideploy_details(deploy_id) + result = res.json() + r_network_segments = result['island']['network_segments'][2:] + + for i, j in zip(network_segments, r_network_segments): + assert i.get('name') == j.get('name') + assert i.get('description') == j.get('description') + assert i.get('enable_ipv4') == j.get('enable_ipv4') + + +" 
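Editor's note: the detail-confirmation tests above consume fixtures such as ideploy_details and unpack them as `x, r = ideploy_details`. As a reading aid, here is a minimal sketch of how such a fixture could be composed; it is inferred purely from how the tests in this file use it, so the fixture body (and the reuse of the run_api and ilibrary_add_new_island fixtures) is an assumption, not the suite's actual conftest code.

import pytest

@pytest.fixture
def ideploy_details(run_api, ilibrary_add_new_island):
    # Hypothetical composition inferred from usage: deploy a fresh island,
    # fetch its details, and clean the deployment up afterwards.
    params, r = ilibrary_add_new_island            # existing island-library fixture
    island_uuid = r.json()["uuid"]
    deploy = run_api.ideploy_deploy(island_uuid)   # deploy the island
    deploy_uuid = deploy.json()["deploy_uuid"]
    details = run_api.ideploy_details(deploy_uuid)
    yield deploy.json(), details                   # tests unpack as `x, r = ideploy_details`
    run_api.ideploy_delete(deploy_uuid)            # teardown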
+/ideploy/rest/details/{UUID}/,Confirm machine details with the deployment,,Created as intended,"def test_ideploy_details_confirm_machines_details(library_add_new_vm, run_api): + """""" + Confirm machine details with the deployment + """""" + params1, r1 = library_add_new_vm + island_name = rand_string() + mc_name = rand_string() + params = {""name"": island_name, + ""description"": f""This is description for {island_name}"", + ""is_public"": True, + ""machines"": {""add"": [{""uuid"": r1[""uuid""], ""name"": mc_name, ""description"": f""This is description for {mc_name}"", ""nics"": {""update"": []}, ""network_segments"": {""add"": []}}]}} + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + assert result['island']['name'] == f""{params['name']} #1"" + assert result[""island""]['description'] == params['description'] + assert result['machines'][0]['name'] == f""{params['machines']['add'][0]['name']} #1"" + assert result['machines'][0]['description'] == params['machines']['add'][0]['description'] + run_api.ideploy_delete(uuid=deploy_uuid) + run_api.ilibrary_delete(uuid=island_uuid, params={}) +" +/ideploy/rest/edit/{UUID}/,editing the deployed island machine when requested with invalid token,"deploy_id = ""invalid-deploy_uuid"" +edit_param = { +""name"": ""modified_colama"", +""description"": ""testing for edit"", +""allow_duplicate_network"": False +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_edit_with_invalid_token(invalid_exec_api): + """""" + Editing the Island deploy details with invalid token + """""" + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + ""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = invalid_exec_api.ideploy_edit(deploy_id, params=edit_param) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" + + +" +/ideploy/rest/edit/{UUID}/,editing the deployed island machine without authorization,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", + 'allow_duplicate_network': False +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_edit_without_authorization(anonymous_exec_api): + """""" + Editing the Island deploy details without authorization + """""" + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + ""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = anonymous_exec_api.ideploy_edit(deploy_id, params=edit_param) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" + +" +/ideploy/rest/edit/{UUID}/,editing the deployment of island machine using invalid uuid,"deploy_id = ""invalid-deploy_uuid"" +edit_param = { +""name"": ""modified_colama"", +""description"": ""testing for edit"", + ""allow_duplicate_network"": False +}","{ + ""status"" : 404, +}","def test_ideploy_edit_with_invalid_uuid(run_api): + """""" + Editing the Island deploy details by invalid uuid + """""" + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + ""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, 
params=edit_param) + test_assert.status(r, 404) +" +/ideploy/rest/edit/{UUID}/,Editing a Deployment with no parameters,,"{ + ""status"" : 400, + ""message"" : ""This field is required"", +}","def test_ideploy_edit_no_parameters(run_api, ideploy_deploy): + """""" + Editing the Island deploy details with No Parameters + """""" + param, result = ideploy_deploy + rjson = result.json() + deploy_id = rjson[""deploy_uuid""] + edit_param = {} + r = run_api.ideploy_edit(deploy_id, params=edit_param) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['allow_duplicate_network'] == ['This field is required.'] + + +" +/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of but with Admin rights,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", +""allow_duplicate_network"": False +}","{ + ""status"" : 202, +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_edit_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Changing of Island Deployed Image by Admin + """""" + ideploy_id = custom_ilib_non_admin_operations + edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} + r = run_api.ideploy_edit(ideploy_id, params=edit_param) + test_assert.status(r, 202) +" +/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of and without Admin rights,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", +""allow_duplicate_network"": False +}","{ + ""status"" : 403, +}"," +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_edit_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Changing of Island Deployed Image by non-Admin + """""" + # Non-admin check for changing details of a Deployed Image created by different user. 
+ ideploy_id = custom_ilib_admin_operations + edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} + r = run_api.ideploy_edit(ideploy_id, params=edit_param) + test_assert.status(r, 403) +" +/ideploy/rest/edit/{UUID}/,adding duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false,,"{ + ""status"" : 400, +}"," +def test_ideploy_edit_add_duplicate_mac_in_same_machine(run_api): + """""" + Editing the Island deploy details by Add duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ideploy"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + + r = run_api.ideploy_deploy(uuid) + deploy_id = r.json()['deploy_uuid'] + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + machine_uuid = rjson['machines'][0]['uuid'] + machine_mac = rjson['machines'][0]['machine']['hw']['networks'][0]['mac'] + island_uuid = rjson['island']['uuid'] + + params3 = { + ""updated_machines"": [ + { + ""uuid"": machine_uuid, + 'nics': { + 'add': [ + { + ""model"": ""virtio"", + 'mac': machine_mac + } + ] + } + } + ], + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, params=params3) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. 
Use `allow_duplicate_network` to force continue..."" + run_api.ideploy_delete(deploy_id) + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) +" +/ideploy/rest/edit/{UUID}/,Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false ,,"{ + ""status"" : 400, +}"," +def test_ideploy_edit_update_duplicate_mac_in_different_machines(run_api): + """""" + Editing the Island deploy details by Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ideploy"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + }, + { + ""uuid"": r2.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + r = run_api.ideploy_deploy(uuid) + deploy_id = r.json()['deploy_uuid'] + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + machine1_uuid = rjson['machines'][0]['uuid'] + machine2_uuid = rjson['machines'][1]['uuid']  # second machine, not a copy of index 0 + island_uuid = rjson['island']['uuid'] + params3 = { + ""updated_machines"": [ + { + ""uuid"": machine1_uuid, + 'nics': { + 'add': [ + { + ""model"": ""virtio"", + 'mac': '56:54:00:0C:8A:4A' + } + ] + } + }, + { + ""uuid"": machine2_uuid, + 'nics': { + 'add': [ + { + ""model"": ""virtio"", + 'mac': '56:54:00:0C:8A:4A' + } + ] + } + } + ], + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, params=params3) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. Use `allow_duplicate_network` to force continue..."" + run_api.ideploy_delete(deploy_id) + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) + +" +/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid uuid,,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list_invalid_uuid(run_api): + """""" + Fetching the list of deployed islands using invalid uuid + """""" + params = {""uuid"": ""invalid""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) +" +/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid name,"{""name"": ""invalid""}","{ +""status"":200, +""response"": list of deployed islands +}"," +def test_ideploy_list_invalid_name(run_api): + """""" + Fetching the list of deployed islands using invalid name + """""" + params = {""name"": ""invalid""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) +" +/ideploy/rest/list/,"successfully fetching the list of deployed islands by adding filters. Check the user type before performing the operation. 
+",,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list_filter(run_api): + """""" + Fetching the list of deployed islands by adding filters + """""" + params, res, isl_res = [], [], [] + ideploy_count = 10 + arch = run_api.arch_type + prefix_name = f""filter_island_2_{rand_string()}_"" + isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ideploy_count)] + networks = template_networks() + if arch == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + params3, r3 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], + ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], + ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], + ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + } + for i in range(ideploy_count): + param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, + machine3=machine3, name=isl_lib_name[i]) + isl_uuid = r.json()[""uuid""] + params.append(param) + res.append(r) + isl_r = run_api.ideploy_deploy(isl_uuid) + isl_res.append(isl_r) + random_int = randint(0, 9) + name_filter = {""name"": params[random_int].get(""name"") + "" #1"", ""page_size"": ideploy_count} + uuid_filter = {""uuid"": isl_res[random_int].json().get(""deploy_uuid""), ""page_size"": ideploy_count} + owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" + else ""vivekt"" if run_api.user_type == ""non-admin"" + else ""manager"", ""search"": prefix_name, ""page_size"": ideploy_count} + exp_res = { + 0: [i.get(""name"") for i in params if i.get(""name"") + "" #1"" == name_filter.get(""name"")], + 1: [i.json().get(""deploy_uuid"") for i in isl_res if i.json().get(""deploy_uuid"") == uuid_filter.get(""uuid"")], + 2: [i.json().get(""owner"") for i in res], + } + filters = [name_filter, uuid_filter, owner_filter] + for filter in range(len(filters)): + r = run_api.ideploy_list(filters[filter]) + # check for valid response data with the filter parameters + if len(r.json().get(""results"")) != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + test_assert.status(r, 200) + run_api.library_delete(r1.json()[""uuid""], params1) + run_api.library_delete(r2.json()[""uuid""], params2) + run_api.library_delete(r3.json()[""uuid""], params3) + for i in range(ideploy_count): + isl_rjson = isl_res[i].json() + if 'error' not in isl_rjson.keys(): + uuid = isl_rjson[""deploy_uuid""] + run_api.ideploy_delete(uuid) + ilib_rjson = res[i].json() + if 'error' not in ilib_rjson.keys(): + uuid = ilib_rjson[""uuid""] + run_api.ilibrary_delete(uuid, params[i]) +" +/ideploy/rest/list/,successfully fetching the list of deployed 
islands,,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list(ideploy_list): + """""" + Fetching the list of deployed islands + """""" + template, r = ideploy_list + test_assert.status(r, 200) +" +/ideploy/rest/list/,fetching the list of deployed islands which is filtered on created and update DateTime Filter,"{ + uuid = 'valid-deployment-uuid' +}",,"def test_ideploy_filter_timefilter(run_api: apiops, ilibrary_add_new_island): + """""" + Filter on created and update DateTime Filter + """""" + template, r = ilibrary_add_new_island + rjson = r.json() + ilib_id = rjson[""uuid""] + r = run_api.ideploy_deploy(ilib_id) + ideploy_id = r.json()[""deploy_uuid""] + r_details = run_api.ideploy_details(ideploy_id).json() + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = r_details['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(r_details['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if machine was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the 
detail and 'created_date_range' is passed as'year' + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 
'created_end_date' are same as in the detail and 'created_date_range' is passed as'year + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + run_api.ideploy_delete(ideploy_id) +" +/ideploy/rest/list/,fetching the list of deployed islands when provided with tag value,"{ +""tags"": tag_value +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_tag_value(run_api, ideploy_details): + """""" + when provided with tag value + """""" + x, r = ideploy_details + detail = r.json() + tag_value = detail['tags'][0]['value'] + params = {""tags"": tag_value} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) + rjson = r.json() + for island in rjson['results']: + result = run_api.ideploy_details(island['uuid']).json() + assert tag_value == result['tags'][0]['value'], ""|> Json %s"" % result +" +/ideploy/rest/list/,"fetching the list of deployed islands when provided with tag name in [""_sessionid"", ""_session_name"", ""_session_created_on""]","{ +tag_name = ""valid-name"" +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_tag_name(run_api, ideploy_details): + """""" + when provided with tag name + """""" + x, r = ideploy_details + detail = r.json() + tag_value = detail['tags'][0]['name'] + params = {""tags"": tag_value} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) + rjson = r.json() + for island in rjson['results']: + result = run_api.ideploy_details(island['uuid']).json() + assert tag_value == result['tags'][0]['name'], ""|> Json %s"" % result +" +/ideploy/rest/list/,fetching the list of deployed islands when no Token Provided,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_list_without_authorization(anonymous_exec_api): + """""" + Fetching the list of deployed 
islands without authorization + """""" + idepl_list = anonymous_exec_api.ideploy_list() + idepl_json = idepl_list.json() + test_assert.status(idepl_list, 401) + assert idepl_json[""detail""] == ""Authentication credentials were not provided."" + +" +/ideploy/rest/list/,fetching the list of deployed islands by user who does not own the deployed image,"{ + uuid = 'valid-deployment-uuid' +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_island_not_owner(skip_if_non_admin, non_admin_exec_api, ideploy_deploy): + """""" + Fetching the list of deployed islands of other user + """""" + template, r = ideploy_deploy + idepl_list = non_admin_exec_api.ideploy_list() + user = non_admin_exec_api.user + owners = [result['island']['owner'] for result in idepl_list.json()['results'] if result['island']['owner'] != user] + test_assert.status(idepl_list, 200) + assert len(owners) == 0 +" +/ideploy/rest/list/,fetching the list of deployed islands filtered by scope,"{ +""scope"": ""all"" +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_scope(run_api): + """""" + filter by scope + """""" + params = {""scope"": ""all""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) +" +/ideploy/rest/list/,fetching the list of deployed islands when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_list_invalid_token(invalid_exec_api): + """""" + Fetching the list of deployed islands by invalid token + """""" + idepl_list = invalid_exec_api.ideploy_list() + idepl_json = idepl_list.json() + test_assert.status(idepl_list, 401) + assert idepl_json[""detail""] == ""Invalid token."" + +" 
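Editor's note: the created/updated DateTime-filter test above leans heavily on a convert_datetime_stringform helper. From its usage it appears to round-trip between the API's ISO timestamp form ('2023-09-14T17:59:39.173594Z') and the plain form the list filters accept: a string input yields a datetime (so callers can add or subtract a timedelta), and a datetime input yields a string. A minimal sketch under that assumption follows; the project's real helper may differ.

from datetime import datetime

def convert_datetime_stringform(value):
    # Hypothetical helper inferred from the filter tests' usage; not the
    # project's actual implementation.
    if isinstance(value, datetime):
        # datetime -> '2023-09-14 17:59:39.173594', the form the filters accept
        return value.strftime('%Y-%m-%d %H:%M:%S.%f')
    # ISO string '2023-09-14T17:59:39.173594Z' -> datetime
    return datetime.strptime(value.replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')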
+/ideploy/rest/pause/{UUID}/,pausing the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_pause_self(ideploy_pause): + """""" + Pausing the Island + """""" + r = ideploy_pause + test_assert.status(r, 201) +" +/ideploy/rest/pause/{UUID}/,pausing the deployment of island machine when requested with invalid UUID,"{ +uid =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesn't exist."" +}","def test_ideploy_pause_with_invalid_uuid(run_api): + """""" + test_ideploy_pause_with_invalid_uuid + """""" + uid = ""invalid"" + r = run_api.ideploy_pause(uuid=uid) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" +" +/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine without authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_pause_without_authorization(anonymous_exec_api): + """""" + test_ideploy_pause_without_authorization + """""" + uid = ""invalid"" + r = anonymous_exec_api.ideploy_pause(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine when requested with invalid token,"{ +uid =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_pause_with_invalid_token(invalid_exec_api): + """""" + test_ideploy_pause_with_invalid_token + """""" + uid = ""Invalid"" + r = invalid_exec_api.ideploy_pause(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Invalid token."" +" +/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of but with Admin rights,,201 : job created,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_pause_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Pausing the Island by Admin + """""" + # Admin check of Pausing a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_pause(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of and without Admin rights,,403: Forbidden,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_pause_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Pausing the Island by non-admin + """""" + # Non-admin check of Pausing a deployment created by different user + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_pause(deploy_id) + test_assert.status(r, 403) +" +/ideploy/rest/pause/{UUID}/,island deployment for a machine from running state to paused state,,"{ +""response"": machine paused +}","def test_ideploy_check_from_running_to_paused(run_api, ideploy_start): + """""" + island deploy from running state to paused state + """""" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + res = run_api.ideploy_details(uuid=deploy_id).json() + initial_state = res[""state""] + if initial_state != ""running"": + assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state + run_api.ideploy_pause(uuid=deploy_id) + result = run_api.ideploy_details(uuid=deploy_id).json() + paused_network_segments = result['island']['network_segments'] + for pause in
paused_network_segments: + if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): + assert pause['status'] == ""inactive"", ""json |> %s"" % pause + final_state = result[""state""] + assert final_state == ""paused"", ""The error is %s"" % result +" +/ideploy/rest/pause/{UUID}/,Check for the transition of state from Running to Paused,,Working as intended," +def test_ideploy_check_from_running_to_paused(run_api, ideploy_start): + """""" + test_ideploy_check_from_running_to_paused + """""" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + res = run_api.ideploy_details(uuid=deploy_id).json() + initial_state = res[""state""] + if initial_state != ""running"": + assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state + run_api.ideploy_pause(uuid=deploy_id) + result = run_api.ideploy_details(uuid=deploy_id).json() + paused_network_segments = result['island']['network_segments'] + for pause in paused_network_segments: + if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): + assert pause['status'] == ""inactive"", ""json |> %s"" % pause + final_state = result[""state""] + assert final_state == ""paused"", ""The error is %s"" % result +" +/ideploy/rest/resume/{UUID}/,successful island deployment for a machine from paused state to running state,,"{ +""response"": machine running +}","def test_ideploy_resume_checking_state_paused_to_running(ideploy_start, run_api): + """""" + Check for the transition of state from Paused to Running + """""" + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + run_api.ideploy_pause(deploy_id) + paused_r = run_api.ideploy_details(deploy_id) + paused_rjson = paused_r.json() + assert paused_rjson['state'] == 'paused', ""json |> %s"" % paused_rjson + run_api.ideploy_resume(deploy_id) + resume_r = run_api.ideploy_details(deploy_id) + resume_rjson = resume_r.json() + assert resume_rjson['state'] == 'running', ""json |> %s"" % resume_rjson +" +/ideploy/rest/segment_start/,starting the segment of the island by manager when he has rights over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_segment_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Starting the segment of the Island by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + run_api.ideploy_segment_stop(seg_id) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.ideploy_segment_stop(seg_id) +" +/ideploy/rest/resume/{UUID}/,resuming the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_resume(ideploy_resume): + """""" + When provided with valid uuid + """""" + r = ideploy_resume + test_assert.status(r, 201) +" +/ideploy/rest/resume/{UUID}/,resuming the deployment of island machine when requested with invalid UUID,"{ +uid =""invalid"" +} + 
+","{ + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_resume_invalid_uuid(run_api): + """""" + When provided with invalid uuid + """""" + uuid = ""invalid"" + r = run_api.ideploy_resume(uuid) + test_assert.status(r, 404) + rjson = r.json() + assert rjson[""error""] == ""Deployed Island Doesn't Exist"", 'The error is %s' % rjson[""error""] +" +/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine without authorization,"{ +uuid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_resume_without_authorization(anonymous_exec_api): + """""" + When provided without authorization + """""" + uuid = ""invalid"" + r = anonymous_exec_api.ideploy_resume(uuid, wait=False) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] + + +" +/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine when requested with invalid token,"{ +uuid =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_resume_invalid_token(invalid_exec_api): + """""" + When provided with invalid token + """""" + uuid = ""invalid"" + r = invalid_exec_api.ideploy_resume(uuid, wait=False) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] +" +/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of but with Admin rights,,201 : job created," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_resume_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Resume a Deployment which you are not an owner of but with Admin rights + """""" + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of and without Admin rights,,403: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_resume_without_owner_and_admin(skip_if_admin, custom_ilib_admin_operations, run_api): + """""" + Resume a Deployment which you are not an owner of and without Admin rights + """""" + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, 403) + rjson = r.json() + assert rjson['error'] == 'You do not have permission to perform this action.', ""json |> %s"" % rjson +" +/ideploy/rest/resume/{UUID}/,island deployment for a machine from paused state to running state by a manager who does not have permissions over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_resume_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Ideploy resume by manager without server right + """""" + # When Manager manages the user but not the server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + # when manager does 
not manage the user nor the server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) +" +/ideploy/rest/segment_start/,starting the segment of the island by manager when he does not have rights over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_segment_start_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Starting the segment of the Island by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.ideploy_segment_stop(seg_id) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.ideploy_segment_stop(seg_id) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager has rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_segment_stop_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Stopping the segment of an Island by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager does not have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_segment_stop_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Stopping the segment of an Island by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user
is part of the group that the manager manages but the deployment is not on manager rightful server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + +" +/ideploy/rest/segment_stop/,stopping the segment of an island machine using a valid existing UUID,,"{ +""status"" : 201, +""response"": segment stopped +}","def test_ideploy_segment_stop_valid_uuid(run_api, ideploy_details): + """""" + Stopping the segment of the Island + """""" + param, result = ideploy_details + seg_id = result.json()[""island""][""network_segments""][2][""uuid""] + r = run_api.ideploy_segment_start(seg_id) + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 201) +" +/ideploy/rest/segment_stop/,stopping the segment of an island by admin user,,"{ +""status"" : 201, +""response"": segment stopped +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_segment_stop_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Stopping the segment of an Island by Admin + """""" + # Admin check of Stopping a deployment created by different user + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 201) +" +/ideploy/rest/segment_stop/,stopping the segment of an island by a non-admin user,,"{ +""status"" : 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_segment_stop_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Stopping the segment of an Island by non-admin + """""" + # Non-admin check of Stopping a deployment created by a different user + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 403) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine, where the segment is non-deployable. 
check the user type before performing the operation; only the admin user type has permission to perform such operations.",,"{ +""status"" : 400, +""message"" : ""No operation is allowed on the segment, as it is part of the library"""" + +}","def test_ideploy_stop_non_deployment_segment(ilibrary_details, run_api): + """""" + Stop a Segment which is part of Island (not Deployment) + """""" + r = ilibrary_details + rjson = r.json() + uuid = rjson['network_segments'][2]['uuid'] + res = run_api.ideploy_segment_start(uuid) + res = run_api.ideploy_segment_stop(uuid) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(res, 403) + result = res.json() + assert result['error'] == ""You do not have permission to perform this action."" + else: + test_assert.status(res, 400) + result = res.json() + assert result['error'] == f""No operation is allowed on {rjson['network_segments'][2]['name']} , as it is part of the library"" + + +" +/ideploy/rest/segment_stop/,stopping the segment of an island machine without authorization,"{ +seg_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" + +}","def test_ideploy_segment_stop_without_authorization(anonymous_exec_api): + """""" + Stopping the segment of the Island without authorization + """""" + seg_id = ""invalid"" + seg_stop = anonymous_exec_api.ideploy_segment_stop(seg_id, wait=False) + seg_json = seg_stop.json() + test_assert.status(seg_stop, 401) + assert seg_json[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/segment_stop/,stopping the segment of an island machine using invalid token,"{ +seg_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token."" + +}","def test_ideploy_segment_stop_invalid_token(invalid_exec_api): + """""" + Stopping the segment of the Island using invalid token + """""" + seg_id = ""invalid"" + seg_stop = invalid_exec_api.ideploy_segment_stop(seg_id, wait=False) + seg_json = seg_stop.json() + test_assert.status(seg_stop, 401) + assert seg_json[""detail""] == ""Invalid token."" +" +/ideploy/rest/segment_stop/,stopping the segment of an island machine using invalid deployment uuid,"{ +seg_id = ""invalid"" +}","{ +""status"" : 404, +""response"": Failure +}","def test_ideploy_segment_stop_invalid_uuid(run_api): + """""" + Stopping the segment of the Island + """""" + seg_id = ""invalid"" + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 404) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by changing the state of the machine from ""stopped"" to ""stopped"", where the island state and segment state are already ""stopped""",,"{ +""response"" : operation successful +}","def test_ideploy_segment_stop_check_state_of_segments(ideploy_details, run_api): + """""" + Check for the transition of state from Stopped to Stopped (if Island state was Stopped and all Segments are Stopped) + """""" + param, result = ideploy_details + deploy_id = param[""deploy_uuid""] + + seg_id = result.json()[""island""][""network_segments""][2][""uuid""] + result = run_api.ideploy_segment_stop(seg_id) + result = run_api.ideploy_details(deploy_id) + rjson = result.json() + segments = [segment for segment in rjson[""island""][""network_segments""]][2:4] + + assert rjson['state'] == 'stopped', ""The error is %s"" % rjson + machines = rjson['machines'] + for machine in machines: + assert machine['state'] == 'stopped', ""The error is %s"" % (machine) + for segment in segments: + assert 
segment['status'] == 'inactive', ""The error is %s"" % (segment) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by changing the state of the machine from ""running"" to ""mixed"", where the island is already in running state","{ + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" +}","{ +""response"" : operation successful +}","def test_ideploy_segment_stop_check_state_running_to_mixed(run_api, ideploy_start): + """""" + Check for the transition of state from Running to Mixed (if Island state was Running) + """""" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + result = run_api.ideploy_details(deploy_id) + assert result.json()['state'] == ""running"", ""The error is %s"" % (result.json()['state']) + + machine_uuids = [mc[""uuid""] for mc in result.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + seg_ids = [segment[""uuid""] for segment in result.json()[""island""][""network_segments""]][2:4] + run_api.ideploy_segment_start(seg_ids[0]) + run_api.ideploy_segment_start(seg_ids[1]) + run_api.ideploy_segment_stop(seg_ids[0]) + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + assert rjson['state'] == ""mixed"", ""The error is %s"" % (rjson) + + +" +/ideploy/rest/segment/start/{UUID}/,starting the segment of deployed island machine,,"{ +""status"":201 +}","def test_ideploy_segment_start_self(ideploy_start): + """""" + Start the Island + """""" + x, r = ideploy_start + test_assert.status(r, 201) +" +/ideploy/rest/segment/start/{UUID}/,starting the segment of a deployed island machine where the segment UUID does not exist,"{ +uid =""invalid"" +}","{ + ""status"" : 404, + ""message"" : ""Deployed island doesn't exist."" +}"," +def test_ideploy_segment_start_with_invalid_uuid(run_api): + """""" + segment UUID does not exist + """""" + uid = ""invalid"" + r = run_api.ideploy_start(uuid=uid) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" +" +/ideploy/rest/segment/start/{UUID}/,"starting island deployment for all segments of an island machine +","machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + + } +params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + ] + } + }","{ +""response"": operation successful +}","def test_ideploy_deployment_starts_all_segment(run_api): + """""" + starting island deployment for all segments + """""" + networks = template_networks() + params1, r1 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + + } + params = { + ""name"": ""Machine1"", + 
""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + run_api.ideploy_start(deploy_uuid) + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + segment_list = result[""island""][""network_segments""] + for segment in segment_list: + if segment[""status""] != ""active"": + assert False, ""The error is %s"" % result + machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + run_api.ideploy_shutdown(deploy_uuid) + run_api.ideploy_delete(uuid=deploy_uuid) + run_api.ilibrary_delete(uuid=island_uuid) + run_api.library_delete(r1.json()[""uuid""]) +" +/ideploy/rest/segment/start/{UUID}/,starting island deployment an island machine from stopped state to running state,,"{ +""response"": operation successful +}","def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): + """""" + test_ideploy_check_from_stopped_to_running + """""" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + result = run_api.ideploy_details(uuid=deploy_id).json() + final_state = result[""state""] + assert final_state == ""running"", 'The error is %s' % result +" +/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine without Authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_segment_start_without_authorization(anonymous_exec_api): + """""" + test_ideploy_without_authorization + """""" + uid = ""invalid"" + r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine when requested with invalid token,"{ +uid =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_segment_start_with_invalid_token(invalid_exec_api): + """""" + test_ideploy_with_invalid_token + """""" + uid = ""invalid"" + r = invalid_exec_api.ideploy_start(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Invalid token."" +" +/ideploy/rest/snapshot/{UUID}/,taking snapshot of an island whenit is in running state,,"{ + ""status"" : 400, + ""message"" : ""Island snapshot is only allowed when all machines are in stopped state"" +}","def test_ideploy_snapshot_when_island_in_running_state(ideploy_start, run_api): + """""" + taking snapshot when island is in running state + """""" + x, r = ideploy_start + isl_id = x['deploy_uuid'] + r, rtask_details = run_api.ideploy_snapshot(uuid=isl_id) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Island snapshot is only allowed when all machines are in stopped state"", ""|> Json %s"" % rjson +" +/ideploy/rest/snapshot/{UUID}/,taking island of a deployed island mahcine without authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not 
provided."" +}","def test_ideploy_snapshot_without_authorization(anonymous_exec_api): + """""" + taking snapshot of island without_authorization + """""" + uid = ""invalid"" + r, r_details = anonymous_exec_api.ideploy_snapshot(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/snapshot/{UUID}/,snapshotting the deployment of island machine when requested with invalid token,"{ +uid =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_snapshot_with_invalid_uuid(run_api): + """""" + snapshotting the deployment of island machine when requested with invalid token + """""" + uid = ""invalid"" + r, r_details = run_api.ideploy_snapshot(uuid=uid, wait=False) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" +" +/ideploy/rest/snapshot/{UUID}/,snapshotting the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_snapshot_self(ideploy_snapshot): + """""" + Snapshot the Island + """""" + r = ideploy_snapshot + test_assert.status(r, 201) +" +/ideploy/rest/snapshot/{UUID}/,Snapshotting a deployment creates revision in Island,,,"def test_ideploy_snapshot_creates_revision(run_api, ideploy_deploy): + """""" + test_ideploy_snapshot_creates_revision + """""" + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id) + snapshotted_island_uid = rtask_details[""result""][""snapshotted_island_uuid""] + revision_count = run_api.ilibrary_details(uuid=snapshotted_island_uid).json()[""revision""] + run_api.ilibrary_delete(uuid=snapshotted_island_uid) + assert revision_count != 1, ""Revision count should not be 1, the error is {}"".format(rtask_details) +" +/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201 +}"," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_snapshot_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Snapshot the Island by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + test_assert.status(r, 201) + run_api.ideploy_delete(deploy_id) + run_api.ilibrary_delete( + rtask_details['result']['snapshotted_island_uuid'], {}) +" +/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_snapshot_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Snapshot the Island by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_ilib_admin_operations + r, rtask = run_api.ideploy_snapshot(deploy_id) + test_assert.status(r, 403) +" +/ideploy/rest/snapshot/{UUID}/,Check for the transition of state from Stopped to Snapshotting,,,"def test_snapshot_check_from_stop_to_snapshotting(run_api, ilibrary_add_new_island): + """""" + check state transition from stop to snapshotting + """""" + params, r = ilibrary_add_new_island + island_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(island_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, current_state = 
run_api.deployment_snapshot_state_details(id=deploy_id) + assert current_state == ""snapshotting"", ""Current state is in {}"".format(current_state) + temp = wait_to_complete(run_api, r.json()) + snapshotted_island_uid = temp[""result""][""snapshotted_island_uuid""] + run_api.ideploy_delete(uuid=deploy_id) + run_api.ilibrary_delete(uuid=snapshotted_island_uid) +" +/ideploy/rest/snapshot/{UUID}/,add description in param,"{ + ""description"" : ""This is Test description"" +}","{ +""status"": 201 +}","def test_ideploy_snapshot_provided_description(ilibrary_add_new_island, run_api): + """""" + provide description when taking snapshot + """""" + params, r = ilibrary_add_new_island + island_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(island_uuid) + deploy_id = r.json()['deploy_uuid'] + description = ""This is Test description"" + r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id, description=description) + snapshot_id = rtask_details[""result""][""snapshotted_island_uuid""] + test_assert.status(r, 201) + isl_details = run_api.ilibrary_details(snapshot_id).json() + assert isl_details['description'] == description, ""|> Json %s"" % isl_details + run_api.ideploy_delete(uuid=deploy_id) + run_api.ilibrary_delete(uuid=snapshot_id) +" +/ideploy/rest/start/{UUID}/,starting the deployment of island machine when requested with invalid UUID,"{ +uid =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesn't exist."" +}","def test_ideploy_with_invalid_uuid(run_api): + """""" + starting the deployment of island machine when requested with invalid uuid + """""" + uid = ""invalid"" + r = run_api.ideploy_start(uuid=uid) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" +" +/ideploy/rest/start/{UUID}/,starting the deployment of island machine,,"{ +""status"":201 +}","def test_ideploy_start_self(ideploy_start): + """""" + Start the Island + """""" + x, r = ideploy_start + test_assert.status(r, 201) +" +/ideploy/rest/start/{UUID}/,"starting island machine by a manager, when the manager has rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Start the Island by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + run_api.ideploy_stop(deploy_id) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.ideploy_stop(deploy_id) +" +/ideploy/rest/start/{UUID}/,"starting island machine by a manager, when the manager does not have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_start_manager_no_server_right(skip_if_not_manager, + 
custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Start the Island by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.ideploy_stop(deploy_id) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.ideploy_stop(deploy_id) +" +/ideploy/rest/start/{UUID}/,Starting a deployment starts all Segments inside of it,,,"def test_ideploy_deployment_starts_all_segment(run_api): + """""" + test_ideploy_deployment_starts_all_segment + """""" + networks = template_networks() + params1, r1 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + run_api.ideploy_start(deploy_uuid) + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + segment_list = result[""island""][""network_segments""] + for segment in segment_list: + if segment[""status""] != ""active"": + assert False, ""The error is %s"" % result + machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + run_api.ideploy_shutdown(deploy_uuid) + run_api.ideploy_delete(uuid=deploy_uuid) + run_api.ilibrary_delete(uuid=island_uuid) + run_api.library_delete(r1.json()[""uuid""]) + +" +/ideploy/rest/start/{UUID}/,starting a deployed island machine without authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_start_without_authorization(anonymous_exec_api): + """""" + test_ideploy_without_authorization + """""" + uid = ""invalid"" + r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201 +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_start_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Start 
the Island by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, 201) + run_api.ideploy_stop(deploy_id) +" +/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_start_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Start the Island by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, 403) + run_api.ideploy_stop(deploy_id) +" +/ideploy/rest/start/{UUID}/,Check for the transition of state from Stopped to Running,,Working as intended,"def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): + """""" + state transition from stopped to running + """""" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + result = run_api.ideploy_details(uuid=deploy_id).json() + final_state = result[""state""] + assert final_state == ""running"", 'The error is %s' % result +" +/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using valid existing uuid,,"{ +""status"":201 +}","def test_ideploy_stop(ideploy_start, run_api): + """""" + When provided with valid uuid + """""" + params, r = ideploy_start + deploy_id = params['deploy_uuid'] + res = run_api.ideploy_stop(deploy_id) + test_assert.status(res, 201) +" +/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using invalid uuid,"{ +deploy_id =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesn't exist."" +}","def test_ideploy_stop_invalid_uuid(run_api): + """""" + When Island Deployment uuid does not exist + """""" + deploy_id = ""invalid"" + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == ""Deployed Island Doesn't Exist"", ""The error message is %s"" % rjson +" +/ideploy/rest/stop/{UUID}/,stopping the deployment of deployed island machine when requested with invalid token,"{ +deploy_id =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_stop_with_invalid_token(invalid_exec_api): + """""" + When provided with invalid token + """""" + deploy_id = ""invalid"" + r = invalid_exec_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson + +" +/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager, when the manager has rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_stop_by_manager_with_server_right(skip_if_not_manager, custom_ilib_non_admin_operations, custom_ilib_admin_operations, run_api): + """""" + Ideploy stop by manager with server right + """""" + # when the manager manages the user and server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=True)) + + # when the manager manages the server but does not manage the user + deploy_id = 
custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=False)) +" +/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager, when the manager does not have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_stop_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Ideploy stop by manager without server right + """""" + # When Manager manages the user but not the server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + # when manager does not manage the user nor the server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) +" +/ideploy/rest/stop/{UUID}/,Stopping a deployment Stops all Segments inside of it,,,"def test_ideploy_stop_checking_state_of_segments(ideploy_start, run_api): + """""" + Stopping a deployment Stops all Segments inside of it + """""" + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + run_api.ideploy_stop(deploy_id) + run_api.ideploy_shutdown(deploy_id) + stop_r = run_api.ideploy_details(deploy_id) + stop_rjson = stop_r.json() + stop_network_segments = stop_rjson['island']['network_segments'] + for stop in stop_network_segments: + if stop['name'] not in (""Default Public Segment"", ""HostOnly Segment""): + assert stop['status'] == ""inactive"", ""json |> %s"" % stop + +" +/ideploy/rest/stop/{UUID}/,stopping a deployed island machine without authorization,"{ +deploy_id =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_stop_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + deploy_id = ""invalid"" + r = anonymous_exec_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson + + +" +/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201, +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_stop_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Stop a Deployment which you are not an owner of but with Admin rights + """""" + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403, +""message"" : 'You do not have permission to perform this action.' 
+}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_stop_without_owner_and_admin_rights(skip_if_admin, custom_ilib_admin_operations, run_api): + """""" + Stop a Deployment which you are not an owner of and without Admin rights + """""" + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 403) + rjson = r.json() + assert rjson['error'] == 'You do not have permission to perform this action.', ""The error message is %s"" % rjson +" +/ideploy/rest/stop/{UUID}/,Check for the transition of state from Running to Stopped,,,"def test_ideploy_stop_checking_state_running_to_stop(ideploy_start, run_api): + """""" + Check for the transition of state from Running to Stopped + """""" + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + running_r = run_api.ideploy_details(deploy_id) + running_rjson = running_r.json() + assert running_rjson['state'] == 'running', ""json |> %s"" % running_rjson + run_api.ideploy_stop(deploy_id) + run_api.ideploy_shutdown(deploy_id) + stop_r = run_api.ideploy_details(deploy_id) + stop_rjson = stop_r.json() + assert stop_rjson['state'] == 'stopped', ""json |> %s"" % stop_rjson +" +/ilibrary/rest/add/,creating an island library and adding it when user is unauthorized,"{ + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_add_without_authorization(anonymous_exec_api): + """""" + Creating an Island Library without authorization + """""" + params = {} + params, r = anonymous_exec_api.ilibrary_add_new_island(params=params) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' 
+" +/ilibrary/rest/add/,creating an island library and adding it when the segment name for NIC segment is different from what is to being added for this Island,"{ + ""name"": ""test_ilibrary_add_machine_with_other_nic"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": rjson[""uuid""], + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + ""segment"": ""Other_segment"" + } + ] + } + } + ] + } + }","{ +""status"" : 400, +""message"" : ""Provided name of Segment isn't part of this Island"" +}","def test_ilibrary_add_machine_other_nic(run_api, library_add_new_vm): + """""" + Add segment name for NIC segment as different from what is to being added for this Island + """""" + params, rjson = library_add_new_vm + params = { + ""name"": ""test_ilibrary_add_machine_with_other_nic"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": rjson[""uuid""], + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + ""segment"": ""Other_segment"" + } + ] + } + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Provided name [Other_segment] of Segment isn't part of this Island"" + +" +/ilibrary/rest/add/,creating an island library and adding it when start_ip has value greater than that of end_ip,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.150"", + ""end_ip"": ""192.168.98.1"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""end_ip must be higher than start_ip"" +}","def test_ilibrary_add_bigger_start_ip(run_api): + """""" + Creating an Island Library where start ip is bigger than end ip + """""" + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.150"", + ""end_ip"": ""192.168.98.1"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'end_ip must be higher than start_ip' +" +/ilibrary/rest/add/,creating an island library and adding it when segment with `Default-Public-Segment` name,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + 'name': 'Default-Public-Segment', + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'"" +}","def test_ilibrary_add_default_segmennt_name(run_api): + """""" + Creating an Island Library with segment name as 'Default Public Segment' + """""" + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + 'name': 'Default-Public-Segment', + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + 
""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['network_segments']['add'][0]['name'] == [""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'""] + + +" +/ilibrary/rest/add/,creating an island library and adding it when required fields are not provided,"{ +}","{ +""status"" : 400, +""message"" : ""Required fields should be provided"" +}","def test_ilibrary_add_without_params(run_api): + """""" + Creating an Island Library without params + """""" + params = {} + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['name'] == ['This field is required.'] + assert rjson['is_public'] == ['This field is required.'] +" +/ilibrary/rest/add/,creating an island library and adding it when requested with invalid token,"{ +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilibrary_add_with_invalid_token(invalid_exec_api): + """""" + Creating an Island Library with invalid token + """""" + params = {} + params, r = invalid_exec_api.ilibrary_add_new_island(params=params) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' +" +/ilibrary/rest/add/,creating an island library and adding it when provided with NIC id of machine which is not part of the current machine,"{ + ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + } + }","{ +""status"" : 400, +""message"" : ""The provided nic with id isn't part of this machine"" +}","def test_ilibrary_add_machine_other_nic_id(run_api): + """""" + Adding Machine with id of NIC which is not part of this machine but some other machine + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = r2.json()['hw']['networks'][0]['id'] + params = { + ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) +" +/ilibrary/rest/add/,creating an island library and adding it when provided start_ip and/or end_ip value is out of range as that of bridge_ip/Subnet range,"{ + ""name"": ""test_ilibrary_add_ips_out_of_range"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + 
""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""191.168.10.1"", + ""end_ip"": ""191.168.10.150"" + } + ] + } + } +","{ +""status"" : 400, +""message"" : ""start_ip and/or end_ip should lie between inclusive range "" +}","def test_ilibrary_add_ips_out_of_range(run_api): + """""" + Creating an Island Library with out of range start ip, end ip + """""" + params = { + ""name"": ""test_ilibrary_add_ips_out_of_range"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""191.168.10.1"", + ""end_ip"": ""191.168.10.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert ""start_ip and/or end_ip should lie between inclusive range of"" in rjson['error'] +" +/ilibrary/rest/add/,creating an island library and adding it when provided segment without name,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""This field must not be blank"" +}","def test_ilibrary_add_without_segmennt_name(run_api): + """""" + Creating an Island Library without segment name + """""" + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['network_segments']['add'][0]['name'] == ['This field is required.'] +" +/ilibrary/rest/add/,creating an island library and adding it when provided machine with no name,"{ + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + 'name': """", + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success , island library created +}"," +def test_ilibrary_add_machine_with_no_name(run_api, library_add_new_vm): + """""" + Creating an Island Library of machine with no name + """""" + params, rjson = library_add_new_vm + params = { + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + 'name': """", + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": True + } + params1, r1 = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r1, 201) + rjson1 = r1.json() + run_api.library_delete(rjson[""uuid""], params) + if 'error' not in rjson.keys(): + uuid = rjson1[""uuid""] + run_api.ilibrary_delete(uuid, params1) +" +/ilibrary/rest/add/,creating an island library and adding it when provided Island with no name,"{ + ""name"": """", + ""is_public"": True +}","{ +""status"" : 400, +""message"" : ""This field must not be blank"" +}","def test_ilibrary_add_empty_island_name(run_api): + """""" + Creating an Island Library with empty island name + """""" + params = { + ""name"": """", + ""is_public"": True + } + 
params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['name'] == ['This field may not be blank.'] +" +/ilibrary/rest/add/,creating an island library and adding it when only the required params are provided,"{ + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True +}","{ +""status"" : 201, +""response"" : success , island library created +}","def test_ilibrary_add_required_params(run_api): + """""" + Creating an Island Library with required params + """""" + params = { + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 201) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/add/,creating an island library and adding it when a machine which is already part of another Island is provided,"{ + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + ""uuid"": uuid + } + ] + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""Adding machine which already is a part of an Island isn't supported..."" +}","def test_ilibrary_add_machine_from_other_island(run_api, ilibrary_add_new_island): + """""" + Creating an Island Library by adding machine from another island + """""" + params, r = ilibrary_add_new_island + rjson = r.json() + machines = rjson['machines'] + uuid = machines[0]['uuid'] + params = { + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + ""uuid"": uuid + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Adding machine which already is a part of an Island isn't supported..."" +" +/ilibrary/rest/add/,creating an island library and adding it when invalid UUID of machine is provided,"{ + ""name"": ""test_ilibrary_add_invalid_uuid_machine"", + ""machines"": { + ""add"": [ + { + ""uuid"": ""invalid-uuid"" + } + ] + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""Valid UUID must be provided"" +}","def test_ilibrary_add_invalid_uuid_machine(run_api): + """""" + Creating an Island Library with invalid uuid + """""" + params = { + ""name"": ""test_ilibrary_add_invalid_uuid_machine"", + ""machines"": { + ""add"": [ + { + ""uuid"": ""invalid-uuid"" + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['machines']['add'][0]['uuid'] == ['Must be a valid UUID.'] +" +/ilibrary/rest/add/,creating an island library and adding it,,"{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_add(run_api, ilibrary_add_new_island): + """""" + Creating an Island Library + """""" + params, r = ilibrary_add_new_island + test_assert.status(r, 201) +" +/ilibrary/rest/bulk_delete/,successful deletion of island library,,"{ +""status"" : 204 +}","def test_ilibrary_bulk_delete(ilibrary_bulk_delete): + """""" + Deleting multiple Island Library + """""" + params, r = ilibrary_bulk_delete + test_assert.status(r, 204) +" +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to null","{ +""island_list"" :None +}","{ +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_null_island_list(run_api): + """""" + 
Deleting ilibrary with empty and null island_list + """""" + islands = { + ""island_list"": None + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson +" +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty string","{ +""island_list"" : """" +}","{ +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_empty_island_list(run_api): + """""" + Deleting ilibrary with empty and null island_list + """""" + islands = { + ""island_list"": """" + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson" +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty list","{ +""island_list"" :[] +}","{ +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_empty_list_island_list(run_api): + """""" + Deleting ilibrary with empty and null island_list + """""" + islands = { + ""island_list"": [] + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson" +/ilibrary/rest/bulk_delete/,deleting the island library using invalid data type of island_list,"{ +""island_list"": ""string"" +}","{ +""status"":400, +""message"" : ""Please provide the list of uuids not strings"" +}","def test_ilibrary_bulk_delete_invalid_data_type(run_api): + """""" + Deleting ilibrary with invalid data type island_list + """""" + islands = { + ""island_list"": ""string"" + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Please provide the list of uuids not strings"", ""|> Json %s"" % rjson +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library without Authorization,"{ + uuid = 'valid-ilibrary-uuid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_clone_without_authorization(anonymous_exec_api): + """""" + Creating a clone of an Island Library without authorization + """""" + uuid = 'valid-ilibrary-uuid' + params, r = anonymous_exec_api.ilibrary_clone_island(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Authentication credentials were not provided."" +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library with some name and empty description,"{ + ""name"": ""test_clone"", + ""description"": """" + }","{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone_with_name_empty_description(run_api, ilibrary_add_new_island): + """""" + Creating a clone of an Island Library with name and empty description + """""" + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + clone = { + ""name"": ""test_clone"", + ""description"": """" + } + params, r = run_api.ilibrary_clone_island(uuid, params=clone) + test_assert.status(r, 200) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when requested with invalid token,"{ + uuid = 'invalid-ilibrary-uuid' +}","{ +""status"" : 401, 
+""message"" : ""Invalid token"" +}","def test_ilibrary_clone_with_invalid_token(invalid_exec_api): + """""" + Creating a clone of an Island Library with invalid token + """""" + uuid = 'invalid-ilibrary-uuid' + params, r = invalid_exec_api.ilibrary_clone_island(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Invalid token."" +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when Island UUID does not exist,"{ + uuid = 'invalid-ilibrary-uuid' +}","{ + ""status"": 404, + ""response"": not found +}","def test_ilibrary_clone_invalid_uuid(run_api): + """""" + Creating a clone of an Island Library with invalid uuid + """""" + uuid = 'invalid-ilibrary-uuid' + params, r = run_api.ilibrary_clone_island(uuid) + test_assert.status(r, 404) +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library by non-admin user,"{ + ""name"": ""test_clone"", + ""description"": ""cloning private island without admin rights"" + }","{ + ""status"": 403 +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_clone_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Creating a clone of an private Island Library without admin rights whose owner is not current user + """""" + ilib_id = custom_ilib_admin_operations + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning private island without admin rights"" + } + params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) + test_assert.status(r, 403) +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library,,"{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone(run_api, ilibrary_clone_island): + """""" + Creating a clone of an Island Library + """""" + params, r = ilibrary_clone_island + result = r.json() + test_assert.status(params, result, ""ilibrary_clone"") + test_assert.status(r, 200) +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary without name and description,"{ +}","{ + ""status"": 400, + ""response"": field required +}","def test_ilibrary_clone_without_name_and_description(run_api, ilibrary_add_new_island): + """""" + Creating a clone of an Island Library without name and without description + """""" + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + params, r = run_api.ilibrary_clone_island(uuid, params={}) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['name'] == ['This field is required.'] +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary with name but no description,"{ + ""name"": ""test_clone"" +}","{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone_with_name_only(run_api, ilibrary_add_new_island): + """""" + Creating a clone of an Island Library with name only + """""" + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + clone = { + ""name"": ""test_clone"" + } + params, r = run_api.ilibrary_clone_island(uuid, params=clone) + test_assert.status(r, 200) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/clone/{UUID}/,clone a Public Island and check is_public flag on cloned island is False,"{ + ""name"": ""test_ilibrary_clone_public_island"", + ""is_public"": True + }","{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone_public_island(run_api): + """""" + Creating 
a clone of a public Island Library and checking that the is_public flag on the cloned island is False + """""" + params = { + ""name"": ""test_ilibrary_clone_public_island"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning a public island; the clone should become private"" + } + params, r = run_api.ilibrary_clone_island(uuid, params=clone) + rjson1 = r.json() + test_assert.status(r, 200) + assert rjson1['is_public'] is False + if 'error' not in rjson1.keys(): + uuid = rjson1[""uuid""] + run_api.ilibrary_delete(uuid, params) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/clone/{UUID}/,Clone a Private Island which you are not an owner of but with Admin Rights,"{ + ""name"": ""test_clone"", + ""description"": ""cloning private island by admin whose owner is not admin"" +}","{ + ""status"": 200, + ""response"": island library cloned +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_clone_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Creating a clone of a private Island Library by admin whose owner is not an admin user + """""" + ilib_id = custom_ilib_non_admin_operations + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning private island by admin whose owner is not admin"" + } + params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) + test_assert.status(r, 200) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/delete/{UUID}/,deleting island library without Authorization,"{ + uuid = 'valid-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilib_delete_without_authentication(anonymous_exec_api): + """""" + Delete Island without authorization + """""" + r = anonymous_exec_api.ilibrary_delete(""valid-island-uuid"", {}) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == 'Authentication credentials were not provided.' +" +/ilibrary/rest/delete/{UUID}/,deleting island library when requested with invalid token,"{ + uuid = 'invalid-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilib_delete_invalid_token(invalid_exec_api): + """""" + Delete Island with invalid token + """""" + r = invalid_exec_api.ilibrary_delete(""invalid-island-uuid"", {}) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == 'Invalid token.' +" +/ilibrary/rest/delete/{UUID}/,deleting island library when Island UUID does not exist,"{ + uuid = 'invalid-island-library-uuid' +}","{ + ""status"": 404, + ""message"": ""Not found"" +}","def test_ilib_delete_invalid_uuid(run_api): + """""" + Delete Island with invalid uuid + """""" + r = run_api.ilibrary_delete(""invalid-island-uuid"", {}) + test_assert.status(r, 404) + res = r.json() + assert res['detail'] == 'Not found.'
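+    # Assumption: run_api.ilibrary_delete returns a requests.Response-like
+    # object, so r.json() exposes DRF's standard {'detail': 'Not found.'}
+    # body that the assert above relies on.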
+" +/ilibrary/rest/delete/{UUID}/,deleting island library for existing valid data,,"{ + ""status"": 204, + ""response"": Island library deleted +}","def test_ilib_delete(ilibrary_delete): + """""" + Deleting the Ilibrary + """""" + r = ilibrary_delete + test_assert.status(r, 204) +" +/ilibrary/rest/delete/{UUID}/,deleting an Island which has next revisions,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 400, + ""response"": failure +}","def test_ilib_delete_with_next_revision(run_api, ilibrary_add_new_island): + """""" + Delete Island which has next revision + """""" + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + run_api.ideploy_delete(deploy_id) + r = run_api.ilibrary_delete(isl_uuid, {}) + test_assert.status(r, 400) + run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) + r = run_api.ilibrary_delete(isl_uuid, {}) +" +/ilibrary/rest/delete/{UUID}/,deleting an Island which has existing deployments,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 400, + ""response"": failure +}","def test_ilib_delete_deployed(run_api, ilibrary_add_new_island): + """""" + Delete Island which has existing deployments + """""" + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r = run_api.ilibrary_delete(isl_uuid, {}) + test_assert.status(r, 400) + run_api.ideploy_delete(deploy_id) +" +/ilibrary/rest/delete/{UUID}/,deleting a public Island by user with Admin rights but not owner of the library,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 204, + ""response"": Island library deleted +}","def test_public_ilib_delete_admin(skip_if_not_admin, run_api, non_admin_exec_api): + """""" + Deleting the public Ilibrary by Admin + """""" + # Admin check for deleting the public Ilibrary created by different user. + networks = template_networks() + params, r_lib = non_admin_exec_api.library_add_new_vm(networks=networks) + rjson_lib = r_lib.json() + + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island = template_add_ilibrary_one_machine(machine=machine) + island['is_public'] = True + params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=island) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 204) + if 'error' not in rjson_lib.keys(): + uuid = rjson_lib[""uuid""] + non_admin_exec_api.library_delete(uuid, params) +" +/ilibrary/rest/delete/{UUID}/,deleting a public Island by an non-admin user who does not own the library,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 403, + ""response"": unauthorized +}","def test_public_ilib_delete_non_admin(skip_if_not_non_admin, run_api, admin_exec_api, non_admin_exec_api): + """""" + Deleting the public Ilibrary by Non-Admin + """""" + # Non-Admin check for deleting the public Ilibrary created by different user. 
+ networks = template_networks() + params, r_lib = admin_exec_api.library_add_new_vm(networks=networks) + rjson_lib = r_lib.json() + + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island = template_add_ilibrary_one_machine(machine=machine) + island['is_public'] = True + params, r_isl = admin_exec_api.ilibrary_add_new_island(params=island) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 403) + if 'error' not in rjson_isl.keys(): + uuid = rjson_isl[""uuid""] + admin_exec_api.ilibrary_delete(uuid, params) + if 'error' not in rjson_lib.keys(): + uuid = rjson_lib[""uuid""] + admin_exec_api.library_delete(uuid, params) + +" +/ilibrary/rest/delete/{UUID}/,deleting a Private Island by an admin user where the admin does not own the island,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 204, + ""response"": Island library deleted +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Deleting the private Ilibrary by Admin + """""" + # Admin check for deleting the private Ilibrary created by different user. + ilib_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 204) +" +/ilibrary/rest/delete/{UUID}/,deleting a Island by manager,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 403 +}","endpoint = ""ilibrary_delete"" + +PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Delete the Ilibrary by Manager + """""" + # When the user is not part of the group that the manager manages + ilib_id = custom_ilib_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + ilib_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/ilibrary/rest/delete/{UUID}/,deleting a Island by an non-admin user who does not own the library,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 403, + ""response"": unauthorized +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Deleting the private Ilibrary by non-Admin + """""" + # Non-admin check for deleting the private Ilibrary created by different user. 
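+    # custom_ilib_admin_operations is an indirect pytest fixture (assumed): it
+    # creates an island as the admin user and yields its UUID, so the current
+    # non-admin user is never the owner of the island being deleted.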
+ ilib_id = custom_ilib_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 403) +" +/ilibrary/rest/details/{UUID}/,fetching details of public machines present in private island library,"{ + ""name"": ""Machine1"", + ""is_public"": False, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_private_island_with_public_machine(run_api): + """""" + To check machine type with public island + """""" + params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=True) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": False, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + result = r.json()[""machines""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in result: + if machine[""is_public""]: + assert False, ""The machine is still public in private island and the json is %s"" % r.json() + + +" +/ilibrary/rest/details/{UUID}/,fetching details of private island library from public island,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_edit_public_island_to_private_island(skip_if_not_admin, run_api): + """""" + To check machine type with private island + """""" + params1, r1 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_id = r.json()[""uuid""] + params, r = run_api.ilibrary_edit_island(uuid=island_id, params={""is_public"": False}) + res = r.json()[""machines""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in res: + if machine[""is_public""]: + assert False, ""The json is %s"" % r.json() +" +/ilibrary/rest/details/{UUID}/,fetching details of island library without Authorization,"{ + uid = ""valid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_details_without_authorization(anonymous_exec_api): + """""" + Details of Ilibrary without authorization + """""" + uid = ""valid"" + r = anonymous_exec_api.ilibrary_details(uuid=uid) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/ilibrary/rest/details/{UUID}/,fetching details of island library with no NIC and island type is private,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_Zero_NIC(run_api, library_add_three_vm): + """""" + Detail of island_type when all machines have assigned with No NIC's and island type is private + """""" + r1, r2, r3 = library_add_three_vm + machine1 = 
{ + ""uuid"": r1.json()[""uuid""], + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + } + + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""private"", ""The json is %s"" % r.json() +" +/ilibrary/rest/details/{UUID}/,fetching details of island library with invalid token,"{ + uid = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilibrary_details_with_invalid_token(invalid_exec_api): + """""" + Details of Ilibrary with invalid token + """""" + uid = ""invalid"" + r = invalid_exec_api.ilibrary_details(uuid=uid) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/ilibrary/rest/details/{UUID}/,fetching details of island library where island type is set to public,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, +}","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_public(run_api, library_add_three_vm): + """""" + Detail of island_type when all machines have NIC as Default Public Segment + """""" + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + + } + ], + } + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + } + + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""public"", ""The json is %s"" % r.json() +" +/ilibrary/rest/details/{UUID}/,fetching details of island library when private machine is added to public island,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_public_island_with_private_machine(run_api): + """""" + To check machine type when Private machine is added to public island + """""" + params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=False) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""is_public"": False, + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + result = r.json()[""machines""] + island_id = 
r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in result: + if not machine[""is_public""]: + assert False, ""The machine is still private in public island and the json is %s"" % r.json() + +" +/ilibrary/rest/details/{UUID}/,fetching details of island library when island has one machine nic as Default and other machine nic as empty and island type is partial,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_partial(run_api, library_add_three_vm): + """""" + Detail of island_type when island has one machine nic as Default and other machine nic as empty and island type is partial + """""" + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""partial"", ""The json is %s"" % r.json() +" +/ilibrary/rest/details/{UUID}/,fetching details of island library when all machines are assigned with multiple NICs and island type is public,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg3"", + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_public_with_three_segement(run_api, library_add_three_vm): + """""" + Detail of island_type when island all machines are assigned with multiple NICs and island type is public + """""" + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg2"" + } + ], + } + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg2"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg3"" + } + ], + } + + } + params = { + ""name"": 
""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg3"", + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } + + } + + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""public"", ""The json is %s"" % r.json() + +" +/ilibrary/rest/details/{UUID}/,fetching details of island library provided with valid UUID,"{ +ilib_id +}","{ + ""status"": 200, + ""response"" : success +}","def test_ilibrary_details_with_valid_uuid(run_api, ilibrary_add_new_island): + """""" + Details of Ilibrary with valid uuid + """""" + params, r = ilibrary_add_new_island + lib_uuid = r.json()[""uuid""] + result = run_api.ilibrary_list_island(params={""uuid"": lib_uuid}) + x = result.json() + test_assert.status(result, 200) + for island_lib in x['results']: + assert island_lib['uuid'] == lib_uuid, ""Json is %s"" % x +" +/ilibrary/rest/details/{UUID}/,fetching details of island library provided with invalid UUID,"{ + uid = ""invalid"" +}","{ + ""status"": 404, + ""message"": ""Not Found"" +}","def test_ilibrary_details_with_invalid_uuid(run_api): + """""" + Details of Ilibrary with invalid uuid + """""" + uid = ""invalid"" + r = run_api.ilibrary_details(uuid=uid) + test_assert.status(r, 404) +" +/ilibrary/rest/details/{UUID}/,fetching details of island library by non-admin user,"{ +lib_id +}","{ + ""status"": 403, + ""message"": ""You do not have permission to perform this action."" +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Details of Ilibrary by non-Admin + """""" + # Non-admin check for fetching details of the Ilibrary created by different user. 
+ ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, 403) + assert r.json()[""error""] == ""You do not have permission to perform this action."" +" +/ilibrary/rest/details/{UUID}/,fetching details of island library by manager,"{ +ilib_id +}",,"endpoint = ""ilibrary_details"" +networks = template_networks() + +PARAMETERS = [{""dest_obj"": OBJ_ISL}] +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Details of Ilibrary by Manager + """""" + # When the user is not part of the group that the manager manages + ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/ilibrary/rest/details/{UUID}/,fetching details of island library by admin user,"{ +ilib_id +}","{ + ""status"": 200, + ""response"": success +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Details of Ilibrary by Admin + """""" + # Admin check for fetching details of the Ilibrary created by different user. + ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, 200) +" +/ilibrary/rest/details/{UUID}/,fetching details of island library,,"{ + ""status"": 200, + ""response"" : success +}","def test_ilibrary_details(ilibrary_details): + """""" + Getting Ilibrary details + """""" + r = ilibrary_details + test_assert.status(r, 200) +" +/ilibrary/rest/edit/{UUID}/,updating two segments giving same name,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + }, + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""The segment name(s) is/are already taken for other island"" +}","def test_ilibrary_edit_update_segment_with_existing_name(run_api): + """""" + Editing an Island Library segment with existing name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The segment name(s) {'network2'} is/are already taken for the island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + 
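+            # Cleanup runs only when island creation succeeded (no 'error' key).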
run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,updating two segments with the same name,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + } + + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'network3' + }, + { + 'uuid': seg2_id, + 'name': 'network3' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""Segment name should be unique for an island"" +}","def test_ilibrary_edit_update_two_segment_same_name(run_api): + """""" + Editing an Island Library: updating two segments with the same name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + } + + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg1_id = segment['uuid'] + if segment['name'] == 'network2': + seg2_id = segment['uuid'] + + params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'network3' + }, + { + 'uuid': seg2_id, + 'name': 'network3' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/edit/{UUID}/,updating the same segment twice in a single API call,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + }, + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""Segment name should be unique for an island"" +}","def test_ilibrary_edit_update_same_segment_twice(run_api): + """""" + Editing an Island Library: updating the same segment twice in a single API call + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + }, + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/edit/{UUID}/,updating the same machine more than once,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + }, + { + ""uuid"": machine_uuid, + 'description': 'desc' + } + ] + }, +
""is_public"": False + }","{ + ""status"": 400, + ""message"": ""Updating the same machine more than once may result in an Unexpected value change. Hence, Aborting..."" +}","def test_ilibrary_edit_update_same_machine_twice(run_api): + """""" + Editing an Island Library by updating same machine twice + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = rjson3['machines'][0]['uuid'] + + params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + }, + { + ""uuid"": machine_uuid, + 'description': 'desc' + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Updating the same machine more than once may result in an Unexpected value change. Hence, Aborting..."" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) + +" +/ilibrary/rest/edit/{UUID}/,updating NIC which is a part of some other machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + +params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""The provided nic with id isn't part of this machine"" +}","def test_ilibrary_edit_update_nics_of_other_island(run_api): + """""" + Editing an Island Library by updating nic which is part of another island library + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = r2.json()['hw']['networks'][0]['id'] + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + + params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params) + run_api.library_delete(r1.json()[""uuid""]) + 
run_api.library_delete(r2.json()[""uuid""]) +" +/ilibrary/rest/edit/{UUID}/,updating machine with no segment name,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'update': [ + { + 'uuid': network_segment['uuid'], + 'name': '' + } + ] + + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" :""This field cannot be blank"" +}","def test_ilibrary_edit_segments_with_no_name(run_api): + """""" + Editing an Island Library segment with no name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + network_segment = rjson['network_segments'][0] + params = { + 'network_segments': { + 'update': [ + { + 'uuid': network_segment['uuid'], + 'name': '' + } + ] + + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 400) + res = r.json() + assert res['network_segments']['update'][0]['name'] == ['This field may not be blank.'] + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,updating machine with no machine name,"{ + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": False, + ""machines"": { + ""update"": [ + { + 'name': """", + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_machine_name(run_api, library_add_new_vm): + """""" + Editing an Island Library with no machine name + """""" + params, rjson = library_add_new_vm + params = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": False + } + params1, r1 = run_api.ilibrary_add_new_island(params=params) + rjson1 = r1.json() + uuid = rjson1['uuid'] + machine_uuid = rjson1['machines'][0]['uuid'] + params = { + ""machines"": { + ""update"": [ + { + 'name': """", + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson1.keys(): + run_api.ilibrary_delete(uuid, params1) +" +/ilibrary/rest/edit/{UUID}/,updating and deleting same machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params2 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + } + ], + 'delete': [ + { + ""uuid"": machine_uuid, + } + ], + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""A machine cannot have both Deletion and Updation in same API call"" +}","def test_ilibrary_edit_update_and_delete_same_machine(run_api): + """""" + Editing an Island Library by updating and deleting same machine + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = 
rjson3['machines'][0]['uuid'] + + params2 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + } + ], + 'delete': [ + { + ""uuid"": machine_uuid, + } + ], + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params2) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""A machine cannot have both Deletion and Updation in same API call"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) + + +" +/ilibrary/rest/edit/{UUID}/,updating and deleting a segment name which already exists ,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""The Segment shouldn't have both Updation and Deletion in same API call"" +}","def test_ilibrary_edit_update_and_delete_segment_same_name(run_api): + """""" + Editing an Island Library update a Segment name which already exists but is being deleted + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The Segment shouldn't have both Updation and Deletion in same API call"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,editing Island which you are not an owner of but with Admin rights,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_admin(skip_if_not_admin, run_api, non_admin_exec_api): + """""" + Editing an priate Island Library by admin whose owner is not admin + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=params) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + params = { + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(ilib_id, params=params) + test_assert.status(r, 201) + if 'error' not in rjson_isl.keys(): + r = run_api.ilibrary_delete(ilib_id, {}) +" +/ilibrary/rest/edit/{UUID}/,editing an island-library with no description,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_description(run_api, library_add_new_vm): + """""" + Editing an Island Library with no description + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = 
r.json() + uuid = rjson[""uuid""] + params = { + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,editing an Island with no Segments,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + 'network_segments': {}, + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_segments(run_api): + """""" + Editing an Island Library with no segments + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + params = { + 'network_segments': {}, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,editing an island library which has next revision,"{ + 'name': 'test', + ""is_public"": False + }","{ +""status"" : 403, +""response"" : forbidden +}","def test_ilibrary_edit_has_next_revision(run_api, ilibrary_add_new_island): + """""" + Editing an Island Library which has next revision + """""" + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + run_api.ideploy_delete(deploy_id) + params = { + 'name': 'test', + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(isl_uuid, params=params) + test_assert.status(r, 403) + run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) + r = run_api.ilibrary_delete(isl_uuid, {}) +" +/ilibrary/rest/edit/{UUID}/,editing an Island library when requested with invalid token,"{ +uuid = 'invalid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True +} +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilibrary_edit_with_invalid_token(invalid_exec_api): + """""" + Editing an Island Library with invalid token + """""" + uuid = 'invalid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = invalid_exec_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Invalid token."" +" +/ilibrary/rest/edit/{UUID}/,editing an Island library when invalid UUID is provided,"{ +uuid = 'invalid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True +} +}","{ + ""status"": 404, +}","def test_ilibrary_edit_invalid_uuid(run_api): + """""" + Editing an Island Library with invalid uuid + """""" + uuid = 'invalid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 404) +" +/ilibrary/rest/edit/{UUID}/,"editing an Island library successfully. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{""name"": ""add"", ""is_public"": False}","{ + ""status"": 403 +}","endpoint = ""ilibrary_edit"" +PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_edit(run_api, ilibrary_edit_island, custom_ilib_admin_operations, custom_ilib_non_admin_operations): + """""" + Editing an Island Library + """""" + params, r = ilibrary_edit_island + test_assert.status(r, 201) + + # Adding non_admin check of Editing an Island Library created by different user + if run_api.user_type == USER_TYPE[""non_admin""]: + lib_id = custom_ilib_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, 403) + + # Adding a Manager check of Editing an Island Library created by a user of his/her group + # and also when it's not the case + if run_api.user_type == USER_TYPE[""manager""]: + # When the user is not part of the group that the manager manages + lib_id = custom_ilib_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_ilib_non_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/ilibrary/rest/edit/{UUID}/,editing an island library segment,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'seg1' + }, + { + 'name': 'seg2' + }, + { + 'name': 'seg3' + }, + ] + }, + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'seg3' + }, + { + 'uuid': seg2_id, + 'name': 'seg1' + }, + { + 'uuid': seg3_id, + 'name': 'seg2' + } + ] + }, + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_segments(run_api): + """""" + Editing an Island Library segments + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'seg1' + }, + { + 'name': 'seg2' + }, + { + 'name': 'seg3' + }, + ] + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'seg1': + seg1_id = segment['uuid'] + elif segment['name'] == 'seg2': + seg2_id = segment['uuid'] + elif segment['name'] == 'seg3': + seg3_id = segment['uuid'] + + params = { + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'seg3' + }, + { + 'uuid': seg2_id, + 'name': 'seg1' + }, + { + 'uuid': seg3_id, + 'name': 'seg2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + res = r.json() + segment_list = res['network_segments'] + + for segment in segment_list: + if segment['uuid'] == seg1_id: + assert segment['name'] == 'seg3' + elif segment['uuid'] == seg2_id: + assert segment['name'] == 'seg1' + elif segment['uuid'] == seg3_id: + assert segment['name'] == 'seg2' + + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" 
+/ilibrary/rest/edit/{UUID}/,editing an Island Library by deleting segment which is part of another island library,"params1 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params2 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params3 = { + 'network_segments': { + 'delete': [ + { + 'uuid': network_segment2['uuid'] + } + ] + + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""The Segment uuid doesn't exist in the island"" +}","def test_ilibrary_edit_delete_segments_of_other_island(run_api): + """""" + Editing an Island Library by deleting segment which is part of another island library + """""" + params1 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params1, r1 = run_api.ilibrary_add_new_island(params=params1) + rjson1 = r1.json() + uuid1 = rjson1[""uuid""] + + params2 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params2, r2 = run_api.ilibrary_add_new_island(params=params2) + rjson2 = r2.json() + uuid2 = rjson2[""uuid""] + + network_segment2 = rjson2['network_segments'][0] + params3 = { + 'network_segments': { + 'delete': [ + { + 'uuid': network_segment2['uuid'] + } + ] + + }, + ""is_public"": True + } + params3, r3 = run_api.ilibrary_edit_island(uuid1, params=params3) + test_assert.status(r3, 400) + res = r3.json() + assert res['error'] == ""The Segment uuid ["" + network_segment2['uuid'] + ""] doesn't exist in the island"" + if 'error' not in rjson1.keys(): + run_api.ilibrary_delete(uuid1, params1) + if 'error' not in rjson2.keys(): + run_api.ilibrary_delete(uuid2, params2) +" +/ilibrary/rest/edit/{UUID}/,editing an ilibrary without token,"{ +uuid = 'valid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True +} +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_edit_without_authorization(anonymous_exec_api): + """""" + Editing an Island Library without authorization + """""" + uuid = 'valid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = anonymous_exec_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Authentication credentials were not provided."" +" +/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no name,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + ""description"": ""testing"", + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_name(run_api): + """""" + Editing an Island Library with no name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + params = { + ""description"": ""testing"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no machines operations,"{ +uuid = 'valid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + ""machines"": {}, + ""is_public"": True + +} +}","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_machine_operation(run_api): + """""" + Editing an Island Library with no machine operation + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = 
run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + params = { + 'machines': {}, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,deleting NIC which is a part of some other machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + ""nics"": { + ""delete"": [ + { + 'id': second_nic + } + ] + } + } + ] + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""The NIC with the given id isn't part of the provided machine"" +}","def test_ilibrary_edit_delete_nic_of_other_machine(run_api): + """""" + Editing an Island Library by Deleting NIC which is not part of this machine but some other machine + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = r2.json()['hw']['networks'][0]['id'] + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = rjson3['machines'][0]['uuid'] + params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + ""nics"": { + ""delete"": [ + { + 'id': second_nic + } + ] + } + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The NIC with the given id '"" + str(second_nic) + ""' isn't part of the provided machine"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params) + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) +" +/ilibrary/rest/edit/{UUID}/,deleting machine UUID which is part of another Island,"params2 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson1[""uuid""] + } + ] + }, + ""is_public"": False + } +params3 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params4 = { + ""machines"": { + ""delete"": [ + { + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + }","{ + ""status"": 404, + ""response"": not found +}","def test_ilibrary_edit_delete_machine_of_another_island(run_api, library_add_new_vm): + """""" + Editing an Island Library by deleting uuid of machine which is part of another island + """""" + + params1, rjson1 = library_add_new_vm + params2 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson1[""uuid""] + } + ] + }, + ""is_public"": False + } + params2, r2 = run_api.ilibrary_add_new_island(params=params2) + rjson2 = r2.json() + uuid2 = rjson2['uuid'] + machine_uuid = rjson2['machines'][0]['uuid'] + + params3 = { + ""name"": ""test_ilibrary"", + 
""is_public"": True + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid3 = rjson3[""uuid""] + + params4 = { + ""machines"": { + ""delete"": [ + { + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid3, params=params4) + test_assert.status(r, 404) + if 'error' not in rjson2.keys(): + run_api.ilibrary_delete(uuid2, params2) + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid3, params3) + +" +/ilibrary/rest/edit/{UUID}/,deleting an island library segment which is connected to NIC,"params = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": False + } +params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + 'segment': 'network1' + } + ] + } + } + ] + }, + ""is_public"": False + }","{ + ""status"": 201, + ""response"" : success +}","def test_ilibrary_edit_delete_segment_connected_to_nic(run_api): + """""" + Editing an Island Library by Deleteing a Segment connected to NIC and check NICs final connection + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": False + } + + params, r3 = run_api.ilibrary_add_new_island(params=params) + rjson3 = r3.json() + uuid = rjson3['uuid'] + segment_list = rjson3['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_uuid = segment['uuid'] + + machine_uuid = rjson3['machines'][0]['uuid'] + params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + 'segment': 'network1' + } + ] + } + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params3) + nics = r.json()['machines'][0]['hw']['networks'] + for nic in nics: + if nic['segment'] == 'network1': + nic_id = nic['id'] + + params = { + 'network_segments': { + 'delete': [ + { + 'uuid': seg_uuid + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + rjson = r.json() + + nics = rjson['machines'][0]['hw']['networks'] + assert isinstance(nic_id, int) + for nic in nics: + if nic['id'] == nic_id: + assert nic['segment'] is None + + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) +" +/ilibrary/rest/edit/{UUID}/,adding and updating the segment giving the same name that already exists,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + } +params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network2' + } + ], + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""Segment name should be unique for an island"" +}","def test_ilibrary_edit_add_and_update_segment_same_name(run_api): + """""" + 
Editing an Island Library add and update segment with same name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network2' + } + ], + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/edit/{UUID}/,adding a Segment name which already exists but was deleted,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + + } +params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + }","{ + ""status"": 201, + ""response"" : success +}","def test_ilibrary_edit_add_and_delete_segment_same_name(run_api): + """""" + Editing an Island Library Add a Segment name which already exists but is being deleted + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 201) + # res = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/edit/{UUID}/,adding segment with same name that already exists,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + } +params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""The segment name(s) is/are already taken for the island"" +}","def test_ilibrary_edit_add_segment_with_existing_name(run_api): + """""" + Editing an Island Library add segment with existing name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The segment name(s) {'network1'} is/are already taken for the island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/list/,fetching list of details of island library by an admin user,,"{ +""status"" : 200, +""response"" : success +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Details of Ilibrary by Admin + """""" + # Admin check for fetching details of the Ilibrary created by different user. + ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) + test_assert.status(r, 200) + assert r.json()[""count""] == 0 +" +/ilibrary/rest/list/,fetching list of details of ilibrary without token and authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_list_without_token(anonymous_exec_api): + """""" + Fetch ilibrary list without token + """""" + r = anonymous_exec_api.ilibrary_list_island() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/ilibrary/rest/list/,fetching list of details of ilibrary without providing any specific params,,"{ +""status"" : 200, +""response"" : success , list provided +}","def test_ilibrary_list_without_params(run_api, ilibrary_add_new_island): + """""" + Lists all the Island Libraries + """""" + params, r = ilibrary_add_new_island + r = run_api.ilibrary_list_island() + test_assert.status(r, 200) +" +/ilibrary/rest/list/,fetching list of details of ilibrary with name that does not exist,"{ +name = ""invalid"" +}","{ +""status"" : 200, +""response"" : success , empty list +}","def test_ilibrary_list_with_invalid_name(run_api): + """""" + Fetch ilibrary list using invalid name + """""" + r = run_api.ilibrary_list_island(params={""name"": rand_string() + ""$$""}) + result = r.json() + test_assert.status(r, 200) + assert result[""count""] == 0 +" +/ilibrary/rest/list/,"fetching list of details of ilibrary with added filters. Check the user type before performing the operation. 
+",,"{ +""status"" : 200, +""response"" : success , filtered list +}","def test_ilibrary_list_filter(run_api): + """""" + Getting the lists of Island Library by adding filters + """""" + params, res = [], [] + ilibrary_count = 10 + arch = run_api.arch_type + prefix_name = f""filter_island_1_{rand_string()}_"" + isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ilibrary_count)] + networks = template_networks() + if arch == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + params3, r3 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], + ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], + ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], + ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + } + for i in range(ilibrary_count): + param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, + machine3=machine3, name=isl_lib_name[i]) + params.append(param) + res.append(r) + random_int = randint(0, 9) + name_filter = {""name"": res[random_int].json().get(""name""), ""page_size"": ilibrary_count} + uuid_filter = {""uuid"": res[random_int].json().get(""uuid""), ""page_size"": ilibrary_count} + owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" + else ""vivekt"" if run_api.user_type == ""non-admin"" + else ""manager"", ""search"": prefix_name, ""page_size"": ilibrary_count} + island_type_filter = {""island_type"": choice([""private"", ""public""]), ""search"": prefix_name, ""page_size"": ilibrary_count} + filters = [name_filter, uuid_filter, owner_filter, island_type_filter] + exp_res = { + 0: [i.get(""name"") for i in params if i.get(""name"") == name_filter.get(""name"")], + 1: [i.json().get(""uuid"") for i in res if i.json().get(""uuid"") == uuid_filter.get(""uuid"")], + 2: [i.json().get(""owner"") for i in res], + 3: [i.json().get(""island_type"") for i in res if i.json().get(""island_type"") == island_type_filter.get(""island_type"")] + } + for filter in range(len(filters)): + r = run_api.ilibrary_list_island(filters[filter]) + # check for valid response data with the filter parameters + if len(r.json().get(""results"")) != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + + test_assert.status(r, 200) + run_api.library_delete(r1.json()[""uuid""], params1) + run_api.library_delete(r2.json()[""uuid""], params2) + run_api.library_delete(r3.json()[""uuid""], params3) + for i in range(ilibrary_count): + rjson = res[i].json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params[i]) +" +/ilibrary/rest/list/,fetching list of details of ilibrary when some UUID is 
provided that does not exist ,"{ +uuid = ""invalid +}","{ +""status"" : 200, +""response"" : success , empty list +}","def test_ilibrary_list_with_invalid_uuid(run_api): + """""" + Fetch ilibrary list using invalid uuid + """""" + uid = ""invalid"" + r = run_api.ilibrary_list_island(params={""uuid"": uid}) + test_assert.status(r, 200) + +" +/ilibrary/rest/list/,fetching list of details of ilibrary using the name parameter,,"{ +""status"" : 200, +""response"" : success, list with specific name provided +}","def test_ilibrary_list_with_name(run_api, ilibrary_add_new_island): + """""" + Fetch ilibrary list valid name + """""" + params, r = ilibrary_add_new_island + lib_name = r.json()[""name""] + result = run_api.ilibrary_list_island(params={""name"": lib_name}) + test_assert.status(result, 200) +" +/ilibrary/rest/list/,fetching list of details of ilibrary for some existing UUID,,"{ +""status"" : 200, +""response"" : success , list provided +}","def test_ilibrary_list_with_uuid(run_api, ilibrary_add_new_island): + """""" + Fetch ilibrary list using uuid + """""" + params, r = ilibrary_add_new_island + ilib_uuid = r.json()[""uuid""] + result = run_api.ilibrary_list_island(params={""uuid"": ilib_uuid}) + test_assert.status(result, 200) +" +/ilibrary/rest/list/,fetching list of details of ilibrary by adding filters on created and update DateTime filter,,"{ +""status"" : 200, +""response"" : success , filtered list provided +}"," +def test_ilibrary_filter_timefilter(run_api: apiops, ilibrary_add_new_island): + """""" + Filter on created and update DateTime Filter + """""" + template, r = ilibrary_add_new_island + rjson = r.json() + ilib_id = rjson[""uuid""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if ilibrary image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, 
created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, 
created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 +" +/ilibrary/rest/list/,fetching list of details of ilibrary by a non-admin user,,"{ +""status"" : 200, +""response"" : success +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_list_non_admin(skip_if_admin, custom_ilib_admin_operations, run_api): + """""" + Details of Ilibrary by non-Admin + """""" + # Non-admin check for fetching details of the Ilibrary created by different user. 
+ ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) + test_assert.status(r, 200) + assert r.json()[""count""] == 0 +" +/ilibrary/rest/list/,fetching list of details of ilibrary,,"{ +""status"" : 200, +""response"" : success +}","def test_ilibrary_list(ilibrary_list): + """""" + Getting the lists of Island Library + """""" + r = ilibrary_list + # test_assert.status(r, template, ""library_list"", ""name"") + test_assert.status(r, 200) +" +/ilibrary/rest/revisions/,getting the list of revisions in island library ,,"{ + ""status"": 200, + ""response"": revision list provided +}","def test_ilibrary_revisions(ilibrary_revisions): + """""" + Getting the lists of revisions in Island Library + """""" + r = ilibrary_revisions + test_assert.status(r, 200) +" +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary with filters,"FILTERS = [ + { + 'page': 1 + }, + { + 'page_size': 1 + }, + { + 'page': 1, + 'page_size': 1 + } +]","{ + ""status"": 200, + ""response"": revision list provided +}","FILTERS = [ + { + 'page': 1 + }, + { + 'page_size': 1 + }, + { + 'page': 1, + 'page_size': 1 + } +] + + +@pytest.mark.parametrize('filter', FILTERS) +def test_ilibrary_revisions_page_num(run_api, ideploy_deploy, filter): + """""" + Getting the lists of revisions in Island Library adding filters + """""" + params, r = ideploy_deploy + x = r.json() + deploy_id = x[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + snapshot_id = rtask_details['result']['snapshotted_island_uuid'] + r = run_api.ilibrary_revisions(snapshot_id, filter) + test_assert.status(r, 200) + run_api.ilibrary_delete(snapshot_id, {}) +" +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when requested using invalid token,"{ + uuid = 'invalid-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilibrary_revisions_invalid_token(invalid_exec_api): + """""" + Getting the lists of revisions in Island Library with invalid token + """""" + uuid = 'invalid-island-library-uuid' + r = invalid_exec_api.ilibrary_revisions(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == 'Invalid token.' +" +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary without Authorization,"{ + uuid = 'valid-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_revisions_without_authorization(anonymous_exec_api): + """""" + Getting the lists of revisions in Island Library without authorization + """""" + uuid = 'valid-island-library-uuid' + r = anonymous_exec_api.ilibrary_revisions(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == 'Authentication credentials were not provided.' 
+" +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when Island UUID does not exist,"{ + uuid = 'invalid-island-library-uuid' +}","{ + ""status"": 404, + ""response"": not found +}","def test_ilibrary_revisions_invalid_uuid(run_api): + """""" + Getting the lists of revisions in Island Library with invalid uuid + """""" + uuid = 'invalid-island-library-uuid' + r = run_api.ilibrary_revisions(uuid) + test_assert.status(r, 404) +" +/library/rest/add,adding vm to library without Authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_add_vm_to_library_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + + params, response = anonymous_exec_api.library_add_new_vm(noraise=True) + test_assert.status(response, 401) + rjson = response.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/add,adding vm to library when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_add_vm_to_library_invaild_token(invalid_exec_api): + """""" + invalid Token + """""" + + params, response = invalid_exec_api.library_add_new_vm(noraise=True) + test_assert.status(response, 401) + rjson = response.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/add,adding vm to library when provided with valid data,,"{ + ""status"": 201, + ""response"": Machine details +} +","def test_add_vm_to_library_with_vaild_data(run_api,): + """""" + When provided with valid data + """""" + params, response = run_api.library_add_new_vm() + test_assert.status(response, 201) + UUID = response.json()[""UUID""] + run_api.library_delete(UUID, {}) +" +/library/rest/add,adding vm to library when multiple bootable cds and same boot order is passed,"cdrom = [{ ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }, { ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }]","{ +""status"" : 400, +""response"" : Bad request +}","def test_add_vm_to_library_multiple_bootable_cds_with_same_boot_order(run_api): + """""" + If multiple bootable cds with same boot order is passed + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": """", + ""is_boot"": True, + ""boot_order"": 1 + }, + { + ""type"": ""sata"", + ""iso"": """", + ""is_boot"": True, + ""boot_order"": 1 + }] + + params, response = run_api.library_add_new_vm(cdrom=cdrom, noraise=True) + test_assert.status(response, 400) +" +/library/rest/add,"adding vm to library when machine name contains ""#""","{ 'name': newtxt, 'noraise': True }","{ + ""status"": 401, + ""message"": ""Name cannot contain '/' or '#"" +} +","def test_add_vm_to_library_with_name_contains_hash(run_api): + """""" + if machine name contains ""#"" + """""" + + txt = rand_string() + random_index = random.randint(0, len(txt)) + + newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] + kwargs = { + 'name': newtxt, + 'noraise': True + } + params, response = run_api.library_add_new_vm(**kwargs) + test_assert.status(response, 400) + rjson = response.json() + assert rjson[""error""] == ""Name cannot contain '/' or '#"", ""The error message is {}"".format(rjson[""error""]) +" +/library/rest/add,adding vm to library when disks of IDE type are passed with is_uefi set to True,"disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": 
""qcow2"", ""is_boot"": False}] +","{ +""status"" : 400, +""response"" : Bad request +}"," +def test_add_vm_to_library_ide_type_passed_with_uefi_true(run_api): + """""" + if ide type passed with uefi true + """""" + disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": ""qcow2"", ""is_boot"": False}] + params, response = run_api.library_add_new_vm(disks=disks, noraise=True, is_uefi=True) + test_assert.status(response, 400) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library without Authorization,"{ +lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_lib_add_disk_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_add_disk(lib_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library when requested with invalid token,"{ +lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_lib_add_disk_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_add_disk(lib_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library When provided correct UUID and correct data,,"{ +""status"" : 201, +""response"" : Disks should be added to lib +}"," +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_add_disk_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + adding disk to a library by Admin + """""" + # Admin check for adding disk to a library created by different user. 
+ lib_id = custom_lib_non_admin_operations + r = run_api.library_add_disk(lib_id) + test_assert.status(r, 201) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add IDE type disks for UEFI enabled library,"params = { + 'type': 'ide', + 'port': 'hdc' + }","{ +""status"" : 400 +}","def test_lib_add_disk_with_uefi_enabled(run_api): + """""" + adding an IDE type disk to a UEFI enabled library + """""" + lib_params, r = run_api.library_add_new_vm(noraise=True, is_uefi=True) + lib_UUID = r.json()[""UUID""] + params = { + 'type': 'ide', + 'port': 'hdc' + } + r = run_api.library_add_disk(lib_UUID, params) + test_assert.status(r, 400) + run_api.library_delete(lib_UUID, lib_params) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add a disk with same boot order as existing disk,"params = { + 'boot_order': 1 + }","{ +""status"" : 400, +""response"" : Bad Request +}","def test_lib_add_disk_with_same_boot_order(run_api, library_add_new_vm): + """""" + adding a disk with the same boot order as an existing disk + """""" + lib_params, r = library_add_new_vm + lib_UUID = r[""UUID""] + params = { + 'boot_order': 1 + } + r = run_api.library_add_disk(lib_UUID, params) + test_assert.status(r, 400) +" +/library/rest/boottypes/,getting boot type list when requested,,"{ +""status"" : 200, +""response"" : Boot type list +}","def test_library_boottypes(run_api): + """""" + Getting the list of Boot type + """""" + r = run_api.library_boottypes() + result = r.json() + test_assert.status(result, LIBRARY_BOOT_TYPE, ""library_boottypes"") + test_assert.status(r, 200) +" +/library/rest/boottypes/,getting boot type list when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_boottypes_with_invalid_token(invalid_exec_api): + """""" + Getting the list of Boot type when invalid token provided + """""" + r = invalid_exec_api.library_boottypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk without Authorization,"machine = { ""machine_list"": [] }","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_bulk_delete_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + machine = { + ""machine_list"": [] + } + res = anonymous_exec_api.library_bulkdelete(machine) + + test_assert.status(res, 401) + rjson = res.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when requested with invalid token,"machine = { ""machine_list"": [] }","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_bulk_delete_with_invalid_token(invalid_exec_api): + """""" + Invalid token + """""" + machine = { + ""machine_list"": [] + } + res = invalid_exec_api.library_bulkdelete(machine) + + test_assert.status(res, 401) + rjson = res.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when an invalid machine UUID is passed,"machine = { ""machine_list"": ['invalid'] }","{ +""status"" : 400, +""message"" : ""Machine matching query does not exist."" +}","def test_library_bulk_delete_invalid_id(run_api): + """""" + provide invalid machine id + """""" + machine = { + ""machine_list"": ['invalid'] + } + res = run_api.library_bulkdelete(machine) + + test_assert.status(res, 400) + rjson = res.json() + assert rjson['failure'][0]['error'] == ""Machine matching query does not exist."", ""|> The Error is {}"".format(rjson) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when passed a list of UUIDs of all deletable machines,,"{ +""status"" : 204, +""response"" : ""Machine deleted successfully"" +}","def test_library_bulk_delete(library_bulkdelete): + """""" + Deleting multiple VMs + """""" + params, r = library_bulkdelete + test_assert.status(r, 204) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when empty list of UUIDs is passed,"machine = { ""machine_list"": [] }","{ +""status"" : 400 +}","def test_library_bulk_delete_with_empty_list(run_api): + """""" + When empty list is passed + """""" + machine = { + ""machine_list"": [] + } + res = run_api.library_bulkdelete(machine) + + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""machine_list cannot be null or empty"", ""|> Json %s"" % rjson +" +/library/rest/clone/{{UUID}}/,cloning library without Authorization,"{ +UUID = 'doesnotexits' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_clone_without_authorization(anonymous_exec_api): + """""" + clone without authorization + """""" + + UUID = 'doesnotexits' + clone_params, clone_r = anonymous_exec_api.library_clone_vm(UUID) + test_assert.status(clone_r, 401) + rjson = clone_r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/clone/{{UUID}}/,cloning library when requested with invalid token,"{ +UUID = 'doesnotexits' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_clone_invalid_token(invalid_exec_api): + """""" + clone request with invalid token + """""" + + UUID = 'doesnotexits' + clone_params, clone_r = invalid_exec_api.library_clone_vm(UUID) + test_assert.status(clone_r, 401) + rjson = clone_r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/clone/{{UUID}}/,cloning library when provided with machine UUID that does not exist,"{ +UUID = 'doesnotexits' +}","{ +""status"" : 404, +""message"" : ""Clone: Machine not found"" +}","def test_library_clone_with_wrong_machine_UUID(library_add_new_vm, run_api): + """""" + when provided machine UUID does not exist + """""" + UUID = 'doesnotexits' + clone_params, clone_r = run_api.library_clone_vm(UUID) + test_assert.status(clone_r, 404) + rjson = clone_r.json() + assert rjson['error'] == ""Clone: Machine not found"", ""|> The error message is {}"".format(rjson['error']) +" +/library/rest/clone/{{UUID}}/,cloning library when duplicate mac provided,"networks = [ + { + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + ""mac"": generate_mac_address() + } + ]","{ +""status"" : 400, +""message"" : ""Mac is already present"" +}","@pytest.mark.skip(""Return 400 but create a clone of vm"") +def test_library_clone_duplicate_mac(run_api): + """""" + library clone with duplicate mac provided + """""" + networks = [ + { + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + ""mac"": generate_mac_address() + } + ] + params, r = run_api.library_add_new_vm(networks=networks) + rjson = r.json() + mac = rjson['hw']['networks'][-1]['mac'] + name = rjson['name'] + cl_name = rand_string() + clone = { + ""mac_list"": [mac,], + 
""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, + } + UUID = rjson['UUID'] + clone_params, clone_r = run_api.library_clone_vm(UUID, clone) + test_assert.status(clone_r, 400) + clone_rjson = clone_r.json() + assert clone_rjson['error'] == ""Mac is already present in %s"" % name, ""|> The Error is {}"".format(clone_rjson) + run_api.library_delete(UUID) +" +/library/rest/clone/{{UUID}}/,cloning library when clone name is empty,"clone = { + ""mac_list"": [], + ""name"": """", + ""description"": ""This is test description for %s"", + } +","{ +""status"" : 400, +""message"" : ""Please provide clone name"" +}","def test_library_clone_with_empty_name(library_add_new_vm, run_api): + """""" + Empty name + """""" + clone = { + ""mac_list"": [], + ""name"": """", + ""description"": ""This is test description for %s"", + } + params, r = library_add_new_vm + UUID = r['UUID'] + clone_params, clone_r = run_api.library_clone_vm(UUID, clone) + test_assert.status(clone_r, 400) + rjson = clone_r.json() + assert rjson['error'] == ""Please provide clone name"", ""|> The Error is {}"".format(rjson) +" +/library/rest/clone/{{UUID}}/,cloning library when clone name contains #,"clone = { + ""mac_list"": [], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, + }","{ +""status"" : 400, +""message"" : ""Name cannot contain '/' or '#'"" +}","def test_library_clone_name_contains_hash(library_add_new_vm, run_api): + """""" + When clone name contains # + """""" + txt = rand_string() + random_index = random.randint(0, len(txt)) + + newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] + cl_name = f""{newtxt}_cl"" + clone = { + ""mac_list"": [], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, + } + params, r = library_add_new_vm + UUID = r.get('UUID', 'doesnotexits') + clone_params, clone_r = run_api.library_clone_vm(UUID, clone) + test_assert.status(clone_r, 400) + rjson = clone_r.json() + assert rjson['error'] == ""Name cannot contain '/' or '#"", ""|> The error is {}"".format(rjson['error']) +" +/library/rest/clone/{{UUID}}/,"cloning a library when provided with valid data. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 200, +""response"" : Data of newly cloned machine +}","endpoint = ""lib_clone"" +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_library_clone(library_clone_vm, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Cloning VM + """""" + template, r = library_clone_vm + result = r.json() + test_assert.status(template, result, ""library_clone"") + test_assert.status(r, 200) + + # Adding non_admin check to Clone a Library Image created by different user + if run_api.user_type == USER_TYPE[""non_admin""]: + lib_id = custom_lib_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, 403) + + if run_api.user_type == USER_TYPE[""manager""]: + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) + param[""UUID""] = lib_id + clone_UUID = r.json()['UUID'] + run_api.library_delete(clone_UUID, param)" +/library/rest/ctypes/,getting the console type when requested without Authorization,,"{ +""status"" : 200, +""response"" : console type details displayed +}","def test_library_ctypes_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_console_types() + result = r.json() + test_assert.status(result, LIBRARY_CONSOLE_TYPE, ""library_ctypes"") + test_assert.status(r, 200) +" +/library/rest/delete/{UUID}/,deleting a library without Authorization,"{ +lib_id = 'wrong' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_lib_delete_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + lib_id = 'wrong' + ret = anonymous_exec_api.library_delete(lib_id) + test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/delete/{UUID}/,deleting a library when UUID exists and it has next revision/ deployment exists,,"{ +""status"" : 400, +""response"" : Bad Request +}","def test_lib_delete_with_deployment_exists(run_api, library_add_new_vm): + """""" + When UUID exists and it has next revision/ deployment exists + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + deploy = run_api.deploy_image(lib_id) + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 400) + deployjson = deploy.json() + run_api.deploy_image_delete(deployjson['UUID'], {}) +" +/library/rest/delete/{UUID}/,deleting a library when requested with invalid token,"{ +lib_id = 'wrong' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_lib_delete_with_invalid_token(invalid_exec_api): + """""" + without authorization + """""" + lib_id = 'wrong' + ret = invalid_exec_api.library_delete(lib_id) + test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/delete/{UUID}/,deleting a 
library when machine UUID does not exist,"{ + lib_id = ""invalid"" + +}","{ +""status"" : 404, +""message"" : ""Machine DoesNotExist"" +}","def test_lib_delete_with_invalid_UUID(run_api): + """""" + When machine UUID does not exist + """""" + lib_id = ""invalid"" + ret = run_api.library_delete(lib_id) + test_assert.status(ret, 404) +" +/library/rest/delete/{UUID}/,deleting a library by manager when provided with valid UUID,,,"endpoint = ""lib_delete"" + +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Delete the Library by Manager + """""" + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/library/rest/delete/{UUID}/,deleting a library by admin when provided with valid UUID,,"{ +""status"" : 204 +}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deleting the Library by Admin + """""" + # Admin check for deleting the Library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 204) + +" +/library/rest/details/{UUID}/ ,getting library details when requested with invalid token,"{ + UUID = 'invalid' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_lib_details_with_invalid_token(invalid_exec_api): + """""" + invalid token + """""" + UUID = 'invalid' + r = invalid_exec_api.library_details(UUID, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/details/{UUID}/ ,getting library details when requested without Authorization,"{ + UUID = 'invalid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}"," +def test_lib_details_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + UUID = 'invalid' + r = anonymous_exec_api.library_details(UUID, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/details/{UUID}/ ,getting library details when provided with invalid UUID,"{ + UUID = 'invalid' +}","{ +""status"" : 404, +""message"" : ""Machine Details: Machine not found"" +}","def test_lib_details_with_invalid_UUID(run_api): + """""" + when provided invalid UUID + """""" + UUID = 'invalid' + r = run_api.library_details(UUID, {}) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == ""Machine Details: Machine not found"", ""|> The error message is %s"" % (rjson['error']) +" +/library/rest/details/{UUID}/ ,getting library details requested by an admin user,,"{ +""status"" : 200, +""response"" : 
Library details displayed +}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Details of the Library by Admin + """""" + # Admin check for fetching details of the Library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_details(lib_id, {}) + test_assert.status(r, 200) +" +/library/rest/details/{UUID}/ ,getting library details requested by a non-admin user,,"{ +""status"" : 403 +}"," +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Details of the Library by non-Admin + """""" + # Non-admin check for fetching details of the Library created by different user. + lib_id = custom_lib_admin_operations + r = run_api.library_details(lib_id, {}) + test_assert.status(r, 403) + +" +/library/rest/details/{UUID}/ ,getting library details ,,"{ +""status"" : 200, +""response"" : Library details displayed +}","def test_lib_details(library_details): + """""" + Getting the Library details + """""" + x, r = library_details + test_assert.status(r, 200) +" +/library/rest/dformattypes/,getting the details of DiskFormat Type without Authorization,,"{ +""status"" : 200, +""message"" : DiskFormat type list +}","def test_library_dformattypes_without_authorization(anonymous_exec_api): + """""" + Getting the list of disk format types + """""" + r = anonymous_exec_api.library_disk_format_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + test_assert.status(r, 200) +" +/library/rest/dformattypes/,getting the details of DiskFormat Type when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_dformattypes_with_invalid_token(invalid_exec_api): + """""" + Getting the list of disk format types + """""" + r = invalid_exec_api.library_disk_format_type() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/dformattypes/,getting the details of DiskFormat Type,,"{ +""status"" : 200, +""message"" : DiskFormat type list +}","def test_library_dformattypes(run_api): + """""" + Getting the list of disk format types + """""" + r = run_api.library_disk_format_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + test_assert.status(r, 200) +" +/library/rest/dtypes/,getting DiskBus Type list when requested without authorization,,"{ +""status"" : 200, +""message"" : DiskBus type list +}","def test_library_dtypes_without_authorization(anonymous_exec_api): + """""" + Getting the list of disk type without authorization + """""" + r = anonymous_exec_api.library_disk_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") + test_assert.status(r, 200) +" +/library/rest/dtypes/,getting DiskBus Type list when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_dtypes_with_invalid_token(invalid_exec_api): + """""" + Getting the list of disk type + """""" + r = invalid_exec_api.library_disk_type() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == 
""Invalid token."", ""|> The Error is {}"".format(result['detail']) +" +/library/rest/dtypes/,getting DiskBus Type list When Requested,,"{ +""status"" : 200, +""message"" : DiskBus type list +}","def test_library_dtypes(run_api): + """""" + Getting the list of disk type + """""" + r = run_api.library_disk_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") + test_assert.status(r, 200) +" +/library/rest/edit/{UUID}/,updation of serialport in a library,"serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""isa-serial"", + }] + + +updated_serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""pci-serial"", + }] + +","{ +""status"" : 201 +}","def test_library_edit_serialport(run_api): + """""" + update serialport + """""" + serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""isa-serial"", + }] + p, r = run_api.library_add_new_vm(serialports=serialports) + lib_id = r.json()['UUID'] + updated_serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""pci-serial"", + }] + params = {'hw': {'serialports': updated_serialports}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 201) + rjson = res.json() + for serialport in rjson['hw']['serialports']: + assert serialport['source_type'] == 'pty', ""|> Json %s"" % rjson + assert serialport['target_type'] == 'pci-serial', ""|> Json %s"" % rjson + run_api.library_delete(lib_id) +" +/library/rest/edit/{UUID}/,updation of network in a library with invalid mac,"networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] + +update_network = [{ + ""mac"": ""invalid"" + }] + + +","{ +""status"" : 400, +""message"" : ""MAC address is not correct"" +}","def test_library_edit_network_invalid_mac(run_api): + """""" + update network with invalid mac + """""" + networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_netork = [{ + ""mac"": ""invalid"" + }] + params = {'hw': {'networks': update_netork}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""MAC address `invalid` is not correct"", ""|> Json %s"" % rjson + run_api.library_delete(lib_id, {}) +" +/library/rest/edit/{UUID}/,updation of library without Authorization,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_edit_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/edit/{UUID}/,updation of library with network type host and segment Default Public Segment,"networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] + + +update_netork = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + }] +","{ +""status"" : 400, +""message"" :""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."" +}","def 
test_library_edit_with_network_type_host_segment_default_public(run_api): + """""" + Library update with network type host and segment Default Public Segment + """""" + networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_network = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + }] + params = {'hw': {'networks': update_network}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."", ""|> The error is %s"" % rjson + run_api.library_delete(lib_id, {}) + +" +/library/rest/edit/{UUID}/,updation of library with network type changed to bridge and segment set to HostOnly Segment,"networks = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + } + ] + +update_network = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + }] +","{ +""status"" : 400, +""message"" : ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."" +}","def test_library_edit_with_network_type_bridge_segment_HostOnly(run_api): + """""" + Library update with network type bridge and segment HostOnly Segment + """""" + networks = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + } + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_network = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + }] + params = {'hw': {'networks': update_network}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."", ""|> The error is %s"" % rjson + run_api.library_delete(lib_id, {}) + + +" +/library/rest/edit/{UUID}/,updation of library when UUID exists and it has next revision,,"{ +""status"" : 403, +""message"" : ""Next Revision Exists , Edit Permission Not Allowed"" +}","def test_library_edit_with_revision_exists(library_add_new_vm, run_api): + """""" + When UUID exists and it has next revision + """""" + params, r = library_add_new_vm + lib_id = r['UUID'] + res = run_api.deploy_image(lib_id=lib_id) + deploy_id = res.json()['UUID'] + revision = run_api.deploy_snapshot(deploy_id=deploy_id) + edit_r = run_api.library_edit(lib_id, {""hw"": {}}) + edit_rjson = edit_r.json() + test_assert.status(edit_r, 403) + assert edit_rjson['result'] == ""Next_revision Exists: Edit permission not allowed"", ""|> The error message is %s"" % (edit_rjson['result']) + run_api.deploy_image_delete(deploy_id, {}) + revision_id = revision.json()['snapshotted_machine_UUID'] + run_api.library_delete(revision_id) +" +/library/rest/edit/{UUID}/,updation of library when requested with invalid token,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_edit_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, 401) + 
rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/edit/{UUID}/,updation of disks in a library using the size param,"disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'], + ""size"": 5 + } + ] + } +","{ +""status"" : 400, +""message"" : ""Modifying the disk size during Library Edit is not permitted"" +}"," +def test_library_edit_disk_size_param(library_add_new_vm, run_api): + """""" + Update disk with 'size' param + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'], + ""size"": 5 + } + ] + } + params = {""hw"": {""disks"": disks}} + r = run_api.library_edit(lib_id, params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Modifying the disk size during Library Edit is not permitted"", ""|> json %s"" % rjson +" +/library/rest/edit/{UUID}/,updation of disks in a library,"disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] + }","{ +""status"" : 201 +}","def test_library_edit_update_disk(library_add_new_vm, run_api): + """""" + Update disk + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] + } + params = {""hw"": {""disks"": disks}} + r = run_api.library_edit(lib_id, params) + test_assert.status(r, 201) + rjson = r.json() + assert rjson['hw']['disks'][0]['port'] == 'sdz', ""|> json %s"" % rjson +" +/library/rest/edit/{UUID}/,updation of disk when invalid UUID provided,,"{ +""status"" : 404, +""message"" : ""Disk with UUID does not exist"" +}","def test_library_edit_invalid_disk_UUID(library_add_new_vm, run_api): + """""" + update disk with invalid UUID + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + disk_UUID = str(uuid.uuid4()) + # disk_UUID = 'invalid' it gives {'hw': {'disks': {'update': [{'UUID': ['Must be a valid UUID.']}]}}} + disks = {""update"": [ + { + ""UUID"": disk_UUID, + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] + } + params = {""hw"": {""disks"": disks}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 404) + rjson = res.json() + assert rjson['error'] == f""Disk with UUID {disk_UUID} does not exist"", ""|> json %s"" % rjson +" +/library/rest/edit/{UUID}/,updation of cdrom in a library,"cdrom = [ + { + ""type"": ""ide"", + ""is_boot"": False + } + ] + + +updated_cdrom = [ + { + ""type"": ""sata"", + ""is_boot"": False + } + ] +","{ +""status"" : 201 +}","def test_library_edit_cdrom(run_api): + """""" + update cdrom with valid data + """""" + cdrom = [ + { + ""type"": ""ide"", + ""is_boot"": False + } + ] + p, r = run_api.library_add_new_vm(cdrom=cdrom) + lib_id = r.json()['UUID'] + updated_cdrom = [ + { + ""type"": ""sata"", + ""is_boot"": False + } + ] + params = {'hw': {'cdrom': updated_cdrom}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 201) + rjson = res.json() + for cdrom in rjson['hw']['cdrom']: + assert cdrom['type'] == 'sata', ""|> Json %s"" % rjson + assert cdrom['is_boot'] is False, ""|> Json %s"" % rjson + run_api.library_delete(lib_id) +" +/library/rest/edit/{UUID}/,updation of arch param of library,,"{ +""status"" : 400, +""message"" : 
""Architecture of a Machine cannot be modified."" +}","def test_library_edit_arch(library_add_new_vm, run_api): + """""" + Edit the architecture of vm + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + params = {'hw': {'arch': 'aarch64'}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Architecture of a Machine cannot be modified."", ""|> The error is %s"" % rjson +" +/library/rest/edit/{UUID}/,"edition of details when UUID exists and it doesn't have next revision. Check the user type before performing the operation. +",,"{ +""status"" : 201, +""response"" : Details updated +}"," +endpoint = ""lib_edit"" +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_library_edit(run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Editing the details of VM + """""" + if run_api.arch_type == ""aarch64"": + params, r = run_api.library_add_new_vm(arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params, r = run_api.library_add_new_vm() + rjson = r.json() + lib_id = r.json()[""UUID""] + if run_api.arch_type == ""aarch64"": + dist_add_param = {""type"": ""virtio"", ""port"": ""vdz""} + else: + dist_add_param = {} + r = run_api.library_edit(lib_id, params={""hw"": {""disks"": template_library_edit_disk_add(**dist_add_param)}}) + test_assert.status(params, rjson, ""library_edit"") + test_assert.status(r, 201) + + if 'error' not in rjson.keys(): + UUID = rjson[""UUID""] + run_api.library_delete(UUID, params) + + # Adding non_admin check of Editing a Library Image created by different user + if run_api.user_type == USER_TYPE[""non_admin""]: + lib_id = custom_lib_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, 403) + + # Adding a Manager check of Editing a deployment info created by a user of his/her group + # and also when it's not the case + if run_api.user_type == USER_TYPE[""manager""]: + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested without Authorization,,"{ +""status"" : 200, +""response"" : list of hypervisor type +}","def test_library_hvmtypes_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_hvmtypes() + result = r.json() + test_assert.status(result, LIBRARY_HVM_TYPE, ""library_hvmtypes"") + test_assert.status(r, 200) +" +​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_hvmtypes_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_hvmtypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" 
+/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id without Authorization,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_layerdetail_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + + +" +/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id for which there is no existing machine,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 404, +""response"" : Machine with UUID does not exist +}","def test_library_layerdetails_with_invalid_uid(run_api): + """""" + when machine with UUID does not exist. + """""" + lib_id = ""doesnotexits"" + r = run_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 404) +" +/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id but with invalid token,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_layerdetail_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/layerdetail/{UUID}/,fetching the layer details of an existing machine,,"{ +""status"" : 200, +""response"" : details of layer +}","def test_library_layerdetail(library_layerdetail): + """""" + Getting the detail of layer + """""" + template, r = library_layerdetail + result = r.json() + test_assert.status(result, template, ""library_layerdetail"") + test_assert.status(r, 200) +" +/library/rest/layerlist/,requesting to get the list of layer from library ,,"{ +""status"" : 200, +""response"" : list of layer +}","def test_library_layer_list(run_api): + """""" + Getting the list of layer + """""" + r = run_api.library_layer_list() + test_assert.status(r, 200) +" +/library/rest/layerlist/, requesting without Authorization to get the list of layer from library ,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_layer_list_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + + r = anonymous_exec_api.library_layer_list() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/layerlist/, requesting with invalid token to get the list of layer from the library,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_layer_list_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_layer_list() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/list/,requesting the list of VM present in the library without Authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_list_without_authorization(anonymous_exec_api): + 
"""""" + without authorization + """""" + r = anonymous_exec_api.library_list(params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/list/,requesting the list of VM present in the library with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_list_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_list(params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/list/,getting the list of VM present in the library,,"{ +""status"" : 200, +""response"" : ""list of VM +}","def test_library_list(library_list): + """""" + Getting the list of VM present in the library + """""" + template, r = library_list + test_assert.status(r, template, ""library_list"", ""name"") + test_assert.status(r, 200) +" +/library/rest/list/,getting list of vm present in library by filtering it based on created and update DateTime,,"{ +""status"" : 400 +}","def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): + """""" + Filter on created and update DateTime Filter + """""" + template, rjson = library_add_new_vm + lib_id = rjson[""UUID""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if library image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ 
When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month 
at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 
'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + +" +/library/rest/list/,filtering the list of library details based on created and updated DateTime Filter,,,"def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): + """""" + Filter on created and update DateTime Filter + """""" + template, rjson = library_add_new_vm + lib_id = rjson[""UUID""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if library image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ 
When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. 
invalid is not one of the available choices.', ""The json is %s"" % response.json() + + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": 
""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + +" +/library/rest/list/,fetching the list of virtual machine with added Date Time filter using the fetch_all_rev parameter,,,"def test_library_timefilter_fetch_all_rev(run_api): + """""" + Filter on DateTime filter using 'fetch_all_rev' + """""" + vmname = f""fetch_rev.{datetime.now()}"" + params, r = run_api.library_add_new_vm(name=vmname) + lib_id = r.json()[""uuid""] + # Str_ctime for the lower revision machine + str_ctime_lower_revision = r.json()['ctime'].replace('T', ' ').replace('Z', '') + x = run_api.deploy_image(lib_id=lib_id) + machine_id = x.json()[""uuid""] + res = run_api.deploy_snapshot(deploy_id=machine_id) + snapshotted_machine_uuid = res.json()[""snapshotted_machine_uuid""] + # str_ctime for the upper revison machine after snapshotting + str_ctime_upper_revision = run_api.library_details(uuid=snapshotted_machine_uuid, params={}).json()['ctime'].replace('T', ' ').replace('Z', '') + # ........When the tag 'fetch_all_rev' is set to true + response = run_api.library_list({""name"": vmname, ""created_start_date"": str_ctime_lower_revision, ""created_end_date"": str_ctime_upper_revision, + ""fetch_all_revs"": ""true""}).json() + assert response[""count""] == 2, ""The json is %s"" % response + run_api.deploy_image_delete(deploy_id=machine_id) + run_api.library_delete(uuid=snapshotted_machine_uuid, params={""full_tree"": True})" +/library/rest/list/,fetching the list of virtual machine with added Date Time filter,,,"def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): + """""" + Filter on created and update DateTime Filter + """""" + template, rjson = library_add_new_vm + lib_id = rjson[""uuid""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if library image was created a day before and test get triggered on new day + """""" 
+ if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'today' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'yesterday' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 and the test gets triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'week' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the week's last day at 23:59:59.9999999 and the test gets triggered in the new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'month' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the month's last day at 23:59:59.9999999 and the test gets triggered in the new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'year' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the year's last day at 23:59:59.9999999 and the test gets triggered in the new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + + # .........When the created_start_date and created_end_date have whitespaces in them + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in the detail but having TimeZone of +05:30 + # ........ When the datetime is the same as in the detail but with tzone IST, i.e., timedelta of 19800 seconds equivalent to +05:30 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, with start_date increased and decreased by 1 microsecond, on TimeZone of +05:30 + # ........ When the datetime is 1 microsecond later than in the detail, with tzone IST (+05:30), so the filter matches nothing + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is 1 microsecond earlier than in the detail, with tzone IST (+05:30), so the filter matches + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, with end_date increased and decreased by 1 microsecond, on TimeZone of +05:30 + # ........ When the datetime is 1 microsecond later than in the detail + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is 1 microsecond earlier than in the detail + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'.
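+ # NOTE: timedelta(seconds=19800) equals 5 h 30 min, the IST (+05:30) offset. The blocks below repeat the UTC 'created_date_range' checks with an explicit timezone suffix, so a failure here would indicate that the server is not normalizing timezone-aware datetimes to UTC before filtering.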
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'today' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'yesterday' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when the machine was created yesterday at 23:59:59.9999999 IST and the test gets triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'week' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the week's last day at 23:59:59.9999999 IST and the test gets triggered in the new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'month' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when the machine was created on the month's last day at 23:59:59.9999999 IST and the test gets triggered in the new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as 'year' + try: + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"",
""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + +" +/library/rest/list/,fetching the list of virtual machine ,,,"@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) +def test_library_list_filter(run_api: apiops, lib_filter_kwargs): + """""" + Getting the list of VM present in the library by adding filters + """""" + templates, res = lib_filter_kwargs + # check for valid response data with the filter parameters + filter_on_input_result(run_api, library_count, templates, res, prefix_name, run_api.library_list) + + +" +/library/rest/list/,fetching the details list from library using the disks_uuid parameter,,"{ +""reponse"" :success +}","def test_library_list_fetch_with_disk_uuid(library_add_new_vm, run_api): + """""" + Fetch list with 'disks_uuid' param + """""" + template, rjson = library_add_new_vm + params = {""disk_uuid"": rjson['hw']['disks'][0]['uuid']} + assert run_api.library_list(params).json()['count'] == 1 +" +/library/rest/list/,fetching the details list from library using the mac parameter,"{ +mac = ""5A:54:00:12:23:34"" +}","{ +""reponse"" :success +}","def test_library_list_fetch_with_mac(library_add_new_vm, run_api): + """""" + Fetch list with 'mac' param + """""" + template, rjson = library_add_new_vm + mac = ""5A:54:00:12:23:34"" + params = {""mac"": mac} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + all_macs = [netwok['mac'] for netwok in machines['hw']['networks']] + assert mac in all_macs, ""Json |> %s"" % machines +" +/library/rest/list/,fetching the details list from library using the arch parameter,,"{ +""reponse"" :success +}","def test_library_fetch_with_arch(library_add_new_vm, run_api): + """""" + Fetch list with 'arch' + """""" + params, rjson = library_add_new_vm + arch = rjson['hw']['arch'] + params = {""arch"": arch} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['hw']['arch'] == arch, ""Json |> %s"" % machines +" +/library/rest/list/,fetching the details list from library with tags,,"{ +""reponse"" :success +}","def test_library_fetch_with_tags(library_add_new_vm, 
run_api): + """""" + Fetch list with tags + """""" + # using _sessionid + params, rjson = library_add_new_vm + # using tag + vm_uuid = rjson['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + tag_value = tag_params['tag_list'][-1]['value'] + params = {""_sessionid"": rjson['tags'][0][""value""], ""tags"": tag_value} + assert run_api.library_list(params).json()['count'] == 1 + +"
+/library/rest/list/,fetching the filtered details list from library using search parameter,,"{ +""response"" : success +}","def test_library_list_with_search_contains_uuid(library_add_new_vm, run_api): + """""" + fetch list with search filter + """""" + p, r = library_add_new_vm + uuid = r['uuid'] + params = {'search': f""uuid={uuid}""} + assert run_api.library_list(params).json()['count'] == 1 +"
+/library/rest/list/,fetching the details list from library using invalid value of scope parameter,"{ +'scope': ""invalid"", +'uuid' +}",,"def test_library_list_with_invalid_scope_name(run_api, library_add_new_vm): + """""" + fetch list with invalid scope name + """""" + p, r = library_add_new_vm + lib_id = r['uuid'] + params = {'scope': ""invalid"", 'uuid': r['uuid']} + rjson = run_api.library_list(params).json() # 'all' is the default scope and gets applied on an invalid scope + for machines in rjson['results']: + assert machines['uuid'] == lib_id, ""Json |> %s"" % machines + +"
+/library/rest/list/,"fetching the details list from library using ""scope"" parameter","{ +'scope': ""public"" +}","{ +""response"" : success +}","def test_library_list_with_public_scope(run_api, library_add_new_vm): + """""" + fetch list with public scope name + """""" + p, r = library_add_new_vm + params = {'scope': ""public""} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['is_public'] is True, ""Json |> %s"" % machines +"
+/library/rest/list/,"fetching the details list from library using ""kvm_type"" parameter","{ +""hvm_type"" +}","{ +""response"" : success +}","def test_library_fetch_with_kvm_type(library_add_new_vm, run_api): + """""" + Fetch list with 'kvm_type' + """""" + params, rjson = library_add_new_vm + kvm = rjson['hw']['hvm_type'] + params = {""hvm_type"": kvm} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['hw']['hvm_type'] == kvm, ""Json |> %s"" % machines +"
+/library/rest/list/,fetching the details list from library with ISO,"cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ]","{ +""response"" : success +}","def test_library_fetch_with_iso(run_api): + """""" + Fetch list with 'iso' + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] + params, r = run_api.library_add_new_vm(cdrom=cdrom) + rjson = r.json() + params = {""iso"": rjson['hw']['cdrom'][-1]['iso']} + assert run_api.library_list(params).json()['count'] == 1 + run_api.library_delete(rjson['uuid']) +"
+/library/rest/nmodeltypes/,requesting the types of Network Model,,"{ +""status"" : 200, +""response"" : Types of Network Model +}","def test_library_nmodeltypes(run_api): + """""" + Getting the types of Network Model + """""" + r = run_api.library_nmodeltypes() + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") + test_assert.status(r, 200) +"
+/library/rest/nmodeltypes/,requesting the types of NetworkModel using an invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_nmodeltypes_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_nmodeltypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/nmodeltypes/,requesting the types of NetworkModel without Authorization,,"{ +""status"" : 200, +""response"" : Types of Network Model +}","def test_library_nmodeltypes_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_nmodeltypes() + test_assert.status(r, 200) + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") +"
+/library/rest/ntypes/,requesting the network type list,,200: Network Type List,"def test_library_ntypes(run_api): + """""" + Getting the list of Network type + """""" + r = run_api.library_ntypes() + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_TYPE, ""library_ntypes"") + test_assert.status(r, 200) +"
+/library/rest/ntypes/,fetching the list of network types of library when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_ntypes_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_ntypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/revisions/,requesting the revision list of library without Authorization,"{ +machine_UUID : 'doesnotexits' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_revisions_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_revisions('doesnotexits') + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/revisions/,requesting the revision list of library when machine with the provided UUID does not exist,"{ +machine_UUID : 'doesnotexits' +}","{ +""status"" : 404, +""message"" : ""Machine with given UUID does not exist"" +}","def test_library_revisions_with_invaild_UUID(run_api): + """""" + library revision when the machine does not exist + """""" + r = run_api.library_revisions('doesnotexits') + test_assert.status(r, 404) + rjson = r.json() + assert rjson['detail'] == ""Machine with given UUID does not exist"", ""|> The error message is %s"" % rjson +"
+/library/rest/revisions/,requesting the revision list of library when invalid token provided,"{ +machine_UUID : 'doesnotexits' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_revisions_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_revisions('doesnotexits') + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/revisions/,requesting the revision list of library,,"{ +""status"" : 200, +""response"" : Revision list of library +}","def test_library_revisions(library_revisions): + """""" + revision list of library + """""" + params, r = library_revisions + test_assert.status(r, 200) +"
+/library/rest/segment_list/,fetching of segment list from library,,"{ +""status""
:200, +""response"" : success +}","def test_library_segmentlist(library_segmentlist): + """""" + fetch segment list + """""" + params, r = library_segmentlist + test_assert.status(r, 200) +"
+/library/rest/segment_list/,fetch segment list for library for bridge type of NIC,"{ +'nic_type': 'bridge' +}",,"def test_library_segment_with_nic_type(library_add_new_vm, run_api): + """""" + Fetch library segment with nic type + """""" + p, res = library_add_new_vm + params = {'nic_type': 'bridge'} + r1 = run_api.library_segmentlist(params).json() + for segment in r1['results']: + assert segment['network_type'] == 'public' + params = {'nic_type': 'host'} + r2 = run_api.library_segmentlist(params).json() + for segment in r2['results']: + assert segment['network_type'] == 'hostOnly' + +"
+/library/rest/segment_list/,"fetch segment list for library by setting the ""network_type"" parameter","{ +""network_type"" :hostOnly +}",,"def test_library_segmentlist_with_network_type(library_add_new_vm, run_api): + """""" + fetch segmentlist with network type + """""" + p, r = library_add_new_vm + params = {'network_type': 'hostOnly'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + assert segment['network_type'] == 'hostOnly' +"
+/library/rest/segment_list/,"fetch segment list for library by setting search parameter to ""host""","{ +search : ""host"" +}",,"def test_library_segmentlist_with_search_param(library_add_new_vm, run_api): + """""" + fetch segmentlist with search params + """""" + p, r = library_add_new_vm + params = {'search': 'host'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + assert segment['network_type'] == 'hostOnly' +"
+/library/rest/upload_disk/{UUID}/,uploading disk when the disk size does not match,,"{ +""status"" : 400, +""message"" : ""Disk size mismatch."" +}","def test_library_upload_disk_mismatch_disk_size(library_add_new_vm, run_api): + """""" + Mismatch disk size + """""" + p, r = library_add_new_vm + lib_id = r['uuid'] + disk_UUID = r['hw']['disks'][0]['uuid'] + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 400) + rjson = r.json() + assert re.match(r'Disk size mismatch. Uploaded disk size: (\d+), old disk size: (\d+)', rjson['error']), ""json %s"" % rjson +"
+/library/rest/upload_disk/{UUID}/,uploading disk when machine UUID provided is invalid,"{ +lib_id = ""invalid"", +disk_UUID = ""invalid"" +} +","{ +""status"" : 404, +""message"" : ""Upload Disk: Machine not found"" +}","def test_library_upload_disk_invalid_machine_UUID(run_api): + """""" + Invalid machine UUID + """""" + lib_id = ""invalid"" + disk_UUID = ""invalid"" + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Upload Disk: Machine not found', ""json %s"" % rjson +"
+/library/rest/upload_disk/{UUID}/,uploading disk when disk_UUID provided is invalid,,"{ +""status"" : 404, +""message"" : ""Upload Disk: Disk not found"" +}","def test_library_upload_disk_invalid_disk_UUID(library_add_new_vm, run_api): + """""" + Invalid disk UUID + """""" + p, r = library_add_new_vm + lib_id = r['uuid'] + disk_UUID = ""invalid"" + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Upload Disk: Disk not found', ""json %s"" % rjson +"
+/library/rest/viewmachinelist/,"getting the list of machines where ""scope"" param is set to public","params = { + ""scope"": 'public' + }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_public_scope(run_api): + """""" + provide ""scope"" as public + """""" + params = { + ""scope"": 'public' + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested without Authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_viewmachinelist_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_viewmachinelist() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested with params - disk_size_min and disk_size_max,"params = { + 'disk_size_min': 0, + 'disk_size_max': 10000 + }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_min_and_max_disk(run_api): + """""" + provide disk_size_min and disk_size_max params + """""" + params = { + 'disk_size_min': 0, + 'disk_size_max': 10000 + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested with params - ram_min and ram_max,"params = { 'ram_min': 0, 'ram_max': 10000 }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_min_and_max_ram(run_api): + """""" + provide ram_min and ram_max params + """""" + params = { + 'ram_min': 0, + 'ram_max': 10000 + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested with params - page_size and page_no,"params = { + 'page_size': 1, + 'page_no': 1 + }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_page_size_and_page_no(run_api): + """""" + provide page_size and page_no + """""" + params = { + 'page_size': 1, + 'page_no': 1 + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_viewmachinelist_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_viewmachinelist() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested using the search parameter.,params = { 'search': 'machine' },"{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_search_parameter(run_api): + """""" + provide search parameter + """""" + params = { + 'search': 'machine' + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +"
+/library/rest/viewmachinelist/,getting the list of machines when requested,,"{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist(run_api): + """""" + getting the list of machines + """""" + r = run_api.library_viewmachinelist() + test_assert.status(r, 200) +"
+/profile/rest/get/,"fetching list of profiles. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200, +""response"" : list of profiles +}","def test_profile_list(run_api, profile_list): + """""" + Fetch list of all profiles + """""" + r = profile_list + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) +"
+/profile/rest/get/,fetching list of profiles without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_profile_list_without_authorization(anonymous_exec_api): + """""" + Fetch list of all profiles without authorization + """""" + r = anonymous_exec_api.profile_list() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +"
+/profile/rest/get/,fetching list of profiles using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_profile_list_with_invalid_token(invalid_exec_api): + """""" + Fetch list of all profiles with invalid token + """""" + r = invalid_exec_api.profile_list() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +"
+/profile/rest/self/,fetching details of self profile without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}"," +def test_profile_self_without_authorization(anonymous_exec_api): + """""" + Fetching details of self profile without authorization + """""" + r = anonymous_exec_api.profile_self() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +"
+/profile/rest/self/,fetching details of self profile with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_profile_self_with_invalid_token(invalid_exec_api): + """""" + Fetching details of self profile with invalid token + """""" + r = invalid_exec_api.profile_self() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +"
+/profile/rest/self/,fetching
details of self profile,,"{ +""status"" : 200, +""response"" : Self profile details +}","def test_profile_self(run_api, profile_self): + """""" + Fetching details of self profile + """""" + r = profile_self + res = r.json() + assert res['username'] == run_api.user + test_assert.status(r, 200) +" +/profile/rest/set_group/{user_id}/,setting group to profile without authorization,"{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = id +} ","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_profile_set_group_without_authorization(anonymous_exec_api): + """""" + Set group to profile without authorization + """""" + groups = { + ""add"": [], + ""remove"": [""valid-group-name""] + } + r = anonymous_exec_api.profile_set_group(user_id=id, params=groups) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' +" +/profile/rest/set_group/{user_id}/,setting group to profile using invalid token,"{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = id +} ","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_profile_set_group_with_invalid_token(invalid_exec_api): + """""" + Set group to profile with invalid token + """""" + groups = { + ""add"": [], + ""remove"": [""valid-group-name""] + } + r = invalid_exec_api.profile_set_group(user_id = id, params = groups) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' +" +/profile/rest/set_group/{user_id}/,"setting group to profile for valid User ID and valid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 201, +""response"" : success +}","def test_profile_set_group(profile_set_group, run_api): + """""" + Set group to profile + """""" + r = profile_set_group + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) +" +/profile/rest/set_group/{user_id}/,"setting group to profile for invalid ID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = 0 +} ","{ +""status"" : 400, +""response"" : failure +}","def test_profile_set_group_invalid_user_id(run_api): + """""" + Set group to profile by invalid user id + """""" + groups = { + ""add"": [], + ""remove"": [random.choice(GROUPS)] + } + r = run_api.profile_set_group(user_id=0, params=groups) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) +" +/profile/rest/set_group/{user_id}/,"setting group to profile for invalid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +groups = { + ""add"": [], + ""remove"": [invalid_group_name] +}, +user_id = id +} ","{ +""status"" : 400, +""message"" : ""Group matching query does not exist"" +}","def test_profile_set_group_invalid_group_name(run_api, admin_exec_api): + """""" + Set group to profile by invalid group name + """""" + groups = { + ""add"": [], + ""remove"": [""invalid-group-name""] + } + r = admin_exec_api.profile_list() + res = r.json() + profile = random.choice(res) + id = profile['id'] + r = run_api.profile_set_group(user_id=id, params=groups) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Group matching query does not exist."" +" +/rtask/rest/children/{UUID}/,fetching the list of children jobs without authorization,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_list_children_without_authorization(anonymous_exec_api): + """""" + Fetching the List of childrens of a job without authorization + """""" + r = anonymous_exec_api.rtask_list_children(""invalid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid UUID,"{ +uuid = ""invalid_uuid"" +}","{ +""status"" : 400 / 404, +""response"" : Bad request +}","def test_rtask_list_children_invalid_uuid(run_api): + """""" + Fetching the List of childrens of a job having invalid uuid + """""" + r = run_api.rtask_list_children(""invalid-uuid"") + status_code = r.status_code + assert status_code in [400, 404] +" +/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid token,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_rtask_list_children_with_invalid_token(invalid_exec_api): + """""" + Fetching the List of childrens of a job with invalid token + """""" + r = invalid_exec_api.rtask_list_children(""invalid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/rtask/rest/children/{UUID}/,"fetching the list of children jobs using valid data +",,"{ +""status"" : 200, +""response"" : Children Task listed +}","def test_rtask_list_children(rtask_list_children): + """""" + Fetching the List of children of a island deploy job + """""" + params, r = rtask_list_children + rjson = r.json() + test_assert.status(r, 200) + assert rjson[""count""] == len(params[""machines""][""add""]) + assert rjson[""results""][0][""type_name""] == ""Deploy"" +" +/rtask/rest/delete/{UUID}/,deleting task without authorization,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_delete_without_authorization(anonymous_exec_api): + """""" + Deleting the task without authorization + """""" + r = anonymous_exec_api.rtask_delete(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/rtask/rest/delete/{UUID}/,deleting task when requested with invalid token,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_rtask_delete_with_invalid_token(invalid_exec_api): + """""" + Deleting the task with invalid token + 
"""""" + r = invalid_exec_api.rtask_delete(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/rtask/rest/delete/{UUID}/,"deleting task of valid UUID.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 403 / 204, +""response"" : Task deleted successfully +}","def test_rtask_delete(run_api, rtask_delete): + """""" + Deleting the task + """""" + r = rtask_delete + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) +" +/rtask/rest/delete/{UUID}/,"deleting task of invalid UUID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +uuid = ""invalid_uuid"" +}","{ +""status"" : 400 / 404, +""response"" : Bad request +}","def test_rtask_delete_invalid_uuid(run_api): + """""" + Deleting the task with invalid token + """""" + r = run_api.rtask_delete(""invalid-uuid"") + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code in [404, 400] +" +/rtask/rest/detail/{UUID}/,getting details of task without authorization,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_details_without_authorization(anonymous_exec_api): + """""" + Getting details of Task without authorization + """""" + r = anonymous_exec_api.rtask_details(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/rtask/rest/detail/{UUID}/,getting details of task with invalid token,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_rtask_details_with_invalid_token(invalid_exec_api): + """""" + Getting details of Task with invalid token + """""" + r = invalid_exec_api.rtask_details(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/rtask/rest/detail/{UUID}/,getting details of task for valid uuid,,"{ +""status"" : 200, +""response"" : Details provided +}","def test_rtask_details(rtask_details): + """""" + Getting details of Specific Task + """""" + params, r = rtask_details + res = r.json() + test_assert.status(res, params, ""rtask_details"", ""server"") + test_assert.status(r, 200) +" +/rtask/rest/detail/{UUID}/,getting details of task for invalid uuid,"{ +uuid = ""invalid_uuid"" +}","{ +""status"" : 400, +""response"" : Bad request +}","@pytest.mark.skip(reason=""Skipping this test because it is returning 404 in place of 400"") +def test_rtask_details_invalid_uuid(run_api): + """""" + Getting details of Task by providing invalid uuid + """""" + r = run_api.rtask_details(""invalid-uuid"") + # res = r.json() + test_assert.status(r, 400) +" +/rtask/rest/list/,fetching the list of jobs with added filters,,"{ +""status"" : 200, +""response"" : listed jobs +}","PARAMETERS = [ + {""page"": 1, ""page_size"": 5}, + {""search"": ""finished""}, + {""ordering"": ""mtime""}, + {""ordering"": ""-mtime""}, + {""ordering"": ""status""}, + {""ordering"": ""-status""}, + {""ordering"": ""job_type""}, + {""ordering"": ""-job_type""} +] + +@pytest.mark.parametrize(""filter"", 
PARAMETERS) +def test_rtask_list_with_filter(run_api, filter): + """""" + Fetching the List of Jobs based on filter + """""" + r = run_api.rtask_list(filter) + test_assert.status(r, 200) +"
+/rtask/rest/list/,fetching the list of jobs when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_rtask_list_with_invalid_token(invalid_exec_api): + """""" + Fetching the List of Jobs with invalid token + """""" + r = invalid_exec_api.rtask_list() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +"
+/rtask/rest/list/,fetching the list of jobs without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_list_without_authorization(anonymous_exec_api): + """""" + Fetching the List of Jobs without authorization + """""" + r = anonymous_exec_api.rtask_list() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +"
+/rtask/rest/list/,"fetching the list of jobs when requested with ordering param as ""status""",,"{ +""status"" : 200, +""response"" : listed jobs in ascending order +}","def test_rtask_list_status(rtask_list_status): + """""" + Listing the status of rtasks + """""" + params, r = rtask_list_status + test_assert.status(r, 200) +"
+/rtask/rest/list/,fetching the list of jobs,,"{ +""status"" : 200, +""response"" : listed jobs +}","def test_rtask_list(rtask_list): + """""" + Fetching the List of Jobs + """""" + r = rtask_list + test_assert.status(r, 200) +"
+/rtask/rest/rlist/,fetching the list of remote tasks with filters,,"{ +""status"" : 200, +""response"" : Remote Task listed +}","PARAMETERS = [ + {""page"": 1, ""page_size"": 5}, + {""search"": ""finished""}, + {""ordering"": ""mtime""}, + {""ordering"": ""-mtime""}, + {""ordering"": ""status""}, + {""ordering"": ""-status""}, + {""ordering"": ""job_type""}, + {""ordering"": ""-job_type""} +] + +@pytest.mark.parametrize(""filter"", PARAMETERS) +def test_rtask_rlist_with_filter(run_api, filter): + """""" + Fetching the List of Jobs based on filter + """""" + r = run_api.rtask_rlist(filter) + test_assert.status(r, 200) +"
+/rtask/rest/rlist/,fetching the list of remote tasks with customized filters,,"{ +""status"" : 200, +""response"" : Filtered remote task listed +}","@pytest.mark.skip(reason=""cannot validate the remote tasks"") +def test_rtask_rlist_filter(run_api): + """""" + Fetching the List of Jobs by adding filters + """""" + servers = [server[""hostname""] for server in run_api.server_list().json()[""results""]] + random_server = randint(0, 2) + owner_filter = {""user"": choice([1, 2, 3])} + task_for_filter = {""task_for"": servers[random_server]} + task_on_filter = {""task_on"": servers[random_server]} + status_filter = {""status"": choice(['created', 'delegated', 'started', 'finished', + 'failed', 'cancel', 'cancelling', 'cancelled'])} + search_filter = {""search"": choice([""Refresh"", ""BuildISOList"", ""DeleteRepoStoreFiles"", + DEFAULT_ADMIN_ACCOUNT['user'], DEFAULT_NON_ADMIN_ACCOUNT['user'], DEFAULT_MANAGER_ACCOUNT['user'], ""main"", ""mh"", ""mh-2""])} + filters = [owner_filter, task_for_filter, task_on_filter, status_filter, search_filter] + for filter in range(len(filters)): + r = run_api.rtask_rlist(filters[filter]) + test_assert.status(r, 200) +"
+/rtask/rest/rlist/,fetching the list of remote tasks when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token""
+}","def test_rtask_rlist_with_invalid_token(invalid_exec_api): + """""" + Fetching the List of Jobs with invalid token + """""" + r = invalid_exec_api.rtask_rlist() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +​/rtask​/rest​/rlist​/,fetching the list of remote jobs / tasks using valid data,,"{ +""status"" : 200, +""response"" : Remote Task listed +}","def test_rtask_rlist(rtask_rlist): + """""" + Fetching the List of Jobs + """""" + r = rtask_rlist + test_assert.status(r, 200) +" +/server/rest/backup_complete/,"creating a backup complete token for the server using invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + 'token': 'invalid' +}","{ + ""status"" : 400, + ""message"" : ""Invalid token"" +}","def test_server_backup_complete_with_invalid_token(run_api): + """""" + testing server backup_complete using invalid token + """""" + params = { + 'token': 'invalid' + } + r = run_api.server_backup_complete(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['result'] == 'FAILURE', rjson + assert rjson['error'] == 'Invalid Token', rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/server/rest/backup_manifest/,"back-up manifest api operation of server using invalid token. check the user type before performing the operation, only admin user type have the permission to perform such operations. + ","{ +""token"" :""invalid"" +}","{ + ""status"" : 400, + ""message"" : ""Token Invalid"" +}","def test_server_backup_manifest_invalid_token(run_api): + """""" + testing backup-manifest api using invalid token + """""" + params = {""token"": ""invalid""} + r = run_api.server_backup_manifest(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['reason'] == 'Invalid Token', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/server/rest/backup_manifest/,"back-up manifest api operation of server using empty string token. check the user type before performing the operation, only admin user type have the permission to perform such operations. + ","{ +""token"" :"""" +}","{ + ""status"" : 400, + ""message"" : ""Token required"" +}","@pytest.mark.skip(""Skipping this because it returns status code :- 500 "") +def test_server_backup_manifest_empty_token(run_api): + """""" + testing backup-manifest api using empty string token + """""" + params = {""token"": """"} + r = run_api.server_backup_manifest(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['error'] == 'Token Required', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/server/rest/set_commitable_ram/,"setting server id to commitable_ram_percent which is greater than 100 for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + 'commitable_ram_percent': 150 +}","{ +""status"" : 400, +""message"" : ""commitable_ram_percent should be less than or equal to 100"" +} +"," +def test_server_set_commmitable_ram_commitable_ram_percent_is_greater_than_100(run_api): + """""" + server set commmitable ram is greater than 100 + """""" + params = { + 'commitable_ram_percent': 150 + } + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent should be less than or equal to 100"", ""|> json %s"" % rjson +" +/server/rest/set_commitable_ram/,"setting negative value to commitable_ram _percent for server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + 'commitable_ram_percent': -1 +}","{ +""status"" : 400, +""message"" : ""commitable_ram_percent should be greater than 0"" +} +","def test_server_set_commmitable_ram_commitable_ram_percent_is_negative(run_api): + """""" + server set commmitable ram is negative + """""" + params = { + 'commitable_ram_percent': -1 + } + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent should be greater than 0"", ""|> json %s"" % rjson +" +/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + 'commitable_ram_percent': 100 +}","{ +""status"" : 404, +""message"" : ""Sever not found"" +} +","def test_server_set_commmitable_invalid_id(run_api): + """""" + server invalid server id + """""" + params = { + 'commitable_ram_percent': 100 + } + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 404) + rjson = r.json() + rjson['error'] == ""Server not found"", ""|> json %s"" % rjson +" +/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 400, +""message"" : ""commitable_ram_percent is required"" +} +","def test_server_set_commmitable_without_params(run_api): + """""" + server with set commmitable ram + """""" + r = run_api.server_set_commmitable_ram('invalid', {}) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent is required"", ""|> json %s"" % rjson +" +/server/rest/test_connection/,testing the connection to the server with invalid port,"{ + ""ip"" + ""port"": 70000, + ""username"", + ""password"", + }","{ +""status"" : 400, +""message"" : ""Ensure this value is less than or equal to 65535"" +} +","def test_server_test_connection_invalid_port(run_api): + """""" + Testing the Connection to the Server with invalid port + """""" + params = { + ""ip"": run_api.node_ip, + ""port"": 70000, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] + } + r = run_api.server_test_connection(params=params) + test_assert.status(r, 400) + res = r.json() + assert 'FAILURE' in res[""result""], res + assert ""Ensure this value is less than or equal to 65535"" in res[""error""], res +" +/server/rest/test_connection/,testing the connection to the server with incorrect port number,"{ + ""ip"" + ""port"": 424, + ""username"", + ""password"", + }","{ +""status"" : 200, +""message"" : ""Unable to connect to port""} +","def test_server_test_connection_incorrect_port(run_api): + """""" + Testing the Connection to the Server with incorrect port + """""" + params = { + ""ip"": run_api.node_ip, + ""port"": 424, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] + } + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] is False, result + assert ""Unable to connect to port"" in result[""ssh""][""error""], result +" +/server/rest/test_connection/,testing the connection to the server,"{ + ""ip"" + ""port"": 22, + ""username"", + ""password"", + }","{ +""status"" : 200, +""response"" :success +}","def test_server_test_connection(run_api): + """""" + Testing the Connection to the Server + """""" + params = { + ""ip"": run_api.node_ip, + ""port"": 22, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] + } + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] == 1, result +" +/server/rest/test_connection/,testing th using invalid credentials,"{ + ""ip"" + ""port"": 22, + ""username"": ""invalid"", + ""password"":""invalid"", + }","{ +""status"" : 200, +""message"" : ""Authentication failed"" +}","def test_server_test_connection_invalid_credentials(run_api): + """""" + Testing the Connection to the Server with invalid credentials + """""" + params = { + ""ip"": run_api.node_ip, + ""port"": 22, + ""username"": ""invalid"", + ""password"": ""invalid"" + } + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] is False, result + assert result[""ssh""][""error""] == ""Authentication failed."", result +" +/servers/rest/add/,"adding new server. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 201 +}","def test_server_add(run_api, server_add_new): + """""" + Add Server + """""" + template, result = server_add_new + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(result, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(result, 201) +"
+/servers/rest/backup-token/,"creating a backup token for the server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 201, + ""response"" : success +}"," +def test_server_backup_token(run_api): + """""" + create a backup token for the server + """""" + r = run_api.server_backup_token() + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 201) + assert ""token"" in rjson, rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +"
+/servers/rest/bulkops/,performing bulk operations on non-existing servers where valid operation is requested,"{ + ""server_list"": ['invalid list'], + ""op"": 'valid' +}","{ +""status"" : 400, +""message"" : ""Server does not exist"" +}","def test_server_bulkops_invalid_server_id(skip_if_not_admin, run_api): + """""" + invalid server id + """""" + SERVER_BULK_OPS = ['syncrepo', 'delete'] + for ops in SERVER_BULK_OPS: + bulkops = { + ""server_list"": 'invalid', + ""op"": ops + } + r = run_api.server_bulkops(bulkops) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['failure'][0]['error'] == 'Server does not exist', ""|> Json %s"" % rjson +"
+/servers/rest/bulkops/,"performing bulk operations on multiple existing servers where valid operation is requested. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""server_list"": ['valid list'], + ""op"": 'valid' +}","{ +""status"" : 202, +""response"" : success +}","@pytest.mark.parametrize(""operation"", SERVER_BULK_OPS, indirect=True) +def test_server_bulkops(run_api, server_bulkops, operation): + """""" + Bulk Operations in Server + """""" + r = server_bulkops + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) + +"
+/servers/rest/bulkops/,performing bulk operations on multiple existing servers where invalid operation is requested,"{ + ""server_list"": ['valid list'], + ""op"": 'invalid' +}","{ +""status"" : 400, +""message"" : ""Unsupported operation. Available options are: ['syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance']"" +}","def test_server_bulkops_invalid_operation(skip_if_not_admin, run_api): + """""" + invalid bulkops operation + """""" + bulkops = { + ""server_list"": 'invalid', + ""op"": 'invalid' + } + r = run_api.server_bulkops(bulkops) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Unsupported operation. Available options are: ['syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance']"", ""|> Json %s"" % rjson"
+/servers/rest/bulkops/,"performing api bulk operations on server using empty list of server_list.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""server_list"": [], + ""op"": 'mark_for_maintenance' +}","{ + ""status"" : 400, + ""message"" : ""server_list cannot be null or empty"" +}","def test_server_bulkops_empty_server_list(run_api): + """""" + testing server bulkops api using params as empty server list + """""" + bulkops = { + ""server_list"": [], + ""op"": 'mark_for_maintenance' + } + r = run_api.server_bulkops(bulkops) + rjson = r.json() + if run_api.user_type == 'non-admin': + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + assert rjson[""result""] == ""FAILURE"", rjson + assert ""server_list cannot be null or empty"" in rjson[""error""], rjson +"
+/servers/rest/delete/{{UUID}}/,requesting to delete server by searching with valid data for an existing deployment,"{ + 'search': server_name +}","{ +""status"" : 400, +""message"" : ""Cannot delete a server while deployments exist"" +}","def test_server_delete_while_deployments_exist(skip_if_not_admin, deploy_image, run_api): + """""" + delete a server while deployments exist + """""" + p, r = deploy_image + server_name = r.json()['server'] + params = { + 'search': server_name + } + res = run_api.server_list(params).json() + server_id = res['results'][0]['uuid'] + r = run_api.server_delete(server_id) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'Cannot delete a server while deployments exist', ""|> Json %s"" % rjson +"
+/servers/rest/delete/{{UUID}}/,"requesting to delete server by searching with invalid server_id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +server_id = ""invalid"" +}","{ +""status"" : 404, +}","def test_server_delete_invalid_id(run_api): + """""" + invalid server id + """""" + server_id = 'invalid' + r = run_api.server_delete(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +"
+/servers/rest/delete/{{UUID}}/,"deleting server using invalid uuid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +""server_id"" :'invalid' +}","{ + ""status"" : 404, + ""message"" : ""Delete : server not found"" +}","def test_server_delete_invalid_id(run_api): + """""" + invalid server id + """""" + server_id = 'invalid' + r = run_api.server_delete(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +"
+/servers/rest/delete/{{UUID}}/,"deleting a server when its status is set to ""online"".
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +""status"": ""Online"", +""total_machine"": 0 +} +","{ + ""status"" : 400, + ""message"" : ""Cannot delete a Server which is in Online state"" +}","def test_server_delete_status_online(run_api): + """""" + delete a server when its status is Online + """""" + params = {""status"": ""Online"", ""total_machine"": 0} + _, server_list = run_api.filter_servers_matching_with_criteria(params, list(run_api.clm_my_servers.values())) + if server_list: + r = run_api.server_delete(server_list[0]) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert ""Cannot delete a Server which is in Online state"" in rjson[""error""], rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +"
+/servers/rest/detail/{{UUID}}/,getting details of server of non-existing id,"{ +server_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Not found"" +}","def test_server_details_invalid_uuid(run_api): + """""" + fetch server details with invalid uuid + """""" + server_id = 'invalid' + r = run_api.server_details(server_id) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['detail'] == 'Not found.', ""|> json %s"" % rjson +"
+/servers/rest/detail/{{UUID}}/,getting details of server of existing id,,"{ +""status"" : 200 +}","def test_server_details(server_details): + """""" + Getting details of Server + """""" + template, r = server_details + result = r.json() + test_assert.status(template, result, ""server_details"") + test_assert.status(r, 200) +"
+/servers/rest/list/,getting the list of servers using invalid group id,"{ + 'group_id': invalid_group_id +}","{ +""response"" : failure +}","def test_server_list_by_invalid_group_id(run_api): + """""" + fetch server list by invalid group id + """""" + group_id = 0 + params = { + 'group_id': group_id + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res + +"
+/servers/rest/list/,getting the list of servers using group name,"{ + 'group_name': group_name +}","{ +""response"" : server list +}","def test_server_list_by_group_name(run_api): + """""" + fetch server list by group name + """""" + group_name = rand_string(10) + params = { + 'group_name': group_name + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res +"
+/servers/rest/list/,getting the list of servers using group id,"{ + 'group_id': group_id +}","{ +""response"" : server list +}","def test_server_list_by_group_id(skip_if_not_admin, group_add, run_api): + """""" + fetch server list by group id + """""" + params, r = group_add + rjson = r.json() + group_id = rjson['id'] + group_name = rjson['name'] + servers_list = { + ""servers_list"": list(run_api.clm_my_servers.values()) + } + run_api.group_add_server(servers_list, group_id) + params = { + 'group_id': group_id + } + servers = run_api.server_list(params).json() + for server in servers['results']: + server_details = run_api.server_details(server['uuid']).json() + server_in_groups = [group['name'] for group in server_details['groups']] + assert group_name in server_in_groups, ""|> Json %s"" % server_details +"
+/servers/rest/list/,getting the list of servers using server UUID,"{ + 'uuid': server_uuid +}","{ +""response"" : server list +}","def test_server_list_by_uuid(run_api): + """""" + fetch server list
based on server uuid + """""" + server_list = run_api.server_list().json() + server_uuid = choice([server['uuid'] for server in server_list['results']]) + params = { + 'uuid': server_uuid + } + result = run_api.server_list(params).json() + for server in result['results']: + assert server['uuid'] == server_uuid, ""|> json %s"" % server +" +/servers/rest/list/,getting the list of servers filtered by the search parameter,"{ + 'search': hostname +}","{ +""response"" : server list +}","def test_server_list_by_search(run_api): + """""" + fetch server list based on search params + """""" + server_list = run_api.server_list().json() + hostname = choice([server['hostname'] for server in server_list['results']]) + params = { + 'search': hostname + } + result = run_api.server_list(params).json() + for server in result['results']: + assert server['hostname'] == hostname, ""|> json %s"" % server + +" +/servers/rest/list/,getting the list of servers excluding servers that belong to a given group_name,"{ +exclude_group_name = group_name_to_exclude +}","{ +""response"" : server list +}","def test_server_list_by_excluding_group_name(run_api): + """""" + fetch server list by excluding group name + """""" + group_name = rand_string(10) + params = { + 'exclude_group_name': group_name + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res +" +/servers/rest/list/,getting the list of servers excluding servers that belong to a given group_id,"{ +exclude_group_id = group_id_to_exclude +}","{ +""response"" : server list +}","def test_server_list_by_excluding_group(skip_if_not_admin, run_api, group_add): + """""" + fetch server list by excluding group + """""" + p, r = group_add + rjson = r.json() + params = { + 'exclude_group_id': rjson['id'] + } + # New added group does not have any server so it will return all the server list + servers1 = run_api.server_list(params).json() + # Fetching the server list + servers2 = run_api.server_list().json() + assert servers1['count'] == servers2['count'] +" +/servers/rest/list/,getting the list of servers,,"{ +""status"" : 200, +""response"" : server list +}","def test_server_list(server_list): + """""" + Getting the list of Servers + """""" + r = server_list + test_assert.status(r, SERVER_LIST, ""server_list"", ""hostname"") + test_assert.status(r, 200) +" +/servers/rest/list/,fetching the server list by setting the scope parameter,"{ +""scope"" : ""my"" +}","{ +""status"" : 200 +}","def test_server_list_by_scope(run_api): + """""" + fetch server list using scope :- 'my' + """""" + params = { + 'scope': ""my"" + } + res = run_api.server_list(params) + test_assert.status(res, 200)" +/servers/rest/list/,fetching the server list by setting the replication status,"{ +""status"" : ""Installing"" +} +",,"def test_server_list_by_installation_status(run_api): + """""" + fetch server list by replication status + """""" + params = { + 'status': ""Installing"" + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res +" +/servers/rest/syncrepo/,"syncing the layers on server. Check the user type before performing the operation, only admin user type has the permission to perform such operations. 
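A rough sketch of the expected call-and-assert shape (the server_syncrepo fixture and USER_TYPE mapping are assumptions inferred from the surrounding tests, not a confirmed API): +# hypothetical usage; the fixture presumably triggers the sync on the target server +r = run_api.server_syncrepo() +expected = 403 if run_api.user_type == USER_TYPE['non_admin'] else 201 +test_assert.status(r, expected) 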
+",,"{ +""status"" :201, +""reponse"" : success +}","@pytest.mark.skip(reason=""having issue in this testcase"") +def test_server_syncrepo(run_api, server_syncrepo): + """""" + Sync the layers on server + """""" + r = server_syncrepo + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 201) +" +/servers/rest/syncrepo/{{UUID}},"syncing layers on server using existing UUID and server is running.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 403 / 201 +}","def test_server_syncrepo(run_api, server_syncrepo): + """""" + Sync the layers on server + """""" + r = server_syncrepo + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 201) +" +/servers/rest/upgradeserver/{UUID}/,"updating server using valid existing data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 201, +""response"" : server updated +}","def test_server_upgradeserver(run_api, server_upgradeserver): + """""" + Updating sever + """""" + r = server_upgradeserver + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) +" +/servers/rest/upgradeserver/{UUID}/,"updating server using invalid data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +server_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Server not found"" +}","def test_server_upgrade_invalid_uuid(run_api): + """""" + server upgrade with invalid server id + """""" + server_id = 'invalid' + r = run_api.server_upgrade(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'Server Upgrade API isn\'t implemented. 
Use ServerBulkOps with ""upgrade"" as operation to upgrade Managed Hosts', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/shares/rest/add/{{UUID}}/,adding new object to vm ,,"{ +""status"" : 201, +""response"" : success +}","def test_shares_add(shares_add): + """""" + Adding new object to the vm + """""" + template, r = shares_add + test_assert.status(r, 201) +" +/shares/rest/list/,fetching the shares list of machine,,"{ +""status"" : 201, +""response"" : success +}","def test_shares_list(shares_list): + """""" + Fetch list of shares of machine + """""" + r = shares_list + test_assert.status(r, 200) +" +/tags/rest/add/{UUID}/,adding tag without authorization,"{ +vm_uuid = ""valid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_add_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + vm_uuid = ""invalid"" + p, r = anonymous_exec_api.tag_add(vm_uuid,) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/tags/rest/add/{UUID}/,adding tag using invalid token,"{ +vm_uuid = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_tags_add_invalid_token(invalid_exec_api): + """""" + invalid token + """""" + + vm_uuid = ""invalid"" + p, r = invalid_exec_api.tag_add(vm_uuid) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/tags/rest/add/{UUID}/,adding tag for valid existing UUID without name attribute in body,"{ +""tag_list"" = + [ + { + ""value"": ""494"", + ""description"": ""test"" + } + ] +}","{ +""status"" : 400, +""response"" : ""This field is required"" +}","def test_tags_add_without_name(run_api, library_add_new_vm): + """""" + add tag without name + """""" + params, r = library_add_new_vm + vm_uuid = r['uuid'] + + params = {""tag_list"": [{""value"": ""494"", ""description"": ""test""}]} + tag_params, result = run_api.tag_add(vm_uuid, params) + test_assert.status(result, 400) + rjson = result.json() + msg = rjson['tag_list'][0]['name'][0] + assert msg == ""This field is required."", ""The error message is %s"" % (msg) +" +/tags/rest/add/{UUID}/,adding tag for valid existing UUID with empty name attribute in the body,"{ +""tag_list"" = + [ + { + ""name"" : """" + ""value"": ""494"", + ""description"": ""test"" + } + ] +}","{ +""status"" : 400, +""response"" : ""This field may not be blank"" +}","def test_tags_add_with_empty_name(run_api, library_add_new_vm): + """""" + add tag with empty name + """""" + params, r = library_add_new_vm + vm_uuid = r['uuid'] + + params = {""tag_list"": [{""name"": """", ""value"": ""494"", ""description"": ""test""}]} + tag_params, result = run_api.tag_add(vm_uuid, params) + test_assert.status(result, 400) + rjson = result.json() + msg = rjson['tag_list'][0]['name'][0] + assert msg == ""This field may not be blank."", ""The error message is %s"" % (msg) +" +/tags/rest/add/{UUID}/,adding tag for valid existing UUID of machine,,"{ +""status"" : 201, +""response"" : added tag +}","def test_tags_add(run_api, library_add_new_vm): + """""" + add tag with valid data + """""" + params, r = library_add_new_vm + vm_uuid = r['uuid'] + + # Add Tag + tag_params, result = 
run_api.tag_add(vm_uuid) + test_assert.status(result, 201) + res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + tag = results[-1] + r = run_api.tag_delete(id=tag[""id""], params={}) +" +/tags/rest/add/{UUID}/,adding tag for invalid UUID of machine,"{ +vm_uuid = ""invalid"" +}","{ +""status"" : 404, +""response"" : ""No object with given uuid"" +}","def test_tags_add_with_invalid_uuid(run_api): + """""" + add tag with invalid uuid + """""" + vm_uuid = ""invalid"" + + tag_params, result = run_api.tag_add(vm_uuid,) + test_assert.status(result, 404) + rjson = result.json() + assert rjson['error'] == ""No object with given uuid"", ""The error message is %s"" % (rjson['error']) +" +/tags/rest/delete/{id}/,requesting to delete tag without authorization,"{ +tag_id = id +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_delete_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + tag_id = 0 + r = anonymous_exec_api.tag_delete(id=tag_id, params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is not in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ +""status"" : 204, +""response"" : tag deleted +}","def test_tags_delete(library_add_new_vm, run_api): + """""" + tag delete + """""" + params, r = library_add_new_vm + vm_uuid = r['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + newtag = [tag for tag in results if tag['name'] not in ('_sessionid', '_session_created_on')][0] + r = run_api.tag_delete(id=newtag[""id""], params={}) + test_assert.status(r, 204) +" +/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ +""status"" : 400, +""message"" : ""Delete not allowed"" +}","def test_tags_delete_with_undeletable_tag(library_add_new_vm, run_api): + """""" + tag delete tags are '_sessionid', '_session_created_on' + """""" + params, r = library_add_new_vm + vm_uuid = r['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + newtag = [tag for tag in results if tag['name'] in ('_sessionid', '_session_created_on')][0] + r = run_api.tag_delete(id=newtag[""id""], params={}) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['result'] == ""Delete not allowed"", ""The error message is %s"" % (rjson['result']) +" +/tags/rest/delete/{id}/,requesting to delete tag using invalid token,"{ +tag_id = id +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}"," +def test_tags_delete_invalid_token(invalid_exec_api): + """""" + invalid token + """""" + tag_id = 0 + r = invalid_exec_api.tag_delete(id=tag_id, params={}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/tags/rest/delete/{id}/,requesting to delete tag using invalid tag_id,"{ +tag_id = id +}","{ +""status"" : 404, +""message"" : ""Tag does not exist"" +}","def 
test_tags_delete_with_invalid_id(run_api): + """""" + tag delete with an invalid id + """""" + tag_id = 0 + r = run_api.tag_delete(id=tag_id, params={}) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == ""Tag does not exist"", ""The error message is %s"" % (rjson['error']) +" +/tags/rest/list/,requesting to fetch list of tags without authorization,"{ +'page' = 1, +'page_size' = 1 +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_list_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.tag_list({}, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/tags/rest/list/,requesting to fetch list of tags with Page and Page Size,"{ +'page' = 1, +'page_size' = 1 +}","{ +""status"" : 200, +""message"" : tag list for specific page +}","def test_tags_list_with_page_and_pagesize(run_api): + """""" + when requested with page and page size + """""" + params = {'page': 1, 'page_size': 1} + r = run_api.tag_list(params, {}) + test_assert.status(r, 200)" +/tags/rest/list/,requesting to fetch list of tags with invalid token,"{ +'page' = None, +'page_size' = None +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}"," +def test_tags_list_invalid_token(invalid_exec_api): + """""" + invalid token + """""" + r = invalid_exec_api.tag_list({}, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/user/rest/add-group/{id}/,"requesting to add user to a group by passing group names instead of Group UUIDs. Check the user type before performing the operation. +","{ +user_id = ""valid"" , +group_names = ""invalid_group_list"" +}","{ +""status"" : 403 / 400, +""message"" : ""Provided groups must be a list of Group's UUIDs."" +}","def test_user_add_group_invalid_grp_name(run_api, admin_exec_api): + """""" + Adding user into invalid group name + """""" + groups_name = { + 'groups': ['0'] + } + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + user_id = random.choice(user_ids) + template, r = run_api.user_add_group(user_id, groups_name) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Provided groups must be a list of Group's UUIDs."" + test_assert.status(r, 400) +" +/user/rest/add-group/{id}/,"requesting to add user to group using valid user_id where group name provided is an integer instead of string. Check the user type before performing the operation. 
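A minimal payload sketch (placeholder UUID; hypothetical values, inferred from the validation error these tests assert): +# well-formed payload: a list of group UUID strings +payload = {'groups': ['<group-uuid>']} +# malformed payload: an integer entry is rejected with 400 +payload = {'groups': [1]} 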
+","{ +user_id = ""valid"" , +group_names = ""invalid_group_list_datatype"" +}","{ +""status"" : 403 / 400, +""response"" : ""Provided group names must be a list of strings"" +}","def test_user_add_group_invalid_data(run_api, admin_exec_api): + """""" + Provide integer instead of string in group name list + """""" + groups_name = { + 'groups': [1] + } + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + user_id = random.choice(user_ids) + template, r = run_api.user_add_group(user_id, groups_name) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Provided groups must be a list of Group's UUIDs."", ""|> Json %s"" % res + test_assert.status(r, 400) + +" +/user/rest/add-group/{id}/,requesting to add user to group using invalid user_id.Check the user type before performing the operation.,"{ +user_id = ""invalid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 403 / 404 +}","PARAMETERS = [{""action"": GROUP_ADD}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_user_add_group_invalid_user_id(run_api, custom_group_admin_operations): + """""" + Adding invalid user id into group + """""" + params, r = custom_group_admin_operations + res = r.json() + group_name = { + 'groups': [res['name']] + } + template, r = run_api.user_add_group(user_id=0, groups=group_name) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 404) +" +/user/rest/add-group/{id}/,"requesting to add user to existing group using valid id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 403 / 201, +""response"" : success +}","def test_user_add_group(run_api, user_add_group): + """""" + Adding multiple users into group + """""" + r = user_add_group + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) +" +/user/rest/add-group/{id}/,requesting to add user to existing group using valid id but without authorization,"{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_add_group_without_authorization(anonymous_exec_api): + """""" + Adding user into group without authorization + """""" + groups_name = { + 'groups': ['0'] + } + template, r = anonymous_exec_api.user_add_group(user_id=0, groups=groups_name) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/add-group/{id}/,requesting to add user to existing group using valid id but invalid token,"{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_add_group_with_invalid_token(invalid_exec_api): + """""" + Adding user into group with invalid token + """""" + groups_name = { + 'groups': ['0'] + } + template, r = invalid_exec_api.user_add_group(user_id=0, groups=groups_name) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/user/rest/change_ownership/,"changing ownership of user where the owner is valid but destination user does not exist. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + owner = 'colama' + dest_user = 'doesnotexistuser' +}","{ +""status"" : 400, +""message"" : ""Either User owner or dest_user does not exist..."" +} +","def test_user_change_ownership_user_doesnot_exits(run_api): + """""" + user does not exits + """""" + owner = 'colama' + dest_user = 'doesnotexistuser' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == f""Either User '{owner}' or '{dest_user}' does not exist..."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/user/rest/change_ownership/,"changing ownership of user where the owner and destination user are the same. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + owner = 'colama' + dest_user = 'colama' +}","{ +""status"" : 400, +""message"" : 'The dest_user and the owner should be different' +} +","def test_user_change_ownership_when_owner_and_dest_user_are_same(run_api): + """""" + user change_ownership when owner and dest user are same + """""" + owner = 'colama' + dest_user = 'colama' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == 'The dest_user and the owner should be different', ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson" +/user/rest/change_ownership/,"changing ownership of user where the destination user does not have right over the owner. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + owner = 'colama' + dest_user = 'manager' +}","{ +""status"" : 400, +""message"" : ""'manager' as a Manager user, does not have right over 'colama' or 'manager'"" +} +","def test_user_change_owner_doesnot_have_right(skip_if_admin, run_api): + """""" + user does not have right over user + """""" + owner = 'colama' + dest_user = 'manager' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type == USER_TYPE[""manager""]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""'manager' as a Manager user, does not have right over 'colama' or 'manager'"", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/user/rest/change_ownership/,"changing ownership of a user by a manager , where the manager does not have rights over the users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +owner = ""vivekt"" +dest_user = ""manager"" +}","{ +""status"" : 400, +""message"":""Manager doesn't have full right over the user. Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"" +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_user_change_ownership_manager_does_not_have_deployment_server(skip_if_not_manager, run_api, custom_lib_non_admin_operations): + """""" + To test user_change_ownership endpoint when manager does not have full rights over the user + """""" + _ = custom_lib_non_admin_operations + owner = ""vivekt"" + dest_user = ""manager"" + res = run_api.user_change_ownership(owner, dest_user) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Manager doesn't have full right over the user. Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"", ""|> Json %s"" % rjson +" +/user/rest/detail/{id},"fetching the details of user. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 403 / 200 +}","def test_user_details(run_api, user_details): + """""" + Fetching the Details of User + """""" + params, r = user_details + res = r.json() + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(params, res, ""user_details"") + test_assert.status(r, 200) +" +/user/rest/detail/{id},fetching the details of user without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_detail_without_token(anonymous_exec_api): + """""" + Fetching the user details without token + """""" + params, result = anonymous_exec_api.user_details() + r = result.json() + test_assert.status(result, 401) + assert r['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/detail/{id},"fetching the details of user using valid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +user_id = valid_user_id +}","{ +""status"" : 403 / 200 +}","def test_user_detail_with_valid_id(run_api): + """""" + Fetching the Details of User with valid id + """""" + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + params, res = run_api.user_details(id=run_api.user_id) + test_assert.status(res, 403) + + elif run_api.user_type == USER_TYPE[""admin""]: + params, res = run_api.user_details(id=run_api.user_id) + test_assert.status(res, 200) + +" +/user/rest/detail/{id},fetching the details of user using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_details_with_invalid_token(invalid_exec_api): + """""" + Fetching the details of the user using invalid token + """""" + params, result = invalid_exec_api.user_details() + r = result.json() + test_assert.status(result, 401) + assert r['detail'] == ""Invalid token."" +" +/user/rest/detail/{id},fetching the details of user using invalid id,"{ +user_id = ""invalid"" +}","{ +""status"" : 404 +}","def test_user_detail_with_invalid_id(run_api): + """""" + Fetching the details using invalid id + + """""" + params, r = run_api.user_details(id=""invalid"") + test_assert.status(r, 404) + +" +/user/rest/list/,"fetching the list of users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 200/ 403, +""response"" : fetched list of users +}","def test_user_list(run_api, user_list): + """""" + Fetching the List of User + """""" + r = user_list + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) +" +/user/rest/list/,fetching the list of users without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_list_without_token(anonymous_exec_api): + """""" + Fetch group list with unauthorized + """""" + r = anonymous_exec_api.group_list({}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/list/,fetching the list of users with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_list_with_invalid_token(invalid_exec_api): + """""" + Fetch group list with invalid token + """""" + r = invalid_exec_api.group_list({}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/user/rest/list/,"fetching the list of users using the search param . Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200/ 403, +""response"" : fetched list of users when search param provided +}","def test_user_list_with_search_params(run_api, user_list): + """""" + user list with search params + """""" + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + result = run_api.user_list(params={""search"": run_api.user}) + test_assert.status(result, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + result = run_api.user_list(params={""search"": run_api.user}) + test_assert.status(result, 403) +" +/user/rest/list/,"fetching the list of users using the group_id parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200/ 403, +""response"" : fetched list of users with the provided group_id +} ","def test_user_list_with_group_id(run_api, admin_exec_api): + """""" + Fetch user list in a group with group-id + """""" + params, r = admin_exec_api.group_add() + group_uid = r.json()[""id""] + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = run_api.user_list(params={""group_id"": group_uid}) + test_assert.status(res, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + res = run_api.user_list(params={""group_id"": group_uid}) + test_assert.status(res, 403) + r = admin_exec_api.group_delete(group_uid) +" +/user/rest/list/,"fetching the list of users using filters. Check the user type before performing the operation. 
+",,"{ +""status"" : 200/ 403, +} ","@pytest.mark.xfail +def test_user_list_filter(skip_if_invalid_groups, run_api, user_list): + """""" + Fetching the List of User by filtering + """""" + groups = skip_if_invalid_groups + group_filter = {""group_id"": choice(groups), ""page_size"": 10} + exclude_group_filter = {""exclude_group_id"": choice(groups), ""page_size"": 10} + is_manager_filter = {""is_manager"": choice([True, False]), ""page_size"": 10} + r = user_list + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + # expected result of users with exclude group filter + exclude_group_ids = [] + for i in r.json()[""results""]: + group_ids = [] + for j in i.get(""groups""): + group_ids.append(j.get(""id"")) + if exclude_group_filter.get(""exclude_group_id"") in group_ids and len(group_ids) - 1 >= 1: + exclude_group_ids.append(i.get(""id"")) + # expected result of users with is_manager filter + manager_check = [] + for i in r.json()[""results""]: + is_manager = [] + for j in i.get(""groups""): + is_manager.append(j.get(""is_manager"")) + if is_manager_filter.get(""is_manager"") is True and is_manager_filter.get(""is_manager"") in is_manager: + manager_check.append(True) + elif is_manager_filter.get(""is_manager"") is False and True not in is_manager: + manager_check.append(False) + exp_res = { + 0: [group_filter.get(""group_id"") for i in r.json()[""results""] for j in i.get(""groups"") if j.get(""id"") == group_filter.get(""group_id"")], + 1: exclude_group_ids, + 2: manager_check + } + filters = [group_filter, exclude_group_filter, is_manager_filter] + for filter in range(len(filters)): + params = filters[filter] + r = run_api.user_list(params) + # check for valid response data with the filter parameters + if r.json()[""count""] != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + test_assert.status(r, 200) +" +/user/rest/list/,"fetching the list of users setting the is_manager param set to True. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200/ 403, +} ","def test_user_list_is_manager_is_true(run_api): + """""" + fetch user list when is_manager is true + """""" + params = { + 'is_manager': True + } + r = run_api.user_list(params) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + for users in rjson['results']: + is_manager = [group[""is_manager""] for group in users['groups']] + assert True in is_manager, ""The error is %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/user/rest/list/,"fetching the list of users setting the is_manager param set to False. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 200/ 403, +} ","def test_user_list_is_manager_is_false(run_api): + """""" + fetch user list when is_manager is false + """""" + params = { + 'is_manager': False + } + r = run_api.user_list(params) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + for users in rjson['results']: + is_manager = [group[""is_manager""] for group in users['groups']] + assert False in is_manager, ""The error is %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/user/rest/logout,requesting to logout user without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_logout_without_authorization(anonymous_exec_api): + """""" + Logout the user + """""" + r = anonymous_exec_api.user_logout() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/logout,requesting to logout user using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_logout_with_invalid_token(invalid_exec_api): + """""" + Logout the user + """""" + res = invalid_exec_api.user_logout() + result = res.json() + test_assert.status(res, 401) + assert result['detail'] == ""Invalid token."" +" +/user/rest/logout,requesting to logout user,,"{ +""status"" : 200, +""response"" : user logged out successfully +}","def test_user_logout(user_logout): + """""" + Logout the user + """""" + r = user_logout + test_assert.status(r, 200) +" +/user/rest/self/,fetching the data of logged in user without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_self_without_authorization(anonymous_exec_api): + """""" + Fetching the data of logged in user without authorization + """""" + r = anonymous_exec_api.user_self() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/self/,fetching the data of logged in user using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_self_with_invalid_token(invalid_exec_api): + """""" + Fetching the data of logged in user with invalid token + """""" + r = invalid_exec_api.user_self() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/user/rest/self/,fetching the data of logged in user,,200: should return object of user currently logged in,"def test_user_self(user_self): + """""" + Fetching the data of logged in user + """""" + r = user_self + test_assert.status(r, 200) +" +audit/rest/list,getting the audit list without authorization,,"{""status"":401, +""message"":""Authentication credentials were not provided."" +}","def test_audit_list_without_authorization(anonymous_exec_api): + """""" + Audit list without authorization + """""" + r = anonymous_exec_api.audit_list() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided.""" +ideploy/rest/add-tags,successful deployment operation when equal number of deployed islands and tags provided ,"{""island_list"": [""UUID1"", ""UUID2""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 201, ""message"": ""Created""}","def 
test_ideploy_add_tags(ideploy_deploy, run_api): + """""" + ideploy add tags + """""" + p, r = ideploy_deploy + uuid = r.json()['deploy_uuid'] + tag_name = ""test_tag"" + params = { + ""island_list"": [ + uuid + ], + ""tags_list"": [ + [ + tag_name + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 201) + island_detail = run_api.ideploy_details(uuid).json() + all_tags = [tag['value'] for tag in island_detail['tags']] + assert tag_name in all_tags, ""|> Json %s"" % island_detail +" +ideploy/rest/add-tags,"providing non-empty island_list and empty tags_list, expecting an error for not enough tags.","{""island_list"": [""UUID1""], ""tags_list"": []}","{""status"": 400, ""message"": ""Not enough tags provided.""}","def test_ideploy_add_tags_empty_island_list(run_api): + """""" + empty tags_list + """""" + params = {""island_list"": [""UUID1""], + ""tags_list"": [] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson +" +ideploy/rest/add-tags,"providing more tags than islands, expecting an error that there are not enough islands to add tags to","{""island_list"": [""UUID1""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_more_tag_count(run_api): + """""" + ideploy add tags with more tags than the island count + """""" + tag_name = ""test_tag"" + params = { + ""island_list"": [ + ""invalid"" + ], + ""tags_list"": [ + [ + tag_name + ], + [ + tag_name + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson +" +ideploy/rest/add-tags,"providing more islands than tags, expecting an error that not enough tags were provided","{""island_list"": [""UUID1"", ""UUID2"", ""UUID3""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough tags provided.""}","def test_ideploy_add_tags_more_tag_count(run_api): + """""" + ideploy add tags with more tags than the island count + """""" + tag_name = ""test_tag"" + params = { + ""island_list"": [ + ""invalid"" + ], + ""tags_list"": [ + [ + tag_name + ], + [ + tag_name + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson + +" +ideploy/rest/add-tags,"providing invalid UUID in island_list, expecting an error for invalid island UUID.","{""island_list"": [""invalid_UUID""], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Invalid island UUID.""}"," +def test_ideploy_add_tags_invalid_island_id(run_api): + """""" + invalid id + """""" + params = { + ""island_list"": [ + ""inUUID"" + ], + ""tags_list"": [ + [ + ""tag_name"" + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""DeployedIsland matching query does not exist."", ""|> Json %s"" % rjson +" +ideploy/rest/add-tags,"providing empty island_list and non-empty tags_list, expecting an error for not enough islands.","{""island_list"": [], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_empty_island_list(run_api): + """""" + empty island_list + """""" + params = { + ""island_list"": [ + ], + 
""tags_list"": [ + [ + ""tag_name"" + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson +" +ideploy/rest/add-tags,"empty input data, expecting an error for missing required fields. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{""status"": 400, ""message"": ""Input data is missing required 'island_list' and 'tags_list' keys.""}","def test_ideploy_change_ownership_with_missing_fields(run_api): + """""" + change ownership with missing 'owner' field + """""" + params = { + ""deployment_uuids"": [ + ""invalid"" + ], + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""owner or dest_user cannot be null or empty"", ""|> Json %s"" % rjson +" +ideploy/rest/change_ownership,Successful change of ownership from one user to another where both users exist and the requester has the necessary permissions,"{ + ""deployment_uuids"": [ + deploy_id + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 200, ""message"": ""Operation performed successfully without any error""}"," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_change_ownership(skip_if_non_admin, custom_ilib_non_admin_operations, run_api): + """""" + Successful change of ownership from one user to another + """""" + deploy_id = custom_ilib_non_admin_operations + params = { + ""deployment_uuids"": [ + deploy_id + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + test_assert.status(r, 200) + island_detail = run_api.ideploy_details(deploy_id).json() + assert island_detail['island']['owner'] == 'manager', ""|> Json %s"" % island_detail + ilib_id = island_detail['island']['deploy_for']['uuid'] + run_api.ideploy_delete(deploy_id) + run_api.ilibrary_delete(ilib_id) + +" +ideploy/rest/change_ownership,"Partial success in changing ownership where some UUIDs fail. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""deployment_uuids"": [ + ""invalid"" + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 207, ""message"": ""These objects failed to change their ownership: [\""invalid_UUID\""]""}","def test_ideploy_change_ownership_invalid_id(skip_if_non_admin, run_api): + """""" + Partial success in changing ownership where some UUIDs fail. + """""" + params = { + ""deployment_uuids"": [ + ""invalid"" + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""The count of provided UUIDs doesn't match with the count of existing Deployments. 
Make sure that the provided UUIDs are valid, the deployment(s) is/are not a part of any Island, they belong to the 'vivekt' user and are for one category, either DeployedMachine or DeployedIsland"", ""|> Json %s"" % rjson + +" +ideploy/rest/change_ownership,changing ownership of an invalid deployed island from non-admin to admin by an admin user,"{ + ""deployment_uuids"": [""invalid""], + ""owner"", + ""dest_user"", +}","{ +""status"" : 400, +""message"" : ""Make sure that the provided UUIDs are valid"" +}","def test_ideploy_change_ownership_invalid_uuid(skip_if_not_admin, non_admin_exec_api, run_api): + """""" + To change ownership of invalid deployed island from non-admin user to admin user by admin + """""" + params = { + ""deployment_uuids"": [""invalid""], + ""owner"": non_admin_exec_api.user, + ""dest_user"": run_api.user + } + res = run_api.ideploy_change_ownership(params) + rjson = res.json() + test_assert.status(res, 400) + assert ""Make sure that the provided UUIDs are valid"" in rjson[""error""], rjson +" +ideploy/rest/change_ownership,changing ownership of a deployed machine (not an island) from admin to non-admin by an admin user via the island endpoint,"{ + ""deployment_uuids"" + ""owner"" + ""dest_user"" +}","{ +""status"" : 400, +""message"" : ""The provided UUIDs might belong to the DeployedMachine. Trigger the correct API"" +}","def test_ideploy_change_ownership_with_deployed_machine_uuid(skip_if_not_admin, deploy_image, non_admin_exec_api, run_api): + """""" + To change ownership of deployed machine from admin user to non-admin user by admin + """""" + template, r = deploy_image + deploy_id = r.json()[""uuid""] + params = { + ""deployment_uuids"": [deploy_id], + ""owner"": run_api.user, + ""dest_user"": non_admin_exec_api.user + } + res = run_api.ideploy_change_ownership(params) + rjson = res.json() + test_assert.status(res, 400) + assert f""The provided UUIDs ['{deploy_id}'] might belong to the DeployedMachine. Trigger the correct API"" in rjson[""error""], rjson +" +ideploy/rest/change_ownership,"Changing ownership with an invalid format for deployment_uuids. Check the user type before performing the operation, only admin user type has the permission to perform such operations. +","{ + ""deployment_uuids"": + {}, + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 400, ""message"": ""deployment_uuids cannot be null or empty""}","def test_ideploy_change_ownership_with_invalid_data_type(run_api): + """""" + invalid input format for changing ownership + """""" + params = { + ""deployment_uuids"": + {}, + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""deployment_uuids cannot be null or empty"", ""|> Json %s"" % rjson +" +ideploy/rest/change_ownership,"Attempting to change ownership with an empty list of UUIDs. Check the user type before performing the operation, only admin user type has the permission to perform such operations. +","{ + ""deployment_uuids"": [], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" +}","{""status"": 400, ""message"": ""please provide list of uuids""}","def test_ideploy_change_ownership_empty_list_uuid(run_api): + """""" + change ownership with an empty list of UUIDs. 
+ """""" + params = { + ""deployment_uuids"": [], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'please provide list of uuids', ""|> %s"" % rjson" +ideploy/rest/change_ownership,Attempting to change ownership where the owner does not exist.,"{""deployment_UUIDs"": [""UUID1"", ""UUID2""], ""owner"": ""nonexistentowner"", ""dest_user"": ""newowner""}","{""status"": 400, ""message"": ""Either User 'nonexistentowner' or 'newowner' does not exist...""}","def test_ideploy_change_ownership_for_not_existing_owner(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + To change ownership of deployed machine if one of the user do not exit + """""" + params = { + ""deployment_uuids"": ['invalid'], + ""owner"":""non-exiting-user"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Owner does not exist..."", ""|> Json %s"" % rjson +" +ideploy/rest/change_ownership,"attempting to change ownership where the destination user does not exist check. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""deployment_uuids"": ['invalid'], + ""owner"": ""colama"", + ""dest_user"": ""non-exiting-user"" + }","{""status"": 400, ""message"": ""Either User 'currentowner' or 'nonexistentuser' does not exist...""}","def test_ideploy_change_ownership_user_does_not_exits(run_api): + """""" + One of the user does not exits + """""" + params = { + ""deployment_uuids"": ['invalid'], + ""owner"": ""colama"", + ""dest_user"": ""non-exiting-user"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Either User 'colama' or 'non-exiting-user' does not exist..."", ""|> Json %s"" % rjson +" +ideploy/rest/change_ownership,"Attempting to change ownership when the owner and destination user are the same. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""deployment_UUIDs"": [ + ""string"" + ], + ""owner"": ""colama"", + ""dest_user"": ""colama"" + }","{""status"": 400, ""message"": ""The dest_user and the owner should be different""}","def test_ideploy_change_ownership_same_owner_and_dest_owner(run_api): + """""" + ideploy change ownership + """""" + params = { + ""deployment_UUIDs"": [ + ""string"" + ], + ""owner"": ""colama"", + ""dest_user"": ""colama"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""The dest_user and the owner should be different"", ""|> Json %s"" % rjson" +ideploy/rest/deploy_filter_fields/,successful filtering of the fields of deployed island machine ,,"{ + ""status"":200, + ""response"":list of filters +}","def test_ideploy_deploy_filter_fields(run_api): + """""" + ideploy deploy filter fields + """""" + r = run_api.ideploy_filter_fields() + test_assert.status(r, 200) +" +license/rest/licenses_check,checking the license when day params is negative,,"{ + ""result"": ""FAILURE"", + ""message"": ""Value of `days` cannot be negative"" +}","def test_license_check_when_day_is_negative(run_api): + """""" + license check when day is negative + """""" + r = run_api.license_check(days=-1) + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Value of `days` cannot be negative"", ""The error %s"" % rjson +" +license/rest/licenses_check,checking the license when day params is zero,,"{ +""statuas"": 200, +""response"" : licence status +}","def test_license_check_when_day_is_zero(run_api): + """""" + license check when day is 0 + """""" + r = run_api.license_check() + rjson = r.json() + test_assert.status(r, 200) + assert rjson['warn'] is False, ""The error %s"" % rjson + assert rjson['msg'] == ""All good!"", ""The error %s"" % rjson +" +license/rest/licenses_check,checking license when day params is equal to duration of license,,"{ +""statuas"": 200, +""response"" : licence status +}","def test_license_check_when_day_is_equal_to_duration(admin_exec_api, run_api): + """""" + license check day is equal to duration + """""" + res = admin_exec_api.license_list() + license_list = res.json() + active_license_list = [licenses for licenses in license_list['results'] if licenses['state'] == 'active'] + durations = [json.loads(lic['data'])[""duration""] for lic in active_license_list] + duration = max(durations) + total_duration = duration + math.ceil(5 * duration / 100) + r = run_api.license_check(days=total_duration) + rjson = r.json() + test_assert.status(r, 200) + assert rjson['msg'] == ""Some License(s) are expiring soon"", ""The error %s"" % rjson +" +/deploy/rest/change_ownership/,changing ownership of deployed machine by admin user from non-admin to manager user ,"{ + ""deployment_uuids"": [uuid], + ""owner"", + ""dest_user"": ""manager"" + }","{ +""status"" :200 +}","def test_deploy_change_ownership(skip_if_non_admin, non_admin_exec_api, run_api): + """""" + To change ownership of deployed machine from non-admin user to manager by admin + """""" + params, r = non_admin_exec_api.library_add_new_vm() + lib_id = r.json()[""uuid""] + r = non_admin_exec_api.deploy_image(lib_id=lib_id, deploy_on=list(run_api.clm_my_servers.keys())) + uuid = r.json()['uuid'] + params = { + ""deployment_uuids"": [uuid], + ""owner"": 
non_admin_exec_api.user, + ""dest_user"": ""manager"" + } + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 200) + new_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + assert new_owner == ""manager"" + run_api.deploy_image_delete(deploy_id=uuid) + run_api.library_delete(uuid=lib_id) +" +/deploy/rest/change_ownership/,changing ownership of deployed machine when the owner and destination owner are same,"{ + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": prev_owner + }","{ + ""status"" : 400, + ""message"" : 'The dest_user and the owner should be different' +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_change_ownership_for_same_users(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + To change ownership of deployed machine if The dest_user and the owner should be same + """""" + deploy_id = custom_lib_non_admin_operations + uuid = deploy_id + prev_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + params = { + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": prev_owner + } + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 400) + assert res.json()['error'] == 'The dest_user and the owner should be different'" +/deploy/rest/change_ownership/,changing ownership of deployed machine by manager user from non-admin to manager user ,"{ + ""deployment_uuids"": [uuid], + ""owner"": owner, + ""dest_user"": dest_user + }","{ + ""status"" : 400, + ""message"" : "" 'manager' as a Manager user, does not have right over "" +}","@pytest.mark.parametrize(""manager_exec_api"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_change_ownership_without_rights(skip_if_not_non_admin, run_api, manager_exec_api, library_add_new_vm): + """""" + To change ownership of deployed machine from non-admin user to admin by manager + """""" + params, r = library_add_new_vm + lib_id = r[""uuid""] + r = run_api.deploy_image(lib_id=lib_id) + uuid = r.json()['uuid'] + owner = run_api.user + dest_user = ""colama"" + params = { + ""deployment_uuids"": [uuid], + ""owner"": owner, + ""dest_user"": dest_user + } + res = manager_exec_api.deploy_change_ownership(params=params) + test_assert.status(res, 400) + assert res.json()['error'] == f""'manager' as a Manager user, does not have right over '{owner}' or '{dest_user}'"" + manager_exec_api.deploy_image_delete(deploy_id=uuid) + manager_exec_api.library_delete(uuid=lib_id) +" +/deploy/rest/change_ownership/,changing ownership of deployed machine when one of the used does not exist,"{ + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": invalid_user + }","{ + ""status"" : 400, + ""message"" : ""Either User '{prev_owner}' or '{invalid_user}' does not exist..."" +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_change_ownership_for_not_existing_user(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + To change ownership of deployed machine if one of the user do not exist + """""" + deploy_id = custom_lib_non_admin_operations + uuid = deploy_id + invalid_user = rand_string() + prev_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + params = { + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": invalid_user + } + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 400) + assert res.json()['error'] == 
f""Either User '{prev_owner}' or '{invalid_user}' does not exist..."" + + +" +/deploy/rest/change_ownership/,changing ownership of deployed machine by admin from admin user to non-admin,"{ + ""deployment_uuids"": [deploy_id], + ""owner"" + ""dest_user"" + }","{ + ""status"" : 400, + ""message"" : ""The provided UUIDs might belong to the DeployedIsland. Trigger the correct API"" +}","def test_deploy_change_ownership_with_island_uuid(skip_if_not_admin, ideploy_deploy, non_admin_exec_api, run_api): + """""" + To change ownership of deployed island from admin user to non-admin user by admin + """""" + template, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + params = { + ""deployment_uuids"": [deploy_id], + ""owner"": run_api.user, + ""dest_user"": non_admin_exec_api.user + } + res = run_api.deploy_change_ownership(params=params) + rjson = res.json() + test_assert.status(res, 400) + assert f""The provided UUIDs ['{deploy_id}'] might belong to the DeployedIsland. Trigger the correct API"" in rjson[""error""], rjson +" +/deploy/rest/configure_autostart/,"setting a virtual machine to auto-start when the host machine reboots. Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"" : 200 +}","def test_deploy_configure_autostart_vm_self(run_api, configure_autostart): + """""" + Set to Auto-start a VM when the host machine reboots + """""" + x, r = configure_autostart + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + else: + test_assert.status(r, 200) +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by an admin user ,,"{ + ""status"" : 200 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_configure_autostart_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots by Admin + """""" + # Admin check for Auto-start a VM created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, 200) +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a non-admin user ,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_configure_autostart_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots by non-admin + """""" + # Non-admin check for Auto-start a VM created by different user + deploy_id = custom_lib_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a manager when manager has rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_configure_autostart_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = 
+    r = run_api.configure_autostart(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True))
+
+    # When the user is part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.configure_autostart(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))
+
+"
+/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a manager when the manager does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+def test_deploy_configure_autostart_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Set to Auto-start a VM when the host machine reboots
+    """"""
+    # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.configure_autostart(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False))
+
+    # When the user is part of the group that the manager manages but the deployment is not on manager rightful server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.configure_autostart(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False))
+
+
+"
+/deploy/rest/configure_autostart/,setting auto-start on a machine that is part of an island (not allowed),,"{
+  ""status"" : 400,
+  ""message"" : 'Autostart is not allowed on machines which are part of island'
+}","def test_deploy_configure_autostart_vm_in_island(skip_if_not_admin, run_api, ideploy_deploy):
+    """"""
+    Auto-start on a machine that is part of the island
+    """"""
+    params, r = ideploy_deploy
+    deploy_id = r.json()[""deploy_uuid""]
+    machine_id = run_api.ideploy_details(uuid=deploy_id).json()[""machines""][0][""uuid""]
+    res = run_api.configure_autostart(uuid=machine_id)
+    test_assert.status(res, 400)
+    assert res.json()[""error""] == 'Autostart is not allowed on machines which are part of island'
+"
+/deploy/rest/crash/,crashing a deployed machine successfully,,"{
+  ""status"" : 201
+}","def test_deploy_crash(deploy_crash):
+    """"""
+    Crashing a Deployed Image
+    """"""
+    x, r = deploy_crash
+    test_assert.status(r, 201)
+"
+/deploy/rest/crash/,crashing a deployed machine successfully by an admin,,"{
+  ""status"" : 201
+}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_crash_admin(skip_if_not_admin, run_api, custom_lib_non_admin_operations):
+    """"""
+    Crashing a Deployed Image by Admin
+    """"""
+    # Admin check of Crashing a Deployed Image created by different user
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_crash(deploy_id)
+    test_assert.status(r, 201)
+"
+/deploy/rest/crash/,crashing a deployed machine successfully by a non-admin,,"{
+  ""status"" : 403
+}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_crash_non_admin(skip_if_not_non_admin, run_api, custom_lib_admin_operations):
+    """"""
+    Crashing a Deployed Image by non-admin
+    
"""""" + # Non-admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/crash/,"crashing a deployed machine successfully by a manager, when manager has rights on server.",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/crash/,"crashing a deployed machine successfully by a manager, when manager does not have rights on server.",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_no_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + +" +/deploy/rest/crash/,crashing a deployed machine using uuid for which machine does not exists,"{ +deploy_id = ""invalid"" +}","{ + ""status"" : 404, + ""message"" : ""Machine matching query does not exist"" +}","def test_deploy_crash_invalid_uuid(run_api): + """""" + crashing deployed machine using invalid uuid + """""" + deploy_id = ""invalid"" + r = run_api.deploy_crash(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/crash/,crashing a deployed machine without authorization,"{ +deploy_id = ""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_deploy_crash_without_authorization(anonymous_exec_api): + """""" + crashing deployed machine without authorization + """""" + deploy_id = ""invalid"" + depl_crash = anonymous_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + +" 
+/deploy/rest/crash/,crashing a deployed machine using an invalid token,"{
+deploy_id = ""invalid""
+}","{
+  ""status"" : 401,
+  ""message"" : ""Invalid token.""
+}","def test_deploy_crash_invalid_token(invalid_exec_api):
+    """"""
+    crashing deployed machine using invalid token
+    """"""
+    deploy_id = ""invalid""
+    depl_crash = invalid_exec_api.deploy_crash(deploy_id, wait=False)
+    depl_json = depl_crash.json()
+    test_assert.status(depl_crash, 401)
+    assert depl_json[""detail""] == ""Invalid token.""
+"
+/deploy/rest/delete/,deleting the virtual machine of a deployed machine,,"{
+""status"" : 201
+}","def test_deploy_delete(deploy_delete):
+    """"""
+    Deleting the VM
+    """"""
+    x, r = deploy_delete
+    test_assert.status(r, 201)
+"
+/deploy/rest/delete/,deleting the virtual machine of a deployed machine by an admin user,,"{
+""status"" : 201
+}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api):
+    """"""
+    Deleting the VM by Admin
+    """"""
+    # Admin check for Deleting the Deployed VM created by different user.
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, 201)
+"
+/deploy/rest/delete/,deleting the virtual machine of a deployed machine by a non-admin user,,"{
+""status"" : 403
+}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api):
+    """"""
+    Deleting the VM by non-Admin
+    """"""
+    # Non-admin check for Deleting the Deployed VM created by different user.
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, 403)
+"
+/deploy/rest/delete/,"deleting the virtual machine of a deployed machine by a manager, when the manager has rights over the server",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+def test_deploy_delete_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Deleting the VM by Manager
+    """"""
+    # When the user is not part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True))
+
+    # When the user is part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))
+
+"
+/deploy/rest/delete/,"deleting the virtual machine of a deployed machine by a manager, when the manager does not have rights over the server",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+def test_deploy_delete_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Deleting the VM by Manager
+    """"""
+    # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False))
+
+    # When the user is part of the group that the manager manages but the deployment is not on manager rightful server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_image_delete(deploy_id, {})
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False))
+
+
+"
+/deploy/rest/delete/,deleting the image of a deployed machine without authorization,"{
+deploy_id : ""invalid""
+}","{
+  ""status"" : 401,
+  ""message"" : ""Authentication credentials were not provided.""
+}","def test_deploy_delete_without_authorization(anonymous_exec_api):
+    """"""
+    deleting image of deployed machine without authorization
+    """"""
+    deploy_id = ""invalid""
+    depl_delete = anonymous_exec_api.deploy_image_delete(deploy_id, {}, wait=False)
+    depl_json = depl_delete.json()
+    test_assert.status(depl_delete, 401)
+    assert depl_json[""detail""] == ""Authentication credentials were not provided.""
+
+
+"
+/deploy/rest/delete/,deleting the image of a deployed machine using an invalid token,"{
+deploy_id : ""invalid""
+}","{
+  ""status"" : 401,
+  ""message"" : ""Invalid token.""
+}","def test_deploy_delete_invalid_token(invalid_exec_api):
+    """"""
+    deleting image of deployed machine using invalid token
+    """"""
+    deploy_id = ""invalid""
+    depl_delete = invalid_exec_api.deploy_image_delete(deploy_id, {}, wait=False)
+    depl_json = depl_delete.json()
+    test_assert.status(depl_delete, 401)
+    assert depl_json[""detail""] == ""Invalid token.""
+
+
+"
+/deploy/rest/delete/,deleting the image of a deployed machine which is part of an island,,"{
+  ""status"" : 400,
+  ""message"" : ""Cannot perform delete operation on machine which is part of an Island""
+}","def test_deploy_delete_machine_part_of_island(run_api, ideploy_deploy):
+    """"""
+    Deletion of the machine that is part of the island
+    """"""
+    params, r = ideploy_deploy
+    deploy_id = r.json()[""deploy_uuid""]
+    machine_id = run_api.ideploy_details(uuid=deploy_id).json()[""machines""][0][""uuid""]
+    res = run_api.deploy_image_delete(deploy_id=machine_id)
+    test_assert.status(res, 400)
+    assert res.json()[""error""] == ""Cannot perform delete operation on machine which is part of an Island""
+
+"
+/deploy/rest/delete/,deleting the image of a deployed machine which is protected,,"{
+  ""status"" : 400,
+  ""message"" : ""Cannot perform delete operation on machine because it has been protected""
+}","def test_deploy_delete_protected_machine(run_api, deploy_protect):
+    """"""
+    Deletion of the machine that is protected
+    """"""
+    r = deploy_protect
+    deploy_id = r.json()[""uuid""]
+    res = run_api.deploy_image_delete(deploy_id)
+    test_assert.status(res, 400)
+    assert res.json()[""error""] == ""Cannot perform delete operation on machine because it has been protected""
+"
+/deploy/rest/deploylist,getting list of image of deployed machine,,"{
+""status"":200
+}","def test_deploy_deploylist(deploy_deploylist):
+    """"""
+    Getting deploy details of VM
+    """"""
+    r = deploy_deploylist
+    test_assert.status(r, 200)
+"
+/deploy/rest/deploylist,getting filtered list of image of deployed machine,,"{
+  ""response"" : success
+}","@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True)
+def 
test_deploy_deploylist_filter(run_api: apiops, lib_filter_kwargs): + """""" + Fetching the list of deployed images by adding filters + """""" + depl_res = [] + templates, res = lib_filter_kwargs + for r in res: + rjson = r.json() + depl_r = run_api.deploy_image(rjson[""uuid""]) + depl_res.append(depl_r) + try: + filter_on_input_result(run_api, library_count, templates, depl_res, prefix_name, run_api.deploy_deploylist) + finally: + depl_uuids = [depl.json()[""uuid""] for depl in depl_res] + run_api.deploy_bulkops({""machine_list"": depl_uuids, ""op"": ""delete""}) +" +/deploy/rest/deploylist,getting list of image of deployed machine using tag_list,,"{ + ""response"" : success +}","def test_deploylist_machine_tag_detail(run_api, deploy_image): + """""" + Get machine based on tag filter + """""" + params, r = deploy_image + machine_id = r.json()[""uuid""] + tag = rand_string() + params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]} + run_api.deploy_add_tags(params=params) + res = run_api.deploy_deploylist(params={""tags"": tag}) + rjson = res.json() + # assert rjson[""count""] == 1, ""The error is %s"" % res.json() + for machine in rjson['results']: + machine_details = run_api.deploy_details(machine['uuid']).json() + all_tags = [tags['value'] for tags in machine_details['tags']] + assert tag in all_tags, ""|> Json %s"" % all_tags +" +/deploy/rest/deploylist,getting list of image of deployed machine using server parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_deploylist_machine_server_filter(run_api, deploy_image): + """""" + fetch list with server + """""" + p, r = deploy_image + server = r.json()['server'] + params = {""server"": server} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['server'] == server, ""Json |> %s"" % machines" +/deploy/rest/deploylist,getting list of image of deployed machine using session_id parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_session_id_filter(deploy_image, run_api): + """""" + Fetch list with session_id + """""" + params, r = deploy_image + rjson = r.json() + machine_id = rjson['uuid'] + session_id = rjson[""machine""][""tags""][0][""value""] + params = {""_sessionid"": session_id, ""uuid"": machine_id} + assert run_api.deploy_deploylist(params).json()[""count""] == 1 +" +/deploy/rest/deploylist,getting list of image of deployed machine using auto-start parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_autostart_filter(deploy_image, run_api): + """""" + Fetch list with autostart filter + """""" + params, r = deploy_image + rjson = r.json() + autostart = rjson[""AutoStart""] + params = {""autostart"": autostart} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['AutoStart'] == autostart, ""Json |> %s"" % machines +" +/deploy/rest/deploylist,getting list of image of deployed machine using state parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_machine_state_filter(run_api, deploy_image): + """""" + fetch list with deploy machine state filter + """""" + p, r = deploy_image + state = r.json()['state'] + params = {""state"": state} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['state'] == state, ""Json |> %s"" % machines +" +/deploy/rest/deploylist,getting list of image of deployed machine using invalid value of group_name parameter,"{ +group_name: ""invalid"" +}","{ +""response"" :Select a valid 
choice, the one you provided is not one of the available choices.' +}","def test_deploy_list_filter_with_invalid_group(run_api): + """""" + Testing invalid group name filtering + """""" + group_name = ""invalid"" + params = {'group': group_name} + response = run_api.deploy_deploylist(params) + x = response.json() + test_assert.status(response, 400) + assert x[""group""] == [f'Select a valid choice. {group_name} is not one of the available choices.'] +" +/deploy/rest/deploylist,getting count of all images in a deployed machine ,,"{ + ""status"" : 200 +}","def check_count_deploylist(run_api, deploy_id, params={}): + """""" + getting count of all images in a deployed machine + """""" + r = run_api.deploy_deploylist(params) + res = r.json() + test_assert.status(r, 200) + count = 0 + for dict in res['results']: + if dict[""uuid""] == deploy_id: + count += 1 + break + return count +" +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a non-admin user. ","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploylist_non_admin_scope_all(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + DeployList with scope=all of the VM by non-Admin + """""" + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0" +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a manager who has rights over the server","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0 + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 1 + +" +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a manager who does not have rights over the server","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0 + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = 
custom_lib_non_admin_operations
+    count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id})
+    assert count == 0"
+/ideploy/rest/edit/,editing an image of a deployed island,,"{
+  ""response"" : success
+}","
+@pytest.mark.parametrize(""cpucount"", CPU_COUNT, indirect=True)
+@pytest.mark.parametrize(""ram"", RAM, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_edit(run_api, custom_lib_admin_operations, deploy_edit, cpucount, ram):
+    """"""
+    Editing the VM
+    """"""
+    params, r = deploy_edit
+    test_assert.status(r, params, ""deploy_edit"")
+"
+/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first, then starting and then stopping by admin",,"{
+  ""status"" : 202
+}","@pytest.mark.parametrize(""cpucount"", [1], indirect=True)
+@pytest.mark.parametrize(""ram"", [100], indirect=True)
+@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_edit_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api, cpucount, ram):
+    """"""
+    Deploying an Image and Starting the VM and then Stopping by Admin
+    """"""
+    # Admin check of Starting a deployment created by different user
+    deploy_id = custom_lib_non_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, 202)
+"
+/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first, then starting and then stopping by non-admin",,"{
+  ""status"" : 403
+}","@pytest.mark.parametrize(""cpucount"", [1], indirect=True)
+@pytest.mark.parametrize(""ram"", [100], indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_edit_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api, cpucount, ram):
+    """"""
+    Deploying an Image and Starting the VM and then Stopping
+    """"""
+    # Non-admin check of Starting a deployment created by different user
+    deploy_id = custom_lib_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, 403)
+"
+/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first, then starting and then stopping by a manager who has rights over the server",,,"@pytest.mark.parametrize(""cpucount"", [1], indirect=True)
+@pytest.mark.parametrize(""ram"", [100], indirect=True)
+@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+def test_deploy_edit_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api, cpucount, ram):
+    """"""
+    Deploying an Image and Starting the VM and then Stopping
+    """"""
+    # When the user is not part of the group that the manager manages
+    deploy_id = custom_lib_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True))
+
+    # When the user is part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_non_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))
+
+
+"
+/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first, then starting and then stopping by a manager who does not have rights over the server",,,"@pytest.mark.parametrize(""cpucount"", [1], indirect=True)
+@pytest.mark.parametrize(""ram"", [100], indirect=True)
+@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+def test_deploy_edit_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api, cpucount, ram):
+    """"""
+    Deploying an Image and Starting the VM and then Stopping
+    """"""
+    # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server
+    deploy_id = custom_lib_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False))
+
+    # When the user is part of the group that the manager manages but the deployment is not on manager rightful server
+    deploy_id = custom_lib_non_admin_operations
+    params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False))
+
+
+"
+/ideploy/rest/edit/,editing an image of a deployed island which is in running state,,,"def test_deploy_edit_running_vm(run_api, deploy_start):
+    """"""
+    Edit a running VM
+    """"""
+    params, x = deploy_start
+    machine_id = x.json()[""uuid""]
+    params, r = run_api.deploy_edit(deploy_id=machine_id)
+    test_assert.status(r, 400), ""The error is %s"" % r.json()
+    assert r.json()[""error""] == 'Machine can only be edited when it is in stopped state'
+"
+/ideploy/rest/edit/,editing an image of a deployed island for a machine that does not exist,"{
+uuid : ""invalid""
+}","{
+  ""status"" : 404,
+  ""message"" : 'Machine with uuid does not exist'
+}","def test_deploy_edit_invalid_name(run_api):
+    """"""
+    Edit a machine that does not exist
+    """"""
+    uuid = ""invalid""
+    params, r = run_api.deploy_edit(deploy_id=uuid)
+    test_assert.status(r, 404), ""The error is %s"" % r.json()
+    assert r.json()[""error""] == 'Machine with uuid [invalid] does not exist'
+"
+/ideploy/rest/edit/,"editing an image of a deployed island, providing a machine name that contains a slash","{
+uuid : ""/invalid/name""
+}","{
+  ""status"" : 400,
+  ""message"" : ""Name cannot contain '/'""
+}","def test_deploy_edit_with_invalid_name(run_api, deploy_image):
+    """"""
+    To edit the machine and pass the name with '/' in it
+    """"""
+    params, r = deploy_image
+    machine_id = r.json()[""uuid""]
+    invalid_name = ""/invalid/name/""
+    params, x = run_api.deploy_edit(deploy_id=machine_id, name=invalid_name)
+    test_assert.status(x, 400)
+    assert x.json()[""error""] == ""Name cannot contain '/'""
+"
+/ideploy/rest/edit/,"editing an image of a deployed island, providing a machine name longer than 100 characters",,"{
+  ""status"" : 400,
+  ""message"" : 'Name cannot be longer than 100 characters'
+}","def test_deploy_edit_with_very_long_name(run_api, deploy_image):
+    """"""
+    To edit the machine and pass the name with more than 100 characters
+    """"""
+    invalid_name = rand_string(char_size=101)
+    params, r = deploy_image
+    machine_id = r.json()[""uuid""]
+    params, x = run_api.deploy_edit(deploy_id=machine_id, name=invalid_name)
+    test_assert.status(x, 400)
+    assert x.json()[""error""] == 'Name cannot be longer than 100 characters'
+"
+/deploy/rest/get_ip/,fetching the IP of a deployed machine which is currently in stopped state,,"{
+  ""status"" : 400,
+  ""message"" : ""Machine is not in running state so cannot fetch ip""
+}","def test_deploy_get_ip_stopped_machine(run_api, deploy_image):
+    """"""
+    get_ip of a deployed machine which is stopped
+    """"""
+    template, r = deploy_image
+    deploy_id = r.json()[""uuid""]
+    r = run_api.deploy_get_ip(deploy_id)
+    res = r.json()
+    test_assert.status(r, 400)
+    assert res[""result""] == ""FAILURE""
+    assert f""Machine with uuid `{deploy_id}` is not in running state so cannot fetch ip"" in res[""error""], res
+
+
+"
+/deploy/rest/protect/,successfully protecting the machine deployment,,"{
+  ""status"" : 200
+}","def test_deploy_protect(deploy_protect):
+    """"""
+    Protect a mc Deployment
+    """"""
+    r = deploy_protect
+    test_assert.status(r, 200)
+"
+/deploy/rest/protect/,protecting the machine deployment by an admin user,,"{
+  ""status"" : 200
+}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_protect_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api):
+    """"""
+    Protect a mc Deployment by Admin
+    """"""
+    # Admin check for Protect a mc Deployment created by different user.
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, 200)
+    r = run_api.deploy_unprotect(deploy_id)
+"
+/deploy/rest/protect/,protecting the machine deployment by non-admin user,,"{
+  ""status"" : 403
+}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_protect_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api):
+    """"""
+    Protect a mc Deployment by non-Admin
+    """"""
+    # Non-admin check for Protect a mc Deployment by different user.
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, 403)
+    r = run_api.deploy_unprotect(deploy_id)
+"
+/deploy/rest/protect/,protecting the machine deployment by a manager who has rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True)
+def test_deploy_protect_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Protect a mc Deployment
+    """"""
+    # When the user is not part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True))
+    r = run_api.deploy_unprotect(deploy_id)
+
+    # When the user is part of the group that the manager manages and deployment is on manager rights to server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))
+    r = run_api.deploy_unprotect(deploy_id)
+"
+/deploy/rest/protect/,protecting the machine deployment by a manager who does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+def test_deploy_protect_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Protect a mc Deployment
+    """"""
+    # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False))
+    r = run_api.deploy_unprotect(deploy_id)
+
+    # When the user is part of the group that the manager manages but the deployment is not on manager rightful server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False))
+    r = run_api.deploy_unprotect(deploy_id)
+"
+/deploy/rest/protect/,protecting the machine deployment using a deploy id for which no deployment exists,,"{
+  ""status"" : 404,
+  ""message"" : ""Deployed Machine Protect: Unable to find deployment""
+}","
+def test_deploy_protect_invalid_uuid(run_api):
+    """"""
+    Protect an invalid mc Deployment
+    """"""
+    deploy_id = ""invalid""
+    r = run_api.deploy_protect(deploy_id)
+    res = r.json()
+    test_assert.status(r, 404)
+    assert res[""result""] == 'FAILURE', res
+    assert ""DeployedMachineProtect: Unable to find deployment"" in res[""error""], res
+"
+/deploy/rest/protect/,protecting the machine deployment which is part of an island,,"{
+  ""status"" : 400,
+  ""message"" : ""Protecting deployment which is part of island is not allowed""
+}","
+def test_deploy_protect_island_machine_uuid(run_api, ideploy_details):
+    """"""
+    Protect a mc Deployment which is part of an island
+    """"""
+    param, result = ideploy_details
+    x = result.json()
+    machine_deploy_id = x[""machines""][0][""uuid""]
+    r = run_api.deploy_protect(machine_deploy_id)
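+    # machine_deploy_id belongs to an island deployment, so the protect call
+    # must be refused: the API rejects protecting island members (400 below).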
+    res = r.json()
+    test_assert.status(r, 400)
+    assert res[""result""] == 'FAILURE', res
+    assert ""Protecting deployment which is part of island is not allowed"" in res[""error""], res
+"
+/deploy/rest/unprotect/,unprotecting the machine deployment successfully,,"{
+  ""status"" : 200
+}","def test_deploy_unprotect(deploy_unprotect):
+    """"""
+    Un-Protect a mc Deployment
+    """"""
+    r = deploy_unprotect
+    test_assert.status(r, 200)
+"
+/deploy/rest/unprotect/,unprotecting the machine deployment by an admin user,,"{
+  ""status"" : 200
+}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_unprotect_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api):
+    """"""
+    Un-Protect a mc Deployment by Admin
+    """"""
+    # Admin check for Un-Protect a mc Deployment created by different user.
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    r = run_api.deploy_unprotect(deploy_id)
+    test_assert.status(r, 200)
+"
+/deploy/rest/unprotect/,unprotecting the machine deployment by a non-admin user,,"{
+  ""status"" : 403
+}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True)
+def test_deploy_unprotect_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api):
+    """"""
+    Un-Protect a mc Deployment by non-Admin
+    """"""
+    # Non-admin check for Un-Protect a mc Deployment by different user.
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    r = run_api.deploy_unprotect(deploy_id)
+    test_assert.status(r, 403)
+"
+/deploy/rest/unprotect/,unprotecting the machine deployment by a manager who does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True)
+def test_deploy_unprotect_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api):
+    """"""
+    Un-Protect a mc Deployment
+    """"""
+    # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server
+    deploy_id = custom_lib_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    r = run_api.deploy_unprotect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False))
+
+    # When the user is part of the group that the manager manages but the deployment is not on manager rightful server
+    deploy_id = custom_lib_non_admin_operations
+    r = run_api.deploy_protect(deploy_id)
+    r = run_api.deploy_unprotect(deploy_id)
+    test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False))
+
+
+"
+/deploy/rest/unprotect/,unprotecting the machine deployment using a deploy id for which no deployment exists,"{
+deploy_id : ""invalid""
+}","{
+  ""status"" : 404,
+  ""message"" : ""Deployed Machine Unprotect: Unable to find deployment""
+}","def test_deploy_unprotect_invalid_uuid(run_api):
+    """"""
+    Un-Protect an invalid mc Deployment
+    """"""
+    deploy_id = ""invalid""
+    r = run_api.deploy_unprotect(deploy_id)
+    res = r.json()
+    test_assert.status(r, 404)
+    assert res[""result""] == 'FAILURE', res
+    assert ""DeployedMachineUnprotect: Unable to find deployment"" in res[""error""], res
+"
+/deploy/rest/unprotect/,"unprotecting the machine deployment where the deployment is part of an island. Check the user type before performing the operation; only the admin user type has permission to perform such operations.
+",,"{
+  ""status"" : 400,
+  ""message"" : ""UnProtecting deployment which is part of island is not allowed""
+}","
+def test_deploy_unprotect_island_machine_uuid(run_api, ideploy_details):
+    """"""
+    Un-Protect a mc Deployment which is part of an island
+    """"""
+    param, result = ideploy_details
+    x = result.json()
+    machine_deploy_id = x[""machines""][0][""uuid""]
+    r = run_api.deploy_unprotect(machine_deploy_id)
+    res = r.json()
+    test_assert.status(r, 400)
+    assert res[""result""] == 'FAILURE', res
+    assert ""UnProtecting deployment which is part of island is not allowed"" in res[""error""], res
+"
+/group/rest/add-ldap-user/,"adding an ldap user to a group using a group id that does not exist. Check the user type before performing the operation; only the admin user type has permission to perform such operations.
+","{
+group_id : 0
+}","{
+  ""status"" : 400,
+  ""message"" : ""Group does not exist""
+}","def test_group_add_ldap_user_invalid_group_id(run_api):
+    """"""
+    group add ldap user invalid group id
+    """"""
+    group_id = 0
+    r = run_api.group_add_ldap_user(group_id, params={""users_list"": []})
+    if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]:
+        test_assert.status(r, 403)
+    elif run_api.user_type == USER_TYPE[""admin""]:
+        test_assert.status(r, 400)
+        rjson = r.json()
+        assert rjson['error'] == ""Group does not exist"", ""|> Json %s"" % rjson
+"
+/group/rest/add-ldap-user/,"adding an ldap user to a group without the user list. Check the user type before performing the operation; only the admin user type has permission to perform such operations.
+",,"{
+  ""status"" : 400
+}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True)
+def test_group_add_ldap_user_without_user_list(custom_group_admin_operations, run_api):
+    """"""
+    Group add ldap user without user list
+    """"""
+    params, r = custom_group_admin_operations
+    group_id = r.json()['id']
+    r = run_api.group_add_ldap_user(group_id, params={})
+    if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]:
+        test_assert.status(r, 403)
+    elif run_api.user_type == USER_TYPE[""admin""]:
+        test_assert.status(r, 400)
+        rjson = r.json()
+        assert rjson['error'] == ""'users_list'"", ""|> Json %s"" % rjson
+"
+/group/rest/promote-to-manager/,"promoting a normal user to manager. Check the user type before performing the operation; only the admin user type has permission to perform such operations.
+",,"{
+  ""status"" : 202
+}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True)
+def test_group_promote_to_manager(run_api, custom_group_admin_operations):
+    """"""
+    Promote from Normal user to Manager
+    """"""
+    params, r = custom_group_admin_operations
+    group_id = params[""group_id""]
+    users_list = params[""users_list""]
+    ret = run_api.promote_to_manager(group_id, params={""users_list"": users_list})
+    if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]:
+        test_assert.status(ret, 403)
+    elif run_api.user_type == USER_TYPE[""admin""]:
+        test_assert.status(ret, 202)
+"
+/group/rest/promote-to-manager/,"promoting a normal user to manager using an invalid group id. Check the user type before performing the operation; only the admin user type has permission to perform such operations.
+","{ + group_id = 0 + users_list = [0] +}","{ + ""status"" : 400, + ""message"" : ""Group does not exist."" +}","def test_group_promote_to_manager_invaild_group_id(run_api): + """""" + invalid group id + """""" + group_id = 0 + users_list = [0] + r = run_api.promote_to_manager(group_id, params={""users_list"": users_list}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'Group does not exist', ""|> Json %s"" % rjson +" +/group/rest/promote-to-manager/,"promoting a normal user to manager using invalid user id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + group_id, + users_list = [0] +}","{ + ""status"" : 207, + ""message"" : User [0] isn't part of the group"" +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True) +def test_group_promote_to_manager_invaild_user_id(custom_group_admin_operations, run_api): + """""" + User does not exits + """""" + params, r = custom_group_admin_operations + group_id = r.json()['id'] + group_name = params['name'] + users_list = [0] + r = run_api.promote_to_manager(group_id, params={""users_list"": users_list}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 207) + rjson = r.json() + assert rjson['failure'][0]['error'] == f""User [0] isn't part of the group [{group_name}]"", ""|> Json %s"" % rjson + + +" +/group/rest/promote-to-manager/,"promoting a user to manager, where the user already has the managerial rights.",{,"{ + ""status"" : 207, + ""message"" :""User already has 'Manager' rights"" +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_promote_to_manager_who_has_already_manager_rights(skip_if_not_manager, custom_group_admin_operations, admin_exec_api): + """""" + Group promote to manager who has already manager rights + """""" + params, r = custom_group_admin_operations + group_id = params['group_id'] + users_list = params['users_list'] + r = admin_exec_api.promote_to_manager(group_id, params={""users_list"": users_list}) + test_assert.status(r, 207) + rjson = r.json() + assert rjson['failure'][0]['error'] == ""User already has 'Manager' rights"", ""|> Json %s"" % rjson + +" +/group/rest/promote-to-manager/,"promoting a normal user to manager without providing the user list. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 400 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True) +def test_group_promote_to_manager_without_user_list(custom_group_admin_operations, run_api): + """""" + Group promote to manager with out user list + """""" + params, r = custom_group_admin_operations + group_id = r.json()['id'] + r = run_api.promote_to_manager(group_id, params={}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""'users_list'"", ""|> Json %s"" % rjson +" +/group/rest/update/,"updating a group successfully. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 202 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_update(skip_if_manager, run_api, custom_group_admin_operations): + """""" + Update group + """""" + template, r = custom_group_admin_operations + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(template, result, ""group_update"") + test_assert.status(r, 202) +" +/group/rest/update/,updating a group where the manager is part of that group.,,,"@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_update_as_manager_of_the_group(skip_if_not_manager, run_api, custom_group_admin_operations): + """""" + Update group for which the manager is part of + """""" + # if not run_api.user_type != USER_TYPE[""manager""]: + # pytest.skip(""skipped"") + params, r = custom_group_admin_operations + params, r = run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/group/rest/update/,updating a group where the manager is not part of that group.,,,"@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_NORMAL}], indirect=True) +def test_group_update_as_non_manager_of_the_group(skip_if_not_manager, run_api, custom_group_admin_operations): + """""" + Update group for which the manager is not part of + """""" + # if not run_api.user_type != USER_TYPE[""manager""]: + # pytest.skip(""skipped"") + params, r = custom_group_admin_operations + params, r = run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + +" +/group/rest/update/,updating a group by a manager,,," +@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_update_as_manager(skip_if_not_manager, custom_group_admin_operations, run_api): + """""" + Group Update by Manager + """""" + params, r = custom_group_admin_operations + params, r = run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/group/rest/update/,"updating a group by providing empty group name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""name"": """", + ""deployment_strategy"": ""roundrobin"" + }","{ + ""status"" : 400, + ""message"" : ""Group Name is required and it can not be blank"" +}","def test_group_update_blank_name(skip_if_not_admin, group_add, run_api): + """""" + update blank group name + """""" + params, r = group_add + group_id = r.json()['id'] + group_param = { + ""name"": """", + ""deployment_strategy"": ""roundrobin"" + } + updated_param, r = run_api.group_update(group_id, group_param) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" +" +,Fetch the ip of Deployed machine,,,"def deploy_get_ip(self, uuid): + """""" + Fetch the ip of Deployed machine + """""" + url = endpoints[""deploy_get_ip""] % uuid + deployment = self.get(url) + return deployment +" +,Get the information about a specific machine using lib id,,,"def deploy_machine_get(self, uuid, wait=False): + """""" + Get the information about a specific machine using lib id + """""" + url = endpoints[""deploy_get""] % uuid + deployment = self.get(url) + if wait: + depl_json = deployment.json() + wait_to_complete(self, depl_json, wait) + return deployment +" +,To get console details of deployed machine,,,"def deploy_machine_console(self, uuid): + """""" + To get console details of deployed machine + """""" + url = endpoints[""deploy_console""] % uuid + return self.get(url) +" +,Changes ownership of machine,,,"def deploy_change_ownership(self, params={}): + """""" + Changes ownership of machine + """""" + url = endpoints[""deploy_change_ownership""] + return self.post(url, json=params) +" +,Adding tags to machines,,,"def deploy_add_tags(self, params={}): + """""" + Adding tags to machines + """""" + url = endpoints[""deploy_add_tags""] + return self.post(url, json=params) +" +,Checking license warning message,,,"def license_check(self, days=0): + """""" + Checking license warning message + """""" + params = {""days"": days} + url = create_filtered_url(endpoints['license_check'], params) + return self.get(url) +" +,Listing all license,,,"def license_list(self, params={}): + """""" + Listing all license + """""" + url = endpoints[""license_list""] + if params: + url = create_filtered_url(url, params) + return self.get(url) +" +,retrieves deployment snapshot details,,,"def deployment_snapshot_state_details(self, id): + """""" + retrieves deployment snapshot details + """""" + url = endpoints[""ideploy_snapshot""] % id + r = self.post(url) + current_state = self.ideploy_details(uuid=id).json()[""state""] + return r, current_state +" +,demote a user from manager role,,,"def demote_from_manager(self, id, params={""users_list"": []}): + """""" + demote a user from manager role + """""" + url = endpoints[""demote_from_manager""] % id + r = self.post(url, json=params) + return r +" +,promotes a user with a specified ID to a manager role,,,"def promote_to_manager(self, id, params={""users_list"": []}): + """""" + promotes a user with a specified ID to a manager role + """""" + url = endpoints[""promote_to_manager""] % id + r = self.post(url, json=params) + return r +" +,Set the value of autostart and autoresume,,,"def configure_autostart(self, uuid, params={}): + """""" + Set the value of autostart and autoresume. 
+    """"""
+    url = endpoints[""configure_autostart""] % uuid
+    r = self.put(url, json=params)
+    return r
+"
+,Retrieves the filter fields for island deployments,,,"def ideploy_filter_fields(self):
+    """"""
+    get deploy filter fields
+    """"""
+    url = endpoints[""ideploy_filter_fields""]
+    return self.get(url)
+"
+,Adding a tag to an island deployment,,,"def ideploy_add_tag(self, param):
+    """"""
+    add tag
+    """"""
+    url = endpoints[""ideploy_add_tag""]
+    r = self.post(url, json=param)
+    return r
+"
+,Change ownership of an island deployment,,,"def ideploy_change_ownership(self, param={}):
+    """"""
+    Change ownership of an island
+    """"""
+    url = endpoints[""ideploy_change_ownership""]
+    r = self.post(url, json=param)
+    return r
+"
+,Stops a segment on an island Deployment,,,"def ideploy_segment_stop(self, uuid, **kwargs):
+    """"""
+    Stops segment on an island Deployment
+    """"""
+    url = endpoints[""ideploy_segment_stop""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait = kwargs.get('wait', True)
+    wait_to_complete(self, rjson, wait)
+    return r
+"
+,Starts a segment on an island Deployment,,,"def ideploy_segment_start(self, uuid, **kwargs):
+    """"""
+    Starts segment on an island Deployment
+    """"""
+    url = endpoints[""ideploy_segment_start""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait = kwargs.get('wait', True)
+    wait_to_complete(self, rjson, wait)
+    return r
+"
+,stops all present machines associated with a given UUID for island deployment,,,"def ideploy_stop(self, uuid, error=False):
+    """"""
+    stops all present machines associated with a given UUID for island deployment
+    """"""
+    if error:
+        url = endpoints[""ideploy_stop""] % uuid
+        r = self.post(url)
+        return r
+
+    # The current operation is Shutdown which, being graceful, won't work for dummy machines. So, the way it goes is:
+    # it stops all the present machines and then triggers the API for Island Shutdown to stop segments and process cleanup.
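+    # Flow: read the island's details to collect its machine uuids, then issue a
+    # single bulk 'poweroff' over them (see the deploy_bulkops call at the end).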
+
+    r_details = self.ideploy_details(uuid)
+    try:
+        machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]]
+    except Exception:
+        # Avoid raising error in api_ops, but returned value will get it raised in returned function if needed
+        # NOTE: Avoiding for the case where user does not have permission to fetch the detail but still called as part of Cleanup
+        return None
+    deploy_bulkops_params = {
+        ""machine_list"": machine_uuids,
+        ""op"": ""poweroff""
+    }
+    return self.deploy_bulkops(deploy_bulkops_params)
+"
+,Shuts down an island Deployment,,,"def ideploy_shutdown(self, uuid, **kwargs):
+    """"""
+    Shuts down an island Deployment
+    """"""
+    url = endpoints[""ideploy_shutdown""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait_to_complete(self, rjson)
+    return r
+"
+,Starts an island Deployment,,,"def ideploy_start(self, uuid, **kwargs):
+    """"""
+    Starts an island Deployment
+    """"""
+    url = endpoints[""ideploy_start""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait = kwargs.get(""wait"", True)
+    wait_to_complete(self, rjson, wait)
+    return r
+"
+,Takes a Snapshot of an island Deployment,,,"def ideploy_snapshot(self, uuid, **kwargs):
+    """"""
+    Takes Snapshot of an island Deployment
+    """"""
+    url = endpoints[""ideploy_snapshot""] % uuid
+    description = kwargs.get('description', '')
+    params = {}
+    if description:
+        params['description'] = description
+    r = self.post(url, json=params)
+    rjson = r.json()
+    wait = kwargs.get(""wait"", True)
+    rtask_details = wait_to_complete(self, rjson, wait, wait_time=60)
+    return r, rtask_details
+"
+,Resumes an island Deployment,,,"def ideploy_resume(self, uuid, **kwargs):
+    """"""
+    Resumes an island Deployment
+    """"""
+    url = endpoints[""ideploy_resume""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait = kwargs.get('wait', True)  # When called with anonymous_exec_api or invalid_exec_api, the response carries no error or job_uuid, so an Exception is raised
+    wait_to_complete(self, rjson, wait)
+    return r
+"
+,Pauses an island Deployment,,,"def ideploy_pause(self, uuid, **kwargs):
+    """"""
+    Pauses an island Deployment
+    """"""
+    url = endpoints[""ideploy_pause""] % uuid
+    r = self.post(url)
+    rjson = r.json()
+    wait = kwargs.get(""wait"", True)
+    wait_to_complete(self, rjson, wait)
+    return r
+"
+,Fetches the lists of island Deployments,,,"def ideploy_list(self, params={}):
+    """"""
+    Fetches the lists of island Deployments
+    """"""
+    url = endpoints[""ideploy_list""]
+    if params:
+        url = create_filtered_url(url, params)
+    r = self.get(url)
+    return r
+"
+,Edits an island Deployment,,,"def ideploy_edit(self, uuid, **kwargs):
+    """"""
+    Edits an island Deployment
+    """"""
+    params = kwargs[""params""]
+    url = endpoints[""ideploy_edit""] % uuid
+    r = self.put(url, json=params)
+    return r
+"
+,Fetches details of an island Deployment,,,"def ideploy_details(self, uuid, **kwargs):
+    """"""
+    Fetches details of an island Deployment
+    """"""
+    url = endpoints[""ideploy_details""] % uuid
+    r = self.get(url)
+    return r
+"
+,Deploys an island library,,,"def ideploy_deploy(self, uuid, deploy_on=[], **kwargs):
+    """"""
+    Deploys an island library
+    """"""
+    wait = kwargs.get(""wait"", True)
+    payload = {}
+    payload[""server_list""] = deploy_on
+    if not deploy_on:
+        payload[""server_list""] = self.deploy_on
+    if kwargs.get(""group_list""):
+        payload[""group_list""] = kwargs[""group_list""]
+    if kwargs.get(""name""):
+        payload[""name""] = kwargs[""name""]
+    if kwargs.get(""tag_list""):
+        payload[""tag_list""] = kwargs['tag_list']
+    url = 
endpoints[""ideploy_deploy""] % uuid + r = self.post(url, json=payload) + rjson = r.json() + wait_to_complete(self, rjson, wait) + return r +" +,Deletes an island Deployment,,,"def ideploy_delete(self, uuid, wait=True, **kwargs): + """""" + Deletes an island Deployment + """""" + url = endpoints[""ideploy_delete""] % uuid + r = self.delete(url) + rjson = r.json() + wait_to_complete(self, rjson, wait) + return r +" +,Fetch lists of island library revisions.,,,"def ilibrary_revisions(self, uuid, params={}): + """""" + Fetch lists of island library revisions. + """""" + url = endpoints[""ilibrary_revisionslist""] + params['uuid'] = uuid + if params: + url = create_filtered_url(url, params) + ret = self.get(url) + return ret +" +,Fetch lists of island libraries.,,,"def ilibrary_list_island(self, params={}): + """""" + Fetch lists of island libraries. + """""" + url = endpoints[""ilibrary_list""] + if params: + url = create_filtered_url(url, params) + ret = self.get(url, json=params) + return ret +" +,Edit island library.,,,"def ilibrary_edit_island(self, uuid, noraise=0, **kwargs): + """""" + Edit island library. + """""" + if ""params"" not in kwargs: + params = template_ilibrary_edit_island(**kwargs) + else: + params = kwargs[""params""] + ilib_url = endpoints[""ilibrary_edit""] % uuid + ret = self.put(ilib_url, json=params) + status_code = ret.status_code + rjson = ret.json() + rjson[""status_code""] = status_code + return params, ret +" +,Fetch Details of island library.,,,"def ilibrary_details(self, uuid): + """""" + Fetch Details of island library. + """""" + url = endpoints[""ilibrary_details""] % uuid + ret = self.get(url) + return ret +" +,"Deleting bulk of Island Library +",,,"def ilibrary_bulk_delete(self, params): + """""" + Deleting bulk of Island Library + """""" + url = endpoints[""ilibrary_bulk_delete""] + ret = self.delete(url, json=params) + return ret +" +,"Delete island library. +",,,"def ilibrary_delete(self, uuid, params={}): + """""" + Delete island library. + """""" + url = endpoints[""ilibrary_delete""] % uuid + ret = self.delete(url, json=params) + return ret +" +,"Clone an island Library +",,,"def ilibrary_clone_island(self, uuid, params=None): + """""" + Clone an island Library + """""" + url = endpoints[""ilibrary_clone""] % uuid + if params is None: + params = template_ilibrary_clone_island() + ret = self.post(url, json=params) + return params, ret +" +,"Add new island library. +",,,"def ilibrary_add_new_island(self, noraise=0, **kwargs): + """""" + Add new island library. 
+ """""" + if 'params' not in kwargs: + params = template_ilibrary_add_new_island(**kwargs) + else: + params = kwargs[""params""] + ilib_url = endpoints[""ilibrary_add""] + ret = self.post(ilib_url, json=params) + status_code = ret.status_code + rjson = ret.json() + rjson[""status_code""] = status_code + if noraise: + return params, ret + if 'params' not in kwargs: + assert int(status_code) == 201, f""Json response:- {rjson}"" + return params, ret +" +,"Template for deploy Bulk Operations +",,,"def template_deploy_bulkops(self, library_add_new_vm, count=10, op='stop'): + """""" + Template for deploy Bulk Operations + """""" + deploy_list = [] + for i in range(0, count): + params, r = library_add_new_vm + lib_id = r[""uuid""] + deploy_url = endpoints[""deploy_add""] % lib_id + r = self.post(deploy_url) + x = r.json() + deploy_id = x[""uuid""] + deploy_list.append(deploy_id) + # op = ""delete"" + deploy = { + ""machine_list"": deploy_list, + ""op"": op + } + # wait for deployment tasks to be finished + wait_to_complete(self, x) + return deploy +" +,"List of shares of a machine +",,,"def shares_list(self): + """""" + List of shares of a machine + """""" + share_url = endpoints[""shares_list""] + return self.get(share_url) +" +,Adding new object to the vm,,,"def shares_add(self, vm_uuid): + """""" + Adding new object to the vm + """""" + url = endpoints[""shares_add""] % vm_uuid + return self.post(url) +" +,changing user ownership,,,"def user_change_ownership(self, owner=None, dest_user=None): + """""" + user change ownership + """""" + params = { + 'owner': owner, + 'dest_user': dest_user + } + user_url = endpoints[""user_change_ownership""] + return self.post(user_url, json=params) +" +,Logging out the user,,,"def user_logout(self): + """""" + Logging out the user + """""" + user_url = endpoints[""user_logout""] + return self.post(user_url) +" +,returning the list of users,,,"def user_list(self, params={}): + """""" + Returns the list of users + """""" + user_url = endpoints[""user_list""] + if params: + user_url = create_filtered_url(user_url, params) + return self.get(user_url) +" +,Fetching the details of User,,,"def user_details(self, **kwargs): + """""" + Fetching the details of User + """""" + params = template_user_details() + id = kwargs.get(""id"", params[""id""]) + user_url = endpoints[""user_details""] % id + return params, self.get(user_url) +" +,Deleting the particular task,,,"def rtask_delete(self, uuid): + """""" + Deleting the particular task + """""" + rtask_url = endpoints[""rtask_delete""] % uuid + return self.delete(rtask_url) +" +,Fetching the details of a task,,,"def rtask_details(self, uuid): + """""" + Fetching the details of a task + """""" + rtask_url = endpoints[""rtask_details""] % uuid + return self.get(rtask_url) +" +,Listing the status of the task,,,"def rtask_list_status(self, params): + """""" + Listing the status of the task + """""" + rtask_url = endpoints[""rtask_liststatus""] + return self.post(rtask_url, json=params) +" +,Listing the children tasks ,,,"def rtask_list_children(self, uuid): + """""" + Listing the tasks list + """""" + rtask_url = endpoints[""rtask_list_children""] % uuid + return self.get(rtask_url) +" +,Listing the tasks,,,"def rtask_list(self, params={}): + """""" + Listing the tasks list + """""" + rtask_url = endpoints[""rtask_list""] + if params: + rtask_url = create_filtered_url(rtask_url, params) + return self.get(rtask_url) +" +,Listing the tasks rlist,,,"def rtask_rlist(self, params={}): + """""" + Listing the tasks rlist + """""" 
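+ # Same listing pattern as rtask_list: filter params such as page or
+ # page_size are appended to the URL via create_filtered_url.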
+ rtask_url = endpoints[""rtask_rlist""] + if params: + rtask_url = create_filtered_url(rtask_url, params) + return self.get(rtask_url) +" +,Set group for profile,,,"def profile_set_group(self, user_id, params): + """""" + Set group for profile + """""" + url = endpoints['profile_set_group'] % user_id + return self.post(url, json=params) +" +,Fetch self profile details,,,"def profile_self(self): + """""" + Fetch self profile details + """""" + url = endpoints['profile_self'] + return self.get(url) +" +,Fetching details of profile by user-id,,,"def profile_details(self, user_id): + """""" + Fetching details of profile by user-id + """""" + url = endpoints['profile_details'] % user_id + return self.get(url) +" +,listing all profiles,,,"def profile_list(self): + """""" + List all profiles + """""" + url = endpoints['profile_list'] + return self.get(url) +" +,get google auth client_id,,,"def get_google_auth_client_id(self): + """""" + get google auth client_id + """""" + url = endpoints['get_google_auth_client_id'] + return self.get(url) +" +,uploading ssl to config,,,"def config_upload_ssl(self, params={}): + """""" + config upload ssl + """""" + url = endpoints['config_upload_ssl'] + cert_file = params.get('cert_file', None) + key_file = params.get('key_file', None) + files = {""cert_file"": cert_file, + ""key_file"": key_file} + return self.post(url, files=files) +" +,enabling ssl to config,,,"def config_enable_ssl(self): + """""" + config enable ssl + """""" + url = endpoints['config_enable_ssl'] + return self.put(url) +" +,disabling ssl to config,,,"def config_disable_ssl(self): + """""" + config disable ssl + """""" + url = endpoints['config_disable_ssl'] + return self.put(url) +" +,get ldap of config ,,,"def config_ldap_get(self): + """""" + get config ldap + """""" + url = endpoints['config_ldap_get'] + return self.get(url) +" +,setting config values,,,"def config_set(self, params): + """""" + Set the config values + """""" + config_url = endpoints[""config_set""] + return self.post(config_url, json=params) +" +,Getting the osversion,,,"def config_osversion(self): + """""" + Getting the osversion + """""" + config_url = endpoints[""config_osversion""] + return self.get(config_url) + +" +,Getting the values of variables stored in DB,,,"def config_get_name(self, name): + """""" + Getting the values of variables stored in DB + """""" + config_url = endpoints[""config_get_name""] % name + return self.get(config_url) +" +,Getting the values of config,,,"def config_get(self): + """""" + Getting the values of config + """""" + config_url = endpoints[""config_get""] + return self.get(config_url) +" +,"delete the config values +",,,"def config_delete(self, params): + """""" + delete the config values + """""" + config_url = endpoints[""config_delete""] + return self.delete(config_url, json=params) +" +,"Fetching the Build Number and Version +",,,"def config_version(self): + """""" + Fetching the Build Number and Version + """""" + config_url = endpoints[""config_version""] + return self.get(config_url) +" +,Adding user into multiple groups,,,"def user_add_group(self, user_id, groups=[]): + """""" + Adding user into multiple groups + """""" + name = f""pytest_clm_{rand_string()}_group"" + group_template = template_add_group(name) + template = template_user_add_group(group_template) + if groups: + template[""groups""] = groups + user_url = endpoints[""user_add_group""] % user_id + return template, self.post(user_url, json=template) +" +,removing users from a group,,,"def group_remove_user(self, 
group_id, params={""users_list"": []}): + """""" + Removing users from a group + """""" + group_url = endpoints[""remove_user_group""] % group_id + return self.post(group_url, json=params) +"
+,Adding multiple users to a group,,,"def group_add_user(self, group_id, user_ids=[]): + """""" + Adding multiple users to a group + """""" + template = template_group_add_user() + if user_ids: + template[""users_list""] = user_ids + group_url = endpoints[""add_user_group""] % group_id + return template, self.post(group_url, json=template) +"
+,Showing the details of deployed image,,,"def deploy_details(self, deploy_id): + """""" + Showing the details of deployed image + """""" + deploy_url = endpoints[""deploy_details""] % deploy_id + return self.get(deploy_url) +"
+,Shutting down the deployed machine,,,"def deploy_shutdown(self, deploy_id, wait=True): + """""" + Shutting down the deployed machine + """""" + deploy_url = endpoints[""deploy_shutdown""] % deploy_id + depl_shutdown = self.get(deploy_url) + depl_json = depl_shutdown.json() + wait_to_complete(self, depl_json, wait) + return depl_shutdown +"
+,Listing the deployments of a particular VM,,,"def deploy_deploylist(self, params={}): + """""" + Listing the deployments of a particular VM + """""" + deploy_url = endpoints[""deploy_deploylist""] + if params: + deploy_url = create_filtered_url(deploy_url, params) + return self.get(deploy_url, json=params) +"
+,List of deployed images,,,"def deploy_list(self, params={}): + """""" + List of deployed images + """""" + deploy_url = endpoints[""deploy_list""] + if params: + deploy_url = create_filtered_url(deploy_url, params) + return self.get(deploy_url) +"
+,Bulk operations on deployed machines,,,"def deploy_bulkops(self, params, wait=True): + """""" + Bulk operations on deployed machines + """""" + deploy_url = endpoints[""deploy_bulkops""] + depl_bulk = self.post(deploy_url, json=params) + if wait: + tasks = depl_bulk.json()['success'] + for i in range(len(tasks)): + task = tasks[i] + wait_to_complete(self, task, wait) + return depl_bulk +"
+,Un-Protecting the protected deployed machine,,,"def deploy_unprotect(self, deploy_id): + """""" + Un-Protecting the protected deployed machine + """""" + deploy_url = endpoints[""deploy_unprotect""] % deploy_id + return self.post(deploy_url) +"
+,Protecting the deployed machine,,,"def deploy_protect(self, deploy_id): + """""" + Protecting the deployed machine + """""" + deploy_url = endpoints[""deploy_protect""] % deploy_id + return self.post(deploy_url) +"
+,Taking snapshot of the deployed machine,,,"def deploy_snapshot(self, deploy_id, description=None, wait=True): + """""" + Taking snapshot of the deployed machine + """""" + deploy_url = endpoints[""deploy_snapshot""] % deploy_id + data = {""description"": description} if description else {} + depl_snapshot = self.post(deploy_url, json=data) + depl_json = depl_snapshot.json() + wait_to_complete(self, depl_json, wait, wait_time=60) + return depl_snapshot"
+,Resetting the deployed machine,,,"def deploy_reset(self, deploy_id, wait=True): + """""" + Resetting the deployed machine + """""" + deploy_url = endpoints[""deploy_reset""] % deploy_id + depl_reset = self.get(deploy_url) + depl_json = depl_reset.json() + wait_to_complete(self, depl_json, wait) + return depl_reset +"
+,Rebooting the deployed machine,,,"def deploy_reboot(self, deploy_id, wait=True): + """""" + Rebooting the deployed machine + """""" + deploy_url = endpoints[""deploy_reboot""] % deploy_id + depl_reboot = self.get(deploy_url) + depl_json = 
depl_reboot.json() + wait_to_complete(self, depl_json, wait) + return depl_reboot +" +,Resuming the deployed machine,,,"def deploy_resume(self, deploy_id, wait=True): + """""" + Resuming the deployed machine + """""" + deploy_url = endpoints[""deploy_resume""] % deploy_id + depl_resume = self.get(deploy_url) + depl_json = depl_resume.json() + wait_to_complete(self, depl_json, wait) + return depl_resume +" +,Pausing the deployed machine,,,"def deploy_pause(self, deploy_id, wait=True): + """""" + Pausing the deployed machine + """""" + deploy_url = endpoints[""deploy_pause""] % deploy_id + depl_pause = self.get(deploy_url) + depl_json = depl_pause.json() + wait_to_complete(self, depl_json, wait) + return depl_pause +" +,Stopping the deployed machine,,,"def deploy_stop(self, deploy_id, wait=True): + """""" + Stopping the deployed machine + """""" + deploy_url = endpoints[""deploy_stop""] % deploy_id + depl_stop = self.get(deploy_url) + depl_json = depl_stop.json() + wait_to_complete(self, depl_json, wait) + return depl_stop +" +,Starting the deployed machine,,,"def deploy_start(self, deploy_id, wait=True): + """""" + Starting the deployed machine + """""" + deploy_url = endpoints[""deploy_start""] % deploy_id + depl_start = self.get(deploy_url) + depl_json = depl_start.json() + wait_to_complete(self, depl_json, wait) + return depl_start +" +,Retrieve XML of deployed machine,,,"def deploy_retrieve_xml(self, deploy_id): + """""" + Retrieve XML of deployed machine + """""" + deploy_url = endpoints[""deploy_XML""] % deploy_id + return self.get(deploy_url) +" +,Getting the mac address of a VM,,,"def deploy_mac_addr(self, deploy_id): + """""" + Getting the mac address of a VM + """""" + deploy_url = endpoints[""deploy_mac_addr""] % deploy_id + depl_mac = self.get(deploy_url) + return depl_mac +" +,Edit the XML for deployed machine,,,"def deploy_edit_xml(self, deploy_id): + """""" + Edit the XML for deployed machine + """""" + params = template_deploy_retrieve_xml() + deploy_url = endpoints[""deploy_XML""] % deploy_id + return params, self.post(deploy_url, json=params) +" +,Edit the Deployed Image,,,"def deploy_edit(self, deploy_id, **kwargs): + """""" + Edit the Deployed Image + """""" + params = template_deploy_edit() + if 'cpus' in kwargs.keys(): + params[""cpu""] = kwargs['cpus'] + if 'ram' in kwargs.keys(): + params[""ram""] = kwargs['ram'] + if 'name' in kwargs.keys(): + params[""name""] = kwargs['name'] + deploy_url = endpoints[""deploy_edit""] % deploy_id + return params, self.put(deploy_url, json=params) +" +,Crashing a Deployed Image,,,"def deploy_crash(self, deploy_id, wait=True): + """""" + Crashing a Deployed Image + """""" + deploy_url = endpoints[""deploy_crash""] % deploy_id + depl_crash = self.get(deploy_url) + depl_json = depl_crash.json() + wait_to_complete(self, depl_json, wait) + return depl_crash +" +,Get the details of deployed image,,,"def deploy_get_image(self, lib_id): + """""" + Get the details of deployed image + """""" + deploy_url = endpoints[""deploy_add""] % lib_id + deployment = self.post(deploy_url) + depl_json = deployment.json() + wait_to_complete(self, depl_json) + return deployment" +,Delete deployed Library,,,"def deploy_image_delete(self, deploy_id, params={}, wait=True): + """""" + Delete deployed Library + """""" + delete_deploy_url = endpoints[""deploy_delete""] % deploy_id + depl_del = self.delete(delete_deploy_url, json=params) + depl_json = depl_del.json() + wait_to_complete(self, depl_json, wait) + return depl_del +" +,filters a list of servers based on 
criteria provided in a dictionary,,,"def filter_servers_matching_with_criteria(run_api, criteria_dict, server_list): + """""" + Filters out the server_list based on criteria provided in the criteria_dict. + """""" + if not criteria_dict: + return [], [] + filtered_servers_hostname_list = [] + filtered_servers_id_list = [] + for server_id in server_list: + r = run_api.server_details(server_id) + rjson = r.json() + match = True + for key, value in criteria_dict.items(): + if key in rjson and rjson[key] != value: + match = False + break + if match: + filtered_servers_hostname_list.append(rjson[""hostname""]) + filtered_servers_id_list.append(rjson[""uuid""]) + return filtered_servers_hostname_list, filtered_servers_id_list"
+,deploy a library,,,"def deploy_image(self, lib_id, name=None, deploy_on=[], wait=True, **kwargs): + """""" + Deploy Library + """""" + servers_list = deploy_on + if not deploy_on: + servers_list = self.deploy_on + data = {""name"": name, ""server_list"": servers_list} if name else {""server_list"": servers_list} + data.update(kwargs) + deploy_url = endpoints[""deploy_add""] % lib_id + deployment = self.post(deploy_url, json=data) + if wait: + depl_json = deployment.json() + wait_to_complete(self, depl_json, wait) + return deployment +"
+,adding server to group,,,"def group_add_server(self, params, group_id): + """""" + Add Server in group + """""" + server_url = endpoints[""add_server_group""] % group_id + return self.post(server_url, json=params)"
+,"runs a database replication task on a specified server, allowing for optional waiting until completion.",,,"def server_dbreplication(self, server_id, wait=False, params={}): + """""" + dbreplication on given server + """""" + template = { + ""mhost_remote_user"": ""string"", + ""mhost_remote_password"": ""string"", + ""mysql_username"": """", + ""mysql_password"": """", + ""mysql_dir"": ""/var/lib"", + ""port"": 22 + } + template.update(params) + server_url = endpoints[""server_dbreplication""] % server_id + task = self.post(server_url, json=template) + tjson = task.json() + if 'job_uuid' in tjson and wait: + wait_to_complete(self, tjson, wait) + return task + +"
+,request to a server to cancel a sync repository task,,,"def server_cancelsyncrepo(self, task_id): + """""" + cancel a syncrepo task + """""" + server_url = endpoints[""server_cancelsyncrepo""] % task_id + res = self.post(server_url) + return res +"
+,request to a server to create a sync repository task,,,"def server_syncrepo(self, server_id, wait=False): + """""" + create a syncrepo task + """""" + server_url = endpoints[""server_syncrepo""] % server_id + task = self.get(server_url) + tjson = task.json() + if 'job_uuid' in tjson and wait: + wait_to_complete(self, tjson, wait) + return task +"
+,check if the server is ready to be upgraded.,,,"def server_readytoupgrade(self): + """""" + server ready to upgrade + """""" + server_url = endpoints[""server_readytoupgrade""] + return self.get(server_url)"
+,creates a server backup token,,,"def server_backup_token(self, params={}): + """""" + create server backup token + """""" + server_url = endpoints[""server_backup_token""] + return self.post(server_url, json=params) +"
+,retrieve a server backup manifest,,,"def server_backup_manifest(self, params={}): + """""" + server_backup manifest + """""" + server_url = endpoints[""server_backup_manifest""] + if params: + server_url = create_filtered_url(server_url, params) + return self.get(server_url, json=params)"
+,server completing backup process,,,"def 
server_backup_complete(self, params={}): + """""" + server backup complete + """""" + server_url = endpoints[""server_backup_complete""] + return self.post(server_url, json=params) +" +,preparing server backup process,,,"def server_prepare_for_backup(self, params={}): + """""" + server_prepare_for_backup + """""" + server_url = endpoints[""server_prepare_for_backup""] + return self.post(server_url, json=params) +" +,retrieving response data related to messed layers UUIDs from a server,,,"def server_messed_layers_uuids_response(self, server_id): + """""" + server_messed_layers_uuids_response path + """""" + server_url = endpoints[""server_messed_layers_uuids_response""] % server_id + return self.get(server_url) + +" +,retrieves UUIDs of messed layers from a server using its ID.,,,"def server_messed_layers_uuids(self, server_id): + """""" + server_messed_layers_uuids + """""" + server_url = endpoints[""server_messed_layers_uuids""] % server_id + return self.get(server_url) +" +,to fetch the SQL data directory path for a specified server ID.,,,"def server_fetch_sql_datadir_path(self, server_id, params={}): + """""" + server fetch sql datadir path + """""" + server_url = endpoints[""server_fetch_sql_datadir_path""] % server_id + return self.post(server_url, json=params) +" +,fixes messed layers for a server,,,"def server_fix_messed_layers(self, server_id): + """""" + server fix messed layers + """""" + server_url = endpoints[""server_fix_messed_layers""] % server_id + return self.get(server_url)" +,setting commitable_ram of a server,,,"def server_set_commmitable_ram(self, server_id, params={}): + """""" + server set_commmitable_ram + """""" + server_url = endpoints[""server_set_commmitable_ram""] % server_id + return self.post(server_url, json=params) +" +,"retrieves the number of remaining tasks associated with a server, optionally filtered by server ID if provided.",,,"def server_remaining_task(self, server_id=""""): + """""" + No of remaining task + """""" + server_url = endpoints[""server_remaning_task""] + if server_id: + server_url = create_filtered_url(server_url, {'server_uuid': server_id}) + r = self.get(server_url) + return r +" +,upgrade a server,,,"def server_upgrade(self, server_id, params={}): + """""" + upgrade a server + """""" + server_url = endpoints[""server_upgradeserver""] % server_id + r = self.put(server_url, json=params) + return r" +,deleting a server,,,"def server_delete(self, server_id): + """""" + Delete a server + """""" + server_url = endpoints[""server_delete""] % server_id + r = self.delete(server_url) + return r" +,"to test the connection, passing optional parameters in JSON format.",,,"def server_test_connection(self, params={}): + """""" + Test the Connection to the Server + """""" + url = endpoints[""server_test_connection""] + return self.post(url, json=params) +" +,fetches details of a server using its ID,,,"def server_details(self, server_id): + """""" + Fetch server details + """""" + server_url = endpoints[""server_details""] % server_id + r = self.get(server_url) + return r" +,"retrieves a list of servers using specified parameters, if any",,,"def server_list(self, params={}): + """""" + Listing the server + """""" + server_url = endpoints[""server_list""] + if params: + server_url = create_filtered_url(server_url, params) + r = self.get(server_url, json=params) + return r" +,unmarks a server for maintenance by sending a request to an admin execution API with the server's ID.,,,"def server_unmark_for_maintenance(admin_exec_api, server_id): + """""" + 
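Module-level helper (note: it takes admin_exec_api rather than self);
+ it wraps server_bulkops with op set to unmark_for_maintenance to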
UnMark server for maintenance + """""" + bulkops = { + ""server_list"": [server_id], + ""op"": 'unmark_for_maintenance' + } + admin_exec_api.server_bulkops(bulkops) +" +,mark a server for maintenance using an administrative execution API.,,,"def server_mark_for_maintenance(admin_exec_api, server_id): + """""" + Mark server for maintenance + """""" + bulkops = { + ""server_list"": [server_id], + ""op"": 'mark_for_maintenance' + } + admin_exec_api.server_bulkops(bulkops)" +,"performing bulk operations on a server by sending a request with specified parameters, including an optional operation parameter.",,,"def server_bulkops(self, params, operation=None): + """""" + Server Bulk Operations + """""" + if operation: + params[""op""] = operation + server_url = endpoints[""server_bulkops""] + return self.post(server_url, json=params)" +,adding new server,,,"def server_add_new(self, params): + """""" + Add new Server + """""" + url = endpoints[""server_add""] + return self.post(url, json=params) +" +,fetches a list of tags with optional filtering based on parameters and object UUID.,,,"def tag_list(self, params, filter_search): + """""" + Listing of all the tags + """""" + uuid = """" + if filter_search.get(""object_uuid""): + uuid = '&object_uuid=' + filter_search.get(""object_uuid"") + url = endpoints[""tags_list""] + result = self.get(url, json=params) + if result.status_code == 401: + return result # if we pass invalid token + result = result.json() + search = ""?page_size="" + str(result['count']) + url = endpoints[""tags_list""] + search + uuid + return self.get(url, json=params) +" +,deleting a tag,,,"def tag_delete(self, id, params): + """""" + Deleting added tag + """""" + url = endpoints[""tags_delete""] % id + return self.delete(url, json=params) +" +,adding new tag,,,"def tag_add(self, vm_uuid, params={}): + """""" + Adding new tag + """""" + if not params: + params = template_tag_add() + url = endpoints[""tags_add""] % vm_uuid + return params, self.post(url, json=params)" +,adding a LDAP user,,,"def group_add_ldap_user(self, group_id, params={'users_list': []}): + """""" + Added ldap user + """""" + group_url = endpoints[""add_ldap_user""] % group_id + return self.post(group_url, json=params)" +,deletes a list of groups either with or without additional parameters,,,"def group_bulk_delete(self, group_id_list, no_params=False): + """""" + Delete given list of groups. + """""" + url = endpoints[""group_bulk_delete""] + params = {""group_list"": group_id_list} + if no_params: + params = {} + return self.post(url, json=params)" +,"updates groups, using a provided UUID and optional parameters, defaulting to a template if none are provided",,,"def group_update(self, uuid, params={}): + """""" + Update groups. + """""" + if not params: + params = template_add_group() + url = endpoints[""group_update""] % uuid + return params, self.put(url, json=params)" +,to list groups,,,"def group_list(self, params={}): + """""" + List groups. + """""" + url = endpoints[""group_list""] + if params: + url = create_filtered_url(url, params) + return self.get(url, json=params) +" +,"Get details of created group +",,,"def group_details(self, uuid, params={}): + """""" + Get details of group created. 
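+ 
+ Usage sketch (illustrative; assumes a group created earlier via group_add):
+ params, resp = api.group_add()
+ details = api.group_details(resp.json()['id']).json()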
+ """""" + url = endpoints[""group_details""] % uuid + return self.get(url, json=params)" +, remove multiple servers from a specified group with the group ID and a list of servers to be removed as parameters.,,,"def group_remove_server(self, group_id, params={""servers_list"": []}): + """""" + Removing multiple servers from a group + """""" + group_url = endpoints[""remove_server_group""] % group_id + return self.post(group_url, json=params) +" +,deleting group,,,"def group_delete(self, id, params={}): + """""" + Delete group. + """""" + url = endpoints[""group_delete""] % id + return self.delete(url, json=params) +" +,adding new group,,,"def group_add(self, name=None, template=None): + """""" + Add new group + """""" + params = template_add_group(name) + if template is not None: + params = template + + url = endpoints[""add_group""] + return params, self.post(url, json=params) +" +,deleting the audit log,,,"def audit_delete(self, uuid, **kwargs): + """""" + Delete the audit log + """""" + url = endpoints[""audit_delete""] % uuid + return self.delete(url, json=kwargs['params']) +" +,fetching the details of audit,,,"def audit_details(self, uuid, **kwargs): + """""" + Detail of the audit + """""" + url = endpoints[""audit_details""] % uuid + return self.get(url, json=kwargs['params'])" +,listing audit logs,,,"def audit_list(self, params={}): + """""" + Listing the audit logs + """""" + url = endpoints[""audit_list""] + if params: + url = create_filtered_url(url, params) + return self.get(url, json=params)" +,uploads a Non-Volatile RAM (NVRAM) file to a library identified by 'lib_id' using a specified template.,,,"def library_upload_nvram(self, lib_id, file=None, template=""/opt/infracc/infracc/nvram_templates/OVMF_VARS.fd""): + """""" + upload nvram + """""" + lib_dir = os.path.dirname(os.path.abspath(__file__)) + nvram_path = os.path.join(os.path.dirname(lib_dir), + ""data/OVMF_VARS.fd"") + files = {'file': open(nvram_path, '+rb')} + data = {'template': template} + url = endpoints['lib_upload_nvram'] % lib_id + return self.post(url, data=data, files=files)" +,upload disk to library,,,"def library_upload_disk(self, lib_id, disk_uuid=None): + """""" + Upload disk + """""" + lib_dir = os.path.dirname(os.path.abspath(__file__)) + ova_path = os.path.join(os.path.dirname(lib_dir), + ""data/TinyLinux.ova"") + files = {""file"": open(ova_path, 'rb')} + data = {""disk_uuid"": disk_uuid} + url = endpoints['lib_upload_disk'] % lib_id + return self.post(url, data=data, files=files)" +,retrieves a list of filter fields from a library,,,"def library_filter_fields(self): + """""" + Listing library filter fields + """""" + url = endpoints['lib_filter_fields'] + return self.get(url)" +,retrieves a list of available machines from a library,,,"def library_viewmachinelist(self, params={}): + """""" + Listing the machine available + """""" + url = endpoints[""lib_viewmachinelist""] + if params: + url = create_filtered_url(url, params) + return self.get(url, json=params)" +,retrieves a list of storage target types from a library,,,"def library_starget_types(self): + """""" + Listing the starget types + """""" + return self.get(endpoints[""lib_starget_types""])" +,retrieves a list of storage source types from a library,,,"def library_ssource_types(self): + """""" + Listing the ssource types + """""" + return self.get(endpoints[""lib_ssource_types""])" +,retrieves a list of socket mode types of a library,,,"def library_socketmode_types(self): + """""" + Listing the socketmode types + """""" + return 
self.get(endpoints[""lib_socketmode_types""])" +,Listing the segments of a library,,,"def library_segmentlist(self, params={}): + """""" + Listing the segments + """""" + url = endpoints[""lib_segmentlist""] + if params: + url = create_filtered_url(url, params) + return self.get(url, json=params)" +,Listing the revisions of Library,,,"def library_revisions(self, uuid): + """""" + Listing the revisions of Library + """""" + url = endpoints[""lib_revisions""] % uuid + return self.get(url)" +,retrieve and list choices for NVRAM templates from a library,,,"def library_nvram_template_choices(self): + """""" + Listing the nvram template choices + """""" + return self.get(endpoints[""lib_nvram_template_choices""])" +,returns a list of available network types,,,"def library_ntypes(self): + """""" + Listing the type of network available + """""" + url = endpoints[""lib_ntypes""] + return self.get(url)" +,Listing the types of network model,,,"def library_nmodeltypes(self): + """""" + Listing the types of network model + """""" + url = endpoints[""lib_nmodeltypes""] + return self.get(url) +" +,Listing the VM present in the library,,,"def library_list(self, params): + """""" + Listing the VM present in the library + """""" + url = endpoints[""lib_list""] + if params: + url = create_filtered_url(url, params) + return self.get(url, json=params)" +,Listing the type of layer,,,"def library_layer_list(self): + """""" + Listing the type of layer + """""" + url = endpoints[""lib_layer_list""] + return self.get(url) +" +,Fetching the detail of layer of an VM,,,"def library_layerdetail(self, uuid, params): + """""" + Fetching the detail of layer of an VM + """""" + url = endpoints[""lib_layerdetail""] % uuid + return self.get(url, json=params)" +,Listing the available hypervisor type,,,"def library_hvmtypes(self): + """""" + Listing the available hypervisor type + """""" + url = endpoints[""lib_hvmtypes""] + return self.get(url)" +,editing a VM in a library,,,"def library_edit(self, lib_id, params): + """""" + Edit vm in library. 
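+ 
+ Usage sketch (illustrative; the params payload mirrors the structure built by
+ template_library_add_new_vm, e.g. a top-level name or an hw dict):
+ r = api.library_edit(lib_id, {'name': 'renamed-vm'})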
+ """""" + url = endpoints[""lib_edit""] % lib_id + return self.put(url, json=params)"
+,Listing the details of particular VM of library,,,"def library_details(self, uuid, params): + """""" + Listing the details of particular VM + """""" + if uuid is None: + logging.debug(""Machine uuid is None"") + url = endpoints[""lib_details""] % uuid + return self.get(url, json=params)"
+,listing the disk format types of library,,,"def library_disk_format_type(self): + """""" + Listing the disk format types + """""" + return self.get(endpoints[""lib_dformattypes""])"
+,Listing the types of disk in a library,,,"def library_disk_type(self): + """""" + Listing the types of disk + """""" + return self.get(endpoints[""lib_dtypes""]) +"
+,Listing the Deployment Strategies in a library,,,"def library_deployment_strategies(self): + """""" + Listing the Deployment Strategies + """""" + return self.get(endpoints[""lib_deployment_strategies""])"
+,Listing the Console type in a library,,,"def library_console_types(self): + """""" + Listing the Console type + """""" + return self.get(endpoints[""lib_ctypes""]) +"
+,Cloning the VM present in the library,,,"def library_clone_vm(self, uuid, params=None): + """""" + Cloning the VM present in the library + """""" + if uuid is None: + raise Exception(""Machine uuid should not be None"") + clone_url = endpoints[""lib_clone""] % uuid + if not params: + params = template_library_clone_vm() + res = self.post(clone_url, json=params) + return params, res +"
+,Delete multiple VM in the library,,,"def library_bulkdelete(self, machine_details): + """""" + Delete multiple VM in the library + """""" + lib_url = endpoints[""lib_bulk_delete""] + return self.delete(lib_url, json=machine_details)"
+,retrieving the details of boot types in a library,,,"def library_boottypes(self): + """""" + Return library boot types. + """""" + res = self.get(endpoints[""lib_boottypes""]) + return res"
+,"importing an OVA file into a library, optionally waiting for the operation to complete.",,,"def library_import_ova(self, wait=True): + """""" + Import an OVA file into the library + """""" + lib_dir = os.path.dirname(os.path.abspath(__file__)) + ova_path = os.path.join(os.path.dirname(lib_dir), + ""data/TinyLinux.ova"") + import_url = endpoints[""lib_import_ova""] + + files = {""file"": open(ova_path, 'rb')} + lib_name = ""ova_"" + str(uuid.uuid4())[0:10] + data = {""name"": lib_name} + resp = self.post(import_url, data=data, files=files) + if wait: + resp_json = resp.json() + wait_to_complete(self, resp_json) + return lib_name, resp + +"
+,deleting a VM from a library,,,"def library_delete(self, uuid, params={}): + """""" + Delete vm from library. + """""" + url = endpoints[""lib_delete""] % uuid + f""/?full_tree={params.get('full_tree', 'false')}"" + ret = self.delete(url, json=params) + return ret"
+,"adds a disk to a library, by specifying a UUID and optional parameters, defaulting to a virtio disk type if no parameters are provided.",,,"def library_add_disk(self, uuid, params={}): + """""" + adds a disk to a library by specifying a UUID + """""" + disk_url = endpoints[""lib_add_disk""] % uuid + disks = template_library_add_disk() + # same type of disk cannot be added; a ""sata"" type disk is already there in the library, so add a ""virtio"" type disk + if params: + disks.update(params) + else: + disks[""port""] = ""vdb"" + disks[""type""] = ""virtio"" + new_disk = {""disks"": [disks]} + ret = self.post(disk_url, json=new_disk) + return ret"
+,add a new virtual machine to a library,,,"def library_add_new_vm(self, **kwargs): + """""" + Create vm in library. + """""" + noraise = kwargs.pop(""noraise"", False) + if ""arch"" not in kwargs: + kwargs[""arch""] = self.arch_type + params = template_library_add_new_vm(**kwargs) + url = endpoints[""lib_add""] + ret = self.post(url, json=params) + status_code = ret.status_code + rjson = ret.json() + rjson[""status_code""] = status_code + if noraise: + return params, ret + assert int(status_code) == 201, f""Json response:- {rjson}"" + return params, ret"
+,"updates the state of servers by fetching details from designated endpoints, based on certain conditions, and storing the updated server states in an instance variable.",,,"def update_server_states(self): + """""" + Updates the state of servers based on certain conditions. + + This method checks if the user is an admin and if the tests are set to be sequential. If both conditions are met, + it retrieves server details from the designated endpoints and updates the state of all servers accordingly. + Server details are fetched and stored using their unique UUIDs. The updated server state is then stored in the + instance variable `all_server_state`. + """""" + if self.user_type == USER_TYPE[""admin""] and self.is_sequential_test: + store_all_server_values = {} # Dictionary to store server details + page = 1 + page_size = 10 + while True: + # Fetching the server details using server_list with pagination + response = self.get(f""{endpoints['server_list']}?page={page}&page_size={page_size}"").json() + servers = response['results'] + # Storing each server details + for server in servers: + server_uuid = server['uuid'] + store_all_server_values[server_uuid] = {} # Initialize server details dictionary + for key in DEFAULT_SERVER_DETAILS: + # Check if the key exists in the server response before assigning + if key in server: + store_all_server_values[server_uuid][key] = server[key] + # Check if there are more pages to fetch + if response['next'] is not None: + page += 1 + else: + break # No more pages to fetch, break out of the loop + # Update the instance variable with the new server states + self.all_server_state = store_all_server_values"
+,sends an authenticated PUT request to a specified URL.,,,"def put(self, url, **kwargs): + """""" + Authenticated PUT request. + """""" + return self.do_opration(""PUT"", url, **kwargs)"
+,sends an authenticated DELETE request to a specified URL.,,,"def delete(self, url, **kwargs): + """""" + Authenticated DELETE request. + """""" + return self.do_opration(""DELETE"", url, **kwargs)"
+,sends an authenticated POST request to a specified URL.,,,"def post(self, url, **kwargs): + """""" + Authenticated POST request. + """""" + return self.do_opration(""POST"", url, **kwargs)"
+,sends an authenticated GET request to a specified URL.,,,"def get(self, url, **kwargs): + """""" + Authenticated GET request. + """""" + return self.do_opration(""GET"", url, **kwargs)"
+,"sends an HTTP request with the given method (GET, POST, etc.) to the provided URL, including optional parameters, and logs the method and URL.",,,"def do_opration(self, method, url, **kwargs): + """""" + Perform given method on url. 
+ """""" + uri = self.get_url(url) + logging.debug(method + "" "" + uri) + if kwargs != {}: + logging.debug(f""params : {kwargs}"") + return requests.request(method.upper(), + uri, + headers=self.headers, + **kwargs, verify=False)"
+,"retrieves the IDs of users who are managed by the current user, assuming the current user is a manager and traverses through the groups they manage to collect the user IDs.",,,"def get_users_under_me(self): + """""" + Get users under my right as Manager + """""" + if self.user_type != USER_TYPE[""manager""]: + return [] + my_groups = [] if self.clm_my_groups is None else self.clm_my_groups.values() + user_ids_under_me = set() + for group_id in my_groups: + tmp_user_ids = set() + resp = self.get(endpoints[""user_list""] + f""?group_id={group_id}"") + """""" + Returns response as: + { + 'count': 2, 'next': None, 'previous': None, + 'results': [ + {'id': 1, 'username': 'colama', 'email': 'colama@coriolis.co.in', 'groups': [ + {'id': 1, 'deployment_strategy': 'fill_first', 'is_manager': False, 'name': 'colama_group', 'permissions': []}, + {'id': 3020, 'deployment_strategy': 'fill_first', 'is_manager': False, 'name': 'group1', 'permissions': []}, + {'id': 3021, 'deployment_strategy': 'fill_first', 'is_manager': False, 'name': 'VIVEK TRIPATHI', 'permissions': []}], 'is_staff': True}, + {'id': 3, 'username': 'manager', 'email': '', 'groups': [ + {'id': 2, 'deployment_strategy': 'fill_first', 'is_manager': True, 'name': 'manager_group', 'permissions': []}, + {'id': 3020, 'deployment_strategy': 'fill_first', 'is_manager': False, 'name': 'group1', 'permissions': []}, + {'id': 3021, 'deployment_strategy': 'fill_first', 'is_manager': False, 'name': 'group2', 'permissions': []}], 'is_staff': False} + ]} + """""" + for user in resp.json()[""results""]: + # Keep only the group that this listing was fetched for + user[""groups""] = list(filter(lambda user_group: user_group[""id""] == group_id, user[""groups""])) + if user[""id""] == self.user_id and user[""groups""][0][""is_manager""] is False: + # The current user is in this group without manager rights, so skip to the next group + tmp_user_ids = set() + break + if user[""id""] != self.user_id: + tmp_user_ids.add(user[""id""]) + user_ids_under_me = user_ids_under_me.union(tmp_user_ids) + return list(user_ids_under_me)"
+,"retrieves the user's groups, excluding test groups",,,"def get_self_groups(self): + """""" + Get groups after test groups are removed + """""" + resp = self.get(endpoints[""user_self""]) + if resp.status_code != 200: + return None + my_groups = {} + for group in resp.json()[""groups""]: + my_groups[group[""name""]] = group[""id""] + return my_groups"
+,"retrieves servers that belong to the user's group(s) based on their membership in certain groups specified, distinguishing between servers in the user's group(s) and those not in their group(s).",,,"def get_servers_in_my_group(self): + """""" + Split Online and Locked servers into those in my groups and those not + """""" + if not self.clm_my_groups: + return None, None + resp1 = self.get(endpoints[""server_list""] + ""?status=Online"") + resp2 = self.get(endpoints[""server_list""] + ""?status=Locked"") + servers_in_my_group = {} + servers_not_in_my_group = {} + for server in resp1.json()[""results""] + resp2.json()[""results""]: + added = False + all_groups = self.get(endpoints[""server_details""] % server[""uuid""]).json()[""groups""] + for group in all_groups: + if group[""name""] in self.clm_my_groups: + servers_in_my_group[server[""hostname""]] = server[""uuid""] + added = True + break + if not added: + 
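+ # server shares no group with the current user, so track it separately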
servers_not_in_my_group[server[""hostname""]] = server[""uuid""] + return servers_in_my_group, servers_not_in_my_group" +,Returns the data of logged in user,,,"def user_self(self): + """""" + Returns the data of logged in user + """""" + user_url = endpoints[""user_self""] + return self.get(user_url) +" +,"generates a URL by combining a schema, node IP, and a provided URL path, typically for accessing a resource on a server",,,"def get_url(self, url): + """""" + get url + """""" + return ""%s://%s:8086%s"" % (self.url_schema, self.node_ip, url)" +,"to authenticate a user by sending a POST request to a server endpoint with provided credentials, retries up to three times in case of JSON decoding errors, and returns the authentication token.",,,"def auth(self): + """""" + login to server. + """""" + url = ""%s://%s:8086%s"" % (self.url_schema, + self.node_ip, + endpoints[""auth""]) + login = {""username"": self.user, + ""password"": self.passwd} + for i in range(3): + try: + r = requests.post(url, login, verify=False) + res = r.json() + break + except JSONDecodeError: + if i == 2: + raise Exception(f""The user '{self.user}' with the provided password might not exist on the server '{self.node_ip}'"") + assert ""token"" in res.keys() + return res[""token""]" +,"initializes an API instance with server details, including authentication, URL schema handling, and retrieving user and server information.",,,"def __init__(self, node_ip, user, passwd, user_type, arch_type, auto_ops, system_admin_user, system_admin_passwd, deploy_on, is_sequential_test=False): + """""" + create api instance with server. + """""" + self.node_ip = node_ip + self.user = user + self.passwd = passwd + self.user_type = user_type + self.arch_type = arch_type + self.auto_ops = auto_ops + self.deploy_on = deploy_on + self.system_admin_user = system_admin_user + self.system_admin_passwd = system_admin_passwd + self.url_schema = ""http"" + if '://' in self.node_ip: + split_node_ip = self.node_ip.split('://') + url_schema = split_node_ip[0] + self.node_ip = split_node_ip[1] + if url_schema == 'https': + self.url_schema = 'https' + elif url_schema not in ['http', 'https']: + raise Exception(""Please provide --node with http or https only."") + self.authtoken = ""Token %s"" % (self.auth()) + self.headers = {""Authorization"": ""Token %s"" % (self.auth()), + ""Accept"": ""application/json""} + self.user_id = self.user_self().json()[""id""] + self.clm_my_groups = self.get_self_groups() + self.clm_my_servers, self.clm_not_my_servers = self.get_servers_in_my_group() + self.clm_users_under_me = self.get_users_under_me() + self.is_sequential_test = is_sequential_test + self.all_server_state = {} + self.update_server_states() +" +,"Function to generate random string naming +",,,"def rand_string(char_size=8): + """""" + Function to generate random string naming + """""" + sys_random = random.SystemRandom() + + letters = string.ascii_letters + string.digits + txt = ''.join(sys_random.choices(letters, k=char_size)) + return txt + +" +,adds a disk template with customizable properties,,,"def template_library_add_disk(**kwargs): + """""" + Add disk template. 
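+ 
+ Defaults describe a 1 GB non-boot qcow2 sata disk on port sda; any field can be
+ overridden, e.g. template_library_add_disk(type='virtio', port='vdb').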
+ """""" + disks = { + ""is_boot"": kwargs.get(""is_boot"", False), + ""size"": kwargs.get(""size"", 1), + ""port"": kwargs.get(""port"", ""sda""), + ""type"": kwargs.get(""type"", ""sata""), + ""format"": kwargs.get(""format"", ""qcow2""), + ""layer"": kwargs.get(""layer"", {}), + } + return disks"
+,"adds a disk to a template library, with customizable attributes like size, port, type, and format.",,,"def template_library_edit_disk_add(**kwargs): + """""" + Edit Library disk template. + """""" + disks = {""add"": + [ + { + ""is_boot"": kwargs.get(""is_boot"", False), + ""size"": kwargs.get(""size"", 1), + ""port"": kwargs.get(""port"", ""sdb""), + ""type"": kwargs.get(""type"", ""sata""), + ""format"": kwargs.get(""format"", ""qcow2""), + ""layer"": kwargs.get(""layer"", {}) + }, + ] + } + return disks"
+,returns a network configuration intended to fail validation (a bridge NIC on the HostOnly Segment),,,"def template_to_fail_network_settings_1(arch=""x86_64""): + """""" + returns a network configuration intended to fail validation (a bridge NIC on the HostOnly Segment) + """""" + network = [ + { + ""type"": ""bridge"", + ""model"": ""e1000"" if arch == ""x86_64"" else ""virtio"", + ""segment"": ""HostOnly Segment"", + } + ] + return network"
+,generates network settings intended to fail validation (a host-type NIC on the Default Public Segment),,,"def template_to_fail_network_settings_2(arch=""x86_64""): + """""" + generates network settings intended to fail validation (a host-type NIC on the Default Public Segment) + """""" + network = [ + { + ""type"": ""host"", + ""model"": ""e1000"" if arch == ""x86_64"" else ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] + return network +"
+,"generates default network settings for virtual machines, allowing customization of the network model and segment, with different segments and optional MAC addresses.",,,"def template_default_network_settings(arch=""x86_64"", **kwargs): + """""" + generates default network settings for virtual machines + """""" + network = [ + { + ""type"": ""bridge"", + ""model"": kwargs.get(""model"", ""e1000"" if arch == ""x86_64"" else ""virtio""), + ""segment"": ""Default Public Segment"", + }, + { + ""type"": ""host"", + ""model"": kwargs.get(""model"", ""e1000"" if arch == ""x86_64"" else ""virtio""), + ""segment"": ""HostOnly Segment"", + }, + { + ""type"": ""bridge"", + ""model"": kwargs.get(""model"", ""e1000"" if arch == ""x86_64"" else ""virtio""), + ""segment"": ""Default Public Segment"", + ""mac"": ""auto"" + }, + { + ""type"": ""bridge"", + ""model"": kwargs.get(""model"", ""e1000"" if arch == ""x86_64"" else ""virtio""), + ""segment"": ""Default Public Segment"", + ""mac"": ""5A:54:00:12:23:34"" + } + ] + + return network"
+,"generates a virtual machine configuration with customizable parameters such as CPU, RAM, disk type, network model, and more, with default values set based on the architecture provided.",,,"def template_library_add_new_vm(arch=""x86_64"", **kwargs): + """""" + Add VM in database + """""" + disk_type = kwargs.get(""type"", ""sata"" if arch == ""x86_64"" else ""virtio"") + disk_port = kwargs.get(""port"", ""sda"" if arch == ""x86_64"" else ""vda"") + netw_model = kwargs.get(""nw_model"", ""e1000"" if arch == ""x86_64"" else ""virtio"") + vmname = kwargs.get(""name"", f""vmname_{rand_string()}"") + is_public = kwargs.get(""is_public"", False) + vm = {""name"": vmname, + ""description"": f""This is test description for {vmname}"", + ""is_public"": is_public, + ""hw"": {""cpus"": kwargs.get(""cpus"", 1), + ""ram"": kwargs.get(""ram"", 100), + ""hvm_type"": kwargs.get(""hvm_type"", ""kvm""), + ""is_x64"": kwargs.get(""is_x64"", True), + ""arch"": arch, + ""is_uefi"": 
kwargs.get(""is_uefi"", True), + ""boot"": kwargs.get(""boot"", ""hd""), + ""console"": kwargs.get(""console"", ""vnc""), + ""cdrom"": kwargs.get(""cdrom"", []), + ""networks"": kwargs.get(""networks"", template_default_network_settings(arch, model=netw_model)), + ""disks"": kwargs.get(""disks"", [template_library_add_disk(type=disk_type, port=disk_port)]), + ""serialports"": kwargs.get(""serialports"", []), + ""segments"": kwargs.get(""segments"", []) + } + } + return vm" +,Return template for clone vm,,,"def template_library_clone_vm(): + """""" + Return template for clone vm. + """""" + cl_name = f""{rand_string()}_cl"" + clone = { + ""mac_list"": [], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, + } + return clone" +,"generates a template for adding a new group, using a randomly generated name if no name is provided.",,,"def template_add_group(name=None): + """""" + Return template for add new group. + """""" + if not name: + name = f""pytest_clm_{rand_string()}_group"" + group = { + ""name"": name + } + return group" +,"creates a tag with a dynamically generated name, a fixed value, and a specified description.",,,"def template_tag_add(): + tag = { + 'tag_list': [ + { + ""name"": f""tag_{rand_string()}"", + ""value"": ""2020"", + ""description"": ""test"" + } + ] + } + return tag" +,"creates a template for adding a new server with specified hostname, username, password, IP address, port, and synchronization setting.",,,"def template_server_add_new(): + """""" + Template for Adding new Server + """""" + hostname = ""colama-cicd-testcasevm.corp.coriolis.in"" + username = ""root"" + password = ""coriolis"" + ip = ""172.21.51.27"" + port = ""22"" + sync = ""False"" + server = { + ""hostname"": hostname, + ""port"": port, + ""username"": username, + ""password"": password, + ""ip"": ip, + ""sync"": sync, + } + return server" +,"prepares a bulk operation template for a list of servers by extracting their UUIDs and setting the operation type to ""up_server"".",,,"def template_server_bulkops(server_list): + """""" + Template for server bulk operations + """""" + r = server_list + result = r.json() + list_server = [] + result_dict = result[""results""] + for d in result_dict: + list_server.append(d['uuid']) + operation = ""up_server"" + bulkops = { + ""server_list"": list_server, + ""op"": operation + } + return bulkops" +,creates a dictionary containing details of a server,,,"def template_server_details(): + """""" + Template for Server Details + """""" + hostname = ""colama-cicd-testcasevm.corp.coriolis.in"" + details = { + ""hostname"": hostname + } + return details" +,"randomly selects a server from a given list and adds it to a server group, returning the server group list +",,,"def template_group_add_server(server_list): + """""" + Template for Add server in group + """""" + # r = server_list + # result = r.json() + # list_server = [] + # obj_name = ""colama-cicd-testcasevm.corp.coriolis.in"" + # # result_dict = result[""results""] + # for d in result_dict: + # if d['hostname'] == obj_name: + # list_server.append(d['uuid']) + list_server = [random.choice(server_list)] + server_group_list = { + ""servers_list"": list_server + } + return server_group_list" +,defines a template for deploying a virtual machine,,,"def template_deploy_edit(): + """""" + Template for Editing VM + """""" + cpu = 1 + ram = 100 + deploy = { + ""ram"": ram, + ""cpu"": cpu, + } + return deploy + +" +,generates an XML template with a modified note body and indicates that the 
XML has been edited.,,,"def template_deploy_retrieve_xml(): + """""" + Template for Editing XML + """""" + xml = ""This file has been Modified "" + xml_temp = { + ""xml"": xml, + ""is_xml_edited"": 1 + } + return xml_temp" +,"generates a template for user details with a specified ID, username, email, and an empty list of groups.",,,"def template_user_details(): + """""" + Template for User details + """""" + details = { + ""id"": 1, + ""username"": ""colama"", + ""email"": ""colama@coriolis.co.in"", + ""groups"": [] + } + return details +" +,"creates a template for adding multiple users to a group, with the user IDs provided in a list.",,,"def template_group_add_user(): + """""" + Template for adding multiple users into group + """""" + id = [] + id.append(1) + details = { + ""users_list"": id + } + return details" +,creates a template for adding a user to multiple groups by storing the user's name in a list and returning a dictionary containing the user's groups.,,,"def template_user_add_group(name): + """""" + Template for adding user into multiple groups + """""" + id = [] + id.append(name) + details = { + ""groups"": id + } + return details" +,"generates configuration for adding an island with a single machine, including network segment creation and NIC configuration, with random names and settings",,,"def template_add_ilibrary_one_machine(**kwargs): + """""" + Add Island with single machine + """""" + island_name = kwargs.get(""name"", f""island_{rand_string()}"") + machine_name = f""machine_{rand_string()}"" + machine = kwargs.get(""machine"", {}) + segment_add = f""segment_{rand_string()}"" + + island = { + ""name"": island_name, + ""description"": ""string"", + ""machines"": { + ""add"": [ + { + ""uuid"": machine[""uuid""], + ""name"": machine_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_add + } + ], + ""delete"": [ + { + ""id"": machine[""nic_delete_id""] + } + ] + } + } + ] + }, + ""is_public"": False, + ""network_segments"": { + ""add"": [ + { + ""name"": segment_add, + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } + } + return island" +,"generates a template for adding a new island with three machines to a database, each with network interface configurations and associated network segments",,,"def template_ilibrary_add_new_island(**kwargs): + """""" + Add Island with 3 machines in database + """""" + island_name = kwargs.get(""name"", f""island_{rand_string()}"") + machine1_name = f""machine_{rand_string()}"" + machine2_name = f""machine_{rand_string()}"" + machine3_name = f""machine_{rand_string()}"" + machine1 = kwargs.get(""machine1"", {}) + machine2 = kwargs.get(""machine2"", {}) + machine3 = kwargs.get(""machine3"", {}) + segment_add = f""segment_{rand_string()}"" + segment_update = f""segment_{rand_string()}"" + networks = kwargs.get(""networks"", template_networks()) + + island = { + ""name"": island_name, + ""description"": ""string"", + ""machines"": { + ""add"": [ + { + ""uuid"": machine1[""uuid""], + ""name"": machine1_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": segment_add + } + ], + ""update"": [ + { + ""id"": machine1[""nic_update_id""], + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[1].get(""model"", ""virtio""), + ""segment"": segment_update + } + ], + ""delete"": [ + { + 
""id"": machine1[""nic_delete_id""] + } + ] + } + }, + { + ""uuid"": machine3[""uuid""], + ""name"": machine3_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[1].get(""model"", ""virtio""), + ""segment"": segment_add + } + ], + ""update"": [ + { + ""id"": machine3[""nic_update_id""], + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[2].get(""model"", ""virtio""), + ""segment"": segment_update + } + ], + ""delete"": [ + { + ""id"": machine3[""nic_delete_id""] + } + ] + } + }, + { + ""uuid"": machine2[""uuid""], + ""name"": machine2_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[2].get(""model"", ""virtio""), + ""segment"": segment_add + } + ], + ""update"": [ + { + ""id"": machine2[""nic_update_id""], + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": segment_update + } + ], + ""delete"": [ + { + ""id"": machine2[""nic_delete_id""] + } + ] + } + } + ] + }, + ""is_public"": False, + ""network_segments"": { + ""add"": [ + { + ""name"": segment_add, + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": segment_update, + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } + } + return island +" +,"constructs and updates a virtual island configuration, including adding, updating, and deleting virtual machines and network segments in a database",,,"def template_ilibrary_edit_island(**kwargs): + """""" + Add VM in database + """""" + island_name = f""island_{rand_string()}"" + machine1_name = f""machine_{rand_string()}"" + machine2_name = f""machine_{rand_string()}"" + segment_update_name = f""segment_{rand_string()}"" + segment_1 = f""segment_{rand_string()}"" + segment_2 = f""segment_{rand_string()}"" + + machine1 = kwargs.get(""machine1"", {}) + machine2 = kwargs.get(""machine2"", {}) + machine3 = kwargs.get(""machine3"", {}) + segments = kwargs.get(""segments"", {}) + networks = kwargs.get(""networks"", template_networks()) + + island = { + ""name"": island_name, + ""description"": ""string"", + ""machines"": { + ""add"": [ + { + ""uuid"": machine1[""uuid""], + ""name"": machine1_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": segment_1 + } + ], + ""update"": [ + { + ""id"": machine1[""nic_update_id""], + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_2 + } + ], + ""delete"": [ + { + ""id"": machine1[""nic_delete_id""] + } + ] + } + } + ], + ""update"": [ + { + ""uuid"": machine2[""uuid""], + ""name"": machine2_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_1 + } + ], + ""update"": [ + { + ""id"": machine2[""nic_update_id""], + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_update_name + } + ], + ""delete"": [ + { + ""id"": machine2[""nic_delete_id""] + } + ] + } + } + ], + ""delete"": [ + { + ""uuid"": machine3[""uuid""] + } + ] + }, + ""network_segments"": { + ""add"": [ + { + ""name"": segment_1, + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": segment_2, + ""description"": ""string"", + ""enable_ipv4"": False + } + ], + ""update"": [ + { + ""uuid"": 
segments[""uuid1""], + ""name"": segment_update_name, + ""description"": ""string"", + ""enable_ipv4"": False + } + ], + ""delete"": [ + { + ""uuid"": segments[""uuid2""] + } + ] + }, + ""is_public"": False + } + return island +" +,generates a template for a clone island with a random name and a description including that name,,,"def template_ilibrary_clone_island(): + """""" + Return template for clone island. + """""" + cl_name = f""{rand_string()}_cl"" + clone = { + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, + } + return clone +" +,"generates a deployment request template for deploying an Island, updating network segments and machine configurations accordingly",,,"def template_ideploy_deploy(**kwargs): + """""" + Returns trmplate for Deploying an Island + """""" + machine1_name = f""machine_{rand_string()}"" + machine2_name = f""machine_{rand_string()}"" + segment_update_name = f""segment_{rand_string()}"" + segment_1 = f""segment_{rand_string()}"" + segment_2 = f""segment_{rand_string()}"" + + machine1 = kwargs.get(""machine1"", {}) + machine2 = kwargs.get(""machine2"", {}) + segments = kwargs.get(""segments"", {}) + deploy_req = { + ""network_segments"": { + ""add"": [ + { + ""name"": segment_1, + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": segment_2, + ""description"": ""string"", + ""enable_ipv4"": False + } + ], + ""update"": [ + { + ""uuid"": segments[""uuid1""], + ""name"": segment_update_name, + ""description"": ""string"", + ""enable_ipv4"": False + } + ], + ""delete"": [ + { + ""uuid"": segments[""uuid2""] + } + ] + }, + ""updated_machines"": [ + { + ""uuid"": machine2[""uuid""], + ""name"": machine2_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_1 + } + ], + ""update"": [ + { + ""id"": machine2[""nic_update_id""], + ""mac"": ""string"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_update_name + } + ], + ""delete"": [ + { + ""id"": machine2[""nic_delete_id""] + } + ] + } + }, + { + ""uuid"": machine1[""uuid""], + ""name"": machine1_name, + ""description"": ""string"", + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_1 + } + ], + ""update"": [ + { + ""id"": machine1[""nic_update_id""], + ""mac"": ""string"", + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": segment_update_name + } + ], + ""delete"": [ + { + ""id"": machine1[""nic_delete_id""] + } + ] + } + } + ], + ""force_nw_duplication"": True + } + return deploy_req + +"
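+,"illustrative end-to-end usage sketch (an addition, not part of the original client): chains ideploy_deploy, ideploy_start, ideploy_shutdown and ideploy_delete; the island_lib_uuid fixture and the uuid key in the deploy response are assumptions",,,"def example_island_lifecycle_sketch(run_api, island_lib_uuid):
+ """"""
+ Minimal sketch, assuming island_lib_uuid refers to an existing island library
+ and run_api is an authenticated api instance as constructed above.
+ """"""
+ r = run_api.ideploy_deploy(island_lib_uuid)
+ deploy_uuid = r.json()['uuid'] # assumption: the deploy response carries the deployment uuid
+ run_api.ideploy_start(deploy_uuid)
+ run_api.ideploy_shutdown(deploy_uuid)
+ return run_api.ideploy_delete(deploy_uuid)
+"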