diff --git a/CHANGELOG.md b/CHANGELOG.md
index f36aa56..915756c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,18 @@
 All notable changes to this project will be documented in this file.
 
+## 1.3.1 - 2023-05-03
+### Breaking Changes:
+- No Change.
+
+### New features:
+- No Change.
+
+### Bug fixes:
+- Row updates with null values were broken. Users can now set a row value to null by passing a JSON null.
+- Users can also now update any individual row value. Previously, non-nullable columns were marked "required" in the update schema, so updates were forced to include those values as well.
+
+
 ## 1.3.0 - 2023-03-09
 ### Breaking Changes:
 - No Change.
diff --git a/Makefile b/Makefile
index 3b56683..0c5e9fe 100644
--- a/Makefile
+++ b/Makefile
@@ -22,13 +22,13 @@ build-core:
 #	@echo "Now migrating"
 #	@docker-compose run api python /home/tapis/manage.py migrate_schemas --shared
 
-local-deploy: build-core
+up: build-core
 	@echo "Running docker-compose up"
 	@docker-compose up -d api
 
 # Running the pgrest/test.py file
-test: local-deploy
+test:
 	@docker-compose run api python /home/tapis/manage.py test -v 2
 
 
diff --git a/pgrest/db_transactions/table_data.py b/pgrest/db_transactions/table_data.py
index d225bf1..8f28ddb 100644
--- a/pgrest/db_transactions/table_data.py
+++ b/pgrest/db_transactions/table_data.py
@@ -246,8 +246,10 @@ def update_row_with_pk(table_name, pk_id, data, tenant, primary_key, db_instance
     logger.info(f"Updating row with pk \'{pk_id}\' in {tenant}.{table_name}...")
     command = f"UPDATE {tenant}.{table_name} SET"
     for col in data:
-        if type(data[col]) == str:
+        if isinstance(data[col], str):
             command = f"{command} {col} = (\'{data[col]}\'), "
+        elif data[col] is None:
+            command = f"{command} {col} = (NULL), "
         else:
             command = f"{command} {col} = ({data[col]}), "
     if type(pk_id) == 'int' or type(pk_id) == 'float':
@@ -277,8 +279,10 @@ def update_rows_with_where(table_name, data, tenant, db_instance, search_params=
     logger.info(f"Updating rows in {tenant}.{table_name}...")
     command = f"UPDATE {tenant}.{table_name} SET"
     for col in data:
-        if type(data[col]) == str:
+        if isinstance(data[col], str):
             command = f"{command} {col} = (\'{data[col]}\'), "
+        elif data[col] is None:
+            command = f"{command} {col} = (NULL), "
         else:
             command = f"{command} {col} = ({data[col]}), "
 
diff --git a/pgrest/tests.py b/pgrest/tests.py
index c580cad..5020fb1 100644
--- a/pgrest/tests.py
+++ b/pgrest/tests.py
@@ -343,6 +343,52 @@ def test_update_row(self):
                                     content_type='application/json')
         self.assertEqual(response.status_code, 200)
 
+    def test_update_row_with_null_value(self):
+        # We should be able to change a row value from filled to null without issue, if it's nullable.
+        # first, we need to create row
+        root_url = self.init_resp_1["result"]["root_url"]
+        table_name = self.init_resp_1["result"]["table_name"]
+        data = {"col_one": "hello", "col_two": 100, "col_three": 90, "col_four": False, "col_five": "hehe"}
+        response = self.client.post(f'/v3/pgrest/data/{root_url}',
+                                    data=json.dumps({"data": data}),
+                                    **auth_headers,
+                                    content_type='application/json')
+        self.assertEqual(response.status_code, 200)
+        response = self.client.get(f'/v3/pgrest/data/{root_url}', **auth_headers)
+        self.assertEqual(len(response.json()["result"]), 1)
+        self.assertEqual(response.status_code, 200)
+
+        # now, update
+        data = {"col_two": None}
+        row_id = response.json()["result"][0]['_pkid']
+        response = self.client.put(f'/v3/pgrest/data/{root_url}/{row_id}',
+                                   **auth_headers,
+                                   data=json.dumps({"data": data}),
+                                   content_type='application/json')
+        self.assertEqual(response.status_code, 200)
+
+    def test_update_row_with_null_value_on_non_nullable_column_400(self):
+        root_url = self.init_resp_1["result"]["root_url"]
+        table_name = self.init_resp_1["result"]["table_name"]
+        data = {"col_one": "hello", "col_two": 100, "col_three": 90, "col_four": False, "col_five": "hehe"}
+        response = self.client.post(f'/v3/pgrest/data/{root_url}',
+                                    data=json.dumps({"data": data}),
+                                    **auth_headers,
+                                    content_type='application/json')
+        self.assertEqual(response.status_code, 200)
+        response = self.client.get(f'/v3/pgrest/data/{root_url}', **auth_headers)
+        self.assertEqual(len(response.json()["result"]), 1)
+        self.assertEqual(response.status_code, 200)
+
+        # now, update
+        data = {"col_four": None}
+        row_id = response.json()["result"][0]['_pkid']
+        response = self.client.put(f'/v3/pgrest/data/{root_url}/{row_id}',
+                                   **auth_headers,
+                                   data=json.dumps({"data": data}),
+                                   content_type='application/json')
+        self.assertEqual(response.status_code, 400)
+
     def test_update_row_wrong_data_type_400(self):
         # first, we need to create row
         root_url = self.init_resp_1["result"]["root_url"]
@@ -356,6 +402,7 @@ def test_update_row_wrong_data_type_400(self):
         response = self.client.get(f'/v3/pgrest/data/{root_url}', **auth_headers)
         self.assertEqual(len(response.json()["result"]), 1)
         self.assertEqual(response.status_code, 200)
+
         # now, update
         data = {"col_two": "haha"}
         row_id = response.json()["result"][0]['_pkid']
@@ -388,6 +435,7 @@ def test_update_row_nonexistent_column_400(self):
         response = self.client.get(f'/v3/pgrest/data/{root_url}', **auth_headers)
         self.assertEqual(len(response.json()["result"]), 1)
         self.assertEqual(response.status_code, 200)
+
         # now, update
         data = {"col_where": 30}
         row_id = response.json()["result"][0]['_pkid']
@@ -1405,17 +1453,17 @@ def test_all_search_parameters(self):
     # Nonexistence checks
     def test_get_nonexistent_view(self):
         response = self.client.get(f'/v3/pgrest/views/this_view_does_not_exist', **auth_headers)
-        print(response)
+        #print(response)
         self.assertEqual(response.status_code, 404)
 
     def test_get_nonexistent_manage_view(self):
         response = self.client.get(f'/v3/pgrest/manage/views/22', **auth_headers)
-        print(response)
+        #print(response)
         self.assertEqual(response.status_code, 404)
 
     def test_delete_nonexistent_view(self):
         response = self.client.delete(f'/v3/pgrest/manage/views/22', **auth_headers)
-        print(response)
+        #print(response)
         self.assertEqual(response.status_code, 404)
 
     def test_raw_sql_view_creation(self):
diff --git a/pgrest/utils.py b/pgrest/utils.py
index 1eaa9b5..41ee8c9 100644
--- a/pgrest/utils.py
+++ b/pgrest/utils.py
@@ -73,31 +73,9 @@ def create_validate_schema(columns,
                            tenant, existing_enum_names):
     schema_update = dict()
     schema_create = dict()
-    for key, key_info in columns.items():
-        key_type = key_info["data_type"].lower()
-        info_dict = dict()
-        if key_type in ["varchar", "char"]:
-            try:
-                val_length = int(key_info["char_len"])
-                info_dict.update({"type": "string", "maxlength": val_length})
-            except KeyError:
-                raise KeyError(f"Unable to create table. {key_type} data types requires char_len field.")
-        elif key_type == "text":
-            info_dict.update({"type": "string"})
-        elif key_type == "serial":
-            info_dict.update({"type": "integer"})
-        elif key_type == "date":
-            info_dict.update({"type": "string"})
-        elif key_type == "timestamp":
-            info_dict.update({"type": "string"})
-        elif '[]' in key_type:
-            info_dict.update({"type": "list"})
-        elif key_type in existing_enum_names or f'{tenant}.{key_type}' in existing_enum_names:
-            info_dict.update({"type": "string"})
-        else:
-            info_dict.update({"type": key_type})
-        schema_update[key] = info_dict
-
+    # We once had separate update and create schemas; I couldn't find a valid reason for the duplication.
+    # The only difference is that schema_update should never have the "required" field, since users should
+    # always be able to update a single row value on its own.
     for key, key_info in columns.items():
         key_type = key_info["data_type"]
         info_dict = dict()
@@ -120,13 +98,17 @@ def create_validate_schema(columns, tenant, existing_enum_names):
             info_dict.update({"type": key_type})
 
         if "null" in key_info.keys():
-            if not key_info["null"]:
-                info_dict.update({"required": True,
-                                  "nullable": True})
-            else:
+            if key_info["null"]:  # null=True: nullable and not required
                 info_dict.update({"required": False,
                                   "nullable": True})
-        schema_create[key] = info_dict
+            else:  # null=False: required and not nullable
+                info_dict.update({"required": True,
+                                  "nullable": False})
+        schema_create[key] = info_dict.copy()
+
+        # The update schema should never have required fields; users should always be able to update a row's value.
+        info_dict.update({"required": False})
+        schema_update[key] = info_dict.copy()
 
     return schema_create, schema_update
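
For reviewers, here is a minimal sketch of why `schema_update` drops `"required"` while keeping `"nullable"`. It assumes the schemas produced by `create_validate_schema` feed a Cerberus-style `Validator` (which the `type`/`required`/`nullable`/`maxlength` keys suggest), and that `col_four` was created with `null=False`, as the 400 test above implies; the schema dicts below are written by hand for illustration, not taken from the code.

```python
from cerberus import Validator

# Hypothetical column: col_four is boolean and created with null=False, so the
# create schema marks it required/non-nullable, while the update schema keeps
# it non-nullable but drops the required constraint.
schema_create = {"col_four": {"type": "boolean", "required": True, "nullable": False}}
schema_update = {"col_four": {"type": "boolean", "required": False, "nullable": False}}

v_create = Validator(schema_create)
v_update = Validator(schema_update)

assert not v_create.validate({})                  # create: col_four must be supplied
assert v_create.validate({"col_four": True})

assert v_update.validate({})                      # update: partial payloads are fine
assert v_update.validate({"col_four": False})     # updating a single value works
assert not v_update.validate({"col_four": None})  # still rejected: the 400 case above
```

This mirrors the two bug fixes in the changelog: partial updates no longer trip "required" errors on untouched columns, and an explicit JSON null is only rejected where the column is genuinely non-nullable.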