From 15713fa9a733494ea1983427daa39b341d8e5dd6 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 14:33:22 +0800 Subject: [PATCH 01/70] move DataFile model to non-deprecated namespace --- db/tables/operations/import_.py | 2 +- mathesar/admin.py | 6 +---- mathesar/api/db/viewsets/data_files.py | 2 +- mathesar/api/dj_filters.py | 3 +-- mathesar/api/serializers/data_files.py | 6 ++--- mathesar/imports/base.py | 1 - ...ove_datafile_table_imported_to_and_more.py | 23 +++++++++++++++++ mathesar/models/__init__.py | 1 + mathesar/models/base.py | 25 +++++++++++++++++++ mathesar/models/deprecated.py | 21 ---------------- mathesar/tests/api/conftest.py | 3 ++- mathesar/tests/api/test_data_file_api.py | 6 +---- mathesar/tests/conftest.py | 3 ++- mathesar/tests/imports/test_csv.py | 10 ++------ mathesar/tests/imports/test_excel.py | 13 ++-------- mathesar/tests/imports/test_json.py | 10 ++------ mathesar/tests/rpc/tables/test_t_metadata.py | 3 +-- mathesar/utils/datafiles.py | 2 +- 18 files changed, 69 insertions(+), 71 deletions(-) create mode 100644 mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py diff --git a/db/tables/operations/import_.py b/db/tables/operations/import_.py index 5c6bd31b0c..0748950fa8 100644 --- a/db/tables/operations/import_.py +++ b/db/tables/operations/import_.py @@ -7,7 +7,7 @@ from db.columns.operations.alter import _transform_column_alter_dict from db.tables.operations.create import prepare_table_for_import from db.encoding_utils import get_sql_compatible_encoding -from mathesar.models.deprecated import DataFile +from mathesar.models.base import DataFile from mathesar.imports.csv import get_file_encoding, get_sv_reader, process_column_names diff --git a/mathesar/admin.py b/mathesar/admin.py index 9c195a904f..d8512b8c11 100644 --- a/mathesar/admin.py +++ b/mathesar/admin.py @@ -1,9 +1,8 @@ from django.contrib import admin from django.contrib.auth.admin import UserAdmin -from mathesar.models.deprecated import Table, Schema, DataFile +from mathesar.models.base import DataFile from mathesar.models.users import User -from mathesar.models.query import Exploration class MathesarUserAdmin(UserAdmin): @@ -19,8 +18,5 @@ class MathesarUserAdmin(UserAdmin): ) -admin.site.register(Table) -admin.site.register(Schema) admin.site.register(DataFile) admin.site.register(User, MathesarUserAdmin) -admin.site.register(Exploration) diff --git a/mathesar/api/db/viewsets/data_files.py b/mathesar/api/db/viewsets/data_files.py index 4c3eb1a29f..fc20278de3 100644 --- a/mathesar/api/db/viewsets/data_files.py +++ b/mathesar/api/db/viewsets/data_files.py @@ -10,7 +10,7 @@ import mathesar.api.exceptions.generic_exceptions.base_exceptions as base_api_exceptions from mathesar.api.exceptions.error_codes import ErrorCodes from mathesar.errors import InvalidTableError -from mathesar.models.deprecated import DataFile +from mathesar.models.base import DataFile from mathesar.api.pagination import DefaultLimitOffsetPagination from mathesar.api.serializers.data_files import DataFileSerializer from mathesar.utils.datafiles import create_datafile diff --git a/mathesar/api/dj_filters.py b/mathesar/api/dj_filters.py index 1a0f670763..ebd3cdfda5 100644 --- a/mathesar/api/dj_filters.py +++ b/mathesar/api/dj_filters.py @@ -3,7 +3,7 @@ PropertyOrderingFilter ) -from mathesar.models.deprecated import DataFile +from mathesar.models.base import DataFile class CharInFilter(PropertyBaseInFilter, PropertyCharFilter): @@ -11,7 +11,6 @@ class CharInFilter(PropertyBaseInFilter, 
PropertyCharFilter): class DataFileFilter(PropertyFilterSet): - database = CharInFilter(field_name='table_imported_to__schema__database__name', lookup_expr='in') name = CharInFilter(field_name='name', lookup_expr='in') sort_by = PropertyOrderingFilter( diff --git a/mathesar/api/serializers/data_files.py b/mathesar/api/serializers/data_files.py index 89cf7f9907..ff08547fa1 100644 --- a/mathesar/api/serializers/data_files.py +++ b/mathesar/api/serializers/data_files.py @@ -5,7 +5,7 @@ from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin from mathesar.errors import URLNotReachable, URLInvalidContentTypeError -from mathesar.models.deprecated import DataFile +from mathesar.models.base import DataFile SUPPORTED_URL_CONTENT_TYPES = {'text/csv', 'text/plain'} @@ -21,7 +21,7 @@ class DataFileSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer) class Meta: model = DataFile fields = [ - 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', + 'id', 'file', 'user', 'header', 'delimiter', 'escapechar', 'quotechar', 'paste', 'url', 'created_from', 'max_level', 'sheet_index' ] extra_kwargs = { @@ -32,7 +32,7 @@ class Meta: } # We only currently support importing to a new table, so setting a table via API is invalid. # User should be set automatically, not submitted via the API. - read_only_fields = ['user', 'table_imported_to', 'created_from'] + read_only_fields = ['user', 'created_from'] write_only_fields = ['paste', 'url'] def save(self, **kwargs): diff --git a/mathesar/imports/base.py b/mathesar/imports/base.py index a43ba100b3..d1755e86a3 100644 --- a/mathesar/imports/base.py +++ b/mathesar/imports/base.py @@ -34,6 +34,5 @@ def create_table_from_data_file(data_file, name, schema, comment=None): ) table.import_verified = False table.save() - data_file.table_imported_to = table data_file.save() return table diff --git a/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py b/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py new file mode 100644 index 0000000000..3bb884ef83 --- /dev/null +++ b/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.11 on 2024-10-14 15:42 + +from django.db import migrations, models +import mathesar.models.base + + +class Migration(migrations.Migration): + + dependencies = [ + ('mathesar', '0017_explorations_schema_oid'), + ] + + operations = [ + migrations.RemoveField( + model_name='datafile', + name='table_imported_to', + ), + migrations.AlterField( + model_name='datafile', + name='file', + field=models.FileField(upload_to=mathesar.models.base.DataFile._user_directory_path), + ), + ] diff --git a/mathesar/models/__init__.py b/mathesar/models/__init__.py index f180d82bfd..aa15e36285 100644 --- a/mathesar/models/__init__.py +++ b/mathesar/models/__init__.py @@ -1,3 +1,4 @@ # We need to do this to register the model correctly in Django settings from .users import User # noqa from .shares import SharedQuery, SharedTable # noqa +from .query import Exploration # noqa diff --git a/mathesar/models/base.py b/mathesar/models/base.py index 83b01e331b..90eb919560 100644 --- a/mathesar/models/base.py +++ b/mathesar/models/base.py @@ -1,3 +1,6 @@ +import os + +from django.conf import settings from django.db import models from encrypted_fields.fields import EncryptedCharField import psycopg @@ -157,3 +160,25 @@ class Explorations(BaseModel): display_options = models.JSONField(null=True) display_names = models.JSONField(null=True) description = 
models.CharField(null=True) + + +class DataFile(BaseModel): + def _user_directory_path(instance, filename): + user_identifier = instance.user.username if instance.user else 'anonymous' + # file will be uploaded to MEDIA_ROOT/user_/ + return os.path.join(user_identifier, filename) + + created_from_choices = models.TextChoices("created_from", "FILE PASTE URL") + file_type_choices = models.TextChoices("type", "CSV TSV JSON") + + file = models.FileField(upload_to=_user_directory_path) + user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE) + created_from = models.CharField(max_length=128, choices=created_from_choices.choices) + type = models.CharField(max_length=128, choices=file_type_choices.choices) + base_name = models.CharField(max_length=100) + header = models.BooleanField(default=True) + max_level = models.IntegerField(default=0, blank=True) + sheet_index = models.IntegerField(default=0) + delimiter = models.CharField(max_length=1, default=',', blank=True) + escapechar = models.CharField(max_length=1, blank=True) + quotechar = models.CharField(max_length=1, default='"', blank=True) diff --git a/mathesar/models/deprecated.py b/mathesar/models/deprecated.py index 9b1b3dbd01..da8f63d762 100644 --- a/mathesar/models/deprecated.py +++ b/mathesar/models/deprecated.py @@ -692,7 +692,6 @@ def insert_records_to_existing_table(self, existing_table, data_files, mappings= data_file = data_files[0] try: table, _ = insert_from_select(from_table, target_table, engine, col_mappings) - data_file.table_imported_to = existing_table except Exception as e: # ToDo raise specific exceptions. raise e @@ -887,26 +886,6 @@ def drop(self): reset_reflection(db_name=self.table.schema.database.name) -class DataFile(BaseModel): - created_from_choices = models.TextChoices("created_from", "FILE PASTE URL") - file_type_choices = models.TextChoices("type", "CSV TSV JSON") - - file = models.FileField(upload_to=model_utils.user_directory_path) - user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE) - created_from = models.CharField(max_length=128, choices=created_from_choices.choices) - type = models.CharField(max_length=128, choices=file_type_choices.choices) - table_imported_to = models.ForeignKey(Table, related_name="data_files", blank=True, - null=True, on_delete=models.SET_NULL) - - base_name = models.CharField(max_length=100) - header = models.BooleanField(default=True) - max_level = models.IntegerField(default=0, blank=True) - sheet_index = models.IntegerField(default=0) - delimiter = models.CharField(max_length=1, default=',', blank=True) - escapechar = models.CharField(max_length=1, blank=True) - quotechar = models.CharField(max_length=1, default='"', blank=True) - - class PreviewColumnSettings(BaseModel): customized = models.BooleanField() template = models.CharField(max_length=255) diff --git a/mathesar/tests/api/conftest.py b/mathesar/tests/api/conftest.py index bcb36e81dd..87a27a9ef0 100644 --- a/mathesar/tests/api/conftest.py +++ b/mathesar/tests/api/conftest.py @@ -7,7 +7,8 @@ from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import get_oid_from_table -from mathesar.models.deprecated import Table, DataFile, Column as ServiceLayerColumn +from mathesar.models.deprecated import Table, Column as ServiceLayerColumn +from mathesar.models.base import DataFile from db.metadata import get_empty_metadata from mathesar.state import reset_reflection diff --git 
a/mathesar/tests/api/test_data_file_api.py b/mathesar/tests/api/test_data_file_api.py index deec6fe884..896f20a083 100644 --- a/mathesar/tests/api/test_data_file_api.py +++ b/mathesar/tests/api/test_data_file_api.py @@ -8,7 +8,7 @@ from mathesar.api.exceptions.error_codes import ErrorCodes from mathesar.imports import csv -from mathesar.models.deprecated import DataFile +from mathesar.models.base import DataFile from mathesar.errors import InvalidTableError @@ -16,10 +16,6 @@ def verify_data_file_data(data_file, data_file_dict): assert data_file_dict['id'] == data_file.id assert data_file_dict['file'] == f'http://testserver/media/{data_file.file.name}' assert data_file_dict['created_from'] == data_file.created_from - if data_file.table_imported_to: - assert data_file_dict['table_imported_to'] == data_file.table_imported_to.id - else: - assert data_file_dict['table_imported_to'] is None if data_file.user: assert data_file_dict['user'] == data_file.user.id else: diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 27fdbc0118..89919b5980 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -22,7 +22,8 @@ import mathesar.tests.conftest from mathesar.imports.base import create_table_from_data_file -from mathesar.models.deprecated import Schema, Table, Connection, DataFile +from mathesar.models.base import DataFile +from mathesar.models.deprecated import Schema, Table, Connection from mathesar.models.deprecated import Column as mathesar_model_column from mathesar.models.users import DatabaseRole, SchemaRole, User diff --git a/mathesar/tests/imports/test_csv.py b/mathesar/tests/imports/test_csv.py index 304f2b37ae..76acfea825 100644 --- a/mathesar/tests/imports/test_csv.py +++ b/mathesar/tests/imports/test_csv.py @@ -3,7 +3,8 @@ from django.core.files import File from sqlalchemy import text -from mathesar.models.deprecated import DataFile, Schema +from mathesar.models.base import DataFile +from mathesar.models.deprecated import Schema from mathesar.errors import InvalidTableError from mathesar.imports.base import create_table_from_data_file from mathesar.imports.csv import get_sv_dialect, get_sv_reader @@ -162,13 +163,6 @@ def test_csv_upload_with_duplicate_table_name(data_file, schema): create_table_from_data_file(data_file, table_name, schema) -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_csv_upload_table_imported_to(data_file, schema): - table = create_table_from_data_file(data_file, "NASA", schema) - data_file.refresh_from_db() - assert data_file.table_imported_to == table - - get_dialect_test_list = [ (",", '"', "", "mathesar/tests/data/patents.csv"), ("\t", '"', "", "mathesar/tests/data/patents.tsv"), diff --git a/mathesar/tests/imports/test_excel.py b/mathesar/tests/imports/test_excel.py index 8009989144..0dfe882d31 100644 --- a/mathesar/tests/imports/test_excel.py +++ b/mathesar/tests/imports/test_excel.py @@ -2,7 +2,8 @@ from django.core.files import File -from mathesar.models.deprecated import DataFile, Schema +from mathesar.models.base import DataFile +from mathesar.models.deprecated import Schema from mathesar.imports.base import create_table_from_data_file from db.schemas.utils import get_schema_oid_from_name from psycopg.errors import DuplicateTable @@ -74,13 +75,3 @@ def test_excel_upload_with_duplicate_table_name(data_file, engine_with_schema): with pytest.raises(DuplicateTable): create_table_from_data_file(data_file, table_name, schema) - - 
-@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function")
-def test_excel_upload_table_imported_to(data_file, engine_with_schema):
-    engine, schema_name = engine_with_schema
-    schema_oid = get_schema_oid_from_name(schema_name, engine)
-    schema = Schema.objects.get(oid=schema_oid)
-    table = create_table_from_data_file(data_file, "NASA", schema)
-    data_file.refresh_from_db()
-    assert data_file.table_imported_to == table
diff --git a/mathesar/tests/imports/test_json.py b/mathesar/tests/imports/test_json.py
index 20e7fe2347..c4f691fe77 100644
--- a/mathesar/tests/imports/test_json.py
+++ b/mathesar/tests/imports/test_json.py
@@ -3,7 +3,8 @@
 from django.core.files import File
 from sqlalchemy import text
 
-from mathesar.models.deprecated import DataFile, Schema
+from mathesar.models.base import DataFile
+from mathesar.models.deprecated import Schema
 from mathesar.imports.base import create_table_from_data_file
 from db.schemas.operations.create import create_schema_via_sql_alchemy
 from db.schemas.utils import get_schema_oid_from_name
@@ -80,10 +81,3 @@ def test_json_upload_with_duplicate_table_name(data_file, schema):
 
     with pytest.raises(DuplicateTable):
         create_table_from_data_file(data_file, table_name, schema)
-
-
-@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function")
-def test_json_upload_table_imported_to(data_file, schema):
-    table = create_table_from_data_file(data_file, "NASA", schema)
-    data_file.refresh_from_db()
-    assert data_file.table_imported_to == table
diff --git a/mathesar/tests/rpc/tables/test_t_metadata.py b/mathesar/tests/rpc/tables/test_t_metadata.py
index 57a9d93448..79d547aa4b 100644
--- a/mathesar/tests/rpc/tables/test_t_metadata.py
+++ b/mathesar/tests/rpc/tables/test_t_metadata.py
@@ -4,8 +4,7 @@
 Fixtures:
     monkeypatch(pytest): Lets you monkeypatch an object for testing.
 """
-from mathesar.models.base import TableMetaData, Database, Server
-from mathesar.models.deprecated import DataFile
+from mathesar.models.base import TableMetaData, Database, Server, DataFile
 from mathesar.rpc.tables import metadata
 
 
diff --git a/mathesar/utils/datafiles.py b/mathesar/utils/datafiles.py
index e9ab31d6f7..22e16ef69e 100644
--- a/mathesar/utils/datafiles.py
+++ b/mathesar/utils/datafiles.py
@@ -13,7 +13,7 @@
 from mathesar.errors import URLDownloadError
 from mathesar.imports.csv import is_valid_csv, get_sv_dialect, get_file_encoding
 from mathesar.imports.json import is_valid_json, validate_json_format
-from mathesar.models.deprecated import DataFile
+from mathesar.models.base import DataFile
 
 
 ALLOWED_FILE_FORMATS = ['csv', 'tsv', 'json', 'xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods', 'odt']

From 270db1ea3c8f122070c4dfae59b1f42decdc826b Mon Sep 17 00:00:00 2001
From: Brent Moran
Date: Tue, 15 Oct 2024 14:47:00 +0800
Subject: [PATCH 02/70] remove shared object models

---
 ...table_table_delete_sharedquery_and_more.py | 23 +++++++++++++++++++
 mathesar/models/__init__.py                   |  1 -
 mathesar/models/shares.py                     | 28 ----------------------
 3 files changed, 23 insertions(+), 29 deletions(-)
 create mode 100644 mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py
 delete mode 100644 mathesar/models/shares.py

diff --git a/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py b/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py
new file mode 100644
index 0000000000..1a240e43ce
--- /dev/null
+++ b/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.11 on 2024-10-15 06:42
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('mathesar', '0018_remove_datafile_table_imported_to_and_more'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='sharedtable',
+            name='table',
+        ),
+        migrations.DeleteModel(
+            name='SharedQuery',
+        ),
+        migrations.DeleteModel(
+            name='SharedTable',
+        ),
+    ]
diff --git a/mathesar/models/__init__.py b/mathesar/models/__init__.py
index aa15e36285..d6e0f8de62 100644
--- a/mathesar/models/__init__.py
+++ b/mathesar/models/__init__.py
@@ -1,4 +1,3 @@
 # We need to do this to register the model correctly in Django settings
 from .users import User  # noqa
-from .shares import SharedQuery, SharedTable  # noqa
 from .query import Exploration  # noqa
diff --git a/mathesar/models/shares.py b/mathesar/models/shares.py
deleted file mode 100644
index 15f9cc3cda..0000000000
--- a/mathesar/models/shares.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import uuid
-from django.db import models
-
-from mathesar.models.base import BaseModel
-
-
-class SharedEntity(BaseModel):
-    slug = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
-    enabled = models.BooleanField(default=True)
-
-    class Meta:
-        abstract = True
-
-    @classmethod
-    def get_by_slug(self, slug):
-        return self.objects.filter(slug=slug, enabled=True).first()
-
-
-class SharedTable(SharedEntity):
-    table = models.ForeignKey(
-        'Table', on_delete=models.CASCADE, related_name='shared_table'
-    )
-
-
-class SharedQuery(SharedEntity):
-    query = models.ForeignKey(
-        'Exploration', on_delete=models.CASCADE, related_name='shared_query'
-    )

From 72af0af09bfca65a1cb18c61761b81ef9b891f60 Mon Sep 17 00:00:00 2001
From: Brent Moran
Date: Tue, 15 Oct 2024 14:54:52 +0800
Subject: [PATCH 03/70] remove deprecated Exploration model

---
.../migrations/0020_delete_exploration.py | 16 + mathesar/models/__init__.py | 1 - mathesar/models/query.py | 450 ------------------ mathesar/tests/query/test_base.py | 83 ---- 4 files changed, 16 insertions(+), 534 deletions(-) create mode 100644 mathesar/migrations/0020_delete_exploration.py delete mode 100644 mathesar/models/query.py delete mode 100644 mathesar/tests/query/test_base.py diff --git a/mathesar/migrations/0020_delete_exploration.py b/mathesar/migrations/0020_delete_exploration.py new file mode 100644 index 0000000000..4926069217 --- /dev/null +++ b/mathesar/migrations/0020_delete_exploration.py @@ -0,0 +1,16 @@ +# Generated by Django 4.2.11 on 2024-10-15 06:54 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('mathesar', '0019_remove_sharedtable_table_delete_sharedquery_and_more'), + ] + + operations = [ + migrations.DeleteModel( + name='Exploration', + ), + ] diff --git a/mathesar/models/__init__.py b/mathesar/models/__init__.py index d6e0f8de62..1f6b3b1e86 100644 --- a/mathesar/models/__init__.py +++ b/mathesar/models/__init__.py @@ -1,3 +1,2 @@ # We need to do this to register the model correctly in Django settings from .users import User # noqa -from .query import Exploration # noqa diff --git a/mathesar/models/query.py b/mathesar/models/query.py deleted file mode 100644 index a82450f4d4..0000000000 --- a/mathesar/models/query.py +++ /dev/null @@ -1,450 +0,0 @@ -from django.db import models -from frozendict import frozendict - -from db.queries.base import DBQuery, InitialColumn, JoinParameter -from db.queries.operations.process import get_transforms_with_summarizes_speced -from db.transforms.operations.deserialize import deserialize_transformation -from db.transforms.operations.serialize import serialize_transformation -from db.transforms.base import Summarize -from db.functions.base import ( - Count, - ArrayAgg, - Sum, - Median, - Mode, - Percentage_True, - Max, - Min, - Mean, - PeakTime, - PeakMonth, -) -from db.functions.packed import DistinctArrayAgg - -from mathesar.api.exceptions.query_exceptions.exceptions import DeletedColumnAccess -from mathesar.models.validators import ( - DictValidator, - InitialColumnsValidator, - ListOfDictValidator, - TransformationsValidator, -) -from mathesar.models.base import BaseModel -from mathesar.state.cached_property import cached_property -from mathesar.models.deprecated import Column -from mathesar.models.relation import Relation -from mathesar.state import get_cached_metadata - - -class Exploration(BaseModel, Relation): - name = models.CharField( - max_length=128, - ) - - description = models.TextField(null=True, blank=True) - - base_table = models.ForeignKey( - "Table", on_delete=models.CASCADE, related_name="queries" - ) - - # sequence of dicts - initial_columns = models.JSONField( - validators=[ - ListOfDictValidator(field_name="initial_columns"), - InitialColumnsValidator(field_name="initial_columns"), - ], - ) - - # sequence of dicts - transformations = models.JSONField( - null=True, - blank=True, - validators=[ - ListOfDictValidator(field_name="transformations"), - TransformationsValidator(field_name="transformations"), - ], - ) - - # dict of aliases to display options - display_options = models.JSONField( - null=True, - blank=True, - validators=[ - DictValidator(field_name="display_options"), - ], - ) - - # dict of aliases to display names - display_names = models.JSONField( - null=True, - blank=True, - validators=[ - DictValidator(field_name="display_names"), - ], - 
) - - def get_records(self, **kwargs): - return self.db_query.get_records(**kwargs) - - # TODO add engine from base_table.schema._sa_engine - def sa_num_records(self, **kwargs): - return self.db_query.get_count(**kwargs) - - @property - def output_columns_described(self): - """ - Returns columns' description, which is to be returned verbatim by the - `queries/[id]/columns` endpoint. - """ - return tuple( - self._describe_query_column(sa_col) - for sa_col in self.db_query.sa_output_columns - ) - - @property - def output_columns_simple(self): - return tuple(sa_col.name for sa_col in self.db_query.sa_output_columns) - - @property - def initial_columns_described(self): - return tuple( - { - "alias": initial_col_alias, - "display_name": self._get_display_name_for_alias(initial_col_alias), - "type": dj_col.db_type.id, - "type_options": dj_col._sa_column.type_options, - "display_options": dj_col.display_options, - } - for initial_col_alias, dj_col in self._map_of_initial_col_alias_to_dj_column.items() - ) - - def _describe_query_column(self, sa_col): - alias = sa_col.name - initial_db_column = self._get_db_initial_column_by_alias(alias) - is_initial_column = initial_db_column is not None - output = dict( - alias=alias, - display_name=self._get_display_name_for_alias(alias), - type=sa_col.db_type.id, - type_options=sa_col.type_options, - display_options=self._get_display_options_for_alias(alias), - is_initial_column=is_initial_column, - ) - optionals = dict( - input_column_name=None, - input_table_name=None, - input_table_id=None, - input_alias=None, - ) - output = output | optionals - if is_initial_column: - initial_dj_column = _get_dj_column_for_initial_db_column( - initial_db_column, self._database - ) - output = output | dict( - input_column_name=initial_dj_column.name, - input_table_name=initial_dj_column.table.name, - input_table_id=initial_dj_column.table.id, - ) - else: - input_alias = self.db_query.get_input_alias_for_output_alias(alias) - output = output | dict(input_alias=input_alias) - return output - - def _get_db_initial_column_by_alias(self, alias): - for db_initial_column in self._db_initial_columns: - if db_initial_column.alias == alias: - return db_initial_column - - @property - def all_columns_description_map(self): - return { - alias: self._describe_query_column(sa_col) - for alias, sa_col in self.db_query.all_sa_columns_map.items() - } - - def replace_transformations_with_processed_transformations(self): - """ - The transformations attribute is normally specified via a HTTP request. Now we're - introducing the concept of processed transformations, where we look at the - transformations and we find transformations that may be partially specified, if any, and - replace them with transformations resulting from processing them. The frontend then - reflects our updated transformations. - - We're keeping this functionality somewhat separate from the default/simpler transformation - pipeline. Meaning that it is not enabled by default and has to be triggered on demand (by - calling this method). That is for multiple reasons. - - Whereas before the transformations attribute was a one-way flow from the client, - now it's something that the backend may redefine. This a significant complication of the - data flow. For example, if you replace transformations on a saved Exploration and save it - again, we must trigger a reflection, which can have a performance impact. 
Also, frontend - must expect that certain transformations might alter the transformation pipeline, which - would then need reflecting by frontend; that might be a breaking change. - - Note, currently we only need transformation processing when using the `query/run` - endpoint, which means that we don't need to update any persisted queries, which means that - we don't need to trigger reflection. - """ - self.transformations = self._processed_transformations - - @property - def _processed_transformations(self): - return tuple( - serialize_transformation(db_transformation) - for db_transformation in self._processed_db_transformations - ) - - @property - def _processed_db_transformations(self): - """ - Currently, the only transformation processing we're doing is finishing (when partial) the - specification of Summarize transforms. - - Note, different from _db_transformations, because this can effectively rewrite the - transformations pipeline. And we might not want to do that every time db_transformations - is accessed, due to possible performance costs. - - If it weren't for performance costs, we might consider replacing _db_transformations with - this: the effect would be that a persisted query could have different summarizations in - django database than what is being evaluated in Postgres. - """ - return get_transforms_with_summarizes_speced( - db_query=self.db_query, - engine=self._sa_engine, - metadata=get_cached_metadata(), - ) - - @property - def db_query(self): - return DBQuery( - base_table_oid=self.base_table.oid, - initial_columns=self._db_initial_columns, - engine=self._sa_engine, - transformations=self._db_transformations, - name=self.name, - metadata=get_cached_metadata(), - ) - - # TODO reused; consider using cached_property - @property - def _db_initial_columns(self): - return tuple( - _db_initial_column_from_json(json_col) for json_col in self.initial_columns - ) - - @property - def _db_transformations(self): - """No processing necessary.""" - if self.transformations: - return tuple( - deserialize_transformation(json) for json in self.transformations - ) - - def _get_display_name_for_alias(self, alias): - return self._alias_to_display_name.get(alias) - - def _get_display_options_for_alias(self, alias): - display_options = None - # Try getting display options from this model's field - if self.display_options: - display_options = self.display_options.get(alias) - # Try getting display options from Dj column, if this is an initial column - if display_options is None: - dj_col = self._map_of_initial_col_alias_to_dj_column.get(alias) - if dj_col: - display_options = dj_col.display_options - # Try recursively repeating these steps for its parent alias, if it can be found - if display_options is None: - parent_alias = self.db_query.map_of_output_alias_to_input_alias.get(alias) - if parent_alias: - display_options = self._get_display_options_for_alias(parent_alias) - return display_options - - @cached_property - def _alias_to_display_name(self): - alias_to_display_name = {} - if self.display_names is not None: - alias_to_display_name.update(self.display_names) - return alias_to_display_name - - @property - def _sa_engine(self): - return self.base_table._sa_engine - - @property - def _database(self): - return self.base_table.schema.database - - def add_defaults_to_display_names(self): - """ - We have some logic for producing default display names. This method fetches those default - display names and merges them with previously-stored display names. 
Previously-stored - display names take precedence. - """ - current_display_names = self.display_names or dict() - self.display_names = self._default_display_names | current_display_names - - @property - def _default_display_names(self): - """ - Returns default display options for initial columns merged with default display options for - summarizations. Does not return current display names (as stored in the `display_names` - attribute), though they are used when generating some of the default display names. - """ - current_display_names = self.display_names or dict() - default_display_names_for_initial_columns = ( - self._default_display_names_for_initial_columns - ) - current_display_names = ( - default_display_names_for_initial_columns | current_display_names - ) - default_display_names_for_summarize_transforms = ( - self._get_default_display_names_for_summarize_transforms( - current_display_names - ) - ) - default_display_names = ( - default_display_names_for_summarize_transforms - | default_display_names_for_initial_columns - ) - return default_display_names - - @property - def _default_display_names_for_initial_columns(self): - return { - alias: dj_col.name - for alias, dj_col in self._map_of_initial_col_alias_to_dj_column.items() - } - - def _get_default_display_names_for_summarize_transforms( - self, current_display_names - ): - default_display_names = dict() - if not current_display_names: - return default_display_names - summarize_transforms = [ - db_transform - for db_transform in self.db_query.transformations - if isinstance(db_transform, Summarize) - ] - for summarize_transform in summarize_transforms: - # Find default display names for grouping output aliases - for output_alias in summarize_transform.grouping_output_aliases: - default_display_name = _get_default_display_name_for_group_output_alias( - summarize_transform, - output_alias, - current_display_names, - ) - if default_display_name: - default_display_names[output_alias] = default_display_name - # Find default display names for aggregation output aliases - for agg_col_spec in summarize_transform.aggregation_col_specs: - input_alias = agg_col_spec.get("input_alias") - output_alias = agg_col_spec.get("output_alias") - agg_function = agg_col_spec.get("function") - default_display_name = _get_default_display_name_for_agg_output_alias( - output_alias, - input_alias, - agg_function, - current_display_names, - ) - if default_display_name: - default_display_names[output_alias] = default_display_name - return default_display_names - - @property - def _map_of_initial_col_alias_to_dj_column(self): - dj_column_ids = [col["id"] for col in self.initial_columns] - dj_columns = Column.objects.filter(pk__in=dj_column_ids) - initial_col_aliases = [ - initial_col["alias"] for initial_col in self.initial_columns - ] - return frozendict( - zip( - initial_col_aliases, - dj_columns, - ) - ) - - -def _get_dj_column_for_initial_db_column(initial_column, database): - oid = initial_column.reloid - attnum = initial_column.attnum - return Column.objects.get( - table__oid=oid, attnum=attnum, table__schema__database=database - ) - - -def _get_column_pair_from_id(col_id): - try: - col = Column.objects.get(id=col_id) - except Column.DoesNotExist: - raise DeletedColumnAccess(col_id) - return col.table.oid, col.attnum - - -def _db_initial_column_from_json(col_json): - column_pair = _get_column_pair_from_id(col_json["id"]) - reloid = column_pair[0] - attnum = column_pair[1] - alias = col_json["alias"] - jp_path = [ - _join_parameter_from_json(jp_json) 
for jp_json in col_json.get("jp_path", []) - ] - return InitialColumn( - reloid=reloid, - attnum=attnum, - alias=alias, - jp_path=jp_path if jp_path else None, - ) - - -def _join_parameter_from_json(jp_json): - left_col_id = jp_json[0] - left_oid, left_attnum = _get_column_pair_from_id(left_col_id) - right_col_id = jp_json[1] - right_oid, right_attnum = _get_column_pair_from_id(right_col_id) - return JoinParameter( - left_oid=left_oid, - left_attnum=left_attnum, - right_oid=right_oid, - right_attnum=right_attnum, - ) - - -def _get_default_display_name_for_agg_output_alias( - output_alias, - input_alias, - agg_function, - current_display_names, -): - if output_alias and input_alias and agg_function: - map_of_agg_function_to_suffix = { - DistinctArrayAgg.id: " distinct list", - ArrayAgg.id: " list", - Count.id: " count", - Sum.id: " sum", - Max.id: " max", - Median.id: " median", - Mode.id: " mode", - Percentage_True.id: " percentage true", - Min.id: " min", - Mean.id: " mean", - PeakTime.id: " peak_time", - PeakMonth.id: " peak_month", - } - suffix_to_add = map_of_agg_function_to_suffix.get(agg_function) - if suffix_to_add: - input_alias_display_name = current_display_names.get(input_alias) - if input_alias_display_name: - return input_alias_display_name + suffix_to_add - - -def _get_default_display_name_for_group_output_alias( - summarize_transform, - output_alias, - current_display_names, -): - input_alias = summarize_transform.map_of_output_alias_to_input_alias[output_alias] - input_alias_display_name = current_display_names.get(input_alias) - return input_alias_display_name diff --git a/mathesar/tests/query/test_base.py b/mathesar/tests/query/test_base.py deleted file mode 100644 index 05005a7291..0000000000 --- a/mathesar/tests/query/test_base.py +++ /dev/null @@ -1,83 +0,0 @@ -import pytest - -from mathesar.models.query import Exploration -from db.queries.base import DBQuery, InitialColumn -from db.transforms import base as transforms_base - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_convert_to_db_query(create_patents_table, get_uid): - base_table_dj = create_patents_table(table_name=get_uid()) - col1_dj = base_table_dj.get_column_by_name('Center') - col2_dj = base_table_dj.get_column_by_name('Case Number') - initial_columns_json = [ - { - 'id': col1_dj.id, - 'alias': 'col1', - }, - { - 'id': col2_dj.id, - 'alias': 'col2', - }, - ] - oid = base_table_dj.oid - attnum1 = col1_dj.attnum - attnum2 = col2_dj.attnum - initial_columns = [ - InitialColumn( - oid, - attnum1, - alias='col1', - jp_path=None, - ), - InitialColumn( - oid, - attnum2, - alias='col2', - jp_path=None, - ), - ] - transformations_json = [ - dict( - type="limit", - spec=5, - ), - dict( - type="offset", - spec=15, - ), - ] - transformations = [ - transforms_base.Limit(5), - transforms_base.Offset(15), - ] - name = "some query" - ui_query = Exploration( - name=name, - base_table=base_table_dj, - initial_columns=initial_columns_json, - transformations=transformations_json, - ) - wanted_db_query = DBQuery( - base_table_oid=oid, - initial_columns=initial_columns, - engine=ui_query._sa_engine, - transformations=transformations, - name=name, - ) - actual_db_query = ui_query.db_query - assert actual_db_query.name == wanted_db_query.name - assert actual_db_query.base_table_oid == wanted_db_query.base_table_oid - for actual, wanted in zip( - actual_db_query.initial_columns, - wanted_db_query.initial_columns - ): - assert actual.alias == wanted.alias - assert 
actual.jp_path == wanted.jp_path - assert actual.reloid == wanted.reloid - assert actual.attnum == wanted.attnum - for actual, wanted in zip( - actual_db_query.transformations, - wanted_db_query.transformations - ): - assert actual == wanted From 96acad9bde855277dc045c7fb0cb8829d627e8d2 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 15:07:48 +0800 Subject: [PATCH 04/70] remove unused validators --- mathesar/migrations/0001_initial.py | 9 +- mathesar/models/validators.py | 145 ---------------------------- 2 files changed, 4 insertions(+), 150 deletions(-) delete mode 100644 mathesar/models/validators.py diff --git a/mathesar/migrations/0001_initial.py b/mathesar/migrations/0001_initial.py index 5cf1128be7..8a4e9a4607 100644 --- a/mathesar/migrations/0001_initial.py +++ b/mathesar/migrations/0001_initial.py @@ -9,7 +9,6 @@ import django.db.models.manager import django.utils.timezone import mathesar.models.relation -import mathesar.models.validators import mathesar.utils.models @@ -116,10 +115,10 @@ class Migration(migrations.Migration): ('updated_at', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=128)), ('description', models.TextField(blank=True, null=True)), - ('initial_columns', models.JSONField(validators=[mathesar.models.validators.ListOfDictValidator(field_name='initial_columns'), mathesar.models.validators.InitialColumnsValidator(field_name='initial_columns')])), - ('transformations', models.JSONField(blank=True, null=True, validators=[mathesar.models.validators.ListOfDictValidator(field_name='transformations'), mathesar.models.validators.TransformationsValidator(field_name='transformations')])), - ('display_options', models.JSONField(blank=True, null=True, validators=[mathesar.models.validators.DictValidator(field_name='display_options')])), - ('display_names', models.JSONField(blank=True, null=True, validators=[mathesar.models.validators.DictValidator(field_name='display_names')])), + ('initial_columns', models.JSONField()), + ('transformations', models.JSONField(blank=True, null=True)), + ('display_options', models.JSONField(blank=True, null=True)), + ('display_names', models.JSONField(blank=True, null=True)), ('base_table', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='queries', to='mathesar.table')), ], options={ diff --git a/mathesar/models/validators.py b/mathesar/models/validators.py deleted file mode 100644 index e62d06a50d..0000000000 --- a/mathesar/models/validators.py +++ /dev/null @@ -1,145 +0,0 @@ -from django.utils.deconstruct import deconstructible -from mathesar.api.exceptions.validation_exceptions.exceptions import InvalidValueType, DictHasBadKeys - - -@deconstructible -class ListOfDictValidator: - - def __init__(self, field_name): - if field_name is not None: - self.field_name = field_name - - def __call__(self, value): - if not isinstance(value, list): - message = f"{value} should be a list." - raise InvalidValueType(message, field=self.field_name) - for subvalue in value: - if not isinstance(subvalue, dict): - message = f"{value} should contain only dicts." 
- raise InvalidValueType(message, field=self.field_name) - - def __eq__(self, other): - return ( - isinstance(other, ListOfDictValidator) and self.field_name == other.field_name - ) - - -@deconstructible -class InitialColumnsValidator: - - def __init__(self, field_name): - if field_name is not None: - self.field_name = field_name - - def __call__(self, value): - for initial_col in value: - keys = set(initial_col.keys()) - obligatory_keys = { - "id", - "alias", - } - missing_obligatory_keys = obligatory_keys.difference(keys) - if missing_obligatory_keys: - message = ( - f"{initial_col} doesn't contain" - f" following obligatory keys: {missing_obligatory_keys}." - ) - raise DictHasBadKeys(message, field=self.field_name) - optional_keys = { - "jp_path", - } - valid_keys = { - *obligatory_keys, - *optional_keys, - } - unexpected_keys = keys.difference(valid_keys) - if unexpected_keys: - message = f"{initial_col} contains unexpected keys: {unexpected_keys}." - raise DictHasBadKeys(message, field=self.field_name) - jp_path = initial_col.get('jp_path') - jp_path_validator = JpPathValidator(self.field_name) - jp_path_validator(jp_path) - - def __eq__(self, other): - return ( - isinstance(other, InitialColumnsValidator) and self.field_name == other.field_name - ) - - -@deconstructible -class JpPathValidator: - - def __init__(self, field_name): - if field_name: - self.field_name = field_name - - def __call__(self, value): - if value: - if not isinstance(value, list): - message = f"jp_path must be a list, instead: {value}." - raise InvalidValueType( - message, - field=self.field_name, - ) - for jp in value: - if not isinstance(jp, list): - message = f"jp_path elements must be 2-item lists, instead: {jp}." - raise InvalidValueType( - message, - field=self.field_name, - ) - for col_id in jp: - if not isinstance(col_id, int): - message = ( - "jp_path elements must only contain integer column" - f" ids, instead: {jp}." - ) - raise InvalidValueType( - message, - field=self.field_name, - ) - - def __eq__(self, other): - return ( - isinstance(other, JpPathValidator) and self.field_name == other.field_name - ) - - -@deconstructible -class TransformationsValidator: - - def __init__(self, field_name): - if field_name is not None: - self.field_name = field_name - - def __call__(self, value): - for transformation in value: - if "type" not in transformation: - message = "Each 'transformations' sub-dict must have a 'type' key." - raise DictHasBadKeys(message, field=self.field_name) - if "spec" not in transformation: - message = "Each 'transformations' sub-dict must have a 'spec' key." - raise DictHasBadKeys(message, field=self.field_name) - - def __eq__(self, other): - return ( - isinstance(other, TransformationsValidator) and self.field_name == other.field_name - ) - - -@deconstructible -class DictValidator: - - def __init__(self, field_name): - if field_name is not None: - self.field_name = field_name - - def __call__(self, value): - if not isinstance(value, dict): - message = f"{value} should be a dict." 
- raise InvalidValueType(message, field=self.field_name) - - def __eq__(self, other): - return ( - isinstance(other, TransformationsValidator) and self.field_name == other.field_name - ) From 2cd551b5015844c84aba22608347fc3037339b1a Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 15:24:46 +0800 Subject: [PATCH 05/70] remove unused Permissions Role models --- mathesar/models/users.py | 34 ---------------- mathesar/tests/conftest.py | 79 +------------------------------------- 2 files changed, 1 insertion(+), 112 deletions(-) diff --git a/mathesar/models/users.py b/mathesar/models/users.py index 301ea76314..fcd965856d 100644 --- a/mathesar/models/users.py +++ b/mathesar/models/users.py @@ -1,10 +1,6 @@ -from django.conf import settings from django.contrib.auth.models import AbstractUser from django.db import models -from mathesar.models.base import BaseModel -from mathesar.models.deprecated import Connection, Schema - class User(AbstractUser): # Name fields are changed to mitigate some of the issues in @@ -20,33 +16,3 @@ class User(AbstractUser): def metadata_privileges(self, database_id): return 'read write' - - -class Role(models.TextChoices): - MANAGER = 'manager', 'Manager' - EDITOR = 'editor', 'Editor' - VIEWER = 'viewer', 'Viewer' - - -class DatabaseRole(BaseModel): - user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='database_roles') - database = models.ForeignKey(Connection, on_delete=models.CASCADE) - role = models.CharField(max_length=10, choices=Role.choices) - - class Meta: - constraints = [ - models.UniqueConstraint(fields=['user', 'database'], name='unique_database_role') - ] - default_related_name = 'database_role' - - -class SchemaRole(BaseModel): - user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='schema_roles') - schema = models.ForeignKey(Schema, on_delete=models.CASCADE) - role = models.CharField(max_length=10, choices=Role.choices) - - class Meta: - constraints = [ - models.UniqueConstraint(fields=['user', 'schema'], name='unique_schema_role') - ] - default_related_name = 'schema_role' diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 89919b5980..6d75fc2903 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -25,7 +25,7 @@ from mathesar.models.base import DataFile from mathesar.models.deprecated import Schema, Table, Connection from mathesar.models.deprecated import Column as mathesar_model_column -from mathesar.models.users import DatabaseRole, SchemaRole, User +from mathesar.models.users import User from fixtures.utils import create_scoped_fixtures, get_fixture_value import conftest @@ -545,80 +545,3 @@ def user_tom(): user.save() yield user user.delete() - - -@pytest.fixture -def db_manager_client_factory(user_bob): - def _db_manager_client(schema): - role = 'manager' - client = APIClient() - client.login(username=user_bob.username, password='password') - DatabaseRole.objects.create(user=user_bob, database=schema.database, role=role) - return client - return _db_manager_client - - -@pytest.fixture -def db_editor_client_factory(user_turdy): - def _db_editor_client(schema): - role = 'editor' - client = APIClient() - client.login(username=user_turdy.username, password='password') - DatabaseRole.objects.create(user=user_turdy, database=schema.database, role=role) - return client - return _db_editor_client - - -@pytest.fixture -def schema_manager_client_factory(user_alice): - def _schema_manager_client(schema): - role = 
'manager' - client = APIClient() - client.login(username=user_alice.username, password='password') - SchemaRole.objects.create(user=user_alice, schema=schema, role=role) - return client - return _schema_manager_client - - -@pytest.fixture -def schema_viewer_client_factory(user_jerry): - def _schema_viewer_client(schema): - role = 'viewer' - client = APIClient() - client.login(username=user_jerry.username, password='password') - SchemaRole.objects.create(user=user_jerry, schema=schema, role=role) - return client - return _schema_viewer_client - - -@pytest.fixture -def db_viewer_schema_manager_client_factory(user_tom): - def _db_viewer_schema_manager_client(schema): - schema_role = 'manager' - db_role = 'viewer' - - client = APIClient() - client.login(username=user_tom.username, password='password') - DatabaseRole.objects.create(user=user_tom, database=schema.database, role=db_role) - SchemaRole.objects.create(user=user_tom, schema=schema, role=schema_role) - return client - return _db_viewer_schema_manager_client - - -@pytest.fixture -def superuser_client_factory(client): - """ - A facade for the `client` fixture - to the same behaviour as other role based client factories - """ - def _client(schema): - return client - return _client - - -@pytest.fixture -def anonymous_client_factory(): - def _client(schema): - client = APIClient() - return client - return _client From e5c43f228cfd7b1c6015e67c4b596a42d6a5ab0d Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 15:49:06 +0800 Subject: [PATCH 06/70] remove tests for unused import types --- mathesar/tests/imports/test_excel.py | 77 -------------------------- mathesar/tests/imports/test_json.py | 83 ---------------------------- 2 files changed, 160 deletions(-) delete mode 100644 mathesar/tests/imports/test_excel.py delete mode 100644 mathesar/tests/imports/test_json.py diff --git a/mathesar/tests/imports/test_excel.py b/mathesar/tests/imports/test_excel.py deleted file mode 100644 index 0dfe882d31..0000000000 --- a/mathesar/tests/imports/test_excel.py +++ /dev/null @@ -1,77 +0,0 @@ -import pytest - -from django.core.files import File - -from mathesar.models.base import DataFile -from mathesar.models.deprecated import Schema -from mathesar.imports.base import create_table_from_data_file -from db.schemas.utils import get_schema_oid_from_name -from psycopg.errors import DuplicateTable - - -@pytest.fixture -def data_file(patents_excel_filepath): - with open(patents_excel_filepath, "rb") as excel_file: - data_file = DataFile.objects.create(file=File(excel_file), type='excel') - return data_file - - -def check_excel_upload(table, table_name, schema, num_records, row, cols): - assert table is not None - assert table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == num_records - assert table.get_records()[0] == row - for col in cols: - assert col in table.sa_column_names - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_excel_upload(data_file, engine_with_schema): - engine, schema_name = engine_with_schema - schema_oid = get_schema_oid_from_name(schema_name, engine) - schema = Schema.objects.get(oid=schema_oid) - table_name = "NASA 1" - table = create_table_from_data_file(data_file, table_name, schema) - - num_records = 1393 - expected_row = ( - 1, - "NASA Kennedy Space Center", - "Application", - "KSC-12871", - "0", - "13/033,085", - "Polyimide Wire Insulation Repair System", - None, - ) - expected_cols = [ - "Center", - "Status", - 
"Case Number", - "Patent Number", - "Application SN", - "Title", - "Patent Expiration Date", - ] - check_excel_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="msar.add_mathesar_table no longer raises an exception if a table with the same name already exists in the database.") -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_excel_upload_with_duplicate_table_name(data_file, engine_with_schema): - table_name = "NASA 2" - - engine, schema_name = engine_with_schema - schema_oid = get_schema_oid_from_name(schema_name, engine) - schema = Schema.objects.get(oid=schema_oid) - table = create_table_from_data_file(data_file, table_name, schema) - assert table is not None - assert table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == 1393 - - with pytest.raises(DuplicateTable): - create_table_from_data_file(data_file, table_name, schema) diff --git a/mathesar/tests/imports/test_json.py b/mathesar/tests/imports/test_json.py deleted file mode 100644 index c4f691fe77..0000000000 --- a/mathesar/tests/imports/test_json.py +++ /dev/null @@ -1,83 +0,0 @@ -import pytest - -from django.core.files import File -from sqlalchemy import text - -from mathesar.models.base import DataFile -from mathesar.models.deprecated import Schema -from mathesar.imports.base import create_table_from_data_file -from db.schemas.operations.create import create_schema_via_sql_alchemy -from db.schemas.utils import get_schema_oid_from_name -from psycopg.errors import DuplicateTable - -TEST_SCHEMA = "import_json_schema" - - -@pytest.fixture -def data_file(patents_json_filepath): - with open(patents_json_filepath, "rb") as json_file: - data_file = DataFile.objects.create(file=File(json_file), type='json') - return data_file - - -@pytest.fixture() -def schema(engine, test_db_model): - create_schema_via_sql_alchemy(TEST_SCHEMA, engine) - schema_oid = get_schema_oid_from_name(TEST_SCHEMA, engine) - yield Schema.current_objects.create(oid=schema_oid, database=test_db_model) - with engine.begin() as conn: - conn.execute(text(f'DROP SCHEMA "{TEST_SCHEMA}" CASCADE;')) - - -def check_json_upload(table, table_name, schema, num_records, row, cols): - assert table is not None - assert table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == num_records - assert table.get_records()[0] == row - for col in cols: - assert col in table.sa_column_names - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_json_upload(data_file, schema): - table_name = "NASA 1" - table = create_table_from_data_file(data_file, table_name, schema) - - num_records = 1393 - expected_row = ( - 1, - "NASA Kennedy Space Center", - "Application", - "KSC-12871", - "0", - "13/033,085", - "Polyimide Wire Insulation Repair System", - '', - ) - expected_cols = [ - "Center", - "Status", - "Case Number", - "Patent Number", - "Application SN", - "Title", - "Patent Expiration Date", - ] - check_json_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="msar.add_mathesar_table no longer raises an exception if a table with the same name already exists in the database.") -def test_json_upload_with_duplicate_table_name(data_file, schema): - table_name = "NASA 2" - - table = create_table_from_data_file(data_file, table_name, schema) - assert table is not None - assert 
table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == 1393 - - with pytest.raises(DuplicateTable): - create_table_from_data_file(data_file, table_name, schema) From 8ce26bd8609c3355d493eaed4194e361b40439e3 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 16:04:02 +0800 Subject: [PATCH 07/70] add migrations for removed permissions role models --- ..._schema_remove_schemarole_user_and_more.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py diff --git a/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py b/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py new file mode 100644 index 0000000000..d4fac53596 --- /dev/null +++ b/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py @@ -0,0 +1,27 @@ +# Generated by Django 4.2.11 on 2024-10-15 08:02 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('mathesar', '0020_delete_exploration'), + ] + + operations = [ + migrations.RemoveField( + model_name='schemarole', + name='schema', + ), + migrations.RemoveField( + model_name='schemarole', + name='user', + ), + migrations.DeleteModel( + name='DatabaseRole', + ), + migrations.DeleteModel( + name='SchemaRole', + ), + ] From 8cfdee6041a499b1d3bcd0c3495dae5a363bfae7 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 16:48:04 +0800 Subject: [PATCH 08/70] remove deprecated model usage in remaining tests --- mathesar/tests/api/conftest.py | 157 ------------------ mathesar/tests/conftest.py | 219 -------------------------- mathesar/tests/database/test_types.py | 38 ----- mathesar/tests/imports/test_csv.py | 159 ------------------- 4 files changed, 573 deletions(-) delete mode 100644 mathesar/tests/database/test_types.py diff --git a/mathesar/tests/api/conftest.py b/mathesar/tests/api/conftest.py index 87a27a9ef0..3e8662c9c6 100644 --- a/mathesar/tests/api/conftest.py +++ b/mathesar/tests/api/conftest.py @@ -7,7 +7,6 @@ from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import get_oid_from_table -from mathesar.models.deprecated import Table, Column as ServiceLayerColumn from mathesar.models.base import DataFile from db.metadata import get_empty_metadata from mathesar.state import reset_reflection @@ -26,32 +25,6 @@ def _create_data_file(file_path, file_name): return _create_data_file -@pytest.fixture -def self_referential_table(create_table, get_uid): - return create_table( - table_name=get_uid(), - schema_name=get_uid(), - csv_filepath='mathesar/tests/data/self_referential_table.csv', - ) - - -@pytest.fixture -def _create_tables_from_files(create_table, get_uid): - def _create(*csv_files): - table_names = [get_uid() for i in range(len(csv_files))] - schema_name = get_uid() - return tuple( - create_table( - table_name=Path(csv_filepath).stem, - schema_name=schema_name, - csv_filepath=csv_filepath, - ) - for table_name, csv_filepath - in zip(table_names, csv_files) - ) - return _create - - @pytest.fixture def table_for_reflection(engine): schema_name = 'a_new_schema' @@ -68,133 +41,3 @@ def table_for_reflection(engine): yield schema_name, table_name, engine with engine.begin() as conn: conn.execute(text(f'DROP SCHEMA {schema_name} CASCADE;')) - - -@pytest.fixture -def column_test_table(patent_schema): - engine = 
patent_schema._sa_engine - column_list_in = [ - Column("mycolumn0", INTEGER, primary_key=True), - Column("mycolumn1", INTEGER, nullable=False), - Column("mycolumn2", INTEGER, server_default="5"), - Column("mycolumn3", VARCHAR), - ] - db_table = SATable( - "anewtable", - MetaData(bind=engine), - *column_list_in, - schema=patent_schema.name - ) - db_table.create() - db_table_oid = get_oid_from_table(db_table.name, db_table.schema, engine) - table = Table.current_objects.create(oid=db_table_oid, schema=patent_schema) - metadata = get_empty_metadata() - for sa_column in column_list_in: - attnum = get_column_attnum_from_name(db_table_oid, sa_column.name, engine, metadata=metadata) - ServiceLayerColumn.current_objects.get_or_create( - table=table, - attnum=attnum, - ) - return table - - -@pytest.fixture -def column_test_table_with_service_layer_options(patent_schema): - engine = patent_schema._sa_engine - column_list_in = [ - Column("mycolumn0", INTEGER, primary_key=True), - Column("mycolumn1", BOOLEAN), - Column("mycolumn2", INTEGER), - Column("mycolumn3", VARCHAR), - Column("mycolumn4", VARCHAR), - Column("mycolumn5", VARCHAR), - Column("mycolumn6", TIMESTAMP), - ] - column_data_list = [{}, - {'display_options': {'input': "dropdown", 'custom_labels': {"TRUE": "yes", "FALSE": "no"}}}, - {'display_options': {'show_as_percentage': True, 'number_format': 'english'}}, - {}, - {}, - {}, - {'display_options': {'format': 'YYYY-MM-DD hh:mm'}}] - db_table = SATable( - "anewtable", - MetaData(bind=engine), - *column_list_in, - schema=patent_schema.name - ) - db_table.create() - db_table_oid = get_oid_from_table(db_table.name, db_table.schema, engine) - table = Table.current_objects.create(oid=db_table_oid, schema=patent_schema) - service_columns = [] - for column_data in zip(column_list_in, column_data_list): - attnum = get_column_attnum_from_name(db_table_oid, column_data[0].name, engine, metadata=get_empty_metadata()) - display_options = column_data[1].get('display_options', None) - first_column = ServiceLayerColumn.current_objects.get_or_create( - table=table, - attnum=attnum, - display_options=display_options, - )[0] - service_columns.append(first_column) - return table, service_columns - - -@pytest.fixture -def library_ma_tables(db_table_to_dj_table, library_db_tables): - reset_reflection() - return { - table_name: db_table_to_dj_table(db_table) - for table_name, db_table - in library_db_tables.items() - } - - -@pytest.fixture -def payments_ma_table(db_table_to_dj_table, payments_db_table): - reset_reflection() - return db_table_to_dj_table(payments_db_table) - - -@pytest.fixture -def players_ma_table(db_table_to_dj_table, players_db_table): - reset_reflection() - return db_table_to_dj_table(players_db_table) - - -@pytest.fixture -def athletes_ma_table(db_table_to_dj_table, athletes_db_table): - reset_reflection() - return db_table_to_dj_table(athletes_db_table) - - -@pytest.fixture -def table_with_unknown_types(create_schema, get_uid, engine): - prefix = "unknown_types" - schema_name = f"schema_{prefix}_{get_uid()}" - schema = create_schema(schema_name) - db_name = schema.database.name - table_name = f"table_{prefix}_{get_uid()}" - fq_table_name = f"\"{schema_name}\".\"{table_name}\"" - query = f""" - SET search_path="{schema_name}"; - CREATE EXTENSION IF NOT EXISTS citext; - CREATE TABLE {fq_table_name} ( - text_column CITEXT, - point_column POINT - ); - INSERT INTO {fq_table_name} (text_column, point_column) - VALUES - ('Row 1', '(1.23, 4.56)'), - ('Row 2', '(7.89, 0.12)'), - ('Row 3', '(3.45, 
6.78)'); - """ - with engine.connect() as conn: - conn.execute(text(query)) - conn.commit() - reset_reflection(db_name=db_name) - # NOTE filtering by name is impossible here, because db object names are a dynamic properties, not model fields - all_tables = Table.current_objects.all() - for table in all_tables: - if table.name == table_name: - return table - raise Exception("Should never happen.") diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 6d75fc2903..4160355c5f 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -21,10 +21,7 @@ from db.schemas.utils import get_schema_oid_from_name import mathesar.tests.conftest -from mathesar.imports.base import create_table_from_data_file from mathesar.models.base import DataFile -from mathesar.models.deprecated import Schema, Table, Connection -from mathesar.models.deprecated import Column as mathesar_model_column from mathesar.models.users import User from fixtures.utils import create_scoped_fixtures, get_fixture_value @@ -85,66 +82,6 @@ def ignore_all_dbs_except_default(SES_dj_databases): del SES_dj_databases[entry_name] -# TODO consider renaming dj_db to target_db -def create_dj_db(request): - """ - Like create_db, but adds the new db to Django's settings.DATABASES dict. - """ - add_db_to_dj_settings = get_fixture_value( - request, - mathesar.tests.conftest.add_db_to_dj_settings - ) - create_db = get_fixture_value( - request, - conftest.create_db - ) - - def _create_and_add(db_name): - create_db(db_name) - add_db_to_dj_settings(db_name) - credentials = settings.DATABASES.get(db_name) - database_model = Connection.current_objects.create( - name=db_name, - db_name=db_name, - username=credentials['USER'], - password=credentials['PASSWORD'], - host=credentials['HOST'], - port=credentials['PORT'] - ) - return database_model - yield _create_and_add - - -# defines: -# FUN_create_dj_db -# CLA_create_dj_db -# MOD_create_dj_db -# SES_create_dj_db -create_scoped_fixtures(globals(), create_dj_db) - - -@pytest.fixture(scope="function", autouse=True) -def test_db_model(request, test_db_name, django_db_blocker): - add_db_to_dj_settings = get_fixture_value( - request, - mathesar.tests.conftest.add_db_to_dj_settings - ) - - add_db_to_dj_settings(test_db_name) - with django_db_blocker.unblock(): - credentials = settings.DATABASES.get(test_db_name) - database_model = Connection.current_objects.create( - name=test_db_name, - db_name=test_db_name, - username=credentials['USER'], - password=credentials['PASSWORD'], - host=credentials['HOST'], - port=credentials['PORT'] - ) - yield database_model - database_model.delete() - - def add_db_to_dj_settings(request): """ If the Django layer should be aware of a db, it should be added to settings.DATABASES dict. @@ -273,73 +210,6 @@ def multiple_sheets_excel_filepath(): return 'mathesar/tests/data/excel_parsing/multiple_sheets.xlsx' -@pytest.fixture -def db_table_to_dj_table(engine, create_schema): - """ - Factory creating Django Table models from DB/SA tables. 
- """ - def _create_ma_table(db_table): - schema_name = db_table.schema - dj_schema = create_schema(schema_name) - db_table_oid = get_oid_from_table( - db_table.name, schema_name, engine - ) - dj_table, _ = Table.current_objects.get_or_create( - oid=db_table_oid, schema=dj_schema - ) - return dj_table - yield _create_ma_table - - -@pytest.fixture -def empty_nasa_table(patent_schema, engine_with_schema): - engine, _ = engine_with_schema - NASA_TABLE = 'NASA Schema List' - db_table = SATable( - NASA_TABLE, MetaData(bind=engine), - Column('id', Integer, primary_key=True), - schema=patent_schema.name, - ) - db_table.create() - db_table_oid = get_oid_from_table(db_table.name, db_table.schema, engine) - table = Table.current_objects.create(oid=db_table_oid, schema=patent_schema) - - yield table - - table.delete_sa_table() - table.delete() - - -@pytest.fixture -def patent_schema(create_schema): - PATENT_SCHEMA = 'Patents' - yield create_schema(PATENT_SCHEMA) - - -@pytest.fixture -def reservations_schema(create_schema): - RESERVATIONS_SCHEMA = 'Reservations' - yield create_schema(RESERVATIONS_SCHEMA) - - -# TODO rename to create_ma_schema -@pytest.fixture -def create_schema(test_db_model, create_db_schema): - """ - Creates a DJ Schema model factory, making sure to cache and clean up new instances. - """ - engine = test_db_model._sa_engine - - def _create_schema(schema_name): - create_db_schema(schema_name, engine) - schema_oid = get_schema_oid_from_name(schema_name, engine) - schema_model, _ = Schema.current_objects.get_or_create(oid=schema_oid, database=test_db_model) - return schema_model - - yield _create_schema - # NOTE: Schema model is not cleaned up. Maybe invalidate cache? - - # TODO rename to create_mathesar_db_table @pytest.fixture def create_mathesar_table(create_db_schema): @@ -355,101 +225,12 @@ def _create_mathesar_table( yield _create_mathesar_table -@pytest.fixture -def create_reservations_table(engine_with_schema, reservations_schema): - engine, _ = engine_with_schema - table_name = 'Exclusion Check' - schema_name = reservations_schema.name - cols = [ - Column('id', Integer, primary_key=True), - Column('room_number', Integer), - Column('check_in_date', Date), - Column('check_out_date', Date) - ] - insert_data = [ - (1, 1, '11/10/2023', '11/15/2023'), - (2, 1, '11/16/2023', '11/20/2023') - ] - sa_table = create_test_table(table_name, cols, insert_data, schema_name, engine) - table_oid = get_oid_from_table(sa_table.name, schema_name, engine) - table = Table.current_objects.create(oid=table_oid, schema=reservations_schema) - yield table - table.delete_sa_table() - table.delete() - - -@pytest.fixture -def create_patents_table(patents_csv_filepath, patent_schema, create_table): - schema_name = patent_schema.name - csv_filepath = patents_csv_filepath - - def _create_table(table_name, schema_name=schema_name): - return create_table( - table_name=table_name, - schema_name=schema_name, - csv_filepath=csv_filepath, - ) - - return _create_table - - -@pytest.fixture -def patents_table(create_patents_table, uid): - return create_patents_table(f"table_patents_{uid}") - - -# TODO rename to create_ma_table_from_csv -@pytest.fixture -def create_table(create_schema): - def _create_table(table_name, schema_name, csv_filepath): - data_file = _get_datafile_for_path(csv_filepath) - schema_model = create_schema(schema_name) - return create_table_from_data_file(data_file, table_name, schema_model) - return _create_table - - def _get_datafile_for_path(path): with open(path, 'rb') as file: datafile = 
DataFile.objects.create(file=File(file), type='csv') return datafile -@pytest.fixture -def create_column(): - def _create_column(table, column_data): - attnum = table.add_column(column_data)[0] - column = mathesar_model_column.current_objects.get_or_create(attnum=attnum, table=table) - return column[0] - return _create_column - - -@pytest.fixture -def custom_types_schema_url(schema, live_server): - return f"{live_server}/{schema.database.name}/{schema.id}" - - -@pytest.fixture -def create_column_with_display_options(): - def _create_column(table, column_data): - column = table.add_column(column_data) - attnum = get_column_attnum_from_name( - table.oid, - [column.name], - table.schema._sa_engine, - metadata=get_empty_metadata() - ) - # passing table object caches sa_columns, missing out any new columns - # So table.id is passed to get new instance of table. - column = mathesar_model_column.current_objects.get_or_create( - attnum=attnum, - table_id=table.id, - display_options=column_data.get('display_options', None) - ) - return column[0] - - return _create_column - - @pytest.fixture def user_alice(): user = User.objects.create( diff --git a/mathesar/tests/database/test_types.py b/mathesar/tests/database/test_types.py deleted file mode 100644 index c74be77335..0000000000 --- a/mathesar/tests/database/test_types.py +++ /dev/null @@ -1,38 +0,0 @@ -from mathesar.database.types import UIType -from mathesar.models.deprecated import Connection -from db.types.base import known_db_types - - -def _verify_type_mapping(supported_ui_types): - valid_ui_types = {ui_type for ui_type in UIType} - valid_db_types = {db_type for db_type in known_db_types} - seen_db_types = [] - seen_ui_types = [] - for ui_type in supported_ui_types: - # Verify ui types - assert ui_type in valid_ui_types - # Ensure ui types are not repeated. - assert ui_type not in seen_ui_types - seen_ui_types.append(ui_type) - - # Verify id - assert hasattr(ui_type, 'id') - assert isinstance(ui_type.display_name, str) - - # Verify display_name - assert hasattr(ui_type, 'display_name') - assert isinstance(ui_type.display_name, str) - - # Verify DB types - assert hasattr(ui_type, 'db_types') - for db_type in ui_type.db_types: - assert db_type in valid_db_types - # Ensure types are not repeated. 
- assert db_type not in seen_db_types - seen_db_types.append(db_type) - - -def test_type_mapping(): - databases = Connection.objects.all() - for database in databases: - _verify_type_mapping(database.supported_ui_types) diff --git a/mathesar/tests/imports/test_csv.py b/mathesar/tests/imports/test_csv.py index 76acfea825..cab7e4e9f7 100644 --- a/mathesar/tests/imports/test_csv.py +++ b/mathesar/tests/imports/test_csv.py @@ -1,166 +1,7 @@ import pytest -from django.core.files import File -from sqlalchemy import text - -from mathesar.models.base import DataFile -from mathesar.models.deprecated import Schema from mathesar.errors import InvalidTableError -from mathesar.imports.base import create_table_from_data_file from mathesar.imports.csv import get_sv_dialect, get_sv_reader -from db.schemas.operations.create import create_schema_via_sql_alchemy -from db.schemas.utils import get_schema_oid_from_name -from db.constants import COLUMN_NAME_TEMPLATE -from psycopg.errors import DuplicateTable - -TEST_SCHEMA = "import_csv_schema" - - -@pytest.fixture -def data_file(patents_csv_filepath): - with open(patents_csv_filepath, "rb") as csv_file: - data_file = DataFile.objects.create(file=File(csv_file), type='csv') - return data_file - - -@pytest.fixture -def headerless_data_file(headerless_patents_csv_filepath): - with open(headerless_patents_csv_filepath, "rb") as csv_file: - data_file = DataFile.objects.create(file=File(csv_file), header=False, type='csv') - return data_file - - -@pytest.fixture -def col_names_with_spaces_data_file(col_names_with_spaces_csv_filepath): - with open(col_names_with_spaces_csv_filepath, "rb") as csv_file: - data_file = DataFile.objects.create(file=File(csv_file), type='csv') - return data_file - - -@pytest.fixture -def col_headers_empty_data_file(col_headers_empty_csv_filepath): - with open(col_headers_empty_csv_filepath, "rb") as csv_file: - data_file = DataFile.objects.create(file=File(csv_file), type='csv') - return data_file - - -@pytest.fixture() -def schema(engine, test_db_model): - create_schema_via_sql_alchemy(TEST_SCHEMA, engine) - schema_oid = get_schema_oid_from_name(TEST_SCHEMA, engine) - yield Schema.current_objects.create(oid=schema_oid, database=test_db_model) - with engine.begin() as conn: - conn.execute(text(f'DROP SCHEMA "{TEST_SCHEMA}" CASCADE;')) - - -def check_csv_upload(table, table_name, schema, num_records, row, cols): - assert table is not None - assert table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == num_records - assert table.get_records()[0] == row - for col in cols: - assert col in table.sa_column_names - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_csv_upload(data_file, schema): - table_name = "NASA 1" - table = create_table_from_data_file(data_file, table_name, schema) - - num_records = 1393 - expected_row = ( - 1, - "NASA Kennedy Space Center", - "Application", - "KSC-12871", - "0", - "13/033,085", - "Polyimide Wire Insulation Repair System", - None, - ) - expected_cols = [ - "Center", - "Status", - "Case Number", - "Patent Number", - "Application SN", - "Title", - "Patent Expiration Date", - ] - check_csv_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_headerless_csv_upload(headerless_data_file, schema): - table_name = "NASA no headers" - table = 
create_table_from_data_file(headerless_data_file, table_name, schema) - - num_records = 1393 - expected_row = ( - 1, - "NASA Kennedy Space Center", - "Application", - "KSC-12871", - "0", - "13/033,085", - "Polyimide Wire Insulation Repair System", - None, - ) - expected_cols = [COLUMN_NAME_TEMPLATE + str(i) for i in range(7)] - - check_csv_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_col_names_with_spaces_csv(col_names_with_spaces_data_file, schema): - table_name = "Column names with spaces" - table = create_table_from_data_file(col_names_with_spaces_data_file, table_name, schema) - - num_records = 2 - expected_row = ( - 1, - "foo", - "bar", - ) - expected_cols = ["id", "a", "b"] - - check_csv_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_col_headers_empty_csv(col_headers_empty_data_file, schema): - table_name = "Empty column header" - table = create_table_from_data_file(col_headers_empty_data_file, table_name, schema) - - num_records = 2 - expected_row = (1, "aa", "bb", "cc", "dd") - expected_cols = ["id", "Column 0", "Column 1", "col2", "Column 3"] - - check_csv_upload( - table, table_name, schema, num_records, expected_row, expected_cols - ) - - -@pytest.mark.skip(reason="msar.add_mathesar_table no longer raises an exception if a table with the same name already exists in the database.") -@pytest.mark.skip(reason="We removed models used in the `create_table_from_data_file` setup function") -def test_csv_upload_with_duplicate_table_name(data_file, schema): - table_name = "NASA 2" - - table = create_table_from_data_file(data_file, table_name, schema) - assert table is not None - assert table.name == table_name - assert table.schema == schema - assert table.sa_num_records() == 1393 - - with pytest.raises(DuplicateTable): - create_table_from_data_file(data_file, table_name, schema) get_dialect_test_list = [ From 7fb8613915e53627ddeaddc362d8d33170ee16f3 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 16:54:53 +0800 Subject: [PATCH 09/70] remove deprecated model use from migrations, signals --- mathesar/apps.py | 1 - mathesar/migrations/0005_release_0_1_4.py | 3 +-- .../0007_users_permissions_remodel.py | 4 +--- mathesar/signals.py | 21 ------------------- 4 files changed, 2 insertions(+), 27 deletions(-) delete mode 100644 mathesar/signals.py diff --git a/mathesar/apps.py b/mathesar/apps.py index f6f4db771e..660ebe2b6c 100644 --- a/mathesar/apps.py +++ b/mathesar/apps.py @@ -17,5 +17,4 @@ class MathesarConfig(AppConfig): def ready(self): """Perform initialization tasks.""" - import mathesar.signals # noqa post_migrate.connect(_prepare_database_model) diff --git a/mathesar/migrations/0005_release_0_1_4.py b/mathesar/migrations/0005_release_0_1_4.py index 0849b2a6e3..35f529308b 100644 --- a/mathesar/migrations/0005_release_0_1_4.py +++ b/mathesar/migrations/0005_release_0_1_4.py @@ -1,7 +1,6 @@ from django.db import migrations, models, connection import django.contrib.postgres.fields import encrypted_fields.fields -import mathesar.models.deprecated def column_order_to_jsonb_postgres_fwd(apps, schema_editor): @@ -12,7 +11,7 @@ def column_order_to_jsonb_postgres_fwd(apps, schema_editor): migrations.AlterField( model_name='tablesettings', name='column_order', - 
field=models.JSONField(blank=True, default=None, null=True, validators=[mathesar.models.deprecated.validate_column_order]), + field=models.JSONField(blank=True, default=None, null=True), ), diff --git a/mathesar/migrations/0007_users_permissions_remodel.py b/mathesar/migrations/0007_users_permissions_remodel.py index 1f790a1622..1238a405ce 100644 --- a/mathesar/migrations/0007_users_permissions_remodel.py +++ b/mathesar/migrations/0007_users_permissions_remodel.py @@ -5,8 +5,6 @@ import django.db.models.deletion import encrypted_fields.fields -import mathesar.models.deprecated - class Migration(migrations.Migration): @@ -29,7 +27,7 @@ class Migration(migrations.Migration): migrations.AlterField( model_name='tablesettings', name='column_order', - field=models.JSONField(blank=True, default=None, null=True, validators=[mathesar.models.deprecated.validate_column_order]), + field=models.JSONField(blank=True, default=None, null=True), ), migrations.RenameModel(old_name='UIQuery', new_name='Exploration'), migrations.CreateModel( diff --git a/mathesar/signals.py b/mathesar/signals.py deleted file mode 100644 index 0997730ed7..0000000000 --- a/mathesar/signals.py +++ /dev/null @@ -1,21 +0,0 @@ -from django.db.models.signals import post_save -from django.dispatch import receiver - -from mathesar.models.deprecated import ( - Column, Table, _set_default_preview_template, -) -from mathesar.state.django import reflect_new_table_constraints - - -@receiver(post_save, sender=Table) -def sync_table_constraints(**kwargs): - # When a table is created, we want to immediately make the appropriate - # Constraint model instances for that table's constraints. - if kwargs['created']: - reflect_new_table_constraints(kwargs['instance']) - - -@receiver(post_save, sender=Column) -def compute_preview_column_settings(**kwargs): - instance = kwargs['instance'] - _set_default_preview_template(instance.table) From 6d280fdf7d07d850d4826f06de1c74e9f72ec809 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 17:03:32 +0800 Subject: [PATCH 10/70] remove unused import function --- mathesar/imports/base.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) delete mode 100644 mathesar/imports/base.py diff --git a/mathesar/imports/base.py b/mathesar/imports/base.py deleted file mode 100644 index d1755e86a3..0000000000 --- a/mathesar/imports/base.py +++ /dev/null @@ -1,38 +0,0 @@ -from mathesar.models.deprecated import Table -from mathesar.imports.csv import create_db_table_from_csv_data_file -from mathesar.imports.excel import create_db_table_from_excel_data_file -from mathesar.imports.json import create_db_table_from_json_data_file -from db.tables.operations.select import get_oid_from_table -from mathesar.errors import InvalidTableError - -SAMPLE_SIZE = 20000 -CHECK_ROWS = 10 - - -def create_table_from_data_file(data_file, name, schema, comment=None): - engine = schema._sa_engine - if data_file.type == 'csv' or data_file.type == 'tsv': - db_table = create_db_table_from_csv_data_file( - data_file, name, schema, comment=comment - ) - elif data_file.type == 'json': - db_table = create_db_table_from_json_data_file( - data_file, name, schema, comment=comment - ) - elif data_file.type == 'excel': - db_table = create_db_table_from_excel_data_file( - data_file, name, schema, comment=comment - ) - else: - raise InvalidTableError - db_table_oid = get_oid_from_table(db_table.name, db_table.schema, engine) - # Using current_objects to create the table instead of objects. 
objects - # triggers re-reflection, which will cause a race condition to create the table - table = Table.current_objects.get( - oid=db_table_oid, - schema=schema, - ) - table.import_verified = False - table.save() - data_file.save() - return table From 23b019569eb673071e3aa00cbccd091840a9ffe7 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 17:10:57 +0800 Subject: [PATCH 11/70] remove deprecated model use from remaining exception class --- .../exceptions/database_exceptions/exceptions.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py index ed176fa07a..7e45350eb0 100644 --- a/mathesar/api/exceptions/database_exceptions/exceptions.py +++ b/mathesar/api/exceptions/database_exceptions/exceptions.py @@ -9,7 +9,6 @@ MathesarAPIException, get_default_exception_detail, ) -from mathesar.models.deprecated import Constraint class UniqueViolationAPIException(MathesarAPIException): @@ -32,21 +31,6 @@ def __init__( ): if details is None and table is not None: details = {} - try: - constraint_oid = get_constraint_oid_by_name_and_table_oid( - exception.orig.diag.constraint_name, - table.oid, - table._sa_engine - ) - constraint = Constraint.objects.get(table=table, oid=constraint_oid) - details = { - "constraint": constraint.id, - "constraint_columns": [c.id for c in constraint.columns], - } - except TypeError: - details = { - "constraint": None, - } details.update( { "original_details": exception.orig.diag.message_detail, From e2cc891630248f1aa5f53f61894358602c8360c7 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 17:26:08 +0800 Subject: [PATCH 12/70] add forgotten migration --- .../0022_alter_tablesettings_column_order.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 mathesar/migrations/0022_alter_tablesettings_column_order.py diff --git a/mathesar/migrations/0022_alter_tablesettings_column_order.py b/mathesar/migrations/0022_alter_tablesettings_column_order.py new file mode 100644 index 0000000000..b3fe3ec693 --- /dev/null +++ b/mathesar/migrations/0022_alter_tablesettings_column_order.py @@ -0,0 +1,19 @@ +# Generated by Django 4.2.11 on 2024-10-15 09:24 + +from django.db import migrations, models +import mathesar.models.deprecated + + +class Migration(migrations.Migration): + + dependencies = [ + ('mathesar', '0021_remove_schemarole_schema_remove_schemarole_user_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='tablesettings', + name='column_order', + field=models.JSONField(blank=True, default=None, null=True, validators=[mathesar.models.deprecated.validate_column_order]), + ), + ] From 8ffe233f7f3570cdde741c5ffa210a8f1e4501a2 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 18:00:31 +0800 Subject: [PATCH 13/70] remove mathesar state wrangling functions --- mathesar/apps.py | 13 -- mathesar/imports/csv.py | 3 - mathesar/imports/excel.py | 3 - mathesar/imports/json.py | 3 - mathesar/models/deprecated.py | 15 +- mathesar/state/base.py | 52 ------- mathesar/state/cached_property.py | 164 --------------------- mathesar/state/django.py | 233 ------------------------------ mathesar/tests/api/conftest.py | 1 - mathesar/tests/conftest.py | 17 --- mathesar/urls.py | 1 - mathesar/utils/explorations.py | 4 +- mathesar/utils/models.py | 2 - mathesar/views.py | 8 - 14 files changed, 3 insertions(+), 516 deletions(-) delete mode 100644 mathesar/state/base.py delete mode 
100644 mathesar/state/cached_property.py delete mode 100644 mathesar/state/django.py diff --git a/mathesar/apps.py b/mathesar/apps.py index 660ebe2b6c..d3f12bd571 100644 --- a/mathesar/apps.py +++ b/mathesar/apps.py @@ -1,20 +1,7 @@ from django.apps import AppConfig -from django.conf import settings -from django.db.models.signals import post_migrate - - -def _prepare_database_model(**kwargs): - from mathesar.state import make_sure_initial_reflection_happened # noqa - # TODO fix test DB loading to make this unnecessary - if not settings.TEST: - make_sure_initial_reflection_happened() class MathesarConfig(AppConfig): """Initialization manager.""" name = "mathesar" - - def ready(self): - """Perform initialization tasks.""" - post_migrate.connect(_prepare_database_model) diff --git a/mathesar/imports/csv.py b/mathesar/imports/csv.py index e1b45236c5..6786c74a5f 100644 --- a/mathesar/imports/csv.py +++ b/mathesar/imports/csv.py @@ -12,8 +12,6 @@ from db.constants import COLUMN_NAME_TEMPLATE from psycopg2.errors import IntegrityError, DataError -from mathesar.state import reset_reflection - # The user-facing documentation replicates these delimiter characters. If you # change this variable, please update the documentation as well. ALLOWED_DELIMITERS = ",\t:|;" @@ -159,5 +157,4 @@ def create_db_table_from_csv_data_file(data_file, name, schema, comment=None): drop_table(name=name, schema=schema.name, engine=engine) column_names_alt = get_alternate_column_names(column_names) table = insert_records_from_csv_data_file(name, schema, column_names_alt, engine, comment, data_file) - reset_reflection(db_name=db_model.name) return table diff --git a/mathesar/imports/excel.py b/mathesar/imports/excel.py index 74d1c36b08..9692994dfc 100644 --- a/mathesar/imports/excel.py +++ b/mathesar/imports/excel.py @@ -10,8 +10,6 @@ from psycopg2.errors import IntegrityError, DataError from sqlalchemy.exc import IntegrityError as sqlalchemy_integrity_error -from mathesar.state import reset_reflection - def insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe): table = create_string_column_table( @@ -63,5 +61,4 @@ def create_db_table_from_excel_data_file(data_file, name, schema, comment=None): column_names_alt = get_alternate_column_names(column_names) table = insert_records_from_dataframe(name, schema, column_names_alt, engine, comment, dataframe) - reset_reflection(db_name=db_model.name) return table diff --git a/mathesar/imports/json.py b/mathesar/imports/json.py index 7cfe93097d..0908f944c7 100644 --- a/mathesar/imports/json.py +++ b/mathesar/imports/json.py @@ -13,8 +13,6 @@ from psycopg2.errors import IntegrityError, DataError from sqlalchemy.exc import IntegrityError as sqlalchemy_integrity_error -from mathesar.state import reset_reflection - def is_valid_json(data): try: @@ -98,5 +96,4 @@ def create_db_table_from_json_data_file(data_file, name, schema, comment=None): column_names_alt = get_alternate_column_names(column_names) table = insert_records_from_json_data_file(name, schema, column_names_alt, engine, comment, json_filepath, max_level) - reset_reflection(db_name=db_model.name) return table diff --git a/mathesar/models/deprecated.py b/mathesar/models/deprecated.py index da8f63d762..d9fd8c12d1 100644 --- a/mathesar/models/deprecated.py +++ b/mathesar/models/deprecated.py @@ -7,6 +7,7 @@ from django.core.exceptions import ValidationError from django.db import models from django.db.models import JSONField +from django.utils.functional import cached_property from 
encrypted_fields.fields import EncryptedCharField from db.columns import utils as column_utils from db.columns.operations.create import create_column, duplicate_column @@ -50,8 +51,6 @@ from mathesar.utils.prefetch import PrefetchManager, Prefetcher from mathesar.database.base import create_mathesar_engine from mathesar.database.types import UIType, get_ui_type_from_db_type -from mathesar.state import make_sure_initial_reflection_happened, get_cached_metadata, reset_reflection -from mathesar.state.cached_property import cached_property from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException @@ -60,7 +59,6 @@ class DatabaseObjectManager(PrefetchManager): def get_queryset(self): - make_sure_initial_reflection_happened() return super().get_queryset() @@ -230,12 +228,10 @@ def description(self): def update_sa_schema(self, update_params): result = model_utils.update_sa_schema(self, update_params) - reset_reflection(db_name=self.database.name) return result def delete_sa_schema(self): drop_schema_via_name(self._sa_engine, self.name, cascade=True) - reset_reflection(db_name=self.database.name) def clear_name_cache(self): cache_key = f"{self.database.name}_schema_name_{self.oid}" @@ -444,7 +440,6 @@ def add_column(self, column_data): self.oid, column_data, ) - reset_reflection(db_name=self.schema.database.name) return result def alter_column(self, column_attnum, column_data): @@ -454,7 +449,6 @@ def alter_column(self, column_attnum, column_data): column_attnum, column_data, ) - reset_reflection(db_name=self.schema.database.name) return result def drop_column(self, column_attnum): @@ -463,7 +457,6 @@ def drop_column(self, column_attnum): column_attnum, self.schema._sa_engine, ) - reset_reflection(db_name=self.schema.database.name) def duplicate_column(self, column_attnum, copy_data, copy_constraints, name=None): result = duplicate_column( @@ -474,7 +467,6 @@ def duplicate_column(self, column_attnum, copy_data, copy_constraints, name=None copy_data=copy_data, copy_constraints=copy_constraints, ) - reset_reflection(db_name=self.schema.database.name) return result def get_preview(self, column_definitions): @@ -507,7 +499,6 @@ def update_sa_table(self, update_params): def delete_sa_table(self): result = drop_table(self.name, self.schema.name, self.schema._sa_engine, cascade=True) - reset_reflection(db_name=self.schema.database.name) return result def get_record(self, id_value): @@ -561,7 +552,6 @@ def add_constraint(self, constraint_obj): add_constraint_via_sql_alchemy(constraint_obj, engine=self._sa_engine) ) result = Constraint.current_objects.create(oid=constraint_oid, table=self) - reset_reflection(db_name=self.schema.database.name) return result def get_column_name_id_bidirectional_map(self): @@ -624,7 +614,6 @@ def move_columns(self, columns_to_move, target_table): self.save() remainder_column_names = column_names_id_map.keys() - column_names_to_move self.update_column_reference(remainder_column_names, column_names_id_map) - reset_reflection(db_name=self.schema.database.name) return extracted_sa_table, remainder_sa_table def split_table( @@ -656,7 +645,6 @@ def split_table( extracted_table.update_column_reference(extracted_column_names, column_names_id_map) remainder_table = Table.current_objects.get(schema__database=self.schema.database, oid=remainder_table_oid) remainder_table.update_column_reference(remainder_column_names, column_names_id_map) - reset_reflection(db_name=self.schema.database.name) remainder_fk_column = 
Column.objects.get(table=remainder_table, attnum=linking_fk_column_attnum) return extracted_table, remainder_table, remainder_fk_column @@ -883,7 +871,6 @@ def drop(self): self.name ) self.delete() - reset_reflection(db_name=self.table.schema.database.name) class PreviewColumnSettings(BaseModel): diff --git a/mathesar/state/base.py b/mathesar/state/base.py deleted file mode 100644 index 9fc5d7c4a9..0000000000 --- a/mathesar/state/base.py +++ /dev/null @@ -1,52 +0,0 @@ -from mathesar.state.django import reflect_db_objects, clear_dj_cache -from mathesar.state.metadata import reset_cached_metadata, get_cached_metadata -from mathesar.state.cached_property import clear_cached_property_cache - - -def make_sure_initial_reflection_happened(): - if not _has_initial_reflection_happened(): - reset_reflection() - - -def reset_reflection(db_name=None): - """ - Resets our reflection of what's on Postgres databases. Reset meaning that information is - either deleted (to be refreshed on demand) or is preemptively refreshed. - - We have following forms of state (aka reflection), and all are reset by this routine: - - Django cache (django.core.cache), - - Django models (mathesar.models namespace), - - SQLAlchemy MetaData. - - Note, this causes immediate calls to Postgres. - """ - clear_dj_cache() - clear_cached_property_cache() - set_initial_reflection_happened() - reset_cached_metadata() - _trigger_django_model_reflection(db_name) - - -def _trigger_django_model_reflection(db_name): - reflect_db_objects(metadata=get_cached_metadata(), db_name=db_name) - - -def set_initial_reflection_happened(has_it_happened=True): - """ - Many, probably most, of our state-dependent routines presume that reflection has occured. Since - those routines are not adapted to check whether that's true, this is a mechanism to ensure that - at least one reflection has happened. That, together with us triggering re-reflection after - each mutation, should keep state up-to-date. - - Only public for testing fixture purposes. Should not otherwise be called outside this file. - """ - global _initial_reflection_happened - _initial_reflection_happened = has_it_happened - - -def _has_initial_reflection_happened(): - global _initial_reflection_happened - return _initial_reflection_happened - - -_initial_reflection_happened = False diff --git a/mathesar/state/cached_property.py b/mathesar/state/cached_property.py deleted file mode 100644 index dcbfcf67e2..0000000000 --- a/mathesar/state/cached_property.py +++ /dev/null @@ -1,164 +0,0 @@ -import uuid -import logging - -# A globally unique object that's used to signal a cache-miss. -NO_VALUE = object() - -logger = logging.getLogger(__name__) - - -def cached_property(fn): - """ - Caches property values, similarly to django.utils.functional.cached_property, but in a central - cache, which means we can clear all property caches via a central method call, which is - necessary for managing our state. - """ - return _cached_property(fn) - - -def key_cached_property(key_fn): - """ - Like cached_property, but takes a key_fn, which is expected to be a function that takes the - instance on which this property is accessed and returns a key that this property should use - when indexing in the central cache. - """ - return lambda fn: _cached_property(fn, key_fn=key_fn) - - -def clear_cached_property_cache(): - """ - Clear caches of all cached properties. 
- """ - logger.debug("clear_cached_property_cache") - global _central_ache - _central_cache = {} # noqa: F841 - - -class _cached_property: - def __init__(self, fn, key_fn=None): - self.key_fn = key_fn - self.original_get_fn = fn - self.attribute_name = None - - def __set_name__(self, _, name): - """ - Make note of the attribute name. - """ - if self.attribute_name is None: - self.attribute_name = name - elif name != self.attribute_name: - raise TypeError( - "Cannot assign the same cached_property to two different names " - f"({self.attribute_name!r} and {name!r})." - ) - - def __get__(self, instance, _): - key = self._get_ip_key(instance=instance) - cached_value = _get_from_central_cache(key=key) - if cached_value is not NO_VALUE: - return cached_value - else: - assert self.original_get_fn is not None - new_value = self.original_get_fn(instance) - _set_on_central_cache(key=key, value=new_value) - return new_value - - def __set__(self, instance, value): - key = self._get_ip_key(instance) - _set_on_central_cache(key=key, value=value) - - def __delete__(self, instance): - key = self._get_ip_key(instance) - _delete_from_central_cache(key) - - def _get_ip_key(self, instance): - """ - Gets an instance-and-property-specific key (abbreviated instance-property key or ip key) - for indexing in the central cache. - """ - if self._should_derive_ip_keys_from_key_fn(): - ip_key = self._get_key_fn_derived_ip_key(instance) - else: - ip_key = self._get_random_ip_key(instance) - return ip_key - - def _should_derive_ip_keys_from_key_fn(self): - """ - If a key_fn is provided, it will always be used for deriving ip keys. - """ - return self.key_fn is not None - - def _get_key_fn_derived_ip_key(self, instance): - """ - Calls the key_fn with the instance. That is expected to produce a hashable key that will be - used for indexing in the central cache. Allows sharing the same central cache entry - between multiple pieces of code (not necessarily properties). - - NOTE key_fn-derived ip keys are not cached, because we don't have a mechanism for - re-triggering ip key generation. We could implement that via a flag that would be - reset by a `mathesar.state.reset_reflection()` call. However, key_fn calls are currently - cheap. - """ - if self.key_fn is not None: - ip_key = self.key_fn(instance) - return ip_key - - def _get_random_ip_key(self, instance): - """ - Gets a random ip key. No cache-sharing possible. - """ - # https://docs.python.org/3/library/uuid.html#uuid.uuid4 - ip_key = self._get_ip_key_from_instance_cache(instance) - if ip_key is not NO_VALUE: - return ip_key - else: - ip_key = uuid.uuid4() - self._set_ip_key_on_instance_cache(instance, ip_key) - return ip_key - - def _get_ip_key_from_instance_cache(self, instance): - """ - Get the instance-and-property-specific central cache key that's cached on this instance. - """ - property_key = self._get_property_key() - instance_cache = self._get_instance_cache(instance) - return instance_cache.get(property_key, NO_VALUE) - - def _set_ip_key_on_instance_cache(self, instance, ip_key): - property_key = self._get_property_key() - instance_cache = self._get_instance_cache(instance) - instance_cache[property_key] = ip_key - - def _get_instance_cache(self, instance): - """ - An instance cache is an instance-specific cache whose purpose is to hold the - instance-and-property-specific keys to the central cache. It holds the central cache keys indexed by - property-specific keys (see _get_property_key). 
- """ - return instance.__dict__ - - def _get_property_key(self): - """ - This key is property-specific (e.g. the Column name property), but not instance-specific. - Together with the instance cache, it let's you store instance-and-property-specific central - cache keys. - """ - return f'_property_key__{self.attribute_name}' - - -def _get_from_central_cache(key): - global _central_cache - return _central_cache.get(key, NO_VALUE) - - -def _set_on_central_cache(key, value): - global _central_cache - _central_cache[key] = value - - -def _delete_from_central_cache(key): - global _central_cache - _central_cache[key] = NO_VALUE - - -_central_cache = {} diff --git a/mathesar/state/django.py b/mathesar/state/django.py deleted file mode 100644 index 4feef16dd0..0000000000 --- a/mathesar/state/django.py +++ /dev/null @@ -1,233 +0,0 @@ -import logging -from collections import defaultdict -from functools import reduce - -import operator -from django.core.cache import cache as dj_cache -from django.db.models import Prefetch, Q -from sqlalchemy.exc import OperationalError - -from db.columns.operations.select import get_column_attnums_from_tables -from db.constraints.operations.select import get_constraints_with_oids -from db.schemas.operations.select import get_mathesar_schemas_with_oids -from db.tables.operations.select import get_table_oids_from_schemas -# We import the entire models_deprecated.deprecated module to avoid a circular import error -from mathesar.models import deprecated as models_deprecated -from mathesar.api.serializers.shared_serializers import DisplayOptionsMappingSerializer, \ - DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY -from mathesar.database.base import create_mathesar_engine - - -logger = logging.getLogger(__name__) - - -def clear_dj_cache(): - logger.debug('clear_dj_cache') - dj_cache.clear() - - -# NOTE: All querysets used for reflection should use the .current_objects manager -# instead of the .objects manger. The .objects manager calls reflect_db_objects when a -# queryset is created, and will recurse if used in these functions. - - -def reflect_db_objects(metadata, db_name=None): - databases = models_deprecated.Connection.current_objects.all() - if db_name is not None: - databases = databases.filter(name=db_name) - sync_databases_status(databases) - for database in databases: - if database.deleted is False: - reflect_schemas_from_database(database) - schemas = models_deprecated.Schema.current_objects.filter(database=database).prefetch_related( - Prefetch('database', queryset=databases) - ) - reflect_tables_from_schemas(schemas, metadata=metadata) - tables = models_deprecated.Table.current_objects.filter(schema__in=schemas).prefetch_related( - Prefetch('schema', queryset=schemas) - ) - reflect_columns_from_tables(tables, metadata=metadata) - reflect_constraints_from_database(database) - else: - models_deprecated.Schema.current_objects.filter(database=database).delete() - - -def sync_databases_status(databases): - """Update status and check health for current Connection Model instances.""" - for db in databases: - try: - db._sa_engine.connect() - db._sa_engine.dispose() - _set_db_is_deleted(db, False) - except (OperationalError, KeyError): - _set_db_is_deleted(db, True) - - -def _set_db_is_deleted(db, deleted): - """ - Assures that a Django Connection model's `deleted` field is equal to the `deleted` - parameter, updating if necessary. Takes care to `save()` only when an update has been performed, - to save on the noteworthy performance cost. 
- """ - if db.deleted is not deleted: - db.deleted = deleted - db.save() - - -# TODO pass in a cached engine instead of creating a new one -def reflect_schemas_from_database(database): - engine = create_mathesar_engine(database) - db_schema_oids = { - schema['oid'] for schema in get_mathesar_schemas_with_oids(engine) - } - - schemas = [] - for oid in db_schema_oids: - schema = models_deprecated.Schema(oid=oid, database=database) - schemas.append(schema) - models_deprecated.Schema.current_objects.bulk_create(schemas, ignore_conflicts=True) - for schema in models_deprecated.Schema.current_objects.all().select_related('database'): - if schema.database == database and schema.oid not in db_schema_oids: - # Deleting Schemas are a rare occasion, not worth deleting in bulk - schema.delete() - engine.dispose() - - -def reflect_tables_from_schemas(schemas, metadata): - if len(schemas) < 1: - return - engine = schemas[0]._sa_engine - schema_oids = [schema.oid for schema in schemas] - db_table_oids = { - (table['oid'], table['schema_oid']) - for table in get_table_oids_from_schemas(schema_oids, engine, metadata=metadata) - } - tables = [] - for oid, schema_oid in db_table_oids: - schema = next(schema for schema in schemas if schema.oid == schema_oid) - table = models_deprecated.Table(oid=oid, schema=schema) - tables.append(table) - models_deprecated.Table.current_objects.bulk_create(tables, ignore_conflicts=True) - # Calling signals manually because bulk create does not emit any signals - deleted_tables = [] - for table in models_deprecated.Table.current_objects.filter(schema__in=schemas).select_related('schema'): - if (table.oid, table.schema.oid) not in db_table_oids: - deleted_tables.append(table.id) - - models_deprecated.Table.current_objects.filter(id__in=deleted_tables).delete() - - -def reflect_columns_from_tables(tables, metadata): - if len(tables) < 1: - return - engine = tables[0]._sa_engine - table_oids = [table.oid for table in tables] - attnum_tuples = get_column_attnums_from_tables(table_oids, engine, metadata=metadata) - - _create_reflected_columns(attnum_tuples, tables) - - _delete_stale_columns(attnum_tuples, tables) - # Manually trigger preview templates computation signal - for table in tables: - models_deprecated._set_default_preview_template(table) - - _invalidate_columns_with_incorrect_display_options(tables) - - -def _invalidate_columns_with_incorrect_display_options(tables): - columns_with_invalid_display_option = [] - columns = models_deprecated.Column.current_objects.filter(table__in=tables) - for column in columns: - if column.display_options: - # If the type of column has changed, existing display options won't be valid anymore. 
- serializer = DisplayOptionsMappingSerializer( - data=column.display_options, - context={DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY: column.db_type} - ) - if not serializer.is_valid(raise_exception=False): - columns_with_invalid_display_option.append(column.id) - if len(columns_with_invalid_display_option) > 0: - models_deprecated.Column.current_objects.filter(id__in=columns_with_invalid_display_option).update(display_options=None) - - -def _create_reflected_columns(attnum_tuples, tables): - columns = [] - for attnum, table_oid in attnum_tuples: - table = next(table for table in tables if table.oid == table_oid) - column = models_deprecated.Column(attnum=attnum, table=table, display_options=None) - columns.append(column) - models_deprecated.Column.current_objects.bulk_create(columns, ignore_conflicts=True) - - -def _delete_stale_columns(attnum_tuples, tables): - attnums_mapped_by_table_oid = defaultdict(list) - for table in tables: - # Incase a table does not contain any column, it won't show up in the `attnum_tuples` - # In such cases we should delete all the column on Django - attnums_mapped_by_table_oid[table.oid] = [] - for attnum, table_oid in attnum_tuples: - attnums_mapped_by_table_oid[table_oid].append(attnum) - stale_columns_conditions = [] - for table_oid, attnums in attnums_mapped_by_table_oid.items(): - table = next(table for table in tables if table.oid == table_oid) - stale_columns_conditions.append(Q(table=table) & ~Q(attnum__in=attnums)) - stale_columns_query = reduce( - operator.or_, - stale_columns_conditions - ) - models_deprecated.Column.objects.filter(stale_columns_query).delete() - - -# TODO pass in a cached engine instead of creating a new one -def reflect_constraints_from_database(database): - engine = create_mathesar_engine(database) - db_constraints = get_constraints_with_oids(engine) - map_of_table_oid_to_constraint_oids = defaultdict(list) - for db_constraint in db_constraints: - table_oid = db_constraint['conrelid'] - constraint_oid = db_constraint['oid'] - map_of_table_oid_to_constraint_oids[table_oid].append(constraint_oid) - - table_oids = map_of_table_oid_to_constraint_oids.keys() - tables = models_deprecated.Table.current_objects.filter(oid__in=table_oids) - constraint_objs_to_create = [] - for table in tables: - constraint_oids = map_of_table_oid_to_constraint_oids.get(table.oid, []) - for constraint_oid in constraint_oids: - constraint_obj = models_deprecated.Constraint(oid=constraint_oid, table=table) - constraint_objs_to_create.append(constraint_obj) - models_deprecated.Constraint.current_objects.bulk_create(constraint_objs_to_create, ignore_conflicts=True) - _delete_stale_dj_constraints(db_constraints, database) - engine.dispose() - - -def _delete_stale_dj_constraints(known_db_constraints, database): - """ - Deletes stale Constraint Django model instances in this database. A constraint is stale when it - is not in the provided `known_db_constraints` structure. 
- """ - known_db_constraint_oids = set( - known_db_constraint['oid'] - for known_db_constraint - in known_db_constraints - ) - stale_dj_constraints = models_deprecated.Constraint.current_objects.filter( - ~Q(oid__in=known_db_constraint_oids), - table__schema__database=database, - ) - stale_dj_constraints.delete() - - -# TODO pass in a cached engine instead of creating a new one -def reflect_new_table_constraints(table): - engine = create_mathesar_engine(table.schema.database) - db_constraints = get_constraints_with_oids(engine, table_oid=table.oid) - constraints = [ - models_deprecated.Constraint.current_objects.get_or_create( - oid=db_constraint['oid'], - table=table - ) - for db_constraint in db_constraints - ] - engine.dispose() - return constraints diff --git a/mathesar/tests/api/conftest.py b/mathesar/tests/api/conftest.py index 3e8662c9c6..f2aeeccd96 100644 --- a/mathesar/tests/api/conftest.py +++ b/mathesar/tests/api/conftest.py @@ -9,7 +9,6 @@ from db.tables.operations.select import get_oid_from_table from mathesar.models.base import DataFile from db.metadata import get_empty_metadata -from mathesar.state import reset_reflection @pytest.fixture diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 4160355c5f..73b5de38c4 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -26,8 +26,6 @@ from fixtures.utils import create_scoped_fixtures, get_fixture_value import conftest -from mathesar.state import reset_reflection -from mathesar.state.base import set_initial_reflection_happened from db.metadata import get_empty_metadata from db.tests.columns.utils import create_test_table @@ -56,21 +54,6 @@ def django_db_modify_db_settings( return -@pytest.fixture(autouse=True) -def reflection_fixture(): - """ - During setup, makes sure reflection is reset when one of our models' querysets is next - accessed. During teardown, eagerly resets reflection; unfortunately that currently causes - redundant reflective calls to Postgres. 
- """ - logger = logging.getLogger('mark_reflection_as_not_having_happened') - logger.debug('setup') - set_initial_reflection_happened(False) - yield - reset_reflection() - logger.debug('teardown') - - @pytest.fixture(scope="session", autouse=True) def ignore_all_dbs_except_default(SES_dj_databases): """ diff --git a/mathesar/urls.py b/mathesar/urls.py index 7c66b8942f..f67c89da2c 100644 --- a/mathesar/urls.py +++ b/mathesar/urls.py @@ -19,7 +19,6 @@ path('api/rpc/v0/', views.MathesarRPCEntryPoint.as_view()), path('api/db/v0/', include(db_router.urls)), path('api/ui/v0/', include(ui_router.urls)), - path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'), path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'), path('auth/login/', superuser_exist(LoginView.as_view(redirect_authenticated_user=True)), name='login'), path('auth/create_superuser/', superuser_must_not_exist(SuperuserFormView.as_view()), name='superuser_create'), diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index f3b288df07..1f91f5c413 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -1,4 +1,5 @@ from db.engine import create_future_engine_with_custom_types +from db.metadata import get_empty_metadata from db.records.operations.select import get_count from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.queries.operations.process import get_transforms_with_summarizes_speced @@ -22,7 +23,6 @@ from mathesar.api.utils import process_annotated_records from mathesar.models.base import Explorations, ColumnMetaData, Database from mathesar.rpc.columns.metadata import ColumnMetaDataRecord -from mathesar.state import get_cached_metadata def list_explorations(database_id, schema_oid=None): @@ -76,7 +76,7 @@ def run_exploration(exploration_def, conn, limit=100, offset=0): conn.info.dbname, conn.info.port ) - metadata = get_cached_metadata() + metadata = get_empty_metadata() base_table_oid = exploration_def["base_table_oid"] initial_columns = exploration_def['initial_columns'] processed_initial_columns = [] diff --git a/mathesar/utils/models.py b/mathesar/utils/models.py index 85904a0847..5cb07ce9be 100644 --- a/mathesar/utils/models.py +++ b/mathesar/utils/models.py @@ -12,7 +12,6 @@ from mathesar.api.exceptions.error_codes import ErrorCodes from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions -from mathesar.state import reset_reflection def user_directory_path(instance, filename): @@ -34,7 +33,6 @@ def update_sa_table(table, validated_data): try: data = _update_columns_side_effector(table, validated_data) alter_table(table.name, table.oid, table.schema.name, table.schema._sa_engine, data) - reset_reflection(db_name=table.schema.database.name) # TODO: Catch more specific exceptions except InvalidTypeError as e: raise e diff --git a/mathesar/views.py b/mathesar/views.py index e5eabc9fbf..d523d15782 100644 --- a/mathesar/views.py +++ b/mathesar/views.py @@ -13,7 +13,6 @@ from mathesar.rpc.servers.configured import list_ as get_servers_list from mathesar.rpc.tables import list_with_metadata as tables_list from mathesar.api.ui.serializers.users import UserSerializer -from mathesar.state import reset_reflection from mathesar import __version__ @@ -102,13 +101,6 @@ class MathesarRPCEntryPoint(LoginRequiredMixin, RPCEntryPoint): pass -@login_required -@api_view(['POST']) -def reflect_all(_): - reset_reflection() - return Response(status=status.HTTP_200_OK) - 
- @login_required def home(request): database_list = get_database_list(request) From a08988587d4cdfe5f18691e9b56dfe64d19240db Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 18:06:55 +0800 Subject: [PATCH 14/70] remove unused imports --- .../database_exceptions/exceptions.py | 3 --- mathesar/models/deprecated.py | 2 +- mathesar/state/__init__.py | 1 - mathesar/tests/api/conftest.py | 25 ------------------- mathesar/tests/conftest.py | 9 ------- mathesar/views.py | 3 --- 6 files changed, 1 insertion(+), 42 deletions(-) diff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py index 7e45350eb0..c19fec083f 100644 --- a/mathesar/api/exceptions/database_exceptions/exceptions.py +++ b/mathesar/api/exceptions/database_exceptions/exceptions.py @@ -1,8 +1,5 @@ from rest_framework import status -from db.constraints.operations.select import ( - get_constraint_oid_by_name_and_table_oid, -) from db.columns.exceptions import InvalidTypeError from mathesar.api.exceptions.error_codes import ErrorCodes from mathesar.api.exceptions.generic_exceptions.base_exceptions import ( diff --git a/mathesar/models/deprecated.py b/mathesar/models/deprecated.py index d9fd8c12d1..0c828c9ea8 100644 --- a/mathesar/models/deprecated.py +++ b/mathesar/models/deprecated.py @@ -47,6 +47,7 @@ from mathesar.models.base import BaseModel from mathesar.models.relation import Relation +from mathesar.state import get_cached_metadata from mathesar.utils import models as model_utils from mathesar.utils.prefetch import PrefetchManager, Prefetcher from mathesar.database.base import create_mathesar_engine @@ -677,7 +678,6 @@ def insert_records_to_existing_table(self, existing_table, data_files, mappings= col_mappings = [[from_col.name, target_col.name] for from_col, target_col in mappings] else: col_mappings = None - data_file = data_files[0] try: table, _ = insert_from_select(from_table, target_table, engine, col_mappings) except Exception as e: diff --git a/mathesar/state/__init__.py b/mathesar/state/__init__.py index 21f59e41a1..e39b9a9680 100644 --- a/mathesar/state/__init__.py +++ b/mathesar/state/__init__.py @@ -1,2 +1 @@ -from mathesar.state.base import make_sure_initial_reflection_happened, reset_reflection # noqa: F401 from mathesar.state.metadata import get_cached_metadata # noqa: F401 diff --git a/mathesar/tests/api/conftest.py b/mathesar/tests/api/conftest.py index f2aeeccd96..288a23f257 100644 --- a/mathesar/tests/api/conftest.py +++ b/mathesar/tests/api/conftest.py @@ -1,14 +1,7 @@ -from pathlib import Path - from django.core.files import File import pytest -from sqlalchemy import Column, INTEGER, VARCHAR, MetaData, BOOLEAN, TIMESTAMP, text -from sqlalchemy import Table as SATable -from db.columns.operations.select import get_column_attnum_from_name -from db.tables.operations.select import get_oid_from_table from mathesar.models.base import DataFile -from db.metadata import get_empty_metadata @pytest.fixture @@ -22,21 +15,3 @@ def _create_data_file(file_path, file_name): return data_file return _create_data_file - - -@pytest.fixture -def table_for_reflection(engine): - schema_name = 'a_new_schema' - table_name = 'a_new_table' - with engine.begin() as conn: - conn.execute(text(f'CREATE SCHEMA {schema_name};')) - with engine.begin() as conn: - conn.execute( - text( - f'CREATE TABLE {schema_name}.{table_name}' - f' (id INTEGER, name VARCHAR);' - ) - ) - yield schema_name, table_name, engine - with engine.begin() as conn: - 
conn.execute(text(f'DROP SCHEMA {schema_name} CASCADE;')) diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 73b5de38c4..850397aa41 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -2,7 +2,6 @@ This inherits the fixtures in the root conftest.py """ import pytest -import logging import responses from copy import deepcopy @@ -12,12 +11,7 @@ from django.db import connection as dj_connection from rest_framework.test import APIClient -from sqlalchemy import Column, MetaData, Integer, Date -from sqlalchemy import Table as SATable - -from db.tables.operations.select import get_oid_from_table from db.tables.operations.create import create_mathesar_table as actual_create_mathesar_table -from db.columns.operations.select import get_column_attnum_from_name from db.schemas.utils import get_schema_oid_from_name import mathesar.tests.conftest @@ -25,9 +19,6 @@ from mathesar.models.users import User from fixtures.utils import create_scoped_fixtures, get_fixture_value -import conftest -from db.metadata import get_empty_metadata -from db.tests.columns.utils import create_test_table @pytest.fixture diff --git a/mathesar/views.py b/mathesar/views.py index d523d15782..1fe64cc29f 100644 --- a/mathesar/views.py +++ b/mathesar/views.py @@ -3,9 +3,6 @@ from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import render, redirect from modernrpc.views import RPCEntryPoint -from rest_framework import status -from rest_framework.decorators import api_view -from rest_framework.response import Response from mathesar.rpc.databases.configured import list_ as databases_list from mathesar.rpc.explorations import list_ as explorations_list From 7daaf8e666ff4221002c657f47214b5e48f9d7a6 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 18:11:39 +0800 Subject: [PATCH 15/70] remove remaining import of deprecated models --- mathesar/utils/preview.py | 109 -------------------------------------- 1 file changed, 109 deletions(-) diff --git a/mathesar/utils/preview.py b/mathesar/utils/preview.py index a9f05cc98a..fea13a9162 100644 --- a/mathesar/utils/preview.py +++ b/mathesar/utils/preview.py @@ -1,116 +1,7 @@ import re -from db.constraints.utils import ConstraintType -from mathesar.models.deprecated import Column, Constraint, Table - - -def _preview_info_by_column_id(fk_constraints, previous_path=None, exising_columns=None): - if previous_path is None: - previous_path = [] - if exising_columns is None: - exising_columns = [] - preview_info = {} - preview_columns = exising_columns - for fk_constraint in fk_constraints: - constrained_column = fk_constraint.columns[0] - # For now only single column foreign key is used. 
- referent_column = fk_constraint.referent_columns[0] - referent_table = Table.objects.select_related('settings__preview_settings').get(id=referent_column.table_id) - referent_table_settings = referent_table.settings - preview_template = referent_table_settings.preview_settings.template - preview_data_column_ids = column_ids_from_preview_template(preview_template) - preview_data_columns = Column.objects.filter(id__in=preview_data_column_ids) - current_position = (constrained_column.id, referent_column.id) - current_path = previous_path + [current_position] - # Extract the template for foreign key columns of the referent table - referent_preview_info, referent_preview_columns = get_preview_info( - referent_table.id, - preview_data_columns, - current_path, - exising_columns - ) - preview_columns = preview_columns + referent_preview_columns - for column_key, column_value in referent_preview_info.items(): - # Replace the foreign key column id with the respective template of the referent table - preview_template = preview_template.replace(f'{{{column_key}}}', f'{column_value["template"]}') - path_prefix = compute_path_prefix(current_path) - - for preview_data_column_id in preview_data_column_ids: - if preview_data_column_id not in referent_preview_info: - column_alias_name = compute_path_str(path_prefix, preview_data_column_id) - # Replace the column id in the template with the path alias - # To avoid conflict in case of multiple column referencing same table - preview_template = preview_template.replace(f'{{{preview_data_column_id}}}', f'{{{column_alias_name}}}') - initial_column = {'id': preview_data_column_id, "alias": column_alias_name, "jp_path": current_path} - preview_columns.append(initial_column) - preview_info[constrained_column.id] = {"template": preview_template, 'path': current_path} - return preview_info, preview_columns - - -def compute_path_str(path_prefix, preview_data_column_id): - column_alias_name = f'{path_prefix}__col__{preview_data_column_id}' - return column_alias_name - - -def compute_path_prefix(paths): - path_prefix = "___".join([f"{path[0]}__{path[1]}" for path in paths]) - return path_prefix - - -def column_ids_from_preview_template(preview_template): - preview_data_column_str_ids = column_alias_from_preview_template(preview_template) - preview_data_column_ids = list(map(int, preview_data_column_str_ids)) - return preview_data_column_ids - def column_alias_from_preview_template(preview_template): preview_columns_extraction_regex = r'\{(.*?)\}' preview_data_column_ids = re.findall(preview_columns_extraction_regex, preview_template) return preview_data_column_ids - - -def get_preview_info(referrer_table_pk, restrict_columns=None, path=None, existing_columns=None): - if path is None: - path = [] - if existing_columns is None: - existing_columns = [] - table_constraints = Constraint.objects.filter(table_id=referrer_table_pk).select_related('table__schema__database') - fk_constraints = [ - table_constraint - for table_constraint in table_constraints - if table_constraint.type == ConstraintType.FOREIGN_KEY.value - ] - if restrict_columns: - fk_constraints = filter( - _get_filter_restricted_columns_fn(restrict_columns), - fk_constraints - ) - fk_constraints = filter( - _get_filter_out_circular_dependency_columns_fn(path), - fk_constraints - ) - - preview_info, columns = _preview_info_by_column_id( - fk_constraints, - path, - existing_columns - ) - return preview_info, columns - - -def _get_filter_restricted_columns_fn(restricted_columns): - def 
_filter_restricted_columns(fk_constraint): - constrained_column = fk_constraint.columns[0] - return constrained_column in restricted_columns - - return _filter_restricted_columns - - -def _get_filter_out_circular_dependency_columns_fn(path): - - def _filter_out_circular_dependency_columns(fk_constraint): - constrained_column = fk_constraint.columns[0] - referent_column = fk_constraint.referent_columns[0] - return (constrained_column.id, referent_column.id) not in path - - return _filter_out_circular_dependency_columns From 9b5c56d9bf11617a2d7b77926d24ee4c2276bb0e Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 18:20:20 +0800 Subject: [PATCH 16/70] remove all deprecated models, reset migrations --- ...e_remove_databaserole_database_and_more.py | 111 +++ ...ove_datafile_table_imported_to_and_more.py | 23 - ...table_table_delete_sharedquery_and_more.py | 23 - .../migrations/0020_delete_exploration.py | 16 - ..._schema_remove_schemarole_user_and_more.py | 27 - .../0022_alter_tablesettings_column_order.py | 19 - mathesar/models/deprecated.py | 940 ------------------ 7 files changed, 111 insertions(+), 1048 deletions(-) create mode 100644 mathesar/migrations/0018_remove_constraint_table_remove_databaserole_database_and_more.py delete mode 100644 mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py delete mode 100644 mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py delete mode 100644 mathesar/migrations/0020_delete_exploration.py delete mode 100644 mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py delete mode 100644 mathesar/migrations/0022_alter_tablesettings_column_order.py delete mode 100644 mathesar/models/deprecated.py diff --git a/mathesar/migrations/0018_remove_constraint_table_remove_databaserole_database_and_more.py b/mathesar/migrations/0018_remove_constraint_table_remove_databaserole_database_and_more.py new file mode 100644 index 0000000000..170348a4a4 --- /dev/null +++ b/mathesar/migrations/0018_remove_constraint_table_remove_databaserole_database_and_more.py @@ -0,0 +1,111 @@ +# Generated by Django 4.2.11 on 2024-10-15 10:18 + +from django.db import migrations, models +import mathesar.models.base + + +class Migration(migrations.Migration): + + dependencies = [ + ('mathesar', '0017_explorations_schema_oid'), + ] + + operations = [ + migrations.RemoveField( + model_name='constraint', + name='table', + ), + migrations.RemoveField( + model_name='databaserole', + name='database', + ), + migrations.RemoveField( + model_name='databaserole', + name='user', + ), + migrations.RemoveField( + model_name='exploration', + name='base_table', + ), + migrations.RemoveField( + model_name='schema', + name='database', + ), + migrations.RemoveField( + model_name='schemarole', + name='schema', + ), + migrations.RemoveField( + model_name='schemarole', + name='user', + ), + migrations.RemoveField( + model_name='sharedquery', + name='query', + ), + migrations.RemoveField( + model_name='sharedtable', + name='table', + ), + migrations.RemoveField( + model_name='table', + name='import_target', + ), + migrations.RemoveField( + model_name='table', + name='schema', + ), + migrations.RemoveField( + model_name='tablesettings', + name='preview_settings', + ), + migrations.RemoveField( + model_name='tablesettings', + name='table', + ), + migrations.RemoveField( + model_name='datafile', + name='table_imported_to', + ), + migrations.AlterField( + model_name='datafile', + name='file', + 
field=models.FileField(upload_to=mathesar.models.base.DataFile._user_directory_path), + ), + migrations.DeleteModel( + name='Column', + ), + migrations.DeleteModel( + name='Connection', + ), + migrations.DeleteModel( + name='Constraint', + ), + migrations.DeleteModel( + name='DatabaseRole', + ), + migrations.DeleteModel( + name='Exploration', + ), + migrations.DeleteModel( + name='PreviewColumnSettings', + ), + migrations.DeleteModel( + name='Schema', + ), + migrations.DeleteModel( + name='SchemaRole', + ), + migrations.DeleteModel( + name='SharedQuery', + ), + migrations.DeleteModel( + name='SharedTable', + ), + migrations.DeleteModel( + name='Table', + ), + migrations.DeleteModel( + name='TableSettings', + ), + ] diff --git a/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py b/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py deleted file mode 100644 index 3bb884ef83..0000000000 --- a/mathesar/migrations/0018_remove_datafile_table_imported_to_and_more.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by Django 4.2.11 on 2024-10-14 15:42 - -from django.db import migrations, models -import mathesar.models.base - - -class Migration(migrations.Migration): - - dependencies = [ - ('mathesar', '0017_explorations_schema_oid'), - ] - - operations = [ - migrations.RemoveField( - model_name='datafile', - name='table_imported_to', - ), - migrations.AlterField( - model_name='datafile', - name='file', - field=models.FileField(upload_to=mathesar.models.base.DataFile._user_directory_path), - ), - ] diff --git a/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py b/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py deleted file mode 100644 index 1a240e43ce..0000000000 --- a/mathesar/migrations/0019_remove_sharedtable_table_delete_sharedquery_and_more.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by Django 4.2.11 on 2024-10-15 06:42 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ('mathesar', '0018_remove_datafile_table_imported_to_and_more'), - ] - - operations = [ - migrations.RemoveField( - model_name='sharedtable', - name='table', - ), - migrations.DeleteModel( - name='SharedQuery', - ), - migrations.DeleteModel( - name='SharedTable', - ), - ] diff --git a/mathesar/migrations/0020_delete_exploration.py b/mathesar/migrations/0020_delete_exploration.py deleted file mode 100644 index 4926069217..0000000000 --- a/mathesar/migrations/0020_delete_exploration.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by Django 4.2.11 on 2024-10-15 06:54 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ('mathesar', '0019_remove_sharedtable_table_delete_sharedquery_and_more'), - ] - - operations = [ - migrations.DeleteModel( - name='Exploration', - ), - ] diff --git a/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py b/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py deleted file mode 100644 index d4fac53596..0000000000 --- a/mathesar/migrations/0021_remove_schemarole_schema_remove_schemarole_user_and_more.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by Django 4.2.11 on 2024-10-15 08:02 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ('mathesar', '0020_delete_exploration'), - ] - - operations = [ - migrations.RemoveField( - model_name='schemarole', - name='schema', - ), - 
migrations.RemoveField( - model_name='schemarole', - name='user', - ), - migrations.DeleteModel( - name='DatabaseRole', - ), - migrations.DeleteModel( - name='SchemaRole', - ), - ] diff --git a/mathesar/migrations/0022_alter_tablesettings_column_order.py b/mathesar/migrations/0022_alter_tablesettings_column_order.py deleted file mode 100644 index b3fe3ec693..0000000000 --- a/mathesar/migrations/0022_alter_tablesettings_column_order.py +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by Django 4.2.11 on 2024-10-15 09:24 - -from django.db import migrations, models -import mathesar.models.deprecated - - -class Migration(migrations.Migration): - - dependencies = [ - ('mathesar', '0021_remove_schemarole_schema_remove_schemarole_user_and_more'), - ] - - operations = [ - migrations.AlterField( - model_name='tablesettings', - name='column_order', - field=models.JSONField(blank=True, default=None, null=True, validators=[mathesar.models.deprecated.validate_column_order]), - ), - ] diff --git a/mathesar/models/deprecated.py b/mathesar/models/deprecated.py deleted file mode 100644 index 0c828c9ea8..0000000000 --- a/mathesar/models/deprecated.py +++ /dev/null @@ -1,940 +0,0 @@ -from functools import reduce - -from bidict import bidict - -from django.conf import settings -from django.core.cache import cache -from django.core.exceptions import ValidationError -from django.db import models -from django.db.models import JSONField -from django.utils.functional import cached_property -from encrypted_fields.fields import EncryptedCharField -from db.columns import utils as column_utils -from db.columns.operations.create import create_column, duplicate_column -from db.columns.operations.alter import alter_column -from db.columns.operations.drop import drop_column -from db.columns.operations.select import ( - get_column_description, - get_column_attnum_from_names_as_map, get_column_name_from_attnum, - get_map_of_attnum_to_column_name, get_map_of_attnum_and_table_oid_to_column_name, -) -from db.constraints.operations.create import add_constraint_via_sql_alchemy -from db.constraints.operations.drop import drop_constraint -from db.constraints.operations.select import get_constraint_record_from_oid -from db.constraints import utils as constraint_utils -from db.dependents.dependents_utils import get_dependents_graph, has_dependents -from db.metadata import get_empty_metadata -from db.records.operations.delete import bulk_delete_records, delete_record -from db.records.operations.insert import insert_record_or_records -from db.records.operations.select import get_column_cast_records, get_count, get_record -from db.records.operations.select import get_records -from db.records.operations.update import update_record -from db.schemas.operations.drop import drop_schema_via_name -from db.schemas.operations.select import get_schema_description -from db.schemas import utils as schema_utils -from db.tables import utils as table_utils -from db.tables.operations.drop import drop_table -from db.tables.operations.move_columns import move_columns_between_related_tables -from db.tables.operations.select import ( - get_oid_from_table, - reflect_table_from_oid, - get_table_description, - reflect_tables_from_oids -) -from db.tables.operations.split import extract_columns_from_table -from db.records.operations.insert import insert_from_select -from db.tables.utils import get_primary_key_column - -from mathesar.models.base import BaseModel -from mathesar.models.relation import Relation -from mathesar.state import get_cached_metadata -from 
mathesar.utils import models as model_utils -from mathesar.utils.prefetch import PrefetchManager, Prefetcher -from mathesar.database.base import create_mathesar_engine -from mathesar.database.types import UIType, get_ui_type_from_db_type -from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException - - -NAME_CACHE_INTERVAL = 60 * 5 - - -class DatabaseObjectManager(PrefetchManager): - def get_queryset(self): - return super().get_queryset() - - -class ReflectionManagerMixin(models.Model): - """ - Used to reflect objects that exists on the user database but does not have a equivalent mathesar reference object. - """ - # The default manager, current_objects, does not reflect database objects. - # This saves us from having to deal with Django trying to automatically reflect db - # objects in the background when we might not expect it. - current_objects = models.Manager() - objects = DatabaseObjectManager() - - class Meta: - abstract = True - - def __str__(self): - return f"{self.__class__.__name__}" - - -class DatabaseObject(ReflectionManagerMixin, BaseModel): - """ - Objects that can be referenced using a database identifier - """ - oid = models.PositiveIntegerField() - - class Meta: - abstract = True - - def __str__(self): - return f"{self.__class__.__name__}: {self.oid}" - - def __repr__(self): - return f'<{self.__class__.__name__}: {self.oid}>' - - -# TODO: Replace with a proper form of caching -# See: https://github.com/centerofci/mathesar/issues/280 -_engine_cache = {} - - -class Connection(ReflectionManagerMixin, BaseModel): - name = models.CharField(max_length=128, unique=True) - db_name = models.CharField(max_length=128) - username = EncryptedCharField(max_length=255) - password = EncryptedCharField(max_length=255) - host = models.CharField(max_length=255) - port = models.IntegerField() - current_objects = models.Manager() - # TODO does this need to be defined, given that ReflectionManagerMixin defines an identical attribute? - objects = DatabaseObjectManager() - deleted = models.BooleanField(blank=True, default=False) - - @property - def _sa_engine(self): - global _engine_cache - # We're caching this since the engine is used frequently. - db_name = self.name - was_cached = db_name in _engine_cache - if was_cached: - engine = _engine_cache.get(db_name) - model_utils.ensure_cached_engine_ready(engine) - else: - engine = create_mathesar_engine(self) - _engine_cache[db_name] = engine - return engine - - @property - def supported_ui_types(self): - """ - At the moment we don't actually filter our UIType set based on whether or not a UIType's - constituent DB types are supported. - """ - return UIType - - def __repr__(self): - return f'{self.__class__.__name__}: {self.name}, {self.id}' - - @classmethod - def create_from_settings_key(cls, db_key): - """ - Get an ethereal instance of the model from Django settings. - - This is only supported for Postgres DBs (e.g., it won't work on an - SQLite3 internal DB; that returns NoneType) - - Args: - db_key: This should be the key of the DB in settings.DATABASES - """ - db_info = settings.DATABASES[db_key] - if 'postgres' in db_info['ENGINE']: - return cls( - name=db_key, - db_name=db_info['NAME'], - username=db_info['USER'], - password=db_info['PASSWORD'], - host=db_info['HOST'], - port=db_info['PORT'], - ) - - def save(self, **kwargs): - db_name = self.name - # invalidate cached engine as db credentials might get changed. 
- if _engine_cache.get(db_name): - _engine_cache[db_name].dispose() - del _engine_cache[db_name] - return super().save() - - -class Schema(DatabaseObject): - database = models.ForeignKey('Connection', on_delete=models.CASCADE, - related_name='schemas') - - class Meta: - constraints = [ - models.UniqueConstraint(fields=["oid", "database"], name="unique_schema") - ] - - @property - def _sa_engine(self): - return self.database._sa_engine - - @property - def name(self): - cache_key = f"{self.database.name}_schema_name_{self.oid}" - try: - schema_name = cache.get(cache_key) - if schema_name is None: - schema_name = schema_utils.get_schema_name_from_oid( - self.oid, self._sa_engine - ) - cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL) - return schema_name - # We catch this error, since it lets us decouple the cadence of - # overall DB reflection from the cadence of cache expiration for - # schema names. Also, it makes it obvious when the DB layer has - # been altered, as opposed to other reasons for a 404 when - # requesting a schema. - except TypeError: - return 'MISSING' - - @property - def has_dependents(self): - return has_dependents( - self.oid, - self._sa_engine - ) - - # Returns only schema-scoped dependents on the top level - # However, returns dependents from other schemas for other - # objects down the graph. - # E.g: TableA from SchemaA depends on TableB from SchemaB - # SchemaA won't return as a dependent for SchemaB, however - # TableA will be a dependent of TableB which in turn depends on its schema - def get_dependents(self, exclude=None): - if exclude is None: - exclude = [] - return get_dependents_graph( - self.oid, - self._sa_engine, - exclude - ) - - @property - def description(self): - return get_schema_description(self.oid, self._sa_engine) - - def update_sa_schema(self, update_params): - result = model_utils.update_sa_schema(self, update_params) - return result - - def delete_sa_schema(self): - drop_schema_via_name(self._sa_engine, self.name, cascade=True) - - def clear_name_cache(self): - cache_key = f"{self.database.name}_schema_name_{self.oid}" - cache.delete(cache_key) - - -class ColumnNamePrefetcher(Prefetcher): - def filter(self, column_attnums, columns): - if len(columns) < 1: - return [] - table = list(columns)[0].table - return get_map_of_attnum_to_column_name( - table.oid, - column_attnums, - table._sa_engine, - metadata=get_cached_metadata(), - ) - - def mapper(self, column): - return column.attnum - - def reverse_mapper(self, column): - # We return maps mostly, so a reverse mapper is not needed - pass - - def decorator(self, column, name): - setattr(column, 'name', name) - - -class ColumnPrefetcher(Prefetcher): - def filter(self, _, tables): - if len(tables) < 1: - return [] - columns = reduce(lambda column_objs, table: column_objs + list(table.columns.all()), tables, []) - table_oids = [table.oid for table in tables] - - def _get_column_names_from_tables(table_oids): - if len(tables) > 0: - engine = list(tables)[0]._sa_engine - else: - return [] - return get_map_of_attnum_and_table_oid_to_column_name( - table_oids, - engine=engine, - metadata=get_cached_metadata(), - ) - - return ColumnNamePrefetcher( - filter=lambda column_attnums, columns: _get_column_names_from_tables(table_oids), - mapper=lambda column: (column.attnum, column.table.oid) - ).fetch(columns, 'columns__name', Column, []) - - def reverse_mapper(self, column): - return [column.table_id] - - def decorator(self, table, columns): - pass - - -_sa_table_prefetcher = Prefetcher( - filter=lambda 
oids, tables: reflect_tables_from_oids( - oids, list(tables)[0]._sa_engine, metadata=get_cached_metadata() - ) if len(tables) > 0 else [], - mapper=lambda table: table.oid, - # A filler statement, just used to satisfy the library. It does not affect the prefetcher in - # any way as we bypass reverse mapping if the prefetcher returns a dictionary - reverse_mapper=lambda table: table.oid, - decorator=lambda table, _sa_table: setattr( - table, - '_sa_table', - _sa_table - ) -) - - -class Table(DatabaseObject, Relation): - # These are fields whose source of truth is in the model - MODEL_FIELDS = ['import_verified'] - current_objects = models.Manager() - objects = DatabaseObjectManager( - # TODO Move the Prefetcher into a separate class and replace lambdas with proper function - _sa_table=_sa_table_prefetcher, - columns=ColumnPrefetcher, - ) - schema = models.ForeignKey('Schema', on_delete=models.CASCADE, - related_name='tables') - import_verified = models.BooleanField(blank=True, null=True) - import_target = models.ForeignKey('Table', blank=True, null=True, on_delete=models.SET_NULL) - is_temp = models.BooleanField(blank=True, null=True) - - class Meta: - constraints = [ - models.UniqueConstraint(fields=["oid", "schema"], name="unique_table") - ] - - def validate_unique(self, exclude=None): - # Ensure oid is unique on db level - if Table.current_objects.filter( - oid=self.oid, schema__database=self.schema.database - ).exists(): - raise ValidationError("Table OID is not unique") - super().validate_unique(exclude=exclude) - - def save(self, *args, **kwargs): - if self._state.adding: - self.validate_unique() - super().save(*args, **kwargs) - - # TODO referenced from outside so much that it probably shouldn't be private - # TODO use below decorator in place of cached_property to prevent redundant reflection from - # redundant model instances. - # - # @key_cached_property( - # key_fn=lambda table: ( - # 'sa_table', - # table.schema.database.name, - # table.oid, - # ) - # ) - @cached_property - def _sa_table(self): - # We're caching since we want different Django Table instances to return the same SA - # Table, when they're referencing the same Postgres table. - try: - sa_table = reflect_table_from_oid( - oid=self.oid, - engine=self._sa_engine, - metadata=get_cached_metadata(), - ) - # We catch these errors, since it lets us decouple the cadence of - # overall DB reflection from the cadence of cache expiration for - # table names. Also, it makes it obvious when the DB layer has - # been altered, as opposed to other reasons for a 404 when - # requesting a table. - except (TypeError, IndexError): - sa_table = table_utils.get_empty_table("MISSING") - return sa_table - - # NOTE: it's a problem that we have both _sa_table and _enriched_column_sa_table. at the moment - # it has to be this way because enriched column is not always interachangeable with sa column. 
- @property - def _enriched_column_sa_table(self): - return column_utils.get_enriched_column_table( - table=self._sa_table, - engine=self._sa_engine, - metadata=get_empty_metadata(), - ) - - @property - def primary_key_column_name(self): - pk_column = get_primary_key_column(self._sa_table) - return pk_column.name - - @property - def sa_columns(self): - return self._enriched_column_sa_table.columns - - @property - def _sa_engine(self): - return self.schema._sa_engine - - @property - def name(self): - return self._sa_table.name - - @property - def sa_column_names(self): - return self.sa_columns.keys() - - @property - def sa_constraints(self): - return self._sa_table.constraints - - @property - def has_dependents(self): - return has_dependents( - self.oid, - self.schema._sa_engine - ) - - @property - def description(self): - return get_table_description(self.oid, self._sa_engine) - - def get_dependents(self, exclude=None): - if exclude is None: - exclude = [] - return get_dependents_graph( - self.oid, - self.schema._sa_engine, - exclude - ) - - def get_ui_dependents(self): - """ - Returns all service layer dependents. For now only Data Explorer Query is considered - """ - - return { - 'queries': self.queries.all().values_list('id', flat=True) - } - - def add_column(self, column_data): - result = create_column( - self.schema._sa_engine, - self.oid, - column_data, - ) - return result - - def alter_column(self, column_attnum, column_data): - result = alter_column( - self.schema._sa_engine, - self.oid, - column_attnum, - column_data, - ) - return result - - def drop_column(self, column_attnum): - drop_column( - self.oid, - column_attnum, - self.schema._sa_engine, - ) - - def duplicate_column(self, column_attnum, copy_data, copy_constraints, name=None): - result = duplicate_column( - self.oid, - column_attnum, - self.schema._sa_engine, - new_column_name=name, - copy_data=copy_data, - copy_constraints=copy_constraints, - ) - return result - - def get_preview(self, column_definitions): - return get_column_cast_records( - self.schema._sa_engine, self._sa_table, column_definitions - ) - - # TODO unused? 
delete if so - @property - def sa_all_records(self): - return get_records( - table=self._sa_table, - engine=self.schema._sa_engine, - fallback_to_default_ordering=True, - ) - - def sa_num_records(self, filter=None, search=None): - if search is None: - search = [] - return get_count( - table=self._sa_table, - engine=self.schema._sa_engine, - filter=filter, - search=search, - ) - - def update_sa_table(self, update_params): - result = model_utils.update_sa_table(self, update_params) - return result - - def delete_sa_table(self): - result = drop_table(self.name, self.schema.name, self.schema._sa_engine, cascade=True) - return result - - def get_record(self, id_value): - return get_record(self._sa_table, self.schema._sa_engine, id_value) - - # TODO consider using **kwargs instead of forwarding params one-by-one - def get_records( - self, - limit=None, - offset=None, - filter=None, - order_by=None, - group_by=None, - search=None, - duplicate_only=None, - ): - if order_by is None: - order_by = [] - if search is None: - search = [] - return get_records( - table=self._sa_table, - engine=self.schema._sa_engine, - limit=limit, - offset=offset, - filter=filter, - order_by=order_by, - group_by=group_by, - search=search, - duplicate_only=duplicate_only, - fallback_to_default_ordering=True, - ) - - def create_record_or_records(self, record_data): - return insert_record_or_records(self._sa_table, self.schema._sa_engine, record_data) - - def update_record(self, id_value, record_data): - return update_record(self._sa_table, self.schema._sa_engine, id_value, record_data) - - def delete_record(self, id_value): - return delete_record(self._sa_table, self.schema._sa_engine, id_value) - - def bulk_delete_records(self, id_values): - return bulk_delete_records(self._sa_table, self.schema._sa_engine, id_values) - - def add_constraint(self, constraint_obj): - # The max here has the effect of filtering for the largest OID, which is - # the most newly-created constraint. Other methods (e.g., trying to get - # a constraint by name when it wasn't set here) are even less robust. 
- constraint_oid = max( - add_constraint_via_sql_alchemy(constraint_obj, engine=self._sa_engine) - ) - result = Constraint.current_objects.create(oid=constraint_oid, table=self) - return result - - def get_column_name_id_bidirectional_map(self): - columns = Column.objects.filter(table_id=self.id).select_related('table__schema__database').prefetch('name') - columns_map = bidict({column.name: column.id for column in columns}) - return columns_map - - def get_column_name_type_map(self): - columns = Column.objects.filter(table_id=self.id) - columns_map = [(column.name, column.db_type) for column in columns] - return columns_map - - def get_column_by_name(self, name): - columns = self.get_columns_by_name(name_list=[name]) - if len(columns) > 0: - return columns[0] - - def get_columns_by_name(self, name_list): - columns_by_name_dict = { - col.name: col - for col - in Column.objects.filter(table=self) - if col.name in name_list - } - return [ - columns_by_name_dict[col_name] - for col_name - in name_list - ] - - def move_columns(self, columns_to_move, target_table): - # Collect various information about relevant columns before mutating - columns_attnum_to_move = [column.attnum for column in columns_to_move] - target_table_oid = target_table.oid - column_names_to_move = [column.name for column in columns_to_move] - target_columns_name_id_map = target_table.get_column_name_id_bidirectional_map() - column_names_id_map = self.get_column_name_id_bidirectional_map() - - # Mutate on Postgres - extracted_sa_table, remainder_sa_table = move_columns_between_related_tables( - source_table_oid=self.oid, - target_table_oid=target_table_oid, - column_attnums_to_move=columns_attnum_to_move, - schema=self.schema.name, - engine=self._sa_engine - ) - engine = self._sa_engine - - # Replicate mutation on Django, so that Django-layer-specific information is preserved - extracted_table_oid = get_oid_from_table(extracted_sa_table.name, extracted_sa_table.schema, engine) - remainder_table_oid = get_oid_from_table(remainder_sa_table.name, remainder_sa_table.schema, engine) - target_table.oid = extracted_table_oid - target_table.save() - # Refresh existing target table columns to use correct attnum preventing conflicts with the moved column - existing_target_column_names = target_columns_name_id_map.keys() - target_table.update_column_reference(existing_target_column_names, target_columns_name_id_map) - # Add the moved column - target_table.update_column_reference(column_names_to_move, column_names_id_map) - self.oid = remainder_table_oid - self.save() - remainder_column_names = column_names_id_map.keys() - column_names_to_move - self.update_column_reference(remainder_column_names, column_names_id_map) - return extracted_sa_table, remainder_sa_table - - def split_table( - self, - columns_to_extract, - extracted_table_name, - column_names_id_map, - relationship_fk_column_name - ): - # Collect various information about relevant columns before mutating - columns_attnum_to_extract = [column.attnum for column in columns_to_extract] - extracted_column_names = [column.name for column in columns_to_extract] - remainder_column_names = column_names_id_map.keys() - extracted_column_names - - # Mutate on Postgres - extracted_table_oid, remainder_table_oid, linking_fk_column_attnum = extract_columns_from_table( - self.oid, - columns_attnum_to_extract, - extracted_table_name, - self.schema.name, - self._sa_engine, - relationship_fk_column_name - ) - # Replicate mutation on Django, so that Django-layer-specific information is preserved 
- extracted_table = Table(oid=extracted_table_oid, schema=self.schema) - extracted_table.save() - - # Update attnum as it would have changed due to columns moving to a new table. - extracted_table.update_column_reference(extracted_column_names, column_names_id_map) - remainder_table = Table.current_objects.get(schema__database=self.schema.database, oid=remainder_table_oid) - remainder_table.update_column_reference(remainder_column_names, column_names_id_map) - remainder_fk_column = Column.objects.get(table=remainder_table, attnum=linking_fk_column_attnum) - - return extracted_table, remainder_table, remainder_fk_column - - def update_column_reference(self, column_names, column_name_id_map): - """ - Will update the columns specified via column_names to have the right attnum and to be part - of this table. - """ - column_names_attnum_map = get_column_attnum_from_names_as_map( - self.oid, - column_names, - self._sa_engine, - metadata=get_cached_metadata(), - ) - column_objs = [] - for column_name, column_attnum in column_names_attnum_map.items(): - column_id = column_name_id_map[column_name] - column = Column.current_objects.get(id=column_id) - column.table_id = self.id - column.attnum = column_attnum - column_objs.append(column) - Column.current_objects.bulk_update(column_objs, fields=['table_id', 'attnum']) - - def insert_records_to_existing_table(self, existing_table, data_files, mappings=None): - from_table = self._sa_table - target_table = existing_table._sa_table - engine = self._sa_engine - if mappings: - col_mappings = [[from_col.name, target_col.name] for from_col, target_col in mappings] - else: - col_mappings = None - try: - table, _ = insert_from_select(from_table, target_table, engine, col_mappings) - except Exception as e: - # ToDo raise specific exceptions. - raise e - return table - - def suggest_col_mappings_for_import(self, existing_table): - temp_table_col_list = self.get_column_name_type_map() - target_table_col_list = existing_table.get_column_name_type_map() - temp_table_name_id_map = self.get_column_name_id_bidirectional_map() - target_table_name_id_map = existing_table.get_column_name_id_bidirectional_map() - column_mappings = column_utils.find_match(temp_table_col_list, target_table_col_list, self._sa_engine) - - # Convert python list indices to django ids. - mappings = [ - ( - temp_table_name_id_map[ - temp_table_col_list[from_col][0] # from_column name - ], - target_table_name_id_map[ - target_table_col_list[target_col][0] # target_column name - ] - ) for from_col, target_col in column_mappings - ] - return mappings - - -class Column(ReflectionManagerMixin, BaseModel): - table = models.ForeignKey('Table', on_delete=models.CASCADE, related_name='columns') - attnum = models.IntegerField() - display_options = JSONField(null=True, default=None) - - class Meta: - constraints = [ - models.UniqueConstraint(fields=["attnum", "table"], name="unique_column") - ] - - def __str__(self): - return f"{self.__class__.__name__}: {self.table_id}-{self.attnum}" - - def __getattribute__(self, name): - try: - return super().__getattribute__(name) - except AttributeError as e: - # Blacklist Django attribute names that cause recursion by trying to fetch an invalid cache. 
- # TODO Find a better way to avoid finding Django related columns - blacklisted_attribute_names = ['resolve_expression', '_prefetched_objects_cache'] - if name not in blacklisted_attribute_names: - return getattr(self._sa_column, name) - else: - raise e - - current_objects = models.Manager() - objects = DatabaseObjectManager( - name=ColumnNamePrefetcher - ) - - @property - def _sa_engine(self): - return self.table._sa_engine - - # TODO probably shouldn't be private: a lot of code already references it. - @property - def _sa_column(self): - return self.table.sa_columns[self.name] - - # TODO use below decorator in place of cached_property to prevent redundant reflection from - # redundant model instances. - # - # @key_cached_property( - # key_fn=lambda column: ( - # "column name", - # column.table.schema.database.name, - # column.table.schema.name, - # column.table.oid, - # column.attnum, - # ) - # ) - @cached_property - def name(self): - name = get_column_name_from_attnum( - self.table.oid, - self.attnum, - self._sa_engine, - metadata=get_cached_metadata(), - ) - assert type(name) is str - if name is None: - raise ProgrammingAPIException( - Exception( - "attempted to access column's name after it was dropped" - ) - ) - else: - return name - - @property - def description(self): - return get_column_description(self.table.oid, self.attnum, self._sa_engine) - - @property - def ui_type(self): - if self.db_type: - return get_ui_type_from_db_type(self.db_type) - - @property - def db_type(self): - return self._sa_column.db_type - - @property - def has_dependents(self): - return has_dependents( - self.table.oid, - self._sa_engine, - self.attnum - ) - - def get_dependents(self, exclude): - return get_dependents_graph( - self.table.oid, - self._sa_engine, - exclude, - self.attnum - ) - - -class Constraint(DatabaseObject): - table = models.ForeignKey('Table', on_delete=models.CASCADE, related_name='constraints') - - class Meta: - constraints = [ - models.UniqueConstraint(fields=["oid", "table"], name="unique_constraint") - ] - - # TODO try to cache this for an entire request - @property - def _constraint_record(self): - engine = self.table.schema.database._sa_engine - return get_constraint_record_from_oid(self.oid, engine, get_cached_metadata()) - - @property - def name(self): - return self._constraint_record['conname'] - - @property - def type(self): - return constraint_utils.get_constraint_type_from_char(self._constraint_record['contype']) - - @cached_property - def columns(self): - column_attnum_list = self._constraint_record['conkey'] - return Column.objects.filter(table=self.table, attnum__in=column_attnum_list).order_by("attnum") - - @cached_property - def referent_columns(self): - column_attnum_list = self._constraint_record['confkey'] - if column_attnum_list: - foreign_relation_oid = self._constraint_record['confrelid'] - columns = Column.objects.filter( - table__oid=foreign_relation_oid, - table__schema=self.table.schema, - attnum__in=column_attnum_list - ).order_by("attnum") - return columns - - @property - def ondelete(self): - action_char = self._constraint_record['confdeltype'] - return constraint_utils.get_constraint_action_from_char(action_char) - - @property - def onupdate(self): - action_char = self._constraint_record['confupdtype'] - return constraint_utils.get_constraint_action_from_char(action_char) - - @property - def deferrable(self): - return self._constraint_record['condeferrable'] - - @property - def match(self): - type_char = self._constraint_record['confmatchtype'] - return 
constraint_utils.get_constraint_match_type_from_char(type_char) - - def drop(self): - drop_constraint( - self.table._sa_table.name, - self.table._sa_table.schema, - self.table.schema._sa_engine, - self.name - ) - self.delete() - - -class PreviewColumnSettings(BaseModel): - customized = models.BooleanField() - template = models.CharField(max_length=255) - - -def validate_column_order(value): - """ - Custom validator to ensure that all elements in the list are positive integers. - """ - if not all(isinstance(item, int) and item > 0 for item in value): - raise ValidationError("All elements of column order must be positive integers.") - - -class TableSettings(ReflectionManagerMixin, BaseModel): - preview_settings = models.OneToOneField(PreviewColumnSettings, on_delete=models.CASCADE) - table = models.OneToOneField(Table, on_delete=models.CASCADE, related_name="settings") - column_order = JSONField(null=True, blank=True, default=None, validators=[validate_column_order]) - - def save(self, **kwargs): - # Cleans the fields before saving by running respective field validator(s) - try: - self.clean_fields() - except ValidationError as e: - raise e - super().save(**kwargs) - - -def _create_table_settings(tables): - # TODO Bulk create preview settings to improve performance - for table in tables: - preview_column_settings = PreviewColumnSettings.objects.create(customized=False) - TableSettings.current_objects.create(table=table, preview_settings=preview_column_settings) - - -def _set_default_preview_template(table): - if not table.settings.preview_settings.customized: - preview_template = compute_default_preview_template(table) - preview_settings = table.settings.preview_settings - preview_settings.template = preview_template - preview_settings.save() - - -def compute_default_preview_template(table): - columns = Column.current_objects.filter(table=table).prefetch_related( - 'table', - 'table__schema', - 'table__schema__database' - ).order_by('attnum') - preview_column = None - primary_key_column = None - for column in columns: - if column.primary_key: - primary_key_column = column - else: - preview_column = column - break - if preview_column is None: - preview_column = primary_key_column - - if preview_column: - preview_template = f"{{{preview_column.id}}}" - else: - # The table does not contain any column, show blank in such scenario. 
- preview_template = "" - return preview_template From 48d03aaf574e99934f605880a842da6937c1e4e4 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Tue, 15 Oct 2024 18:32:42 +0800 Subject: [PATCH 17/70] remove unused import module --- mathesar/imports/excel.py | 64 --------------------------------------- 1 file changed, 64 deletions(-) delete mode 100644 mathesar/imports/excel.py diff --git a/mathesar/imports/excel.py b/mathesar/imports/excel.py deleted file mode 100644 index 9692994dfc..0000000000 --- a/mathesar/imports/excel.py +++ /dev/null @@ -1,64 +0,0 @@ -import pandas - -from db.constants import ID, ID_ORIGINAL -from db.tables.operations.alter import update_pk_sequence_to_latest -from mathesar.database.base import create_mathesar_engine -from db.records.operations.insert import insert_records_from_excel -from db.tables.operations.create import create_string_column_table -from db.tables.operations.drop import drop_table -from mathesar.imports.utils import get_alternate_column_names, process_column_names -from psycopg2.errors import IntegrityError, DataError -from sqlalchemy.exc import IntegrityError as sqlalchemy_integrity_error - - -def insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe): - table = create_string_column_table( - name=name, - schema_oid=schema.oid, - column_names=column_names, - engine=engine, - comment=comment, - ) - if ID_ORIGINAL in column_names: - dataframe.rename(columns={ID: ID_ORIGINAL}, inplace=True) - insert_records_from_excel( - table, - engine, - dataframe, - ) - return table - - -def remove_empty_rows_and_columns_from_dataframe(df): - if df.iloc[0].isna().any(): - - # drop rows with all NaN values - df.dropna(how='all', inplace=True) - - # drop columns with all NaN values - df.dropna(axis=1, how='all', inplace=True) - - if all(df.columns.str.startswith('Unnamed')): - df.columns = df.iloc[0] - df = df[1:] - - return df - - -def create_db_table_from_excel_data_file(data_file, name, schema, comment=None): - db_model = schema.database - engine = create_mathesar_engine(db_model) - header_row = 0 if data_file.header else None - dataframe = remove_empty_rows_and_columns_from_dataframe( - pandas.read_excel(data_file.file.path, data_file.sheet_index, header=header_row) - ) - column_names = process_column_names(dataframe.columns) - try: - table = insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe) - update_pk_sequence_to_latest(engine, table) - except (IntegrityError, DataError, sqlalchemy_integrity_error): - drop_table(name=name, schema=schema.name, engine=engine) - column_names_alt = get_alternate_column_names(column_names) - table = insert_records_from_dataframe(name, schema, column_names_alt, engine, comment, dataframe) - - return table From ba7f2ab47c0644c7e0be9413b8a6110d5eafa355 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 11:40:44 +0800 Subject: [PATCH 18/70] remove engine usage from import modules --- mathesar/imports/csv.py | 46 ---------------------------------------- mathesar/imports/json.py | 38 --------------------------------- 2 files changed, 84 deletions(-) diff --git a/mathesar/imports/csv.py b/mathesar/imports/csv.py index 6786c74a5f..182b106501 100644 --- a/mathesar/imports/csv.py +++ b/mathesar/imports/csv.py @@ -3,7 +3,6 @@ import clevercsv as csv from db.tables.operations.alter import update_pk_sequence_to_latest -from mathesar.database.base import create_mathesar_engine from db.records.operations.insert import insert_records_from_csv from 
db.tables.operations.create import create_string_column_table from db.tables.operations.drop import drop_table @@ -113,48 +112,3 @@ def get_sv_reader(file, header, dialect=None): file.seek(0) return reader - - -def insert_records_from_csv_data_file(name, schema, column_names, engine, comment, data_file): - dialect = csv.dialect.SimpleDialect(data_file.delimiter, data_file.quotechar, - data_file.escapechar) - encoding = get_file_encoding(data_file.file) - table = create_string_column_table( - name=name, - schema_oid=schema.oid, - column_names=column_names, - engine=engine, - comment=comment, - ) - insert_records_from_csv( - table, - engine, - data_file.file.path, - column_names, - data_file.header, - delimiter=dialect.delimiter, - escape=dialect.escapechar, - quote=dialect.quotechar, - encoding=encoding - ) - return table - - -def create_db_table_from_csv_data_file(data_file, name, schema, comment=None): - db_model = schema.database - engine = create_mathesar_engine(db_model) - sv_filename = data_file.file.path - header = data_file.header - dialect = csv.dialect.SimpleDialect(data_file.delimiter, data_file.quotechar, - data_file.escapechar) - with open(sv_filename, 'rb') as sv_file: - sv_reader = get_sv_reader(sv_file, header, dialect=dialect) - column_names = process_column_names(sv_reader.fieldnames) - try: - table = insert_records_from_csv_data_file(name, schema, column_names, engine, comment, data_file) - update_pk_sequence_to_latest(engine, table) - except (IntegrityError, DataError): - drop_table(name=name, schema=schema.name, engine=engine) - column_names_alt = get_alternate_column_names(column_names) - table = insert_records_from_csv_data_file(name, schema, column_names_alt, engine, comment, data_file) - return table diff --git a/mathesar/imports/json.py b/mathesar/imports/json.py index 0908f944c7..9c699e139d 100644 --- a/mathesar/imports/json.py +++ b/mathesar/imports/json.py @@ -2,7 +2,6 @@ from json.decoder import JSONDecodeError from db.tables.operations.alter import update_pk_sequence_to_latest -from mathesar.database.base import create_mathesar_engine from db.records.operations.insert import insert_records_from_json from db.tables.operations.create import create_string_column_table from db.tables.operations.drop import drop_table @@ -60,40 +59,3 @@ def get_column_names_from_json(data_file, max_level): return all_keys else: return get_flattened_keys(data, max_level) - - -def insert_records_from_json_data_file(name, schema, column_names, engine, comment, json_filepath, max_level): - table = create_string_column_table( - name=name, - schema_oid=schema.oid, - column_names=column_names, - engine=engine, - comment=comment, - ) - insert_records_from_json( - table, - engine, - json_filepath, - column_names, - max_level - ) - return table - - -def create_db_table_from_json_data_file(data_file, name, schema, comment=None): - db_model = schema.database - engine = create_mathesar_engine(db_model) - json_filepath = data_file.file.path - max_level = data_file.max_level - column_names = process_column_names( - get_column_names_from_json(json_filepath, max_level) - ) - try: - table = insert_records_from_json_data_file(name, schema, column_names, engine, comment, json_filepath, max_level) - update_pk_sequence_to_latest(engine, table) - except (IntegrityError, DataError, sqlalchemy_integrity_error): - drop_table(name=name, schema=schema.name, engine=engine) - column_names_alt = get_alternate_column_names(column_names) - table = insert_records_from_json_data_file(name, schema, column_names_alt, 
engine, comment, json_filepath, max_level) - - return table From c7081474d9d1688f1269681ae93e43d07f861f81 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 12:09:19 +0800 Subject: [PATCH 19/70] remove old engine creator --- mathesar/database/base.py | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 mathesar/database/base.py diff --git a/mathesar/database/base.py b/mathesar/database/base.py deleted file mode 100644 index d6384d3d76..0000000000 --- a/mathesar/database/base.py +++ /dev/null @@ -1,20 +0,0 @@ -from db import engine - - -def create_mathesar_engine(db_model): - """Create an SQLAlchemy engine using stored credentials.""" - import logging - logger = logging.getLogger('create_mathesar_engine') - logger.debug('enter') - credentials = _get_credentials_for_db_model(db_model) - return engine.create_future_engine_with_custom_types(**credentials) - - -def _get_credentials_for_db_model(db_model): - return dict( - username=db_model.username, - password=db_model.password, - hostname=db_model.host, - database=db_model.db_name, - port=db_model.port, - ) From c8b1fbe131eae8f66c83f6249bb143dc702155b9 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 12:14:11 +0800 Subject: [PATCH 20/70] remove unused Relation model base --- mathesar/migrations/0001_initial.py | 5 ++--- mathesar/models/relation.py | 7 ------- 2 files changed, 2 insertions(+), 10 deletions(-) delete mode 100644 mathesar/models/relation.py diff --git a/mathesar/migrations/0001_initial.py b/mathesar/migrations/0001_initial.py index 8a4e9a4607..216d9e4009 100644 --- a/mathesar/migrations/0001_initial.py +++ b/mathesar/migrations/0001_initial.py @@ -8,7 +8,6 @@ import django.db.models.deletion import django.db.models.manager import django.utils.timezone -import mathesar.models.relation import mathesar.utils.models @@ -102,7 +101,7 @@ class Migration(migrations.Migration): ('import_target', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mathesar.table')), ('schema', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tables', to='mathesar.schema')), ], - bases=(models.Model, mathesar.models.relation.Relation), + bases=(models.Model,), managers=[ ('current_objects', django.db.models.manager.Manager()), ], @@ -124,7 +123,7 @@ class Migration(migrations.Migration): options={ 'abstract': False, }, - bases=(models.Model, mathesar.models.relation.Relation), + bases=(models.Model,), ), migrations.CreateModel( name='TableSettings', diff --git a/mathesar/models/relation.py b/mathesar/models/relation.py deleted file mode 100644 index 3f82afc42c..0000000000 --- a/mathesar/models/relation.py +++ /dev/null @@ -1,7 +0,0 @@ -# NOTE can't use python's ABC library, due to a conflict with Django models' meta classes -class Relation: - def get_records(self, **kwargs): - raise Exception("must be implemented by subclass") - - def sa_num_records(self, **kwargs): - raise Exception("must be implemented by subclass") From 67b2b2fa442deffc69e9e5e6f645fe79285d18f0 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 12:17:21 +0800 Subject: [PATCH 21/70] remove metadata cache function --- mathesar/state/__init__.py | 1 - mathesar/state/metadata.py | 21 --------------------- 2 files changed, 22 deletions(-) delete mode 100644 mathesar/state/__init__.py delete mode 100644 mathesar/state/metadata.py diff --git a/mathesar/state/__init__.py b/mathesar/state/__init__.py deleted file mode 100644 index e39b9a9680..0000000000 --- 
a/mathesar/state/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from mathesar.state.metadata import get_cached_metadata # noqa: F401 diff --git a/mathesar/state/metadata.py b/mathesar/state/metadata.py deleted file mode 100644 index 95bbc47315..0000000000 --- a/mathesar/state/metadata.py +++ /dev/null @@ -1,21 +0,0 @@ -from db.metadata import get_empty_metadata -from django_request_cache import cache_for_request - - -@cache_for_request -def get_cached_metadata(): - """ - Cached to minimize reflection queries to Postgres. - """ - return _metadata_cache - - -def reset_cached_metadata(): - """ - Resets MetaData cache to empty. - """ - global _metadata_cache - _metadata_cache = get_empty_metadata() - - -_metadata_cache = get_empty_metadata() From 194341d53a368bc1067a776e85a9a159de52f4a5 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 12:38:08 +0800 Subject: [PATCH 22/70] remove unused testing cruft --- mathesar/tests/api/conftest.py | 17 ---- mathesar/tests/conftest.py | 146 --------------------------- mathesar/tests/database/__init__ .py | 0 mathesar/tests/query/__init__.py | 0 4 files changed, 163 deletions(-) delete mode 100644 mathesar/tests/api/conftest.py delete mode 100644 mathesar/tests/database/__init__ .py delete mode 100644 mathesar/tests/query/__init__.py diff --git a/mathesar/tests/api/conftest.py b/mathesar/tests/api/conftest.py deleted file mode 100644 index 288a23f257..0000000000 --- a/mathesar/tests/api/conftest.py +++ /dev/null @@ -1,17 +0,0 @@ -from django.core.files import File -import pytest - -from mathesar.models.base import DataFile - - -@pytest.fixture -def create_data_file(): - def _create_data_file(file_path, file_name): - with open(file_path, 'rb') as csv_file: - data_file = DataFile.objects.create( - file=File(csv_file), created_from='file', - base_name=file_name, type='csv' - ) - - return data_file - return _create_data_file diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index 850397aa41..c5c1e30a50 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -37,14 +37,6 @@ def enable_db_access_for_all_tests(db): pass -@pytest.fixture(scope="session") -def django_db_modify_db_settings( - ignore_all_dbs_except_default, # noqa: F841 - django_db_modify_db_settings, # noqa: F841 -): - return - - @pytest.fixture(scope="session", autouse=True) def ignore_all_dbs_except_default(SES_dj_databases): """ @@ -56,31 +48,6 @@ def ignore_all_dbs_except_default(SES_dj_databases): del SES_dj_databases[entry_name] -def add_db_to_dj_settings(request): - """ - If the Django layer should be aware of a db, it should be added to settings.DATABASES dict. - """ - dj_databases = get_fixture_value(request, mathesar.tests.conftest.dj_databases) - added_dbs = set() - - def _add(db_name): - reference_entry = dj_connection.settings_dict - dj_databases[db_name] = reference_entry - dj_databases[db_name]['NAME'] = db_name - cache.clear() - added_dbs.add(db_name) - return db_name - yield _add - - -# defines: -# FUN_add_db_to_dj_settings -# CLA_add_db_to_dj_settings -# MOD_add_db_to_dj_settings -# SES_add_db_to_dj_settings -create_scoped_fixtures(globals(), add_db_to_dj_settings) - - def dj_databases(): """ Returns django.conf.settings.DATABASES by reference. 
During cleanup, restores it to the state @@ -119,11 +86,6 @@ def paste_filename(): return 'mathesar/tests/data/patents.txt' -@pytest.fixture(scope='session') -def headerless_patents_csv_filepath(): - return 'mathesar/tests/data/headerless_patents.csv' - - @pytest.fixture(scope='session') def patents_url(): return 'https://thisisafakeurl.com' @@ -134,77 +96,11 @@ def patents_url_filename(): return 'mathesar/tests/data/api_patents.csv' -@pytest.fixture(scope='session') -def col_names_with_spaces_csv_filepath(): - return 'mathesar/tests/data/col_names_with_spaces.csv' - - -@pytest.fixture(scope='session') -def col_headers_empty_csv_filepath(): - return 'mathesar/tests/data/col_headers_empty.csv' - - @pytest.fixture(scope='session') def non_unicode_csv_filepath(): return 'mathesar/tests/data/non_unicode_files/utf_16_le.csv' -@pytest.fixture(scope='session') -def duplicate_id_table_csv_filepath(): - return 'mathesar/tests/data/csv_parsing/duplicate_id_table.csv' - - -@pytest.fixture(scope='session') -def null_id_table_csv_filepath(): - return 'mathesar/tests/data/csv_parsing/null_id_table.csv' - - -@pytest.fixture(scope='session') -def duplicate_id_table_json_filepath(): - return 'mathesar/tests/data/json_parsing/duplicate_id_table.json' - - -@pytest.fixture(scope='session') -def null_id_table_json_filepath(): - return 'mathesar/tests/data/json_parsing/null_id_table.json' - - -@pytest.fixture(scope='session') -def duplicate_id_table_excel_filepath(): - return 'mathesar/tests/data/excel_parsing/duplicate_id_table.xlsx' - - -@pytest.fixture(scope='session') -def null_id_table_excel_filepath(): - return 'mathesar/tests/data/excel_parsing/null_id_table.xlsx' - - -@pytest.fixture(scope='session') -def multiple_sheets_excel_filepath(): - return 'mathesar/tests/data/excel_parsing/multiple_sheets.xlsx' - - -# TODO rename to create_mathesar_db_table -@pytest.fixture -def create_mathesar_table(create_db_schema): - def _create_mathesar_table( - table_name, schema_name, columns, engine, metadata=None, - ): - # We use a fixture for schema creation, so that it gets cleaned up. 
- create_db_schema(schema_name, engine, schema_mustnt_exist=False) - schema_oid = get_schema_oid_from_name(schema_name, engine) - return actual_create_mathesar_table( - engine=engine, table_name=table_name, schema_oid=schema_oid, columns=columns, - ) - yield _create_mathesar_table - - -def _get_datafile_for_path(path): - with open(path, 'rb') as file: - datafile = DataFile.objects.create(file=File(file), type='csv') - return datafile - - @pytest.fixture def user_alice(): user = User.objects.create( @@ -258,45 +154,3 @@ def client_alice(user_alice): client = APIClient() client.login(username=user_alice.username, password='password') return client - - -@pytest.fixture -def user_jerry(): - user = User.objects.create( - username='jerry', - email='jerry@example.com', - full_name='JerrySmith', - short_name='Jerry' - ) - user.set_password('password') - user.save() - yield user - user.delete() - - -@pytest.fixture -def user_turdy(): - user = User.objects.create( - username='turdy', - email='turdy@example.com', - full_name='Turdy', - short_name='Turdy' - ) - user.set_password('password') - user.save() - yield user - user.delete() - - -@pytest.fixture -def user_tom(): - user = User.objects.create( - username='tom', - email='tom@example.com', - full_name='Tom James', - short_name='Tom' - ) - user.set_password('password') - user.save() - yield user - user.delete() diff --git a/mathesar/tests/database/__init__ .py b/mathesar/tests/database/__init__ .py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/mathesar/tests/query/__init__.py b/mathesar/tests/query/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From 69a43db4f196044dcf846fae046daaa19d4e2e8a Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:10:52 +0800 Subject: [PATCH 23/70] remove unused test data files --- .../tests/data/client_build/manifest.json | 32 - mathesar/tests/data/col_headers_empty.csv | 3 - mathesar/tests/data/col_names_with_spaces.csv | 3 - .../data/csv_parsing/duplicate_id_table.csv | 4 - .../tests/data/csv_parsing/null_id_table.csv | 4 - .../tests/data/csv_parsing/paste_invalid.txt | 2 - .../excel_parsing/duplicate_id_table.xlsx | Bin 34842 -> 0 bytes .../data/excel_parsing/misaligned_table.xlsx | Bin 32519 -> 0 bytes .../data/excel_parsing/multiple_sheets.xlsx | Bin 13827 -> 0 bytes .../data/excel_parsing/null_id_table.xlsx | Bin 34926 -> 0 bytes mathesar/tests/data/headerless_patents.csv | 1394 ----------------- .../data/json_parsing/duplicate_id_table.json | 17 - .../tests/data/json_parsing/missing_keys.json | 29 - .../data/json_parsing/nested_objects.json | 34 - .../data/json_parsing/null_id_table.json | 17 - .../tests/data/non_unicode_files/cp1250.csv | 10 - 16 files changed, 1549 deletions(-) delete mode 100644 mathesar/tests/data/client_build/manifest.json delete mode 100644 mathesar/tests/data/col_headers_empty.csv delete mode 100644 mathesar/tests/data/col_names_with_spaces.csv delete mode 100644 mathesar/tests/data/csv_parsing/duplicate_id_table.csv delete mode 100644 mathesar/tests/data/csv_parsing/null_id_table.csv delete mode 100644 mathesar/tests/data/csv_parsing/paste_invalid.txt delete mode 100644 mathesar/tests/data/excel_parsing/duplicate_id_table.xlsx delete mode 100644 mathesar/tests/data/excel_parsing/misaligned_table.xlsx delete mode 100644 mathesar/tests/data/excel_parsing/multiple_sheets.xlsx delete mode 100644 mathesar/tests/data/excel_parsing/null_id_table.xlsx delete mode 100644 mathesar/tests/data/headerless_patents.csv delete mode 100644 
mathesar/tests/data/json_parsing/duplicate_id_table.json delete mode 100644 mathesar/tests/data/json_parsing/missing_keys.json delete mode 100644 mathesar/tests/data/json_parsing/nested_objects.json delete mode 100644 mathesar/tests/data/json_parsing/null_id_table.json delete mode 100644 mathesar/tests/data/non_unicode_files/cp1250.csv diff --git a/mathesar/tests/data/client_build/manifest.json b/mathesar/tests/data/client_build/manifest.json deleted file mode 100644 index 9773bc84b7..0000000000 --- a/mathesar/tests/data/client_build/manifest.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "src/main-legacy.ts": { - "file": "assets/main-legacy.a6fa5c22.js", - "src": "src/main-legacy.ts", - "isEntry": true, - "imports": [ - "_vendor-legacy.d63fc969.js" - ] - }, - "_vendor-legacy.d63fc969.js": { - "file": "assets/vendor-legacy.d63fc969.js" - }, - "vite/legacy-polyfills": { - "file": "assets/polyfills-legacy.b378e49a.js", - "src": "vite/legacy-polyfills", - "isEntry": true - }, - "src/main.ts": { - "file": "assets/main.cda2bafc.js", - "src": "src/main.ts", - "isEntry": true, - "imports": [ - "_vendor.9a7b32d4.js" - ], - "css": [ - "assets/main.57ee229b.css" - ] - }, - "_vendor.9a7b32d4.js": { - "file": "assets/vendor.9a7b32d4.js" - } -} diff --git a/mathesar/tests/data/col_headers_empty.csv b/mathesar/tests/data/col_headers_empty.csv deleted file mode 100644 index b56b873713..0000000000 --- a/mathesar/tests/data/col_headers_empty.csv +++ /dev/null @@ -1,3 +0,0 @@ -, ,col2, -aa,bb,cc,dd -ee,ff,gg,hh diff --git a/mathesar/tests/data/col_names_with_spaces.csv b/mathesar/tests/data/col_names_with_spaces.csv deleted file mode 100644 index 965b343f17..0000000000 --- a/mathesar/tests/data/col_names_with_spaces.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,a ,b -1,"foo","bar" -2,"baz","bam" diff --git a/mathesar/tests/data/csv_parsing/duplicate_id_table.csv b/mathesar/tests/data/csv_parsing/duplicate_id_table.csv deleted file mode 100644 index e89bf26fbe..0000000000 --- a/mathesar/tests/data/csv_parsing/duplicate_id_table.csv +++ /dev/null @@ -1,4 +0,0 @@ -id,Name,Age -1,John,25 -3,Cristine,30 -3,Jane,23 \ No newline at end of file diff --git a/mathesar/tests/data/csv_parsing/null_id_table.csv b/mathesar/tests/data/csv_parsing/null_id_table.csv deleted file mode 100644 index 8f62a5dd45..0000000000 --- a/mathesar/tests/data/csv_parsing/null_id_table.csv +++ /dev/null @@ -1,4 +0,0 @@ -id,Name,Age -1,John,25 -,Cristine,30 -3,Jane,23 \ No newline at end of file diff --git a/mathesar/tests/data/csv_parsing/paste_invalid.txt b/mathesar/tests/data/csv_parsing/paste_invalid.txt deleted file mode 100644 index d0fdfded4b..0000000000 --- a/mathesar/tests/data/csv_parsing/paste_invalid.txt +++ /dev/null @@ -1,2 +0,0 @@ -Center Status Case Number -NASA Ames Research Center diff --git a/mathesar/tests/data/excel_parsing/duplicate_id_table.xlsx b/mathesar/tests/data/excel_parsing/duplicate_id_table.xlsx deleted file mode 100644 index 6d89c5dcbffa8e47a5cd6bd34c085eba62b6ee0f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 34842 zcmc&-30O?++s_iRFD)qI6{1Wc*&8I0Ju0ecBc+wnXwMo7A(f(1$xf|6T9MX~ zUQ3JieVyfd&YU@Cw)20@KkxT_u3p~jxS#vE@8A8qm*+X-nghGXjGZV1{gOB!rNjIm z)^Fp$pN4j(R`v=9STYlsG8UwaxAoxfG)4;vO+saGkYvoAY>hW7r+XSME_B;iBewl` zg3AnJ+rN58y**yN!g}?>IZ4BPJEoZ!ONedqJf6iE&b8|ARtnPDml|MXOeiy2x31Hz zQGU~!{%FaSN0&LyUHszmEVIh?WS>oU{BOIq98=T zvp~^d_WLQH6>XY^=9ZovCm;AY>~p+s$M^Yn-dqPXiK((mJ_AcmI*QMs z4`1gG=~SHwTwe*Tf~kaA+)s4<=nxwtVzigBg(Lt)}C_r1we^~U!ow1}(SJ4z6nM2wc}uA!A{+GRAD$`T(~j({nYm6PiEw 
[GIT binary patch data omitted: base85-encoded deltas for the deleted Excel test fixtures (duplicate_id_table.xlsx, 34842 bytes; misaligned_table.xlsx, 32519 bytes; multiple_sheets.xlsx, 13827 bytes; null_id_table.xlsx, 34926 bytes), all removed in this patch as listed in the diffstat above.]
zky*EWIf-?x3-5>BAyiq`uD5aXzn=L0_re+zUFuSHunxg56EWjro^bT5K_#J zHv*-U1wc~SvVh>PqRHzofmsW+Wh~}Gy0G|2AM{`85VM#CCgs3*Y3d`Wm@G2~OV~`| z&~tDA3ZSEp)k8tTMD?-I|JF;G3y)Z7ln&J&7HI0>Sjj1|BDX(g3wV&i!r8I@0|dv8 z%fkes6^xg*u>}qs{Bmt?*fgPZjo#f__+PIAAziX$I+?u@(-Z8x=riV;Kc*%ek;1HI}Va5VknPlos7v z`{wTYcvF$F5)77n=Xd^1b^Ogw)PDk1l&O%Ciy@17q*Tc`ja?3kAX2ONSN=C+Dw}J`+`WjAD0lm}z`80v;-Rz;zWW~;@O?e;_v&Kx5 zk^6CW_CaSo(D3~Vs#0<}VwyGuU97MoPtL?lP(dd$kRyJ@KK~@p^8)Vuihdlj^#5cU zTxd|5wO<7|+_WZhw540Vdn@OIX0<2N;%;8!6^A!ahv^bhhlez^zr1ht4>^@;ZE4hqD7Z zfUx2W6O=GMf*}X#QWMl@MwkEoG<}1;utk;)fT{bvpo!WiKXIAiyc%Y{ASp_)?vpg@ z!95pre$lK&|1xT59_zs!tTbXfxJN@9u^imzb^in#(a*3CKp$XeaR|d&--kF*F$|Xq zYYgdVz$6h|E89SOFdIYeshK7YlFJs0Eu@miWHTjhm;j@kq*%-&9kZEXAfCGcTN`UC;c1(v-f05f=fdZwHn!VM-wWH3*L1ti$AX zp3Tkd&Krd8+Cii+GZtnZIamA=|D-5V_ro-ga^n4^EtHO#aqT98H7NhAeu4o4M#5gld^4q?$( zfR5ejDjdSxgsXs|E}R`OoFrc> zuoMIGMapk_iEmRK;KMua(1^ibArB`*`Q-%7q)pJLE=45BQPWJ+PP;({4}4& zi>3vRRGwR{jlXccecxmJ==kLsF>-oeP{{3yrzX8d6k`SQS`HFNd~O^(!>=iHj{scn2D3b$y4`{`L38&d&a)hzw+MfMlOK2Gj8 zE;8NSS4Ti*?A^xDfY%vbNo(ZXla_mF%=4B$Rb*2PtqK1~-+-#AYqxp`9& z>cf}#u%BYxZ@JfVo4bWAPZjpCT3r;IzGbpp@`B4E+kx2pDYuov$ENzjxm$J9`H44E z3olJ%9pB;+xITIPgm(518-rbA7D@2-{xa*x>x-RV% z#>$^<(^FsaY&LF}UkUp2jg!l>(DSWItScg3kKEF3wR4L+V0wDTp>0~O*+mPT@#Q<# zZVT&9&AT-#S}!3HZ8L)J`(pp?Mpoion_Rs+3#T#-yh0PCnm>IjF$(7qHB}HUdE{St zYS&3A?RdA=fTNV_LSd7pS$v{k^TMg1_d0ho zTh1-CteYONRx`%yT=MoCT9_XV!6Uu3>ooUi-P)5Xh&khWrfR?=y~b0gYgO!xXn_jL zHFIb0TE3zx%}sxIP)5ui^*3UI)4w!vi3tqWcHH8cSH`33A1u~zr06ZGzmt9H#NlG; z{EZhoO<$zVN!qc8FTnH6vJb|5m!~f0pEb#7%Mr}GXU#KJq-VW6{>}c};c#{14^i{e zxqcuk%Y$^}=AQ}KSdlVNvT^i;&)XdL00#}^F^tB~YDCfKst-P)4=QH$H=mQKT)FgJ z%G?JA3c^C)?h(!zsJ_x#Rw7|$`_y4NZaSf*xrTq&88MABvVp=+LnEY?9jN7gkmC4L z@BO*)pdA__X=cR862vs-L^a!c&(A9W_cb6{EoA)2ud%&71bT!zQl%MyE^#|D+IG>d z=npdcS~}OA(0R)JK+wEmsZjmP5CbR7$_-lkJboIw)kg^5Ft1)KHJVnme7>4|SPIs< zY)k3~{<$S3F3SD?NI#c8{BfAPE#JXY&PxF~qS#j^iw{lq$-kCTZ@d*;}j$37L|*&?2p zvt4gmoN$W>UN5`0@onxm4TE*jJkLg>o(4I+d7MxqwmDPz&dV7~)cRERd|m5Yt#@&O zn%|XRo{f&HJodlFX7ntd?W-Yj(L`hK>oxeYG5)oydIHB!?IJw; zE4#qto0qWjeYah24Ih0OJ-O854WfAyf{W@i_D>1VAr}`TM>A{IuL#fHhFjeX{y7tT zzj-<1G*|H5VG}z$E6Bf6(87c}#g=bZ@$Eq$=*fFklzLbF-TmX@CF#czdxl!ytlQXF ze~6GL=l^lfbmZySt8Qg`cR#?q+v}~`r0B+@rFLPB-NjDt`0oimdG1_Tlbo$eeGiS- zTp4U1+jsG{*QIR5mmlVrFW+^0?CSgU*iZJ0){N&=T#{X0RQ$z6cEnOIU+H9{=|t(d zZ5oxLY3r6NDrGqyT6cX=lYiNqXAjok4=oLSlrhBDHF?;O(ATtv%f(^&XROw_6*6D% z@9)u4&l7HxKh14qvY=jO3r{K1kG?q7(^+fE!@mXcezw_&r>;nT)4Nqbgrd&=|WDpcVR7z~i<m)u$6Ln5^3Hg6 zWXZM5{75h(CCu+OCy6mr5ovY5T z?roO}Ur-h?JGlP6MbMo{vy*)MOIOY+yNC_@%w4eLvwcYn?`yN?Tz5Xznm$n3QV(upxgU@Y4PKBL#X3`gQcoZtx{{KQdgF z)WF~8s~4E3Zof(H{N1!F>Dibr-)5xN*kVT1Q($iphW43<`SkA850!z}KhkOjRB2?4cAu-H%;I zkm4zSpk;bs8E8xYX#w*-p5z727G^eP49m!Nts3`SAH~hZ<+6fYzo|QMam6q$BP}L< zyPUo+Spe(T=IH{QW--`D{q{V4IlGr&R{hraZiHy0O7E5s^*wTjaP=%N|GqttZzYIf|H zT|WOjNppBAv(^{-rdt`8G3Fi?W|M(qO~!ONCoYhIV@=I;1IQ9sGag-n#6_C(sTEK4 UC^sos7lKHA2SVb`C20Ep17^C-^#A|> diff --git a/mathesar/tests/data/headerless_patents.csv b/mathesar/tests/data/headerless_patents.csv deleted file mode 100644 index c915cf9ef1..0000000000 --- a/mathesar/tests/data/headerless_patents.csv +++ /dev/null @@ -1,1394 +0,0 @@ -NASA Kennedy Space Center,Application,KSC-12871,0,"13/033,085",Polyimide Wire Insulation Repair System, -NASA Ames Research Center,Issued,ARC-14048-1,5694939,"08/543,093",Autogenic-Feedback Training Exercise Method & System,10/03/2015 -NASA Ames Research Center,Issued,ARC-14231-1,6109270,"09/017,519",Multimodality Instrument For Tissue Characterization,02/04/2017 -NASA Ames Research Center,Issued,ARC-14231-2DIV,6976013,"10/874,003",Metrics For Body 
Sensing System,06/16/2024 -NASA Ames Research Center,Issued,ARC-14231-3,6718196,"09/652,299",Multimodality Instrument For Tissue Characterization,02/04/2017 -NASA Ames Research Center,Issued,ARC-14275-1,6445390,"09/226,673",Automated Triangle Geometry Processing For Surface Modeling And Cartesian Grid Generation (CART3D),12/24/2018 -NASA Ames Research Center,Issued,ARC-14281-1,6606612,"09/374,491",Aerodynamic Design Using Neural Networks,08/13/2019 -NASA Ames Research Center,Issued,ARC-14281-3,7191161,"10/637,087",Method For Constructing Composite Response Surfaces By Combining Neural Networks With Polynomial Interpolation Or Estimation Techniques,11/18/2020 -NASA Ames Research Center,Issued,ARC-14359-1,6314362,"09/498,123",A Direct-To Controller Tool (A Component Of The CTAS Software Suite),02/02/2020 -NASA Ames Research Center,Issued,ARC-14494-1,6720984,"09/606,107",Bio-Electric Keyboard/Mouse/Joystick Interface Software/Algorithm,06/13/2020 -NASA Ames Research Center,Issued,ARC-14512-1,6823333,"09/800,309",Keyword-in-context Search Method And Software For Information Retrieval From Collections Of Text Documents (Quorum/Perilog),03/02/2021 -NASA Ames Research Center,Issued,ARC-14513-1,6741981,"09/800,311",Model-based Phrase Search Method And Software For Information Retrieval From Collections Of Text Documents (Quorum/Perilog),09/14/2021 -NASA Ames Research Center,Issued,ARC-14514-1,6697793,"09/800,313",Method And Software For Using Implicit Phrase Models To Generate Prominent Phrases Contained In Collections Of Text Documents (Quorum/Perilog),03/02/2021 -NASA Ames Research Center,Issued,ARC-14515-1,6721728,"09/800,310",Method And Software For Extracting And Distilling Topically And Situationally Relevant Phrases From Collections Of Text Documents (Quorum/Perilog),07/26/2021 -NASA Ames Research Center,Issued,ARC-14556-1,7346172,09/822470,Spatially-modulated Auditory Alert Having Enhanced Detection,08/24/2022 -NASA Ames Research Center,Issued,ARC-14569-1,7783130,"11/045,041",Spatial Standard Observer,03/26/2028 -NASA Ames Research Center,Issued,ARC-14569-2,8139892,"12/807,375",Spatial Standard Observer,01/24/2025 -NASA Ames Research Center,Issued,ARC-14586-1DIV,7293001,"11/274,744",A Hybrid Neural Network And Support Vector Machine Method For Optimization,01/07/2022 -NASA Ames Research Center,Issued,ARC-14613-1,6858197,"10/099,247",A Novel Technique That Allows For The Deposition And Patterning Of A Catalyst Onto A Surface For The Growth Of Single-Walled Carbon Nanotubes,11/30/2019 -NASA Ames Research Center,Issued,ARC-14652-1,7375826,"10/956,517",3D Laser Scanner,03/25/2026 -NASA Ames Research Center,Issued,ARC-14653-1,7702427,"10/914,783",Future ATM (Air Traffic Management) Concepts Evaluation Tool (FACET),07/30/2024 -NASA Ames Research Center,Issued,ARC-14653-2,8290696,"12/694,966",Future ATM (Air Traffic Management) Concepts Evaluation Tool (FACET),07/30/2024 -NASA Ames Research Center,Issued,ARC-14661-1,7276266,"10/320,698",A Plasma Apparatus And Process For Functionalization Of Carbon Nanotubes,12/13/2022 -NASA Ames Research Center,Issued,ARC-14661-2,7473436,"10/828,524",Improved Functionalization Of Carbon Nanotubes,12/13/2022 -NASA Ames Research Center,Issued,ARC-14661-3,7767270,"11/387,503",Selective Functionalization Of Carbon Nanotubes Based Upon Distance Traveled,11/05/2025 -NASA Ames Research Center,Issued,ARC-14662-1,6968338,"10/232,975",Advanced XML Database Integration Technique For Managing Unstructured Documents (NETMARK) (Part of NTTS Suite),07/18/2023 -NASA Ames 
Research Center,Issued,ARC-14682-2,7333735,"10/885,533",Communication Using VCSEL Laser Array,11/03/2023 -NASA Ames Research Center,Issued,ARC-14710-1,7231329,"10/706,478",Elimination Of Parameter Input Requirement For Elliptic Grid Generation Methods In Engineering,03/11/2025 -NASA Ames Research Center,Issued,ARC-14733-1,6972056,"10/135,013",An Environmentally Compatible Method To Purify Carbon Nanotubes,01/03/2023 -NASA Ames Research Center,Issued,ARC-14743-1,7767305,10/758611,High-Efficiency Tantalum-Based Ceramics (HETC),01/14/2024 -NASA Armstrong Flight Research Center,Issued,DRC-008-014,8047472,"12/45,970",IMPROVED RAM BOOSTER,03/11/2028 -NASA Ames Research Center,Issued,ARC-14744-1US,7816491,"10/494,853",Ordered Biological Nanostructures Formed From Chaperonin Polypeptides,05/06/2024 -NASA Ames Research Center,Issued,ARC-14744-2,7795388,"11/194,991",A Versatile Platform For Nanotechnology Based On Circular Permutations Of Chaperonin Protein,05/06/2024 -NASA Ames Research Center,Issued,ARC-14940-1,7135172,"10/238,515",Bucky Paper As An Artificial Support Membrane In Retinal Cell Transplantation,06/12/2024 -NASA Ames Research Center,Issued,ARC-14941-1,6755530,"10/198,672",Carbon Nanotubes As A Prototype Interface For Retinal Cell Recording And Stimulation (Vision Chip),10/18/2022 -NASA Ames Research Center,Issued,ARC-14950-1,7596416,"10/928,874",Program Management Tool (PMT) Also Known As Business Intelligence (BI),07/22/2027 -NASA Ames Research Center,Issued,ARC-14950-2,8224472,"12/211,439",Enhanced Project Management Tool,10/20/2026 -NASA Ames Research Center,Issued,ARC-14970-1,7129857,"10/789,049",Intelligent Weather Agent,07/20/2024 -NASA Ames Research Center,Issued,ARC-15040-1,8200486,"10/457,696",Sub Auditory Speech Recognition Based On Electromyographic Signals,09/14/2025 -NASA Ames Research Center,Issued,ARC-15041-2,7206674,"10/923,156",Information Display System For Atypical Flight Phase,05/21/2024 -NASA Ames Research Center,Issued,ARC-15042-2,7217650,"10/816,576",Metallic Nanowire Interconnections For Integrated Circuit Fabrication,03/11/2023 -NASA Ames Research Center,Issued,ARC-15058-1,7383238,"10/789,029",Inductive Monitoring System - System Health Monitoring Software That Learns System Behavior From Data (IMS),03/12/2025 -NASA Ames Research Center,Issued,ARC-15073-1,7590606,"10/703,039","InvestigationOrganizer: Information Storage, Modeling And Visualization Support For Accident/Mishap Investigations (Part Of A Suite Of Software That Includes ARC-15069, ARC-15070 And ARC-15073) ",04/30/2026 -NASA Ames Research Center,Issued,ARC-15088-1,7070923,"10/608,884",Carbon Nanotube Bucky Paper Cages For Immune Shielding Of Cells And Tissue For Transplantation,09/20/2023 -NASA Ames Research Center,Issued,ARC-15101-1,7113265,"10/808,704",Sample Handling Device For X-ray Diffraction Instruments,03/17/2024 -NASA Ames Research Center,Issued,ARC-15157-1,7286573,"10/923,160",A Method Of Converting Quantum Wells From Type-II To Type-I And Of Enhancing Interband Optical Gain ,03/11/2025 -NASA Ames Research Center,Issued,ARC-15171-1,7650232,"11/239,456",Trajectory Specification For High-Capacity Air Traffic Control,05/25/2027 -NASA Ames Research Center,Issued,ARC-15173-1,7273095,"10/825,795",Embedded Carbon Nanotube Array As High Performance Thermal Conductors,03/11/2023 -NASA Ames Research Center,Issued,ARC-15173-2,7784531,"11/900,131",Nanoengineered Thermal Materials Based On Carbon Nanotube Array Composites,02/16/2024 -NASA Ames Research 
Center,Issued,ARC-15201-1,7381459,"10/779,504",Toughened Uni-piece Fibrous Reduced Oxidation Ceramic (TUFROC) Light-Weight Thermal Protection System For Use On Space Vehicles During Atmospheric Entry At Hypersonic Speed,02/12/2024 -NASA Ames Research Center,Issued,ARC-15201-2,7314648,"10/911,747",Toughened Uni-piece Fibrous Reinforced Oxidation-Resistant Composite (TUFROC),02/12/2024 -NASA Ames Research Center,Issued,ARC-15204-1,7949472,"10/885,537",Nanopore Pipetts For Structural Characterization Of Single Polymeric Biomelecules,01/14/2026 -NASA Ames Research Center,Issued,ARC-15204-1DIV,8494782,"13/092,048",Nanopore Pipetts For Structural Characterization Of Single Polymeric Biomelecules,06/24/2024 -NASA Ames Research Center,Issued,ARC-15205-1,7939734,"10/873,996",The Electrochemical Biosensors Using Carbon Nanotube Nanoelectrode Arrays,06/14/2024 -NASA Ames Research Center,Issued,ARC-15312-1,7672969,"11/513,429",Context Based Configuration Management Concept,08/25/2026 -NASA Ames Research Center,Issued,ARC-15314-1,7718223,"11/007,913",Provision Of Carbon Nanotube Arrays Of Variable Density For IC Hot Spot Control,02/12/2027 -NASA Ames Research Center,Issued,ARC-15314-2,7704547,"11/472,516",Carbon Nanotube Growth Density Control,12/07/2024 -NASA Ames Research Center,Issued,ARC-15315-1,7378963,"11/239,449",Reconfigurable Auditory-visual Display For Multi-channel Control Center And Rescue Communications,01/06/2026 -NASA Ames Research Center,Issued,ARC-15356-2,7161501,"11/66,650",Display Of Aircraft Energy State For Flight Operations Quality Assurance (FOQA) Programs,09/22/2024 -NASA Ames Research Center,Issued,ARC-15356-3,7212135,11/066649,Real-Time Analysis And Display Of Aircraft Approach Maneuvers ,09/22/2024 -NASA Ames Research Center,Issued,ARC-15370-1,7698274,"10/956,524",Selective Access And Editing In A Database (Part of NTTS Suite),03/18/2027 -NASA Ames Research Center,Issued,ARC-15392-1,7313475,"11/053,713",Delay Banking: Collaborative Decision Making For Airspace-user Priority In Tactical Flow Restrictions,04/04/2025 -NASA Ames Research Center,Issued,ARC-15404-1,7288490,"11/009,854",Use Of A Single Electrode To Orient Carbon Nanotube Growth,12/07/2024 -NASA Ames Research Center,Issued,ARC-15437-1,7438422,"11/340,816",Low Cost Portable Planetarium Imaging System,05/14/2027 -NASA Ames Research Center,Issued,ARC-15443-1,7531775,"11/251,006",A Tracking Sunphotometer Without Moving Parts ,01/31/2026 -NASA Ames Research Center,Issued,ARC-15460-1,7426848,"11/203,576",Discharge Based Gas Sensor Array Using Self-Oriented Regular Vertical Array Of Carbon Nanotubes,08/05/2025 -NASA Ames Research Center,Issued,ARC-15462-1,7574338,11/340002,Finite-Difference Simulation And Visualization Of Elastodynamics In Time-Evolving Generalized Curvilinear Coordinates ,07/29/2026 -NASA Ames Research Center,Issued,ARC-15487-1,7796026,"11/111,620",Electronic Firefighter Escape Trail,06/04/2028 -NASA Ames Research Center,Issued,ARC-15506-1,7529633,"11/203,589",Applications Of Carbon Nanotube Hold-Off Voltages,10/22/2026 -NASA Ames Research Center,Issued,ARC-15519-1,7574357,"11/169,265",Security Applications For Subvocal Speech,11/09/2026 -NASA Ames Research Center,Issued,ARC-15566-1,7801687,"11/178,079",Gas Sensors Based on Coated and Doped Carbon Nanotubes,05/26/2029 -NASA Ames Research Center,Issued,ARC-15566-2,8000903,"11/416,505",Coated Or Doped Carbon Nanotube Network Sensors As Affected By Environmental Parameters And Elapsed Time,09/15/2029 -NASA Ames Research 
Center,Issued,ARC-15566-3,7875455,"11/489,803",Nanotechnology Sensors For Determination Of Chemical Substances In An Oil Reservoir,12/17/2028 -NASA Ames Research Center,Issued,ARC-15566-5,7623972,"11/591,630",Detection Of Presence Of Chemical Precursors,07/08/2025 -NASA Ames Research Center,Issued,ARC-15575-1,7473930,"11/173,053",Use Of Carbon Nanotube Arrays For Display Purposes,10/24/2026 -NASA Ames Research Center,Issued,ARC-15578-2,7873181,"11/525,600",Visual Signal Sensor Organ Replacement: Implementation,05/19/2028 -NASA Ames Research Center,Issued,ARC-15606-1,7431242,"11/265,324",Aero Assist Capsule Vehicle Geometry For Atmospheric Entry,04/01/2026 -NASA Ames Research Center,Issued,ARC-15684-1,7516890,"11/444,807",InterIssued Inventory Monitoring,05/25/2026 -NASA Ames Research Center,Issued,ARC-15714-1,7869029,"11/398,733",Light Collimator And Monitor,11/11/2029 -NASA Ames Research Center,Issued,ARC-15782-1,7549338,11/973998,Nanotechnology Sensor Of Presence And Concentration Of A Target Molecule,09/28/2027 -NASA Ames Research Center,Issued,ARC-15796-1,8675922,"13/444,777",Motion Blur Evaluation Techniques,08/31/1932 -NASA Ames Research Center,Issued,ARC-15870-1,7655497,"11/513,431",Growth Method For Phase Change Nanostructures,08/16/2027 -NASA Ames Research Center,Issued,ARC-15890-1,7655145,"11/543,275",Water Treatment Systems For Long Space Flight Use,11/05/2027 -NASA Ames Research Center,Issued,ARC-15900-1,7490367,"11/526,175",Wet Waste Drying Bag,09/20/2026 -NASA Ames Research Center,Issued,ARC-15903-1DIV,8409491,"13/215,206",In-situ Formation Of Reinforcement Phases In Ceramic Composites And Ultra High Temperature Ceramic Composites For Advanced TPS Applications,09/28/2027 -NASA Ames Research Center,Issued,ARC-15967-1,7635420,"11/645,267",Dielectrophoresis-Based Particle Sensor Using Nanoelectrode Arrays,06/06/2028 -NASA Ames Research Center,Application,ARC-15977-1,0,"12/100,378",Artificial Immune System Based Approach For Air Combat Maneuvering, -NASA Ames Research Center,Application,ARC-15981-4,,"13/463,780",Chaperonin-based Templates for Pseudo-cellulosomes with Multiple Enzymes Present,07/19/2027 -NASA Ames Research Center,Issued,ARC-15983-1,7923709,"12/273,502",Radiation Shielding System Using A Composite Of Hydrogen-Rich Polymers Loaded With Carbon Nanotubes,09/30/2029 -NASA Ames Research Center,Application,ARC-16478-1,,"14/191,246",Real Time PIREPs Using Audio Twitter,02/26/1934 -NASA Ames Research Center,Issued,ARC-15995-1,8290246,"11/958,296",A Method To Measure The Recession Of Ablative Materials In Arc-jet Testing Using Digital Stereo-photogrammetry And Image Cross-correlation,07/01/1931 -NASA Ames Research Center,Issued,ARC-16013-1,7968054,"11/715,785",Wireless Chemical Sensor Data Transmission System Based On Nanotechnology,10/03/2029 -NASA Ames Research Center,Issued,ARC-16018-1,7662459,"12/175,379",Atmospheric Entry Heat Shield Employing Cured Thermal Protection Material Blocks Bonded In A Large-Cell Honeycomb Matrix,07/17/2028 -NASA Ames Research Center,Application,ARC-16132-1,0,"14/091,250",Surface Densification Of Phenolic Impregnated Carbon Ablator (PICA),11/26/1933 -NASA Ames Research Center,Issued,ARC-16133-1,8069001,"12/319,918",Hollow AErothermal Ablation And Temperature (HEAT) Isotherm Sensor For Tracking Isotherm Through The TPS Material,10/09/2029 -NASA Ames Research Center,Application,ARC-16211-1,0,"13/896,284",Low Cost Optical Fiber Solar Cell Configurations,05/16/1933 -NASA Ames Research 
Center,Issued,ARC-16235-1,8285659,"12/543,411",Modeling-Error-Driven Performance-Seeking Direct Adaptive Control,11/18/1930 -NASA Ames Research Center,Application,ARC-16273-1,0,"12/454,024",Decomposition Technique for Remaining Useful Life Prediction,11/18/1930 -NASA Ames Research Center,Issued,ARC-16280-1,8409845,"12/316,557",Offshore membrane enclosures for dewatering Algae (OMEDA),10/15/1931 -NASA Ames Research Center,Issued,ARC-16298-1,8333810,"12/398,854",Nanotechnology-Based Supercapacitor,06/29/1930 -NASA Ames Research Center,Issued,ARC-16320-1,8332342,"12/622,407",Battery Prognostics using Particle Filtering Techniques,02/05/1931 -NASA Ames Research Center,Issued,ARC-16331-1,8408707,"12/428,441",System to estimate visual acuity from wavefront aberrations,05/29/2029 -NASA Ames Research Center,Issued,ARC-16334-1,8244477,"12/478,667",Estimation of Growth Stage and Growth Rate for Algae,06/04/2029 -NASA Ames Research Center,Application,ARC-16337-1,0,"13/793,998",Method and Device for Biometric Subject Verification and Identification Based Upon electrocardiographic signals,03/11/1933 -NASA Ames Research Center,Application,ARC-16340-1,0,"13/645,284",Method for formation and manufacture of carbon nanotube mesh bucky paper capsules for transplantation of cells and tissue and implantation of medical devices,10/04/1932 -NASA Ames Research Center,Issued,ARC-16342-1,8412469,"12/698,996",Advanced Sensor Technology for Algal Biotechnology (ASTAB) ,12/16/1930 -NASA Ames Research Center,Application,ARC-16348-1,,"13/109,954",Co-Optimized Blunt-Body ReEntry Vehicle Aerothermodynamic Parametric Shape and Multi-Discipline Optimization Design Process, -NASA Ames Research Center,Issued,ARC-16351-1,8498756,"13/213,022",Hovercraft Landing System,12/07/1931 -NASA Ames Research Center,Issued,ARC-16370-1,8375675,"12/574,493",Self Aligning Lug for adapting carbon fiber rods to a bolted metallic connection,05/07/1931 -NASA Ames Research Center,Application,ARC-16372-1,0,"13/794,061",Inexpensive Cooling Systems for Devices,03/11/1933 -NASA Ames Research Center,Issued,ARC-16373-1,8489181,"12/319,220",Heart Electrical Actions as Biometric Indicia,04/29/1932 -NASA Ames Research Center,Application,ARC-16405-1,0,"14/091,236",Nanowire based piezoelectric power generation,11/26/1933 -NASA Ames Research Center,Issued,ARC-16407-1,8337208,"12/622,374",Content Analysis to Detect High Stress in Oral Interviews and Text Documents,05/26/1931 -NASA Ames Research Center,Application,ARC-16419-1,0,"13/317,034",Strobing to Mitigate Vibration for Display Legibility,10/05/1932 -NASA Ames Research Center,Application,ARC-16450-1CIP,0,"13/720,898",Distributed Prognostics and Health Management with a Wireless Network Architecture ,05/05/2029 -NASA Ames Research Center,Application,ARC-16456-1,,"13/480,917",FABRICATION OF NANOPIPETTE ARRAY FOR BIOSENSING, -NASA Ames Research Center,Application,ARC-16461-1,,"13/956,218",Solar Powered CO2 Conversions with Thin Film Devices,07/31/1933 -NASA Ames Research Center,Application,ARC-16466-1,,"14/010,322",Combined HETC/ROCCI TPS Material for Temperatures Up To T=3200 F ,08/26/1933 -NASA Ames Research Center,Application,ARC-16467-1,,"13/615,202",ODVEC: Outlier Detection Via Estimating Clusters, -NASA Ames Research Center,Application,ARC-16607-1,,"13/658,749",An Approach to Make Flexible Ablators that are Flexible Char Formers,10/23/1932 -NASA Ames Research Center,Application,ARC-16621-1,,"13/472,283",Transformable Hypersonic Aerodynamic Decelerator,12/04/1932 -NASA Ames Research 
Center,Application,ARC-16644-1,,"13/648,197",Variable Camber Continuous Aerodynamic Control Surfaces and Methods for Active Wing Shaping Control ,10/09/1932 -NASA Ames Research Center,Application,ARC-16646-1,,"13/485,721",A method to produce copper nanowires for interconnect applications, -NASA Ames Research Center,Application,ARC-16661-1,,"13/444,789",Video acuity measurement system, -NASA Ames Research Center,Application,ARC-16697-1,,"13/956,929",NTTS Search and Reporting (Part of NTTS Suite),08/01/1933 -NASA Ames Research Center,Application,ARC-16707-1,,"13/438,793",Ectomycorrhizal mediated remediaiton of phenolic-based contamination through use of specifically adapted ectomycorrhizal fungi and enzyme enhancement through partial defoliation of the host., -NASA Ames Research Center,Application,ARC-16707-1CIP,,"13/854,620",Ectomycorrhizal mediated remediaiton of phenolic-based contamination through use of specifically adapted ectomycorrhizal fungi and enzyme enhancement through partial defoliation of the host.,04/03/1932 -NASA Ames Research Center,Application,ARC-16732-1,,"13/573,924",NanoSat Launch Adapter System (NLAS),03/14/1933 -NASA Ames Research Center,Application,ARC-16733-1,,"13/535,884","Habitat Water Wall for Water, Solids, and Atmosphere Recycle and Reuse ", -NASA Ames Research Center,Application,ARC-16752-1,,"14/179,401","Fuel-Efficient, Airport-Friendly, Multi-Speed Transport Aircraft Configuration with Novel Structural Approach",02/12/1934 -NASA Ames Research Center,Application,ARC-16811-1,,"13/544,752",Compliant electrode and composite materials for piezoelectric wind and mechanical energy conversions, -NASA Ames Research Center,Application,ARC-16812-1,,"13/783,112",Graphene composite materials for supercapacitor electrodes ,03/01/1933 -NASA Ames Research Center,Application,ARC-16833-1,,"13/747,875",Flight Deck Predictive Weather Display and Decision Support Interface ,01/23/1933 -NASA Ames Research Center,Application,ARC-16844-1,,"13/662,346",Adaptive control and disturbance rejection of non-minimum phase plants using residual mode filters,10/26/1932 -NASA Ames Research Center,Application,ARC-16846-1,,"13/707,546",Dynamic Weather Routes Tool,12/06/1932 -NASA Ames Research Center,Application,ARC-16892-1A,,"13/929,646",The Surface-Adhering Bioreactor (SABR): A novel microbial cell cultivation platform,06/27/1933 -NASA Ames Research Center,Application,ARC-16902-1,,"13/725,475",Nanosensors for medical diagnosis,12/21/1932 -NASA Ames Research Center,Application,ARC-16916-1,,"13/956,736",A Method for Improving Control Systems with Normalized Adaptation by Optimal Control Modification,08/01/1933 -NASA Ames Research Center,Application,ARC-16924-1,,"14/010,355",Aluminoborosilicate Supplement for Thermal Protection of a Re-entrant Vehicle,08/26/1933 -NASA Ames Research Center,Application,ARC-16942-2,,"13/659,739",A new family of low density flexible ablators,10/24/1932 -NASA Armstrong Flight Research Center,Issued,DRC-001-049,7180943,"10/113,637",Adaptive Lossless Data Compression,03/26/2022 -NASA Armstrong Flight Research Center,Issued,DRC-005-031,7407131,"11/288,052",Sound Shield,10/31/2025 -NASA Armstrong Flight Research Center,Issued,DRC-006-001,7431243,"11/227,325",Algorithms For Autonomous Soaring,02/27/2026 -NASA Armstrong Flight Research Center,Application,DRC-006-002,0,"11/422,554","Air Breathing,Reusable, Vertical Launch, Vertical Landing, First Stage Launch System with Off-the-Shelf Second Stage - Ram Booster", -NASA Armstrong Flight Research 
Center,Issued,DRC-006-005,7711455,"11/463,485",Propulsion Controlled Aircraft Computer (PCAC),08/09/2026 -NASA Armstrong Flight Research Center,Issued,DRC-006-024,7520176,"11/567,118",Method for Real-Time Structure Shape Sensing,12/05/2026 -NASA Armstrong Flight Research Center,Application,DRC-006-045,0,"11/682,969",METHOD FOR REDUCING THE REFRESH RATE OF FIBER BRAGG GRATING SENSORS, -NASA Armstrong Flight Research Center,Issued,DRC-008-001,8145366,"12/138,747",Real-time Interactive Sonic Boom Display,04/28/2030 -NASA Armstrong Flight Research Center,Issued,DRC-008-023,7715994,"12/191,734",IMPROVED PROCESS FOR USING SURFACE STRAIN MEASUREMENTS TO OBTAIN OPERATIONAL LOADS FOR COMPLEX STRUCTURES,08/14/2028 -NASA Armstrong Flight Research Center,Application,DRC-009-008,0,12/718034,Continental Digital Elevation Map Compression and Decompression Software, -NASA Armstrong Flight Research Center,Issued,DRC-009-026,8447443,13/367990,A New Peak-Seeking Control Method,02/07/2032 -NASA Armstrong Flight Research Center,Application,DRC-010-042,,13/463246,An apparatus and a method to eliminate polarization-induced fading from multiple fiber-optics strain sensors via signal-processing under polarization diversity detection scheme, -NASA Armstrong Flight Research Center,Application,DRC-011-002,,"13/759,847",OPTICAL WAVEGUIDE BRAGG GRATING WAVELENGTH SHIFT BY LIGHT INTERACTION WITH ACTIVE MATERIAL,02/05/2033 -NASA Armstrong Flight Research Center,Application,DRC-011-015,,14/106947,In-situ three-dimensional shape rendering from strain values obtained through optical fiber sensors,05/31/2032 -NASA Armstrong Flight Research Center,Application,DRC-012-005,,13/759210,Method and apparatus of multiplexing and acquiring data from multiple optical fibers using a single data channel of an optical frequency-domain reflectrometry (OFDR) system (Revised),02/05/2033 -NASA Armstrong Flight Research Center,Application,DRC-012-006,,13/733364,A Novel Approach to Liquid Level Sensing Using Fiber Bragg Grating Technology,01/03/2033 -NASA Armstrong Flight Research Center,Application,DRC-012-011,,13/573920,Air Launch From A Towed Aircraft,07/05/2032 -NASA Armstrong Flight Research Center,Issued,DRC-096-055,6126111,"09/112,067",Emergency Flight Control System Using One Engine And Fuel Transfer,07/08/2018 -NASA Armstrong Flight Research Center,Issued,DRC-097-021,6102330,"08/905,777",Emergency Aircraft Lateral Controller Using Existing (non-modified) Digital Engine Computers During A System Failure For The Purpose Of Safe Landing,07/29/2017 -NASA Armstrong Flight Research Center,Issued,DRC-098-001,6216063,"09/74,024",A Flutterometer Flight Test Tool,05/06/2018 -NASA Goddard Space Flight Center,Application,GSC-13378-1,0,"07/710,633",SPLINE-LOCKING PAYLOAD FASTENER, -NASA Goddard Space Flight Center,Issued,GSC-13802-1,6584874,"08/673,859","USING A 3-D SPRAG IN RACHETING TOOLS BASED ON PAT. NO. 
5,482-144",07/02/2016 -NASA Goddard Space Flight Center,Issued,GSC-13817-1,5983162,"08/872,586",Empirical Mode Decomposition Method And Hilbert Spectral Analysis Algorithms,06/10/2017 -NASA Goddard Space Flight Center,Issued,GSC-13817-2,6631325,"09/82,523",COMPUTER IMPLEMENTED EMPIRICAL MODE DECOMPOSITION METHOD APPARATUS AND ARTICLE OF MANUFACTURE UTILIZING CURVATURE EXTREMA,06/10/2017 -NASA Goddard Space Flight Center,Issued,GSC-13817-3,6381559,"09/282,424","Empirical Mode Decomposition Apparatus, Method, And Article Of Manufacture For Analyzing Biological Signals And Performing Curve Fitting",03/31/2019 -NASA Goddard Space Flight Center,Issued,GSC-13817-4,6862558,"10/73,957",Empirical Mode Decomposition For Analyzing Acoustical Signals,02/13/2022 -NASA Goddard Space Flight Center,Issued,GSC-13817-5,6738734,"10/11,206","Empirical Mode Decomposition Apparatus, Method And Article Of Manufacture For Analyzing Biological Signals And Performing Curve Fitting",06/10/2017 -NASA Goddard Space Flight Center,Issued,GSC-13905-1,6640949,"10/95,343",1-Way Bearing,03/01/2022 -NASA Goddard Space Flight Center,Issued,GSC-13909-1,6311130,"09/150,671","Computer Implemented Empirical Mode Decomposition Method, Apparatus, And Article Of Manufacture For Two-Dimensional Signals",09/10/2018 -NASA Goddard Space Flight Center,Issued,GSC-13985-1,6566854,"09/646,161",Active Antenna Combined With Non-Ferrous Current Probe.,09/12/2020 -NASA Goddard Space Flight Center,Issued,GSC-14064-1,6648522,"09/804,646",Universal Fiber Optic Connector Polishing Fixture With Precision Alignment Capability,03/13/2021 -NASA Goddard Space Flight Center,Issued,GSC-14207-1,6626792,"09/799,872",Gear Bearings,03/03/2021 -NASA Goddard Space Flight Center,Issued,GSC-14209-1,6293803,"09/501,412",Stress Relieved Zee Electrical Interconnect,02/09/2020 -NASA Goddard Space Flight Center,Issued,GSC-14213-1,6760487,"09/550,254",Estimated Spectrum Adaptive Postfilter (ESAP) And The Iterative Prepost Filtering (IPF) Algorithms,04/14/2020 -NASA Goddard Space Flight Center,Issued,GSC-14236-1,6538796,"09/541,680",MEMS Devices For Spacecraft Thermal Control Applications,03/31/2020 -NASA Goddard Space Flight Center,Issued,GSC-14302-1,6782124,"09/729,138",Extension Of The Empirical Mode Decomposition Method To A Time Series Of 2-Dimensional Grid Maps,11/29/2020 -NASA Goddard Space Flight Center,Issued,GSC-14305-1,6895115,"09/839,147",Method For Recursive Implementation Of Hierarchical Segmentation,04/23/2021 -NASA Goddard Space Flight Center,Issued,GSC-14389-1,7543274,"10/789,028",Deriving Formal Specifications And Code From Scenarios,02/25/2024 -NASA Goddard Space Flight Center,Issued,GSC-14393-1,7145739,"10/385,166",Light Weight Optical Mirrors Formed In Single Crystal Silicon,03/06/2023 -NASA Goddard Space Flight Center,Issued,GSC-14413-1,7255483,"10/93,621",Thrust Rollers,03/01/2022 -NASA Goddard Space Flight Center,Issued,GSC-14435-1,6740224,"10/173,533",Innovative Manufacturing Procedure For Low Cost And High Quality Carbon Nanotubes,06/11/2022 -NASA Goddard Space Flight Center,Issued,GSC-14480-2,7762155,"11/444,808",Gear Bearings,05/25/2026 -NASA Goddard Space Flight Center,Issued,GSC-14561-1,7207245,"11/174,454",Screw-Locking Wrench,06/30/2025 -NASA Goddard Space Flight Center,Issued,GSC-14562-1,7504921,"11/543,278",Stepping Flextures,09/29/2026 -NASA Goddard Space Flight Center,Issued,GSC-14601-1,7008605,"10/292,952",Method For Manufacturing High Quality Carbon Nanotubes,11/08/2022 -NASA Goddard Space Flight 
Center,Issued,GSC-14603-1,7544146,"11/122,201",Anti-Backlash Gear-Bearings,05/02/2025 -NASA Goddard Space Flight Center,Issued,GSC-14608-1,6990436,"10/729,579",Time Frequency Analysis Based On Extrema Sifting,11/28/2023 -NASA Goddard Space Flight Center,Issued,GSC-14616-1,7248342,"10/730,195",Conceptual Design Of A 3D Imaging Lidar For High-Resolution Mapping Of The Surface Topography Of Moons Or Planets From Space,12/05/2023 -NASA Goddard Space Flight Center,Issued,GSC-14657-1,7512568,"11/109,400",Evolvable Neural Software System,04/08/2025 -NASA Goddard Space Flight Center,Issued,GSC-14666-1,6775600,"10/267,092",Systems And Methods For Determining Spacecraft Orientation,10/07/2022 -NASA Goddard Space Flight Center,Issued,GSC-14673-1,6901353,"10/615,365",Normalized Amplitude Hilbert Transform (NAHT): A New Algorithm For Computing Instantaneous Frequency,07/08/2023 -NASA Goddard Space Flight Center,Issued,GSC-14683-1,8480826,"11/736,874",Specular Coatings For Composite Structures,04/18/2027 -NASA Goddard Space Flight Center,Issued,GSC-14762-1,7769488,"11/108,627",SMART Solar Sail,04/08/2025 -NASA Goddard Space Flight Center,Issued,GSC-14777-1,7341932,"11/251,531",Large Area Vacuum Ultra-Violet Sensors,09/30/2025 -NASA Goddard Space Flight Center,Issued,GSC-14793-1,7548199,"11/239,458","Pivot 2.0: Radiation Hardened, Fast Acquisition/Weak Signal Tracking GPS Receiver",09/20/2025 -NASA Goddard Space Flight Center,Issued,GSC-14807-1,7464006,"10/963,470",Application Of HHT To Financial Data Analysis For Define Volatility And Trend,10/07/2024 -NASA Goddard Space Flight Center,Issued,GSC-14833-1,7346461,"11/251,004",Stability Spectrum Through Hilbert-Huang Transform,09/30/2025 -NASA Goddard Space Flight Center,Issued,GSC-14845-1,7290737,"11/251,537",Demiseable Reaction Wheel Assembly,09/29/2025 -NASA Goddard Space Flight Center,Issued,GSC-14871-1,7935297,"11/370,396",Template For Deposition Of Micron And Sub-micron Pointed Structures,03/06/2026 -NASA Goddard Space Flight Center,Issued,GSC-14873-1,8357211,"12/872,445 ",ADR Salt Pill Design And Crystal Growth Process For Hydrated Magnetic Salts,08/31/2030 -NASA Goddard Space Flight Center,Issued,GSC-14879-1,7635832,"11/469,105",Iterative-Transform Phase-Retrieval Utilizing Adaptive Diversity,08/31/2026 -NASA Goddard Space Flight Center,Issued,GSC-14941-1,7739671,"11/203,590",A Method And System For Direct Implementation Of Formal Specifications Derived Mechanically From Informal Requirements,08/12/2025 -NASA Goddard Space Flight Center,Issued,GSC-14942-1,7752608,"11/203,586","A Method And System For Formal Analysis, Simulation, And Verification Of Knowledge-Based Systems, Rule-Based Systems, And Expert Systems",08/12/2025 -NASA Goddard Space Flight Center,Issued,GSC-14952-1,7513546,"11/689,161",Conformal Gripper,03/21/2027 -NASA Goddard Space Flight Center,Issued,GSC-14960-1,7992760,"11/357,458",Hardware And Technique For Dead End Welding Of All Types Of Tubing,02/08/2026 -NASA Goddard Space Flight Center,Application,GSC-16700-1,,14/041407,SpaceCube v2.0 Flight Processor Card,09/30/2033 -NASA Goddard Space Flight Center,Issued,GSC-14968-1,7627538,"11/251,538",Apoptosis And Self-destruct: Mechanisms For Management Of Autonomic Systems,09/29/2025 -NASA Goddard Space Flight Center,Issued,GSC-14968-2,7925600,"12/603,140",SWARM AUTONOMIC AGENTS WITH SELF-DESTRUCT CAPABILITY,10/21/2029 -NASA Goddard Space Flight Center,Issued,GSC-14979-1,7601091,"11/426,134",Modular Gear Bearing,06/23/2026 -NASA Goddard Space Flight 
Center,Issued,GSC-14994-1,7697759,"11/251,530",A Split-Remerge Method For Eliminating Processing Window Artifacts In Recursive Hierarchical Segmentation,09/30/2025 -NASA Goddard Space Flight Center,Issued,GSC-15001-1,7924415,"12/389,097",Light Direction Sensor,02/19/2029 -NASA Goddard Space Flight Center,Issued,GSC-15002-1,7240879,"11/124,592",Space Robotic System For In Space Servicing Of Unmanned Spacecraft Applications,05/06/2025 -NASA Goddard Space Flight Center,Issued,GSC-15002-2,7513459,"11/670,653","Method And Associated Apparatus For Capturing, Servicing, And De-Orbiting Earth Satellites Using Robotics",05/06/2025 -NASA Goddard Space Flight Center,Issued,GSC-15002-3,7293743,"11/670,270","Method And Associated Apparatus For Capturing, Servicing, And De-Orbiting Earth Satellites Using Robotics",11/13/2025 -NASA Goddard Space Flight Center,Issued,GSC-15002-4,7438264,"11/670,781","Method And Associated Apparatus For Capturing, Servicing And De-Orbiting Earth Satellites Using Robotics",05/06/2025 -NASA Goddard Space Flight Center,Issued,GSC-15002-5,7513460,"11/671,062","Method And Associated Apparatus For Capturing, Servicing, And De-Orbiting Earth Satellites Using Robotics",05/06/2025 -NASA Goddard Space Flight Center,Issued,GSC-15027-1,7412175,"11/425,352",Millimeter Wave Polarization Transformer,06/20/2026 -NASA Goddard Space Flight Center,Issued,GSC-15027-2,7609978,"12/056,964",INTERFEROMETRIC POLARIZATION CONTROL,03/27/2028 -NASA Goddard Space Flight Center,Issued,GSC-15027-3,7616903,"12/057,060",INTERFEROMETRIC POLARIZATION CONTROL,03/27/2028 -NASA Goddard Space Flight Center,Issued,GSC-15030-1,7907333,"11/460,482","A Pulsed, 1 Micron, Single Frequency, Diode-Seeded Ytterbium-doped Fiber Amplifier With Variable Output Parameters, P",07/27/2026 -NASA Goddard Space Flight Center,Issued,GSC-15038-1,7765171,"11/426,853",SPAACE: Self Properties For An Autonomous & Autonomic Computing Environment,06/27/2026 -NASA Goddard Space Flight Center,Issued,GSC-15039-1,7762523,"11/861,038",Miniaturized Double Latching Solenoid Valve,09/25/2027 -NASA Goddard Space Flight Center,Issued,GSC-15042-1,7622907,"11/535,872",Driven Ground,09/27/2026 -NASA Goddard Space Flight Center,Issued,GSC-15055-1,7746190,"11/748,969",Broadband High Spurious-suppression Microwave Waveguide Filter For Polarization-preserving And Transformer,05/15/2027 -NASA Goddard Space Flight Center,Issued,GSC-15077-1,8068556,"12/147,100",Low Cost TDRSS Tranceiver (LCT2),06/26/2028 -NASA Goddard Space Flight Center,Issued,GSC-15079-1,7886273,"11/532,800",Generation And Verification Of Policies For Autonomic Systems,09/18/2026 -NASA Goddard Space Flight Center,Issued,GSC-15080-1,7979848,"11/533,837",A Method Of Deriving Process Based Specifications From Scenarios Via Pattern Matching,09/21/2026 -NASA Goddard Space Flight Center,Issued,GSC-15115-1,7465926,"11/537,280",Miniaturized Radiation Spectrometer Development,09/29/2026 -NASA Goddard Space Flight Center,Issued,GSC-15136-1,8093094,"12/137,844",Blocking Contacts For N-Type Cadmium Zinc Cadmium Zinc Telluride (CdZnTe),06/12/2028 -NASA Goddard Space Flight Center,Issued,GSC-15148-1,7668796,"11/536,132",Enhancing R2D2C Requirements Based Programming With Automata Learning,09/28/2026 -NASA Goddard Space Flight Center,Issued,GSC-15162-1,7796726,"11/706,693","Instrument And Method For X-Ray Diffraction, Fluorescence, And Crystal Texture Analysis Without Sample Preparation",02/14/2027 -NASA Goddard Space Flight Center,Application,GSC-15163-2,0,13/092198,AIGaN Ultraviolet Detectors For 
Dual Band UV Detection, -NASA Goddard Space Flight Center,Issued,GSC-15176-1,7899760,"11/533,855",Autonomic Quiescence,09/21/2026 -NASA Goddard Space Flight Center,Issued,GSC-15177-1,8082538,11/536378,A Method For Developing And Maintaining Evolving Systems With Software Product Lines,09/28/2026 -NASA Goddard Space Flight Center,Application,GSC-15177-2,0,13/305932,A Method For Developing And Maintaining Evolving Systems With Software Product Lines, -NASA Goddard Space Flight Center,Issued,GSC-15178-1,7992134,"11/536,969","Modeling, Specifying And Deploying Policies In Autonomous And Autonomic Systems Using An AOSE Methodology",09/29/2026 -NASA Goddard Space Flight Center,Issued,GSC-15179-1,7904396,"11/533,895",An Autonomic Smoke Detector,09/21/2026 -NASA Goddard Space Flight Center,Issued,GSC-15184-1,7978312,"11/933,492","An Active, Solid-state, 3-Dimensional Range Imaging System",11/01/2027 -NASA Goddard Space Flight Center,Issued,GSC-15206-1,8041655,"11/836,352",Otoacoustic Protection In Biologically-Inspired Systems,08/09/2027 -NASA Goddard Space Flight Center,Issued,GSC-15206-2,8140452,13/230915,Otoacoustic Protection In Biologically-Inspired Systems,09/13/2031 -NASA Goddard Space Flight Center,Issued,GSC-15206-3,8140453,13/230922,Otoacoustic Protection In Biologically-Inspired Systems,09/13/2031 -NASA Goddard Space Flight Center,Issued,GSC-15206-4,8275725,13/230920,Otoacoustic Protection In Biologically-Inspired Systems,09/13/2031 -NASA Goddard Space Flight Center,Issued,GSC-15206-5,8165976,13/230922,Otoacoustic Protection In Biologically-Inspired Systems,09/13/2031 -NASA Goddard Space Flight Center,Issued,GSC-15206-6,8165977,13/230923,Otoacoustic Protection In Biologically-Inspired Systems,09/13/2031 -NASA Goddard Space Flight Center,Issued,GSC-15217-1,8139674,"12/173,243",Spaceflight Ka-Band High Rate Rad Hard Modulator,07/15/2028 -NASA Goddard Space Flight Center,Issued,GSC-15301-1,7673089,"11/935,572",An Extendibe USB Drive That Accepts External Media,11/06/2027 -NASA Goddard Space Flight Center,Issued,GSC-15302-1,7673089,"11/935,572",An Double-Headed USB Drive,11/06/2027 -NASA Goddard Space Flight Center,Issued,GSC-15328-1,8499779,"12/014,889",Non-Pyrotechnic Zero-Leak Normally-Closed Valve,01/16/2028 -NASA Goddard Space Flight Center,Application,GSC-15333-1,0,"11/860,830","Improved, Flexure-Base Linear Bearing", -NASA Goddard Space Flight Center,Issued,GSC-15341-1,7922920,"11/862,550",Low Conductance Silicon Micro-leak for Mass Spectrometer Inlet,09/27/2027 -NASA Goddard Space Flight Center,Issued,GSC-15341-3,8455926,"12/889,014 ",Low Conductance Silicon Micro-leak for Mass Spectrometer Inlet,09/23/2030 -NASA Goddard Space Flight Center,Issued,GSC-15349-1,7830527,"12/102,240",Method And Apparatus For Second Harmonic Generation And Other Frequency Convertion With Multiple Frequency Channels,04/14/2028 -NASA Goddard Space Flight Center,Issued,GSC-15353-1,7830224,"11/877,102",Compact Low-loss Planar Magic-T With Broadband Phase And Amplitude Responses,10/23/2027 -NASA Goddard Space Flight Center,Issued,GSC-15357-1,8041661,"11/861,687",Stability Algorithm For Neural Entities (SANE),09/26/2027 -NASA Goddard Space Flight Center,Issued,GSC-15364-1,8155939,"12/170,683",Hughes Particle – Surface Interaction Model,07/10/2028 -NASA Goddard Space Flight Center,Issued,GSC-15377-1,7811406,"12/249,265",Advanced Adhesive Bond Shape Tailoring for Large Composite Primary Structures Subjected to Cryogenic and Ambient Loading Environments,10/10/2028 -NASA Goddard Space Flight 
Center,Issued,GSC-15416-1,7999427,"12/188,039",Directed Flux Motor Utilizing Concentric Magnets and Interwoven Flux Channels,08/07/2028 -NASA Goddard Space Flight Center,Issued,GSC-15417-1,7735385,"12/187,562",Actuated Ball and Socket Joint,08/07/2028 -NASA Goddard Space Flight Center,Issued,GSC-15419-1,8030873,"12/187,926",Improvements to the Walk and Roll Robot,08/07/2028 -NASA Goddard Space Flight Center,Issued,GSC-15421-1,7968812,"12/353,009",Spring Joint Package with Overstrain Sensor ( OS Sensor Joint ),01/13/2029 -NASA Goddard Space Flight Center,Issued,GSC-15431-1,7921731,"12/327,514",A two-axis direct fluid shear stress sensor suited for aerodynamic applications,12/03/2028 -NASA Goddard Space Flight Center,Issued,GSC-15445-1,7982861,"12/183,820","Pseudo-Noise Code Modulation using Return to Zero pulses for Ranging, Altimetry and Communications",07/31/2028 -NASA Goddard Space Flight Center,Issued,GSC-15458-1,8094731,"12/357,081",Space Link Extension Return Channel Frames (SLE-RCF) Service (User side) Software Library,01/21/2029 -NASA Goddard Space Flight Center,Issued,GSC-15483-1,7817087,"12/116,518",Relative Spacecraft Navigation using Reflected GPS Signals,05/07/2028 -NASA Goddard Space Flight Center,Issued,GSC-15520-1,8547531,12/873373,Non-scanning laser 3D imager,09/01/2030 -NASA Goddard Space Flight Center,Issued,GSC-15527-1,8160728,"12/558,672",Sensor Complete Requirements Algorithm For Autonomous Mobility,09/14/2029 -NASA Goddard Space Flight Center,Issued,GSC-15538-1,8198956,"12/535,954",Compact planar microwave blocking filter,08/05/2029 -NASA Goddard Space Flight Center,Issued,GSC-15550-1,8275724,"12/569,422",A biologically-inspired method of improving system performance and survivability through self-sacrifice,09/29/2029 -NASA Goddard Space Flight Center,Issued,GSC-15552-1,7924126,"12/555,634","Small, High Field Superconducting Magnets",09/08/2029 -NASA Goddard Space Flight Center,Issued,GSC-15557-1,8095485,"12/353,637",Formulation for Emotion Embedding in Logic Systems (FEELS),01/14/2029 -NASA Goddard Space Flight Center,Issued,GSC-15583-1,7970025,"12/496,954",Tunable Frequency-stabilized Laser via Offset Sideband Locking,07/02/2029 -NASA Goddard Space Flight Center,Issued,GSC-15584-1,8144331,"12/487,454",Hilbert-Transform-Based Phase Referencing Algorithm for Wide-Field Imaging Interferometry.,06/18/2029 -NASA Goddard Space Flight Center,Issued,GSC-15655-1,8138961,"12/561,644",Low Frequency Wideband Step Frequency Inverse Synthetic Aperture Radar For 3-D Imaging of Interior of Near Earth Objects/Planetary Bodies,09/17/2029 -NASA Goddard Space Flight Center,Application,GSC-15660-1,0,13/247416,Extreme Environment Low Temperature Transistor Models, -NASA Goddard Space Flight Center,Issued,GSC-15662-1,8092031,"12/569,090",Flight Mirror Mount and Flight Mounting Procedure for an Ultra-Lightweight High-Precision Glass Mirror,09/29/2029 -NASA Goddard Space Flight Center,Application,GSC-15672-1,0,13/211413,Multicolor detectors for ultrasensitive long-wave imaging cameras, -NASA Goddard Space Flight Center,Issued,GSC-15678-1,8484274,"12/549,159",Optimal Padding for the Two-Dimensional Fast Fourier Transform,08/27/2029 -NASA Goddard Space Flight Center,Issued,GSC-15684-1,8285401,"12/549,898",Discrete Fourier Transform (DFT) Analysis in a Complex Vector Space,08/28/2029 -NASA Goddard Space Flight Center,Issued,GSC-15685-1,8331733,"12/550,141",Sampling Theorem in Terms of the Bandwidth and Sampling Interval,08/28/2029 -NASA Goddard Space Flight 
Center,Issued,GSC-15692-1,8330644,"12/835,958 ",Expandable Reconfigurable Instrument Node - Web Sensor Strand Demonstration,07/19/2030 -NASA Goddard Space Flight Center,Application,GSC-15693-1,0,"12/570,224","Variable Sampling Mapping: A novel supplement to iterative-transform phase retrieval algorithms for undersampled images, broadband illumination, and noisy detection environments", -NASA Goddard Space Flight Center,Issued,GSC-15699-1,8480296,"12/560,535","A Low Cost, Low Temperature Radiometer for Thermal Measurements.",09/16/2029 -NASA Goddard Space Flight Center,Issued,GSC-15724-1,8275015,"12/551,212",Passively Q-switched side pumped Monolithic Ring Laser,08/31/2029 -NASA Goddard Space Flight Center,Application,GSC-15727-1,0,13/222575,"An All-metal, Solderless Circularly Polarized Microwave Antenna Element with Very Low Off-Axis Cross-Polarization", -NASA Goddard Space Flight Center,Issued,GSC-15729-1,8674302,"12/789,937",Novel Superconducting Transition Edge Sensor Design,05/28/2030 -NASA Goddard Space Flight Center,Issued,GSC-15729-2,8393786,"12/789,954 ",Novel Superconducting Transition Edge Sensor Design,05/28/2030 -NASA Goddard Space Flight Center,Issued,GSC-15730-1,8355579,12/783054,Automatic Extraction of Planetary Image Features,05/19/2030 -NASA Goddard Space Flight Center,Issued,GSC-15732-1,8093565,12/695478,Crossed Small Deflection Energy Analyzer (SDEA) for Wind/Temperature Spectrometer (WTS),01/28/2030 -NASA Goddard Space Flight Center,Issued,GSC-15758-1,8044332,"12/553,613",Hybrid Architecture Active Wavefront Sensing and Control,09/03/2029 -NASA Goddard Space Flight Center,Issued,GSC-15771-1,8035081,"12/570,166",High Precision Electric Gate (HPEG) for Time of Flight Mass Spectrometers,09/30/2029 -NASA Goddard Space Flight Center,Application,GSC-15774-1,0,13/154599,Ensemble Detector, -NASA Goddard Space Flight Center,Application,GSC-15782-1,0,13/216479,"Ultra-low Power (< 100mW), 64-Channel Pulse Data Collection System", -NASA Goddard Space Flight Center,Issued,GSC-15792-1,8406469,12/838600,Progressive Band Selection for Hyperspectral Images,07/19/2030 -NASA Goddard Space Flight Center,Application,GSC-15815-1,0,12/887988,LIDAR Luminance Quantizer, -NASA Goddard Space Flight Center,Issued,GSC-15826-1,8134130,12/839207,The Corner Cathode: Making Collimated Electron Beams with a Small Number of Electrodes,07/19/2030 -NASA Goddard Space Flight Center,Application,GSC-15829-1,0,13/601293,Resolution enhanced pseudo random code technique,08/31/2032 -NASA Goddard Space Flight Center,Application,GSC-15839-1,0,12/840787,"Low threshold, narrow linewidth optical parametric generator", -NASA Goddard Space Flight Center,Issued,GSC-15856-1,8196853,12/779494,Aerodynamically Stabilized Instrument Platform for Kites and Tethered Blimps ( AeroPod ),05/13/2030 -NASA Goddard Space Flight Center,Application,GSC-15886-1,0,12/838963,Automated Beam Balance Scale Logger, -NASA Goddard Space Flight Center,Application,GSC-15911-1,0,13/217965,Graphite Composite Panel Polishing Fixture, -NASA Goddard Space Flight Center,Application,GSC-15934-1,0,12/839125,Determining Phase Retrieval Sampling from the Modulation Transfer Function, -NASA Goddard Space Flight Center,Application,GSC-15935-1,0,13/043257,New Variables for Iterative Transform Phase Retrieval, -NASA Goddard Space Flight Center,Application,GSC-15936-1,0,12/854490,SpaceCube Version 1.5, -NASA Goddard Space Flight Center,Issued,GSC-15947-1,8274726,12/839171,Sampling and Reconstruction of the Sinc(x) Function,07/19/2030 -NASA Goddard Space Flight 
Center,Application,GSC-15948-1,0,13/204767,Lateral Kevlar Suspension Device (LKSD), -NASA Goddard Space Flight Center,Application,GSC-15949-1,0,13/600992,Vectorized Rebinning Algorithm for Fast Data Down-Sampling,08/31/2032 -NASA Goddard Space Flight Center,Application,GSC-15951-1,0,13/222839,An Improved Method of Fabricating Single Crystal Silicon Light Weight Mirrors, -NASA Goddard Space Flight Center,Issued,GSC-15953-1,8484509,12/854546,SpaceCube Demonstration Platform,08/11/2030 -NASA Goddard Space Flight Center,Application,GSC-15953-2,0,13/903357,SpaceCube Demonstration Platform,09/30/2029 -NASA Goddard Space Flight Center,Application,GSC-15957-1,0,13/211526,Imaging System Aperture Masks for Image Plane Exit Pupil Characterization, -NASA Goddard Space Flight Center,Issued,GSC-15964-1,8525110,"13/247,168 ",An Instrument Suite for the Vertical Characterization of the Ionosphere-Thermosphere System from 100 km to 700km Altitude,09/28/2031 -NASA Goddard Space Flight Center,Application,GSC-15970-1,0,13/034125,Electrospray Ionization for Chemical Analysis of Organic Molecules for Mass Spectrometry, -NASA Goddard Space Flight Center,Application,GSC-15976-1,0,12/872366,Phase Retrieval System for Assessing Diamond-Turning and other Optical Surface Artifacts, -NASA Goddard Space Flight Center,Issued,GSC-15977-1,8354952,12/839060,Phase Retrieval for Radio Telescope and Antenna Control,07/19/2030 -NASA Goddard Space Flight Center,Application,GSC-15979-1,0,12/839187,Multi-Scale Image Reconstruction using Wavelets, -NASA Goddard Space Flight Center,Application,GSC-15994-1,,13/104538,Photonic Choke-Joints for Dual-Polarization Waveguides, -NASA Goddard Space Flight Center,Application,GSC-16006-1,,13/216671,Programmable High-Rate Multi-Mission Receiver for Space Communication, -NASA Goddard Space Flight Center,Application,GSC-16008-1,,13/600826,Phase controlled magnetic mirror for wavefront correction,08/31/2032 -NASA Goddard Space Flight Center,Application,GSC-16016-1,,13/193272,Carbon Nanotubes on titanium substrates for stray light suppression, -NASA Goddard Space Flight Center,Issued,GSC-16024-1,8526733,"13/150,316",Refinement of the HSEG Algorithm for Improved Computational Processing Efficiency,06/01/2031 -NASA Goddard Space Flight Center,Application,GSC-16789-1,,14/ 033725,LEARNS (Logic Expansion for Autonomously Reconfigurable Neural Systems), -NASA Goddard Space Flight Center,Application,GSC-16029-1,,13/193249,Nanostructure secondary mirror apodization mask for transmitter signal suppression in a duplex telescope., -NASA Goddard Space Flight Center,Application,GSC-16096-1,,13/211432,Prototype Genomics Based keyed-Hash Message Authentication Code Protocol, -NASA Goddard Space Flight Center,Application,GSC-16100-1,,12/881587,Lunar Reconnaissance Orbiter (LRO) Command and Data Handling Flight Electronics Subsystem, -NASA Goddard Space Flight Center,Application,GSC-16105-1,,13/197214,Molecular Adsorber Coating, -NASA Goddard Space Flight Center,Application,GSC-16109-1,,13/240180,HEXPANDO expanding head for fastener retention hexagonal wrench, -NASA Goddard Space Flight Center,Application,GSC-16122-1,,13/474053,Apparatuses and Methods to Enable Sub-MHz Precision in Fast Laser Frequency Tuning, -NASA Goddard Space Flight Center,Application,GSC-16135-1,,13/534427,A cryptographic approach to microRNA target binding analysis, -NASA Goddard Space Flight Center,Application,GSC-16146-1,,13/601194,Wafer Level Microchannel Fabrication Process for Lap-on-a-Chip Devices,08/31/2032 -NASA Goddard Space Flight 
Center,Application,GSC-16182-1,,13/595604,"A High Event Rate, Zero Dead Time, Multi-Stop Time-to-digital Converter Application Specific Integrated Circuit",08/27/2032 -NASA Goddard Space Flight Center,Application,GSC-16193-1,,13/720175,Fine Control and Maintenance Algorithm for Visible Nulling Coronagraphy,12/19/2032 -NASA Goddard Space Flight Center,Application,GSC-16223-1,,13/551649,SpaceCube Mini, -NASA Goddard Space Flight Center,Application,GSC-16247-1,,13/570100,Enhanced adhesion multiwalled carbon nanotubes on titanium substrates for stray light control, -NASA Goddard Space Flight Center,Application,GSC-16250-1,,13/150316,Further Refinement of the Computationally Efficient HSEG Algorithm, -NASA Goddard Space Flight Center,Application,GSC-16259-1,,13/050617,Spaceflight Refuiling Tools, -NASA Goddard Space Flight Center,Application,GSC-16299-1,,13/622465,V-Assembly Dual Head Efficiency Resonator (VADER) Laser Transmitter,09/19/2032 -NASA Goddard Space Flight Center,Application,GSC-16301-1,,13/771815,"Impedance matched to vacuum, invisible-edge diffraction suppressed mirror",02/20/2033 -NASA Goddard Space Flight Center,Application,GSC-16327-1,,13/545173,"Miniaturized laser heterodyne radiometer for carbon dioxide (CO2), methane (CH4), and carbon monoxide (CO) measurements in the atmospheric column.", -NASA Goddard Space Flight Center,Application,GSC-16328-1,,13/474367,Development of the Hilbert-Huang Transform Real-Time Data Processing System with 2-D Capabilities, -NASA Goddard Space Flight Center,Application,GSC-16438-1,,13/606174,Power provision based on self-sacrificing spacecraft, -NASA Goddard Space Flight Center,Application,GSC-16460-1,,13/592409,Autonomic Autopoiesis,08/23/2032 -NASA Goddard Space Flight Center,Application,GSC-16461-1,,13/592412,"Autonomic and Apoptotic Cloud, Autonomic and Apoptotic Grid, Autonomic and Apoptotic Highly Distributed System", -NASA Goddard Space Flight Center,Application,GSC-16485-1,,14/038381,Broadband planar impedance transformer,09/26/2033 -NASA Goddard Space Flight Center,Application,GSC-16516-1,,14/021812,Muti-function microposters inside of microfluidic channel for Lab-On-A-Chip device,09/09/2033 -NASA Kennedy Space Center,Application,KSC-12866,0,"12/843,353",In-Situ Wire Damage Detection System, -NASA Goddard Space Flight Center,Application,GSC-16545-1,,13/534442,INTEGRATED GENOMIC AND PROTEOMIC INFORMATION SECURITY PROTOCOL, -NASA Goddard Space Flight Center,Application,GSC-16555-1,,14/023847,Green Precision Cleaning System,09/11/2033 -NASA Goddard Space Flight Center,Application,GSC-16569-1,,"14/041,720",Mirrorlet array for Integral Field Spectrometers (IFS), -NASA Goddard Space Flight Center,Application,GSC-16674-1,,14/041224,MISSE-7 Control Center,09/30/2033 -NASA Goddard Space Flight Center,Application,GSC-16795-1,,"13/781,121 ",Wallops Flight Facility 6U Advanced CubeSat Ejector (ACE),01/04/2033 -NASA Goddard Space Flight Center,Application,GSC-16805-1,,14/040924,SpaceCube v2.0 Micro,09/30/2033 -NASA Goddard Space Flight Center,Application,GSC-16808-1,,14/040848,SpaceCube v. 
2.0 Flight Power Card,09/30/2033 -NASA Goddard Space Flight Center,Application,GSC-16859-1,,14/037484,Chemical sensors based on 2-dimensional materials,09/26/2033 -NASA Goddard Space Flight Center,Application,GSC-16887-1,,14/037458,Propellant Transfer Assembly Design and Development,09/26/2033 -NASA Headquarters,Issued,HQN-11248-1,6223143,"09/143,969",Quantitative Risk Assessment Software (QRAS) System,08/31/2018 -NASA Kennedy Space Center,Issued,KSC-11641,5730806,"08/437,859",Gas-Liquid Supersonic Cleaning And Cleaning Verification Spray System, -NASA Kennedy Space Center,Issued,KSC-11751,5710377,"08/540,616",Improved Portable Ultrasonic Leak Detector (Combined With KSC-11751-2), -NASA Kennedy Space Center,Issued,KSC-11804,5693871,"08/695,071",Low-Differential Pressure Generator For Evaluating Low Differential Pressure Transducers, -NASA Kennedy Space Center,Issued,KSC-11866-1,5977773,"08/912,035",Non-Intrusive Impedance-Based Cable Tester - Standing Wave Reflectometer, -NASA Kennedy Space Center,Issued,KSC-11884,6039783,"08/772,057",A New Process And Equipment For Conversion Of NOx Scrubber Liquor To Fertilizer (related To KSC-11994), -NASA Kennedy Space Center,Issued,KSC-11884-2,6641638,"09/511,634",Process And Equipment For Nitrogen Oxide Waste Conversion To Fertilizer - Continuation-In-Part Filed 2/17/00, -NASA Kennedy Space Center,Issued,KSC-11937-2,7209567,"10/390,259",Communication System With Adaptive Noise Suppression, -NASA Kennedy Space Center,Issued,KSC-12035-1,6552521,"09/906,014",Improved Single-Station Accurate Location Of Lightning Strikes (Combined With KSC-12276 & KSC-12173), -NASA Kennedy Space Center,Issued,KSC-12049,6627065,"09/977,531",Liquid Galvanic Coatings For Protection Of Imbedded Metals, -NASA Kennedy Space Center,Issued,KSC-12056,6676912,"09/698,607",New Air Pollution Control Technology For Removal Of Nitrogen Oxides From Stationary Combustion Sources, -NASA Kennedy Space Center,Issued,KSC-12092-2,6967051,"09/939,286",Thermal Insulation System And Method (Continuing Patent Application) (Combined With KSC-12092), -NASA Kennedy Space Center,Issued,KSC-12107,6742926,"09/906,018",Thermal Insulation Test Apparatus With Sleeve (Related To KSC-12108), -NASA Kennedy Space Center,Issued,KSC-12108,6487866,"09/906,011",Multipurpose Thermal Insulation Test Apparatus (Related To 12107), -NASA Kennedy Space Center,Issued,KSC-12168,6452510,"09/802,535",Personal Cabin Pressure Monitor And Altitude Warning System, -NASA Kennedy Space Center,Issued,KSC-12190,6764617,"09/994,996","A Novel Ferromagnetic Conducting Lignosulfonic Acid-Doped Polyaniline (Related To KSC-11940, KSC-11940-1, KSC-11940-2, KSC-12154, KSC-12191)", -NASA Kennedy Space Center,Issued,KSC-12191-2,7179404,"11/215,205",Corrosion Prevention Of Cold Rolled Steel Using Water Dispersible Lignosulfonic Acid Doped Polyaniline, -NASA Kennedy Space Center,Issued,KSC-12205,6715914,"10/185,378",Apparatus And Method For Thermal Performance Testing Of Pipelines And Piping Systems, -NASA Kennedy Space Center,Issued,KSC-12220,6917203,"10/235,020",Current Signature Sensor (Combined With KSC-12152), -NASA Kennedy Space Center,Issued,KSC-12221,6757641,"10/185,830",Multisensor Transducer And Weight Factor (Combined With KSC-12359 and KSC-13139), -NASA Kennedy Space Center,Issued,KSC-12235,6793903,"10/014,140",High-Temperature Decomposition Of Hydrogen Peroxide, -NASA Kennedy Space Center,Issued,KSC-12235-2,6955799,"10/923,152",Temperature Decomposition Of Hydrogen Peroxide, -NASA Kennedy Space 
Center,Issued,KSC-12235-3,8029736,"10/923,163",High Temperature Decomposition Of Hydrogen Peroxide, -NASA Kennedy Space Center,Issued,KSC-12236,8511396,"10/476,175",Non-Toxic Environmentally Safe Halon Replacement (HABx), -NASA Kennedy Space Center,Application,KSC-12236-2-PCT,0,/0,"Flame Suppression Agent, System And Users", -NASA Kennedy Space Center,Application,KSC-12236-CIP,,"13/428,736",Non-Toxic Environmentally Safe Halon Replacement (HABx), -NASA Kennedy Space Center,Issued,KSC-12246,6664298,"09/972,296",Zero-Valent Metal Emulsion For Reductive Dehalogenation Of DNAPLs, -NASA Kennedy Space Center,Issued,KSC-12246-2,7037946,"10/701,412",Zero-Valent Metal Emulsion For Reductive Dehalogenation Of DNAPLs, -NASA Kennedy Space Center,Issued,KSC-12278,7400766,"10/783,295",Image Edge Extraction Via Fuzzy Reasoning (FRED) (combined With KSC-12272), -NASA Kennedy Space Center,Issued,KSC-12386,7274907,"10/748,915","Modular Wireless Data Acquisition System (combined With KSC-12479, KSC-12486)", -NASA Kennedy Space Center,Issued,KSC-12390,6824306,"10/318,665",Thermal Insulation Test Apparatus For Flat Specimens, -NASA Kennedy Space Center,Issued,KSC-12394,7239751,"10/750,629",Hypothesis Support Mechanism For Mid-Level Visual Pattern Recognition (PIPR), -NASA Kennedy Space Center,Issued,KSC-12458,7156957,"10/440,543",UV Induced Oxidation Of Nitric Oxide, -NASA Kennedy Space Center,Issued,KSC-12490,7298897,"10/779,551",Noniterative Optimal Binarization Of Gray-Scaled Digital Images Via Fuzzy Reasoning (FRAT) (combined With KSC-12272), -NASA Kennedy Space Center,Issued,KSC-12518,7790128,"10/641,581",Hydrogen Peroxide Catalytic Decomposition, -NASA Kennedy Space Center,Issued,KSC-12539,7285306,"10/684,064",Self-Healing Wire Insulation, -NASA Kennedy Space Center,Issued,KSC-12539-2,8119238,"11/856,218",Self-Healing Wire Insulation, -NASA Kennedy Space Center,Application,KSC-12539-3,0,"13/348,861",Self-Healing Wire Insulation, -NASA Kennedy Space Center,Issued,KSC-12540,6958085,"10/666,821",High Performance Immobilized Liquid Membranes For Carbon Dioxide Separations, -NASA Kennedy Space Center,Issued,KSC-12630,7496237,"11/010,698",Image Processing For Binarization Enhancement Via Fuzzy Reasoning, -NASA Kennedy Space Center,Issued,KSC-12631,7582147,"11/208,122",Metallic Pigment Powder Particle For Use In A Liquid Coating System To Protect Reinforcing Steel In Concrete Structures, -NASA Kennedy Space Center,Issued,KSC-12637,7271199,"10/977,622",Micro-scale Particle Emulsion And Their Application For Removal Of PCBs And Metals Found In Ex Situ Structures, -NASA Kennedy Space Center,Issued,KSC-12664,7404938,"10/845,418",Emission Control System, -NASA Kennedy Space Center,Issued,KSC-12664-3-CIP,7582271,"11/40,294",Emission Control System, -NASA Kennedy Space Center,Issued,KSC-12666,7122166,"10/845,607",Hydrogen Peroxide Concentrator, -NASA Kennedy Space Center,Issued,KSC-12669,7302364,"11/83,420","Integrated Spaceport Automated Data Management Architecture (Combine With KSC-12581, KSC-12583, KSC-12671and KSC-12582)", -NASA Kennedy Space Center,Issued,KSC-12697,7309738,"10/962,827",A New Approach For Achieving Fire Retardancy While Retaining Physical Properties In A Compatible Polymer Matrix, -NASA Kennedy Space Center,Issued,KSC-12697-3,7968648,"11/935,093",A New Approach For Achieving Flame Retardancy While Retaining Physical Properties In A Compatible Polymer Matrix, -NASA Kennedy Space Center,Issued,KSC-12703,8031449,"12/485,979",Integral Battery Power Limiting Circuit For Intrinsically Safe 
Applications, -NASA Kennedy Space Center,Issued,KSC-12723,7790225,"11/239,445",Coating For Corrosion Detection And Prevention, -NASA Kennedy Space Center,Application,KSC-12723-DIV,,"12/792,238",Coating For Corrosion Detection And Prevention, -NASA Kennedy Space Center,Issued,KSC-12848,7781492,"11/759,672",New Organic/inorganic Polymeric Thermal Insulators, -NASA Kennedy Space Center,Issued,KSC-12848-DIV,7977411,"12/835,233",New Organic/inorganic Polymeric Thermal Insulators, -NASA Kennedy Space Center,Application,KSC-12871-CIP,0,"13/915,407",Polyimide Wire Insulation Repair System, -NASA Kennedy Space Center,Application,KSC-12871-DIV1,0,"14/093,701",Polyimide Wire Insulation Repair System, -NASA Kennedy Space Center,Application,KSC-12871-DIV2,0,"14/093,680",Polyimide Wire Insulation Repair System, -NASA Kennedy Space Center,Issued,KSC-12875,7841771,"11/777,711",Self Validating Thermocouple (Combined With KSC-12865), -NASA Kennedy Space Center,Issued,KSC-12878-2-CIP,8163972,"12/465,457",Bimetallic Treatment System and it's application for Removal of PCBs Found in Ex Situ Structures without the Use of a Catalized Agent, -NASA Kennedy Space Center,Issued,KSC-12890,7790787,"11/740,357",New Organic/Inorganic Polymeric Materials, -NASA Kennedy Space Center,Application,KSC-12890-2-DIV,0,"12/834,416",New Organic/Inorganic Polymeric Materials, -NASA Kennedy Space Center,Issued,KSC-12899,8425866,"11/466,624",Gas Phase Oxidation Of NO To NO2, -NASA Kennedy Space Center,Issued,KSC-12978,7842639,"11/749,767",Preparation of a Bimetal Using Mechanical Alloying for the Dehalogenation of Compounds, -NASA Kennedy Space Center,Issued,KSC-12978-DIV,8288307,"12/909,219",Preparation of a Bimetal Using Mechanical Alloying for the Dehalogenation of Compounds, -NASA Kennedy Space Center,Issued,KSC-12983,8409534,"11/692,557",Mercury Emission Control System, -NASA Kennedy Space Center,Application,KSC-13047,0,"12/813,864",Insulation Test Cryostat with Lift Mechanism (Combined with KSC-13048), -NASA Kennedy Space Center,Application,KSC-13047-DIV,0,"14/090,193",Insulation Test Cryostat with Lift Mechanism (Combined with KSC-13048), -NASA Kennedy Space Center,Issued,KSC-13088,8293178,"11/935,545",Improved Thermal Reactivity Of Hydrogen Sensing Pigments In Manufactured Polymer Composites, -NASA Kennedy Space Center,Application,KSC-13088-CON,0,"13/611,856",Improved Thermal Reactivity Of Hydrogen Sensing Pigments In Manufactured Polymer Composites, -NASA Kennedy Space Center,Application,KSC-13088-DIV,0,"13/615,850",Improved Thermal Reactivity Of Hydrogen Sensing Pigments In Manufactured Polymer Composites, -NASA Kennedy Space Center,Application,KSC-13161,0,"12/855,791",PH Sensitive Microcapsule With Corrosion Indicator, -NASA Kennedy Space Center,Application,KSC-13167,0,"12/856,849",Watercore PH Sensitive Microcapsule, -NASA Kennedy Space Center,Application,KSC-13265-CIP2,0,"14/150,502",An Inductive Non-Contact Position Sensor, -NASA Kennedy Space Center,Application,KSC-13278,0,"13/354,576",A Method for Making Elongated Microcapsules Under Simple Shear Conditions, -NASA Kennedy Space Center,Issued,KSC-13285,8593153,"12/843,382",An improved Online Diagnostic Device (ODD) for Wiring Evaluation, -NASA Kennedy Space Center,Issued,KSC-13331,8577639,"13/031,182",A Method for Accurately Calibrating a Spectrometer Using Broadband Light, -NASA Kennedy Space Center,Application,KSC-13336,0,"12/843,487",Sputter Coated wire for in-situ wire damage detection, -NASA Kennedy Space Center,Application,KSC-13343,0,"13/278,710",Conductive 
Carbon Nanotube for use with Desktop Inkjet Printing,
-NASA Kennedy Space Center,Application,KSC-13366,0,"13/523,806",High Performance Self Healing Film,
-NASA Kennedy Space Center,Application,KSC-13579,,"13/895,717",Green PCB Removal From Sediment Systems (GPRSS),
-NASA Kennedy Space Center,Application,KSC-13588,,"13/495,862",Multi-Dimensional Damage Detection For Flat Surfaces,
-NASA Kennedy Space Center,Application,KSC-13592,,"13/542,155",pH sensitive microparticles,
-NASA Kennedy Space Center,Application,KSC-13595,,"14/192,784",Aerogel insulation and composites integrated into unique lay-ups (Incorporates Embodiments from KSC-13702),
-NASA Kennedy Space Center,Application,KSC-13636,,"13/546,880",Incorporation of Chemochromic Indicator for the Presence of Hypergolic Fuels into a Variety of Manufactured Parts,
-NASA Kennedy Space Center,Application,KSC-13638,,"14/176,824",A Two Dimensional Inductive Position Sensor,
-NASA Kennedy Space Center,Application,KSC-13664,,"13/896,896",Regolith Advanced Surface Systems Operations Robot (RASSOR) Excavator,
-NASA Kennedy Space Center,Application,KSC-13689,,"13/961,521",Coherence Multiplexing of Wireless Surface Acoustic Wave Sensors,
-NASA Langley Research Center,Issued,LAR-14673-1,5736642,"08/778,066",Nonlinear Ultrasonic Scanning To Detect Material Defects,01/08/2017
-NASA Langley Research Center,Issued,LAR-14840-1,5841032,"08/792,909",Variable And Fixed Frequency Pulsed Phase-Locked Loop,01/24/2017
-NASA Langley Research Center,Issued,LAR-15205-1,5741883,"08/359,752","Tough, Soluble, Aromatic, Thermoplastic Copolyimides",04/21/2015
-NASA Langley Research Center,Issued,LAR-15282-1,5755571,"08/712,984",Ultrasonic Periodontal Structures Mapping Device,09/09/2016
-NASA Langley Research Center,Issued,LAR-15318-1,5798521,"08/806,732",Distributed Fiber-optic Strain Sensor,02/27/2017
-NASA Langley Research Center,Issued,LAR-15348-1,5632841,"08/416,598","Thin Layer Composite Unimorph Ferroelectric Driver And Sensor, THUNDER",04/04/2015
-NASA Langley Research Center,Issued,LAR-15348-2,6734603,"08/797,553",Thin Layer Composite Unimorph Ferroelectric Driver And Sensor,04/04/2015
-NASA Langley Research Center,Issued,LAR-15351-1-CU,5585083,"08/414,661",Catalyst For Formaldehyde Oxidation,03/30/2015
-NASA Langley Research Center,Issued,LAR-15370-1-SB,5640408,"08/593,438",Quasi Four-Level TM:LuAG Laser (Tm:LuAG Laser),01/27/2016
-NASA Langley Research Center,Issued,LAR-15376-1,5771204,"08/754,642",Relative Phase Measurement Instrument For Multiple-Echo Systems,11/21/2016
-NASA Langley Research Center,Issued,LAR-15406-1,5617873,"08/449,473",Noninvasive Meth/Apparatus For Monitoring Intracranial Pressure & Pressure Vols Index In Humans,05/23/2015
-NASA Langley Research Center,Issued,LAR-15412-1,5606014,"08/511,422",Imide Oligomers And Co-Oligomers Containing Pendent Phenylethynyl Groups And Polymers Therefrom,08/04/2015
-NASA Langley Research Center,Issued,LAR-15412-2,5689004,"08/747,472",Imide Oligomers And Co-Oligomers Containing Pendent Phenylethynyl Groups And Polymers Therefrom,08/04/2015
-NASA Langley Research Center,Issued,LAR-15449-1,6133401,"09/342,462","A Method To Prepare Processable Polyimides With Reactive Endgroups Using 1,3 Bis (3-Aminophenoxyl) Benzene",06/29/2019
-NASA Langley Research Center,Issued,LAR-15449-2,6288209,"09/667,426","Method To Prepare Processable Polyimides With Reactive Endgroups Using 1,3-Bix(3-Aminophenoxyl)Benzene",06/29/2019
-NASA Langley Research Center,Issued,LAR-15507-1,6475147,"09/493,044",Ultrasonic Technique To Measure Intracranial Pressure,01/27/2020
-NASA Langley Research Center,Issued,LAR-15508-1,6545760,"09/535,659",Distributed Rayleigh Scatter Fiber Optic Strain Sensor,03/24/2020
-NASA Langley Research Center,Issued,LAR-15514-1-SB,5991456,"08/654,840",Method Of Improving A Digital Image,05/29/2016
-NASA Langley Research Center,Issued,LAR-15524-1,6000844,"08/810,058",A Method And Apparatus For The Portable Identification Of Material Thickness Of Layers Using A Scanning Linear Heat Source And Infrared Detectorcramer,03/04/2017
-NASA Langley Research Center,Issued,LAR-15525-1-CU,5948965,"08/845,899",Solid State Carbon Monoxide Sensor,04/28/2017
-NASA Langley Research Center,Issued,LAR-15637-1,6015272,"08/673,627",Magnetically Suspended Miniature Fluid Pump And Method Of Making Same,06/26/2016
-NASA Langley Research Center,Issued,LAR-15637-2,6447265,"09/398,878",Magnetically Suspended Miniature Fluid Pump And Method Of Designing The Same,06/26/2019
-NASA Langley Research Center,Issued,LAR-15652-1-CU,6132694,"08/991,075",Catalyst For Oxidation Of Hydro-Carbons And Volatile Organic Compounds,12/16/2017
-NASA Langley Research Center,Application,LAR-15665-1-CU,0,"08/838,596",Catalyst For Carbon Monoxide Oxidation,
-NASA Langley Research Center,Issued,LAR-15745-1,6222007,"09/093,826",Prepreg And Composites Made From Polyimide Salt-Like Solution,05/29/2018
-NASA Langley Research Center,Issued,LAR-15747-1-CU,6200539,"09/357,403",One-Atmosphere Uniform Glow Discharge Plasma Gas Flow Acceleration,07/20/2019
-NASA Langley Research Center,Issued,LAR-15767-1,6180746,"09/316,428",Polyimide Foam From Ether-Containing Monomeric Solutions,05/21/2019
-NASA Langley Research Center,Issued,LAR-15816-1,6629341,"09/430,677",Macro-Fiber Composite Actuator With Interdigitated Electrodes,10/29/2019
-NASA Langley Research Center,Issued,LAR-15816-2,7197798,"10/653,824",A Method For Fabricating A Piezoelectric Composite Apparatus,06/30/2020
-NASA Langley Research Center,Issued,LAR-15817-1,6450820,"09/612,412",A Method Of Encouraging Physiological Self-Regulation Through Modulation Of An Operator's Control Input To A Video Game Or Training Simulator,07/12/2020
-NASA Langley Research Center,Issued,LAR-15818-3,6922242,"10/465,386",Optical Path Switching Based Differential Absorption Radiometry For Substance Detection,06/21/2019
-NASA Langley Research Center,Issued,LAR-15831-1,5994418,"09/316,865",Hollow Polyimide Microspheres,05/21/2019
-NASA Langley Research Center,Issued,LAR-15831-2,6235803,"09/408,652",Hollow Polyimide Microspheres,05/21/2019
-NASA Langley Research Center,Issued,LAR-15831-3,6084000,"09/394,534",Hollow Polyimide Microsphere,05/21/2019
-NASA Langley Research Center,Issued,LAR-15834-1,6359107,"09/575,826",High Performance / High Temperature Resins For Infusion And Transfer Molding Processes,05/18/2020
-NASA Langley Research Center,Issued,LAR-15851-1-CU,6753293,"09/607,211",Process For Coating Substrates With Catalyst Materials,05/11/2021
-NASA Langley Research Center,Issued,LAR-15854-1,6761695,"10/94,023",Technique For Non-Invasive Absolute Measurement Of Intra-Cranial Pressure In Humans,07/28/2022
-NASA Langley Research Center,Issued,LAR-15927-1,6584848,"10/263,292",Dielectric Electrostatic Ultrasonic Transducer (DEUT),09/30/2022
-NASA Langley Research Center,Issued,LAR-15934-1,6566648,"09/535,661",Edge Triggered Apparatus And Method For Measuring Strain In Bragg Gratings,03/24/2020
-NASA Langley Research Center,Issued,LAR-15943-1,6746410,"10/121,932",Transducer Assembly To Measure Changes In Circumferential Expansion Of The Human Skull Due To Changes In Intracranial Pressure,11/16/2022
-NASA Langley Research Center,Issued,LAR-15954-1,6376830,"09/606,120",Single Laser Sweep Full S-Parameter Characterization Of Fiber Bragg Gratings,06/15/2020
-NASA Langley Research Center,Issued,LAR-15959-1,7019621,"09/753,370",Structural Tailored High Displacement Ferro-Electric Sensors And Actuators,01/02/2021
-NASA Langley Research Center,Issued,LAR-15977-1,6133330,"09/337,475",Polyimide Foam From Monomeric Solutions,05/21/2019
-NASA Langley Research Center,Issued,LAR-15990-1,6551251,"09/784,413",Dual Transmission Interface For Passive Fetal Heart Monitoring,02/13/2021
-NASA Langley Research Center,Issued,LAR-16001-1,7371358,"10/975,117",Catalyst For Treatment And Control Of Post-Combustion Emissions,10/25/2024
-NASA Langley Research Center,Issued,LAR-16005-1,6426496,"09/648,529",High Precision Solid State Wavelength Monitor,11/26/2020
-NASA Langley Research Center,Issued,LAR-16012-1-CU,6834125,"09/888,701",Improvement To The Multiscale Retinex With Color Restoration,06/25/2021
-NASA Langley Research Center,Issued,LAR-16020-1,6629446,"09/758,115",Single Vector Force Balance Calibration System,01/26/2022
-NASA Langley Research Center,Issued,LAR-16079-1,6939940,"09/757,398","Liquid Crystalline Thermosets From Oligo-Esters, Ester-Imides And Ester-Amides",01/05/2021
-NASA Langley Research Center,Issued,LAR-16083-1,8062129,"11/536,811",A Method And System For Multi-Player Game Playing Where Physiological Characteristics Of The Players Modulate Their Relative Advantage Over Opponents Or Competitors,05/22/2030
-NASA Langley Research Center,Issued,LAR-16116-1,6888346,"10/21,683",Giant Magnetoresistive Based Self-Nulling Probe For Deep Flaw Detection,11/28/2021
-NASA Langley Research Center,Issued,LAR-16176-2,7109287,"10/988,407",Space Environmentally Durable Polyimides And Copolyimides,03/03/2025
-NASA Langley Research Center,Issued,LAR-16220-1,6867533,"09/696,527","Shaping, Tuning, And Positioning Membrane Structures Using Electroactive Polymer Actuators",10/23/2020
-NASA Langley Research Center,Issued,LAR-16231-1-CU,7092539,"09/997,113",MEMS Based Acoustic Array,11/28/2021
-NASA Langley Research Center,Issued,LAR-16256-1,8628333,"11/129,756","Method And System For Training Psychophysiological Skills Conducive To Optimal Performance Through Perturbation Of Training Tasks, Environments And Devices",08/27/2029
-NASA Langley Research Center,Application,LAR-16256-1-CON,0,"14/153,434","Method And System For Training Psychophysiological Skills Conducive To Optimal Performance Through Perturbation Of Training Tasks, Environments And Devices",05/13/2025
-NASA Langley Research Center,Issued,LAR-16299-1,7871682,"10/956,520",Composite Roll Press And Processes,12/07/2025
-NASA Langley Research Center,Issued,LAR-16307-1-SB,7390768,"10/056,845",Methodology For The Effective Stabilization Of Tin-Oxide-Based Oxidation/Reduction Catalysts,01/22/2022
-NASA Langley Research Center,Issued,LAR-16307-2,7985709,"10/956,515",Methodology For The Effective Stabilization Of Tin-Oxide-Based Oxidation/Reduction Catalysts,04/16/2027
-NASA Langley Research Center,Application,LAR-16308-2,0,"12/726,403",Catalyst For Decomposition Of Nitrogen Oxides (Divisional of LAR 16308-1-CU),
-NASA Langley Research Center,Issued,LAR-16311-1,6777525,"10/115,812","Heat, Moisture, Chemical Resistant Polyimide Compositions And Methods For Making And Using The Same",04/01/2022
-NASA Langley Research Center,Issued,LAR-16323-1,7253903,"11/27,930",Method To Linearize Non-Linear Physical Measurements,06/24/2025
-NASA Langley Research Center,Issued,LAR-16324-1,6714132,"10/011,229",Proximity Sensor,11/27/2021
-NASA Langley Research Center,Issued,LAR-16324-2,7106203,"10/783,486",Self-Activating System And Method For Alerting When An Object Or Person Is Left Unattended,11/27/2021
-NASA Langley Research Center,Issued,LAR-16326-1,7060991,"10/410,605",Method For Measuring Thickness Of Small Radius Of Curvature Structures Using A Thermal Line Scanner,04/10/2023
-NASA Langley Research Center,Issued,LAR-16332-1-CU,6842543,"09/888,816",Method Of Improving A Digital Image Having White Zones,06/25/2021
-NASA Langley Research Center,Issued,LAR-16363-1,6856073,"10/390,675",Radial Electric Field Piezo-Diaphragm Fluidic Control Systems,03/13/2023
-NASA Langley Research Center,Issued,LAR-16383-1-NP,7588699,"10/288,797","Electrically Conductive, Optically Transparent Polymer/Carbon Nanotube Composites And Process For Preparation Thereof",07/02/2023
-NASA Langley Research Center,Issued,LAR-16383-2,7972536,"12/546,724","Electrically Conductive, Optically Transparent Polymer/Carbon Nanotube Composites And Process For Preparation Thereof",10/12/2029
-NASA Langley Research Center,Issued,LAR-16390-1-SB,7318915,"10/342,660",Ruthenium Stabilization Mechanism For Next Generation Oxidation And Reduction Catalyst Systems,01/13/2023
-NASA Langley Research Center,Issued,LAR-16393-1,6919669,"10/392,491",Sonic Transducers And Sensors Using Radial Field Diaphragms,05/31/2023
-NASA Langley Research Center,Issued,LAR-16406-1-CU,7491169,"10/805,816","Ultrasonic Method And Means To Assess Compartment Syndrome (Hyper Pressure States In Arm, Leg Muscle/Tendon Compartments)",09/20/2025
-NASA Langley Research Center,Issued,LAR-16409-1,8015819,"11/536,790",Wet Active Chevron Nozzle For Controllable Jet Noise Reduction,09/17/2028
-NASA Langley Research Center,Issued,LAR-16432-1,7692116,"10/188,525",Synthesis Of Carbon Nanotubes Using High Average Power Ultrafast Laser Ablation,07/03/2022
-NASA Langley Research Center,Issued,LAR-16437-1-NP,7169374,"11/129,751",Templated Growth Of Carbon Nanotubes,05/11/2025
-NASA Langley Research Center,Issued,LAR-16440-1,6740048,"10/263,285",Method Of Determining Intracranial Pressure From Skull Expansion Measurements,09/25/2022
-NASA Langley Research Center,Issued,LAR-16475-1,7194912,"10/890,843",Carbon Nanotube-Based Structural Health Monitoring Sensor,08/07/2024
-NASA Langley Research Center,Issued,LAR-16496-1,7104498,"10/867,114",Blown Channel-Wing System For Thrust Deflection And Force/Moment Generation,10/03/2024
-NASA Langley Research Center,Issued,LAR-16499-1,7491428,"10/730,188",Method for the controlled deposition and alignment of single walled carbon nanotubes,11/15/2025
-NASA Langley Research Center,Issued,LAR-16510-1,6773407,"10/263,286",Non-Invasive Method Of Determining Absolute Intracranial Pressure,12/25/2022
-NASA Langley Research Center,Issued,LAR-16516-1,6879893,"10/675,502",Autonomous Health Monitoring Architecture Hardware,09/30/2023
-NASA Langley Research Center,Issued,LAR-16517-1,7048228,"10/678,474",Partial-Span Slotted Wing For Transonic Aircraft,10/03/2023
-NASA Langley Research Center,Issued,LAR-16532-1,7334998,"11/5,624",Low-Noise Fan Exit Guide Vanes,12/06/2024
-NASA Langley Research Center,Issued,LAR-16538-1,7675619,"12/129,967",Micro-LiDAR For In-Flight Flow Velocimetry And Boundary Layer Control,11/11/2028
-NASA Langley Research Center,Issued,LAR-16549-1,7262543,"10/943,655","Inductor (L)-Capacitor ( C ) (aka, LC) Sensor Circuit For Piezo Material Monitoring",04/17/2025
-NASA Langley Research Center,Application,LAR-16565-1,0,"13/020,025",e-Sensor: Quantitative Imaging of Electric Fields and Electric Potentials,
-NASA Langley Research Center,Issued,LAR-16566-1,7285932,"10/975,119",Method And Apparatus For Loss Of Control Inhibitor Systems,10/27/2024
-NASA Langley Research Center,Issued,LAR-16571-1,7075295,"10/839,448",LC Sensing Element For Closed Cavities Having Low Radio Frequency Transmissivity,04/30/2024
-NASA Langley Research Center,Issued,LAR-16571-2,7589525,"11/421,886",Magnetic Field Response Sensor For Conductive Media,09/26/2024
-NASA Langley Research Center,Issued,LAR-16571-3,7759932,"12/533,520",Magnetic Field Response Sensor For Conductive Media,07/31/2029
-NASA Langley Research Center,Issued,LAR-16573-1,7129467,"10/943,831",Carbon Nanotube Based Light Sensor,09/29/2024
-NASA Langley Research Center,Issued,LAR-16575-1,7181942,"10/943,649",Instrumented Crimping Tool For Critical Wiring Applications,11/24/2024
-NASA Langley Research Center,Issued,LAR-16605-1,7623993,"10/731,742",Energy-extraction-based active noise control system,11/27/2026
-NASA Langley Research Center,Issued,LAR-16615-1,6956066,"10/779,552",Polyimide Foams,02/11/2024
-NASA Langley Research Center,Issued,LAR-16615-2,7541388,"11/124,640",Polyimide Foams,05/05/2025
-NASA Langley Research Center,Issued,LAR-16616-1,7758927,"10/956,704",Laser-Induced Fabrication Of Metallic Interlayers And Patterns In Polyimide Films,09/30/2024
-NASA Langley Research Center,Issued,LAR-16640-1,8089677,"12/135,180",Programmable Smart Grating Device With Quantum Aperture Array,08/05/2029
-NASA Langley Research Center,Issued,LAR-16696-1,7048235,"10/678,397",Slotted Aircraft Wing (a.k.a. Full Span Slotted Wing),10/03/2023
-NASA Langley Research Center,Issued,LAR-16698-1,7394181,"11/76,824",High Performance High Efficiency Hybrid Actuator Systems (HYBAS),03/04/2025
-NASA Langley Research Center,Issued,LAR-16736-1,7962252,"11/422,984","Semi Autonomous Flight System With Avionics Sensor Board, Processing Board, And Flight Control Board",04/07/2027
-NASA Langley Research Center,Issued,LAR-16845-1,8083986,"12/315,520",Advanced Thermo-Electric Materials with Nano-Voids,12/04/2028
-NASA Langley Research Center,Issued,LAR-16854-1,7381186,"10/911,755",Ultrasonic Method And Means To Assess Compartment Syndrome Part B,08/02/2024
-NASA Langley Research Center,Issued,LAR-16858-1,7667847,"11/533,921","Thin, High-Contrast Targets for Ultralightweight Structures",12/15/2026
-NASA Langley Research Center,Issued,LAR-16867-1,7402264,"11/076,460",Electroactive polymer-carbon nanotube-ceramic nanocomposites,02/27/2026
-NASA Langley Research Center,Issued,LAR-17548-1,8236413,"12/166,852",Fail Safe High-Temperature Composite Structure,07/07/2030
-NASA Langley Research Center,Issued,LAR-16867-2,7527751,"12/109,490",Sensing/Actuating Materials Made From Carbon Nanotube Polymer Composites And Methods For Making Same,04/25/2028
-NASA Langley Research Center,Issued,LAR-16868-1,7341883,"11/242,415",Lattice Matched SiGe Layer On Single Crystalline Sapphire Substrate,09/27/2025
-NASA Langley Research Center,Issued,LAR-16871-1,6413227,"09/459,384",Optimization Of Ultrasonic Method For Assessment Of Changes In Intracranial Pressure Through Measurement Of Skull Expansion,12/02/2019
-NASA Langley Research Center,Issued,LAR-16872-1,7514726,"11/387,086",Graded Indexed SiGe Layers on Lattice Matched SiGe Layers on Sapphire,06/10/2027
-NASA Langley Research Center,Issued,LAR-16874-1,7723464,"11/674,321",Novel Aromatic/Aliphatic Diamine Derivatives For Advanced Compositions And Polymers,02/13/2027
-NASA Langley Research Center,Issued,LAR-16877-1,7186367,"11/110,996",Double-Vacuum Bag (DVB) Process For Volatile Management In Resin Matrix Composite Manufacturing,07/08/2025
-NASA Langley Research Center,Issued,LAR-16885-1,7890311,"11/177,664",Method Of Simulating Flow-Through Area Of A Pressure Regulator,12/15/2029
-NASA Langley Research Center,Issued,LAR-16886-1,7375808,"11/536,120",Dual Sensing Capable Germ Or Toxic Chemical (GTC) Sensor Using Quantum Aperture Array With Surface Plasmon Polariton (SPP),09/28/2026
-NASA Langley Research Center,Issued,LAR-16900-1,7278324,"11/155,923",CNT based crack growth detector and strain field monitor,08/07/2024
-NASA Langley Research Center,Issued,LAR-16906-1,8529825,"12/928,128",Fabrication of Nanovoid-imbedded Bismuth Telluride with Low Dimensional System,02/01/2028
-NASA Langley Research Center,Issued,LAR-16907-1,7783060,"11/126,518",A Deconvolution Approach For The Mapping Of Acoustic Sources (DAMAS) Determined From Phased Microphone Arrays,03/27/2029
-NASA Langley Research Center,Issued,LAR-16908-1,7086593,"10/839,445","Magnetic Field Response Measurement Acquisition System (Includes LAR-16138-1, LAR-16554-1, LAR-16591-1, LAR-16614-1, LAR-16617-1, & LAR-16908-1)",05/04/2024
-NASA Langley Research Center,Issued,LAR-16946-1,7484930,"11/169,256",Blowing Flap Side Edge,07/01/2025
-NASA Langley Research Center,Issued,LAR-16950-1,7379231,"11/470,771",Ferroelectric Light Control Device,09/07/2026
-NASA Langley Research Center,Issued,LAR-16958-1,7510802,"11/371,575",Fabrication of Multilayer Ferritin Array for Bionanobattery,08/24/2027
-NASA Langley Research Center,Issued,LAR-16970-1,7231832,"11/229,439",Method For Determining Cracks On And Within Composite Panels,12/02/2025
-NASA Langley Research Center,Issued,LAR-16974-1,7047807,"11/203,583","Methods Of Mounting Erectable, Flexible And Fixed Magnetic Field Response Sensors",08/08/2025
-NASA Langley Research Center,Issued,LAR-17003-1,7467921,"11/239,436",Rotor Blade Vortex Management Via Boundary Layer Separation Control,09/22/2025
-NASA Langley Research Center,Issued,LAR-17013-1,7647771,"11/374,480",Thermally Driven Miniature Piston Actuator,11/12/2026
-NASA Langley Research Center,Issued,LAR-17017-1,7537182,"11/250,700",Enhanced Separation Control Via Simultaneous Multiple-Location Forcing,06/18/2027
-NASA Langley Research Center,Issued,LAR-17032-1,7321185,"11/370,377",A New Concept For Active Bistable Twisting Structures,03/06/2026
-NASA Langley Research Center,Issued,LAR-17044-1,7558371,"12/254,150",Applications Of Twin-Detection XRD Methods On SiGe (111) Layers On Sapphire (0001) Substrate,10/20/2028
-NASA Langley Research Center,Issued,LAR-17073-1,7580323,"11/419,818",Interdigitated Electrode Actuators For Straining Optical Fibers (IDEAS),05/27/2026
-NASA Langley Research Center,Application,LAR-17088-1,0,"13/032,045",Nanotubular Toughening Inclusions For Improved Mechanical Reinforcement,
-NASA Langley Research Center,Issued,LAR-17112-1,7507472,"11/81,888",Multi-Layer Electroactive Devices,09/08/2025
-NASA Langley Research Center,Issued,LAR-17116-1,7506541,"11/328,468",Wireless Fuel Volume Measurement Techniques,10/18/2026
-NASA Langley Research Center,Issued,LAR-17126-1,7666939,"11/432,201",A Method For Producing Stable Dispersions Of Single Walled Carbon Nanotubes In Polymer Matrices Using Noncovalent Interactions,05/11/2026
-NASA Langley Research Center,Issued,LAR-17128-1,7285933,"11/188,227",Method And Apparatus For Loss Of Control Inhibitor Systems,07/20/2025
-NASA Langley Research Center,Issued,LAR-17135-1,8217143,"11/827,567",Fabrication of Metal Nanoshells Derived by a Biotemplate,11/17/2030
-NASA Langley Research Center,Issued,LAR-17149-2,8608993,"13/053,633",A Method For Producing Multifunctional Structural Thermally Stable Nanocomposites With Aligned Carbon Nanotubes,05/20/2026
-NASA Langley Research Center,Issued,LAR-17154-1,7655595,"11/421,924",Sprayable Low Temperature Oxidation Catalyst Coating Based on Sol-Gel Technology,08/11/2027
-NASA Langley Research Center,Issued,LAR-17154-2,7781366,"12/369,932",Sol-Gel Based Oxidation Catalyst And Coating System Using Same (Divisional of -1),02/12/2029
-NASA Langley Research Center,Issued,LAR-17155-1,7255004,"11/229,438",Wireless Fluid-Lead Measuring Dipstick Assembly (Broken Out Of LAR-16974-1),03/22/2026
-NASA Langley Research Center,Issued,LAR-17157-1,7507784,"11/124,508","Liquid Crystalline Thermosets From Ester, Ester-Imide, And Ester-Amide Oligomers",01/05/2021
-NASA Langley Research Center,Issued,LAR-17163-1,7467536,"11/428,017",Multi-axis Accelerometer Calibration System Using a Cuboidal Attitude Positioning Device,08/18/2027
-NASA Langley Research Center,Issued,LAR-17165-1,7595112,"11/461,150",Method To Prepare Hybrid Metal/Composite Laminates By Resin Infusion,02/01/2028
-NASA Langley Research Center,Issued,LAR-17168-1,7732998,"11/462,114",Cylindrical Shaped Micro Fiber Composite (CMFC) Actuators,09/24/2027
-NASA Langley Research Center,Issued,LAR-17169-1,7446459,"11/486,200",Hybrid Force/Stress Amplified Piezoelectric Energy Harvesting Transducer System,07/13/2026
-NASA Langley Research Center,Application,LAR-17211-1,0,"13/557,250",Floating Ultrasonic Transducer Inspection System For Nondestructive Evaluation,
-NASA Langley Research Center,Issued,LAR-17213-1,8020805,"11/831,233",New Configuration and Power Technology for Application-Specific Scenarios of High Altitude Airships,03/25/2030
-NASA Langley Research Center,Issued,LAR-17224-1,7998368,"12/272,826",Effective Dispersion of Carbon Nanotubes in an Aqueous Solution and Their Application on Bionanotechnology,06/04/2029
-NASA Langley Research Center,Issued,LAR-17229-1,7760778,"11/670,044",Thin-film evaporative cooling concept for a solid-state laser diode crystal,02/01/2027
-NASA Langley Research Center,Issued,LAR-17235-1,7414708,"11/461,569","Multi-Point, Multi-Component Interferometric Rayleigh/Mie Doppler Velocimeter",08/01/2026
-NASA Langley Research Center,Issued,LAR-17237-1,8294989,"12/512,344",Photonic DART (Densely Accumulated Ray-point by micro-zone-plaTe),04/25/2031
-NASA Langley Research Center,Issued,LAR-17240-1,8111943,"12/423,907",Computational Visual Servo:Automatic Measurement and Control for Smart Image Enhancement,09/14/2030
-NASA Langley Research Center,Issued,LAR-17241-1,8018815,"12/490,747",Optical Data Storage System with Micro Zone Plate,12/05/2029
-NASA Langley Research Center,Issued,LAR-17242-1,8174695,"12/508,018",MICRO-RING THIN-FILM SPECTROMETER ARRAY,09/03/2030
-NASA Langley Research Center,Issued,LAR-17243-1,8411214,"12/144,937",Variable Visibility Glasses for Flight Training,02/01/2032
-NASA Langley Research Center,Issued,LAR-17245-1,8344281,"12/751,075",Use of Beam Deflection to Control Electron Beam Wire Deposition Processes,04/26/2031
-NASA Langley Research Center,Issued,LAR-17257-1,7590904,"11/531,703",Detecting the loss of configuration access of reprogrammable Field Programmable Gate Array (FPGA) without external circuitry,10/07/2027
-NASA Langley Research Center,Issued,LAR-17267-1,7704553,"11/710,386",Method of Depositing Metals onto Carbon Allotropes and Compositions Therefrom,06/26/2028
-NASA Langley Research Center,Issued,LAR-17268-1,7647543,"11/535,574",Integrated mitigation for single event upset (SEU) of reprogrammable field programmable gate arrays (FPGA) operating in radiation environments,09/27/2026
-NASA Langley Research Center,Issued,LAR-17280-1,7159774,"11/305,854",Magnetic Field Response Measurement Acquisition System,04/30/2024
-NASA Langley Research Center,Issued,LAR-17286-1,8081734,"12/628,446","Miniature, Low-Power X-Ray Tube Using A Microchannel Electron Generator Electron Source",02/26/2030
-NASA Langley Research Center,Issued,LAR-17290-1,7737867,"11/696,333",Advance Display Media for Improved Airport Surface Operations,06/11/2028
-NASA Langley Research Center,Issued,LAR-17293-1,7991491,"11/559,420",Control Device And Method For Generating Control Signals For Technical Devices,03/04/2030
-NASA Langley Research Center,Issued,LAR-17294-1,8430327,"11/671,089",Low Profile Sensors Using Self-Resonating Inductors,08/22/2028
-NASA Langley Research Center,Issued,LAR-17295-1,7683797,"11/671,131",System For Providing Damage Detection And Thermal Protection,02/15/2028
-NASA Langley Research Center,Issued,LAR-17300-1,7538860,"11/840,363",A Method and Apparatus for Determination of the Reflection Wavelength of Multiple Low-Reflectivity Bragg Gratings in a Single Fiber,12/31/2027
-NASA Langley Research Center,Application,LAR-17307-1,0,"11/466,569",Low Mass Free Piston Space Radiator,
-NASA Langley Research Center,Issued,LAR-17317-1,8401217,"11/780,500",Extreme Low Frequency Acoustic Measurement Portable System,11/29/2030
-NASA Langley Research Center,Application,LAR-17317-2,,"13/771,735",Extreme Low Frequency Acoustic Measurement System,07/20/2027
-NASA Langley Research Center,Application,LAR-17318-1,0,"13/082,734",Preparation of Metal Nanowire Decorated Carbon Allotropes,08/29/2027
-NASA Langley Research Center,Issued,LAR-17321-1,8545986,"12/043,276","Ultra High-Temperature, Lightweight Insulation Material Compositions And Methods For Making And Using Them",06/27/2030
-NASA Langley Research Center,Application,LAR-17323-1,0,"11/757,780",Concept And Design Of Oxygen Band Radar For Surface Air Pressure Remote Sensing,
-NASA Langley Research Center,Issued,LAR-17325-1,8060350,"12/56,686",Unsteady aerodynamic reduced-order models (ROMs) for efficient aeroelastic analysis,03/04/2030
-NASA Langley Research Center,Issued,LAR-17327-1,8117013,"12/002,857",Standardized Radiation Shield Design Method: 2005 HZETRN,07/05/2030
-NASA Langley Research Center,Application,LAR-17330-1,0,"11/946,207",Multi Functional Composite And Honeycomb Panels,
-NASA Langley Research Center,Issued,LAR-17332-1,7958733,"11/762,827",Active Flow Effectors by Embedded Shape Memory Alloy Actuation,11/04/2029
-NASA Langley Research Center,Application,LAR-17332-2,,"13/096,305",Jet Engine Exhaust Nozzle Flow Effector,07/05/2027
-NASA Langley Research Center,Issued,LAR-17335-1,8170234,"12/108,562",Extension Of DAMAS Phased Array Processing For Spatial Coherence Determination (DAMAS-C),03/02/2031
-NASA Langley Research Center,Issued,LAR-17346-1,7649439,"11/465,503",Thermoelectric Devices From Thin Metal System To Include Flexible Substrate And Method Of Making Same,04/28/2027
-NASA Langley Research Center,Issued,LAR-17355-1,8164485,"11/863,964","A Method of Providing a Synthetic Vision System Flight Management Visualization Display for Aiding Pilot Preview, Rehearsal and/or Review and Real-Time Visual Acquisition of Flight Mission Progress",06/24/2029
-NASA Langley Research Center,Application,LAR-17361-1,0,"12/138,709",Airfoil/ Wing Flow Control Using Flexible Extended Trailing Edge,
-NASA Langley Research Center,Issued,LAR-17365-1,7784732,"11/958,673",Boundary-Layer-Ingesting S-Duct Diffusing Inlet Flow Control Using Hybrid Vane/Jet Approach at Transonic Flow Conditions,04/26/2029
-NASA Langley Research Center,Issued,LAR-17381-1,8044294,"12/254,016","Thermoelectric material made with highly oriented twinned alloy of Si, Ge, C, and Sn on the basal plane of trigonal substrate and thermoelectric device made with the same material",10/11/2029
-NASA Langley Research Center,Issued,LAR-17382-1,8052069,"12/393,238",Advanced High Performance Vertical Hybrid Electroactive Synthetic Jet Actuator (ASJA-V),10/18/2029
-NASA Langley Research Center,Issued,LAR-17384-1,8662412,"12/354,808",Advanced Modified High Performance Synthetic Jet Actuator With Optimized Curvature Shape Chamber (ASJA-M),10/27/2031
-NASA Langley Research Center,Issued,LAR-17385-1,7671306,"11/589,011",Apparatus For Free Electron Laser Ablative Synthesis Of Carbon Nanotubes,03/10/2028
-NASA Langley Research Center,Application,LAR-17386-1,0,"12/851,584",Fine-Grained Targets For Free Electron Laser Synthesis Of Carbon Nanotubes,
-NASA Langley Research Center,Issued,LAR-17387-1,7663077,"11/589,010",Process For Optimizing The Yield And Production Rate Of Single-Walled Carbon Nanotubes Using Free Electron Laser Synthesis,01/23/2028
-NASA Langley Research Center,Issued,LAR-17390-1,8235309,"12/355,782",Advanced High Performance Horizontal Piezoelectric Hybrid Synthetic Jet Actuator (ASJA-H),04/02/2031
-NASA Langley Research Center,Issued,LAR-17391-1,7792015,"12/187,458",A Byzantine-Fault Tolerant Self-Stabilizing Protocol for Distributed Clock Synchronization Systems,08/14/2028
-NASA Langley Research Center,Issued,LAR-17402-1,7964698,"11/935,036","Wholly Aromatic Liquid Crystalline Polyetherimide (LC-PEI) Resin for manufacturing high modulus fibers, films, injection molded articles and foams",09/27/2029
-NASA Langley Research Center,Issued,LAR-17405-1,8226767,"12/254,134",Hybrid Bandgap Engineering for Rhombohedral Super-Hetero-Epitaxy,05/11/2031
-NASA Langley Research Center,Application,LAR-17413-2,0,"12/641,603",Nanoparticle-Containing Thermoplastic Composites and Methods of Preparing Same,
-NASA Langley Research Center,Issued,LAR-17425-1,8059273,"12/496,788",Micro Spectrometer for Parallel Light,08/19/2029
-NASA Langley Research Center,Application,LAR-17427-1,0,"12/174,360",Tailorable Dielectric Materials with Complex Permittivity Characteristics providing High Dielectric Constants and Low Loss Factors,
-NASA Langley Research Center,Issued,LAR-17432-1,8112243,"12/118,172",Forward Voltage Short Pulse (FVSP) Technique for Measuring High Power Laser Diode Array (LDA) Junction Temperature,11/27/2030
-NASA Langley Research Center,Issued,LAR-17433-1,7902815,"11/856,807",A Multi-Measurement Wheel Sensor,06/19/2029
-NASA Langley Research Center,Issued,LAR-17440-1,7845215,"11/844,571",Resonant Difference-Frequency Atomic Force Ultrasonic Microscope,02/03/2029
-NASA Langley Research Center,Issued,LAR-17444-1,8042739,"11/864,012",Wireless Tamper Detection Sensor Requiring No Electrical Connection,11/08/2029
-NASA Langley Research Center,Issued,LAR-17447-1,8002219,"11/941,119",Multifunctional Boost Protective Cover (MBPC) For A Launch Abort System (LAS),01/16/2030
-NASA Langley Research Center,Application,LAR-17455-3,,"13/938,622",A Nanotube Film Electrode and an Electroactive Device Fabricated with the Nanotube Film Electrode and Methods for Making Same,10/28/2031
-NASA Langley Research Center,Issued,LAR-17469-1,8094306,"12/487,735",Micro Ring Grating Spectrometer with Moveable Aperture Slit,08/27/2030
-NASA Langley Research Center,Issued,LAR-17477-1,7993567,"12/131,420",Auxiliary Electrode For Electrospinning Process,10/02/2029
-NASA Langley Research Center,Issued,LAR-17478-1,7883052,"11/954,452",Integration Of A Turbo-Fan Engine Above An Aircraft's Wing Which Reduces Drag And Community Noise,09/24/2029
-NASA Langley Research Center,Issued,LAR-17480-1,7711509,"11/930,222",A Method To Calibrate Magnetic Response Fluid-Level Sensors Using Complete Sensor Immersion In Fluid,03/18/2028
-NASA Langley Research Center,Issued,LAR-17485-1,7851062,"12/124,273",Composition of and Method to Prepare Hybrid Laminates from Metal Plasma Coated Fibers and Polymer Matrix Resins,09/09/2028
-NASA Langley Research Center,Issued,LAR-17485-2,8017190,"12/906,633",Metal/Fiber Laminate and Fabrication Using A Porous Metal/Fiber Preform,05/21/2028
-NASA Langley Research Center,Issued,LAR-17487-1,8157207,"11/836,517",Jet Engine Nozzle Exit Configurations And Associated Systems And Methods,04/15/2029
-NASA Langley Research Center,Issued,LAR-17488-1,7814786,"12/015,626",Thin-Film Sensor For Measuring Liquid-Level And Temperature Having No Electrical Connections,08/26/2028
-NASA Langley Research Center,Issued,LAR-17493-1,8424200,"12/098,000","Conducting Nanotubes Or Nanostructures Based Composites, Method Of Making Them And Applications",05/16/2031
-NASA Langley Research Center,Issued,LAR-17502-1,8529249,"11/860,703",Quick Change Ceramic Flame Holder for High Output Torch,03/14/2030
-NASA Langley Research Center,Application,LAR-17502-1-CON,,"14/021,325",Flame Holder System,09/25/2027
-NASA Langley Research Center,Issued,LAR-17514-1,8196858,"12/721,833",Mars Airplane,02/15/2031
-NASA Langley Research Center,Issued,LAR-17526-1,7991595,"12/138,768",Adaptive Refinement Tools (ARTs) for Tetrahedral Unstructured Grids,06/07/2029
-NASA Langley Research Center,Issued,LAR-17528-1,7878348,"12/248,339",Lightweight Lunar Surface Remote Manipulator System (LSRMS),10/09/2028
-NASA Langley Research Center,Issued,LAR-17535-1,8206674,"12/152,414",High Pressure Boron Vaporization Synthesis Of Few-Walled Boron Nitride Nanotube Fibers,04/13/2030
-NASA Langley Research Center,Issued,LAR-17539-1,8164328,"12/493,573",Development Of Eddy Current Techniques For The Detection Of Stress Corrosion Cracking In Space Shuttle Primary Reaction Control Thrusters,01/08/2030
-NASA Langley Research Center,Issued,LAR-17547-1,7848381,"12/366,722",Line Tunable Visible and Ultraviolet Laser,07/05/2029
-NASA Langley Research Center,Issued,LAR-17553-1,8257491,"12/288,379",NEW RHOMBOHEDRAL ALIGNMENT OF CUBIC SEMICONDUCTOR ON TRIGONAL SUBSTRATE AT A HIGH TEMPERATURE,07/06/2031
-NASA Langley Research Center,Issued,LAR-17554-1,7769135,"12/288,380",X-ray Diffraction Wafer Mapping Method for Rhombohedral Super-Hetero-Epitaxy,10/20/2028
-NASA Langley Research Center,Application,LAR-17555-1,0,"13/020,194",Front-Flight-Path Turbulence & Vortex Detection System,
-NASA Langley Research Center,Issued,LAR-17573-1,7855368,"12/178,173",Air Coupled Acoustic Thermography Nondestructive Evaluation System And Method,10/09/2028
-NASA Langley Research Center,Issued,LAR-17576-1,7742663,"12/261,376",Innovative Structural Design And Materials For Transmission To And Protection Of Ultraviolet And Infrared Radiation Sensors,10/30/2028
-NASA Langley Research Center,Issued,LAR-17579-1,8673649,"12/463,475",Wireless Chemical Sensing Using Changes To An Electrically Conductive Reactant Within Sensor's Magnetic Field,01/04/2031
-NASA Langley Research Center,Issued,LAR-17593-1,8167204,"12/253,422",Open Circuit Damage Location Sensor Having No Electrical Connections,10/30/2030
-NASA Langley Research Center,Issued,LAR-17608-1,7901611,"12/274,652",Methodology for calculating fiber distribution during electrospinning,01/12/2029
-NASA Langley Research Center,Issued,LAR-17609-1,8255732,"12/429,603",A Self-Stabilizing Byzantine-Fault-Tolerant Clock Synchronization Protocol,12/30/2030
-NASA Langley Research Center,Issued,LAR-17629-1,7813599,"12/390,606",A Method for Shape Determination of Multi-Core Optical Fiber,02/23/2029
-NASA Langley Research Center,Issued,LAR-17634-1,7893602,"12/328,162",Distributed transducer capable of generating or sensing a transverse point load,03/14/2029
-NASA Langley Research Center,Application,LAR-17636-1,0,"13/752,495",PICA on Edge: Edgewise strips of PICA ablator to eliminate gaps in capsule heat shield,01/29/2033
-NASA Langley Research Center,Issued,LAR-17638-1,8508413,"13/082,839",Fractal Dielectric Microstrip Antenna using Patterned Substrate Material Geometries,03/02/2032
-NASA Langley Research Center,Issued,LAR-17651-1,8259104,"12/493,666",Domain Decomposition By the Advancing-Partition Method for Parallel Unstructured Grid Generation,03/09/2031
-NASA Langley Research Center,Issued,LAR-17655-1,8111832,"12/424,793",Local Intelligence Based Impedance Optimization Scheme for Adaptive Noise Reduction,06/25/2030
-NASA Langley Research Center,Issued,LAR-17656-1,8108178,"12/467,475",DIRECTED DESIGN OF EXPERIMENTS FOR VALIDATING PROBABILITY OF DETECTION CAPABILITY OF NDE SYSTEMS (DOEPOD),05/05/2030
-NASA Langley Research Center,Application,LAR-17668-1,0,"12/322,591",Device for the Large-Scale synthesis of High-Quality Boron Nitride Nanotubes,02/04/2029
-NASA Langley Research Center,Issued,LAR-17681-1,8347479,"12/849,906",Thermally-Activated Crack Healing Mechanism for Metallic Materials,04/30/2031
-NASA Langley Research Center,Application,LAR-17681-2,,"13/719,740",System for Repairing Cracks in Structures,08/04/2030
-NASA Langley Research Center,Issued,LAR-17681-3,8679642,"14/037,850",System for Repairing Cracks in Structures,08/04/2030
-NASA Langley Research Center,Application,LAR-17689-1,0,"12/393,289",Negative Dielectric Constant Material Based on Ion Conducting Materials,08/20/2031
-NASA Langley Research Center,Application,LAR-17694-1,0,"12/974,359",A Synthetic Quadrature Phase Detector/Demodulator for Fourier Transform Spectrometers,03/09/2032
-NASA Langley Research Center,Issued,LAR-17695-1,8658004,"12/470,689",Vapor-Barrier Vacuum Isolation System,08/01/2032
-NASA Langley Research Center,Application,LAR-17696-1,0,"12/543,686",Asymmetric Dielectric Elastomer Composite Material,03/16/2031
-NASA Langley Research Center,Issued,LAR-17705-1,8672107,"13/042,655",Tunable damper capable of tailoring the structural damping for individual modes of vibration using minimal space and minimal impact on the system frequencies and mode shapes.,11/28/2031
-NASA Langley Research Center,Issued,LAR-17709-1,7912101,"12/628,423",Increased Efficiency Nonlinear Optical Interactions,12/01/2029
-NASA Langley Research Center,Issued,LAR-17711-1,8179203,"12/569,984",Wireless Electrical Applications/Devices Using floating Electrodes Electromagnetically Coupled to Open-Circuit Devices,07/09/2030
-NASA Langley Research Center,Application,LAR-17723-1,0,"12/699,334",Novel material for wound healing applications.,
-NASA Langley Research Center,Issued,LAR-17724-1,8378659,"12/703,221",Electroactive polymer fibers for structural health monitoring.,01/22/2031
-NASA Langley Research Center,Issued,LAR-17735-1,8490463,"12/881,431","Assessment and Calibration of Crimp Tool Equipped with Ultrasonic Analysis, including Phantom Construction",10/22/2031
-NASA Langley Research Center,Issued,LAR-17736-1,8147920,"12/370,755",Controlled Deposition And Alignment Of Carbon Nanotubes (Continuation of LAR 16499-1),02/13/2029
-NASA Langley Research Center,Application,LAR-17738-1,0,"12/685,280",Sensory Metallic Materials,
-NASA Langley Research Center,Issued,LAR-17743-1,8473663,"13/011,198",Reconfigurable Peripheral Component Interconnect local bus controller and target design.,10/07/2031
-NASA Langley Research Center,Issued,LAR-17745-1,7906043,"12/550,431","Electrically Conductive, Optically Transparent Polymer/Carbon Nanotube Composites And Process For Preparation Thereof",11/01/2022
-NASA Langley Research Center,Application,LAR-17877-1,,"13/277,859",Autonomous Leading-Edge Slat Device for Reduction of Aeroacoustic Noise Associated with Aircraft Wings,
-NASA Langley Research Center,Application,LAR-17747-1,0,"13/029,471",Temperature Sensing Using Temperature Sensitive Dielectric Material in Proximity to Open-Circuit Sensors Having No Electrical Connections,
-NASA Langley Research Center,Application,LAR-18090-1,,"13/786,608",No Moving Part - Variable Frequency Fluidic Oscillator,03/06/2033
-NASA Langley Research Center,Application,LAR-17747-1-CON,,"14/193,861",Wireless Temperature Sensor Having No Electrical Connections and Sensing Method for Use Therewith,02/17/2031
-NASA Langley Research Center,Issued,LAR-17748-1,8303922,"12/546,185",Exfoliation of Hexagonal Boron Nitride,11/19/2030
-NASA Langley Research Center,Issued,LAR-17759-1,7935414,"12/406,315",Multilayer Electroactive Polymer Composite Material (Continuation of LAR 17112-1),03/18/2029
-NASA Langley Research Center,Issued,LAR-17766-1,8452073,"12/750,991",Method for Closed Loop Process Control for Electron Beam Freeform Fabrication and Deposition Processes,10/02/2031
-NASA Langley Research Center,Application,LAR-17769-1,0,"12/894,279",Modifying Surface Energy via Laser Ablative Surface Patterning,
-NASA Langley Research Center,Application,LAR-17777-1,,"13/443,940",Process to Fabricate Specific Sized Monodisperse Polystryene Microparticles,
-NASA Langley Research Center,Application,LAR-17780-1,0,"12/387,703","Boron Nitride Nanotube Fibrils and Yarns (Filed by JLabs, their ref: ID 1248/Docket 2025(JSA)",
-NASA Langley Research Center,Application,LAR-17786-1,0,"12/964,381",Smart Optics Material Characterization System,
-NASA Langley Research Center,Application,LAR-17789-1,0,"12/969,076",Electroactive scaffold,
-NASA Langley Research Center,Application,LAR-17791-1,0,"13/070,552",Apparatus and Method for Selective Enhancement of Surface Plasmon Polaritons to Initiate and Sustain Low Energy Nuclear Reactions in Metal Hydride Systems,
-NASA Langley Research Center,Issued,LAR-17799-1,8655513,"13/046,030",Realtime 3-D Image Processing and Enhancement,05/25/2031
-NASA Langley Research Center,Application,LAR-17800-1,0,"13/527,638",Method for generating laser linear frequency modulation waveform,
-NASA Langley Research Center,Application,LAR-17801-1,0,"13/566,077","Coherent Doppler lidar for measuring altitude, ground velocity, and air velocity of aircraft and spaceborne vehicles",08/03/2032
-NASA Langley Research Center,Application,LAR-17813-1,0,"13/198,817",Durable Joining Technology for Uniformly-Curved Composite Sandwich Structures,08/17/2032
-NASA Langley Research Center,Application,LAR-17813-1-CON,,"14/200,708","Systems, Apparatuses, and Methods for Using Durable Adhesively Bonded Joints for Sandwich Structures",08/05/2031
-NASA Langley Research Center,Application,LAR-17830-1,0,"12/925,047",Actuators and Sensors Fabricated with Boron Nitride Nanotubes (BNNTs) and BNNT Polymer Composites,
-NASA Langley Research Center,Issued,LAR-17831-1,8651429,"13/214,453",Blended Cutout Flap Design for the Reduction of Jet-Flap Interaction Noise,08/22/2031
-NASA Langley Research Center,Application,LAR-17832-1,0,"13/214,469",Aircraft Engine Nozzle Systems for Jet Noise Reduction by Acoustic Shielding,
-NASA Langley Research Center,Application,LAR-17833-1,0,"13/214,481",Active Aircraft Pylon Noise Control System,
-NASA Langley Research Center,Issued,LAR-17836-1,8671763,"12/850,708",Sub-Surface Windscreen for Outdoor Measurement of Infrasound,02/18/2031
-NASA Langley Research Center,Application,LAR-17841-1,0," 14/202,699",High Mobility Transport Layer Structures for Rhombohedral Si/Ge/SiGe Devices,03/10/2034
-NASA Langley Research Center,Application,LAR-17848-1,0,"13/796,626","Spectroscopy using Electric Permittivity, Magnetic Permeability and Electrical Conductivity Spatial Profiles",03/12/2033
-NASA Langley Research Center,Issued,LAR-17856-1,8198976,"12/688,309",Flexible Thin Metal Film Thermal Sensing System (CIP of LAR 17346-1),09/20/2030
-NASA Langley Research Center,Application,LAR-17857-1,0,"12/967,690",A GPS-Based Pitot-Static Calibration Method Using Global Output-Error Optimization,
-NASA Langley Research Center,Application,LAR-17869-1,,"13/166,226",Team Electronic Gameplay Combining Different Means of Control,
-NASA Langley Research Center,Application,LAR-17886-1,,"13/324,527",Method and Apparatus to Detect Wire Pathologies Near Crimped Connector,
-NASA Langley Research Center,Application,LAR-17887-1,,"13/743,750",Interrogations Leading to Recertification of Wire Crimps and Other Joining Technologies.,01/17/2033
-NASA Langley Research Center,Issued,LAR-17888-1,8605262,"13/167,093","Time Shifted PN Codes for CW LIDAR, RADAR, and SONAR",12/28/2031
-NASA Langley Research Center,Issued,LAR-17894-1,8494687,"13/166,121",3-D Super Resolution Algorithm for Flash LIDAR Image Enhancement,12/11/2031
-NASA Langley Research Center,Application,LAR-17895-1,,"13/166,166",Method and System for Physiologically Modulating Videogames or Simulations Which Use Motion-Sensing Input Devices,
-NASA Langley Research Center,Application,LAR-17902-1,,"13/068,329",Neutron and Ultraviolet Radiation Shielding Films Fabricated Using Boron Nitride Nanotubes and Boron Nitride Nanotube Composites,
-NASA Langley Research Center,Application,LAR-17906-1,,"13/272,027",Abnormal Grain Growth Suppression in Aluminum Alloys,
-NASA Langley Research Center,Issued,LAR-17908-1,8655094,"13/105,004",New Photogrammetry System to Measure Relative 6-Degree-of-Freedom Motion Between Two Bodies Using Heterogeneous Cameras Having Arbitrary Wide-Angle Lenses with Non-Overlapping Fields of View,04/23/2032
-NASA Langley Research Center,Application,LAR-17918-1,,"13/136,216",High Kinetic Energy Penetrator Shielding and High Wear Resistance Materials Fabricated with Boron Nitride Nanotubes (BNNTs) and BNNT Polymer Composites,
-NASA Langley Research Center,Issued,LAR-17919-1,8661653,"13/191,882",Z-Shields from Fiber Metal Laminate,07/27/2031
-NASA Langley Research Center,Application,LAR-17919-2,,"13/963,484",Z-Shields from Fiber Metal Laminate,07/27/2031
-NASA Langley Research Center,Application,LAR-18097-1,,"13/591,320",Arbitrary Shape Initialization of Fiber Optic Shape Sensing Systems,08/22/2032
-NASA Langley Research Center,Application,LAR-17923-1,,"13/411,793",A Method of Creating Micro-scale Silver Telluride Grains Covered with Bismuth Nanospheres as Nano-bridges for Thermoelectric Application,11/14/2032
-NASA Langley Research Center,Application,LAR-17947-1,,"13/775,809",Linear Fresnel Spectrometer Chip with Gradient Line Grating,02/25/2033
-NASA Langley Research Center,Application,LAR-17952-1,,"13/411,891",Multi-Point Interferometric Phase Change Detection Algorithm,
-NASA Langley Research Center,Application,LAR-17958-1,,"13/195,251",Wireless Open-Circuit In-Plane Strain and Displacement Sensors Having No Electrical Connections,07/16/2032
-NASA Langley Research Center,Issued,LAR-17959-1,8087494,"12/894,326",Method of Making a Composite Panel Having Subsonic Transverse Wave Speed Characteristics (Continuation of LAR 16535-1),09/30/2030
-NASA Langley Research Center,Application,LAR-17966-1,,"13/457,687",Wide Bandwidth Magneto-Resistive Sensor Based Eddy Current Probe,
-NASA Langley Research Center,Application,LAR-17967-1,,"13/293,846",Relaxor Piezoelectric Single Crystal Multilayer Stacks for Energy Harvesting Transducers (RPSEHT),
-NASA Langley Research Center,Application,LAR-17972-1,,"13/200,314",BxCyNz Nanotube Formation via the Pressurized Vapor/Condenser,
-NASA Langley Research Center,Application,LAR-17973-1,,"13/200,316",Efficient Boron Nitride Nanotube (BNNT) and BxCyNz Nanotube Formation via Combined Laser-Gas Flow Levitation (JLab's ref: 2010-09-13-RRW),
-NASA Langley Research Center,Application,LAR-17977-1,,"13/447,513",Variable Stiffness Shape Adaptive Multi-Layered Polymer Composite,
-NASA Langley Research Center,Application,LAR-17980-1,,"13/457,540",Space Utilization Optimization Tools,
-NASA Langley Research Center,Application,LAR-17984-1,,"13/326,779",FLEXible Side Edge Link (FLEXSEL) for Trailing-Edge Flap Aeroacoustic Noise Reduction,12/15/2031
-NASA Langley Research Center,Application,LAR-17985-1,,"13/231,386",An Acoustic Beamforming Array Using Feedback-Controlled Microphones for Tuning and Self-Matching of Frequency Response (Michigan State University's ref: TEC2011-0045),
-NASA Langley Research Center,Application,LAR-17987-1,,"13/364,814",A Self-Stabilizing Distributed Clock Synchronization Protocol For Arbitrary Digraphs,
-NASA Langley Research Center,Application,LAR-17991-1,,"13/200,315",Production Rig for the Synthesis of BNNTs via the PVC Method,
-NASA Langley Research Center,Issued,LAR-17993-1,8662213,"13/342,264",Locomotion of Amorphous Surface Robots,05/06/2032
-NASA Langley Research Center,Application,LAR-17993-2,,"14/189,019",Locomotion of Amorphous Surface Robots,01/03/2033
-NASA Langley Research Center,Application,LAR-17994-1,,"13/273,516","Manufacturing of Low Mass, Large-Scale Hierarchical Thin Film Structural Systems",
-NASA Langley Research Center,Application,LAR-17996-1,,"14/202,289",Nanostructure Neutron Converter Layer Development,03/10/2034
-NASA Langley Research Center,Issued,LAR-18006-1,8671551,"13/363,413",Crimp Quality Assessment from Jaw Position-Ultrasonic Transmission Analysis,02/01/2032
-NASA Langley Research Center,Application,LAR-18006-2,,"14/193,086",Crimp Quality Assessment from Jaw Position-Ultrasonic Transmission Analysis,02/01/2032
-NASA Langley Research Center,Issued,LAR-18016-1,8636407,"13/029,426",Wireless Temperature Sensor Having No Electrical Connections and Sensing Method For Use Therewith,11/23/2031
-NASA Langley Research Center,Application,LAR-18021-1,,"13/417,347",Flap Side Edge Liners for Airframe Noise Reduction,07/31/2032
-NASA Langley Research Center,Application,LAR-18023-1,,"13/417,349",Landing Gear Door Liners for Airframe Noise Reduction,03/12/2032
-NASA Langley Research Center,Application,LAR-18024-1,,"13/417,351",External Acoustic Liners for Multi-Functional Aircraft Noise Reduction,
-NASA Langley Research Center,Application,LAR-18026-1,,"13/286,715",Synthesis of Novel Copoly(imide oxetane)s with Unique Surface Properties,
-NASA Langley Research Center,Application,LAR-18257-1,,"14/105,757",A Structural Joint With Multi-Axis Load Carrying Capacity,12/13/2033
-NASA Langley Research Center,Issued,LAR-18032-1,8229716,"12/981,432",Fast Tracking Methods and Systems for Air Traffic Modeling Using a Monotonic Lagrangian Grid (US Naval Research Laboratory ref: 100148-US2),12/29/2030
-NASA Langley Research Center,Application,LAR-18034-1,,"13/291,372",Compact Active Vibration Control System,
-NASA Langley Research Center,Application,LAR-18037-1,,"13/453,717",A Multifunctional Lightning Protection and Detection System for Aerospace Vehicles,
-NASA Langley Research Center,Application,LAR-18040-1,,"13/986,089",Multi-Functional BN-BN Composite,03/29/2033
-NASA Langley Research Center,Application,LAR-18065-1,,"13/860,697",Variable Acceleration Force Calibration System,04/11/2033
-NASA Langley Research Center,Application,LAR-18070-1,,"13/923,307",Transparent and Ubiquitous Sensing Technology,06/20/2033
-NASA Langley Research Center,Application,LAR-18071-1,,"13/923,312",Using Ubiquitous Conductor to Power and Interrogate Wireless Passive Sensors and Construct Sensor Network,
-NASA Langley Research Center,Application,LAR-18073-1,,"13/941,441",Doped Chiral Polymer Negative Index Materials (DCPNIM),07/12/2033
-NASA Langley Research Center,Application,LAR-18077-1,,"13/630,459",Flight Deck Technology and Procedure for Pilots to Generate Flight-Optimizing Trajectory Requests that Avoid Nearby Traffic,09/28/2032
-NASA Langley Research Center,Application,LAR-18089-1,,"13/786,713",Synchronized Sweeping Jet Actuators,03/06/2033
-NASA Langley Research Center,Application,LAR-18127-1,,"13/913,782",Synergistic Chemical and Topographical Surface Modifications and Articles of Manufacture for Dynamic Insect Adhesion Mitigation,06/10/2033
-NASA Langley Research Center,Application,LAR-18131-1,,"13/774,422",Puncture- healing Thermoplastic Resin Carbon Fiber Reinforced Composites towards More Damage/Impact Tolerant Systems,
-NASA Langley Research Center,Application,LAR-18132-1,,"13/673,360",Modeling of Laser Ablation and Plume Chemistry in a Boron Nitride Nanotube Production Rig,11/09/2032
-NASA Langley Research Center,Application,LAR-18143-1,,"13/694,286",In-situ Mechanical Property Measurements of Amorphous Carbon-boron Nitride Nanotube,11/15/2032
-NASA Langley Research Center,Application,LAR-18144-1,,"13/836,609",Method and System for Physiologically Modulating Videogames and Simulations Which Use Gesture and Body Image Sensing Control Input Devices,03/15/2033
-NASA Langley Research Center,Application,LAR-18160-1,,"13/864,396",Tension Stiffened and Tendon Actuated Space Manipulators,04/17/2033
-NASA Langley Research Center,Application,LAR-18166-1,,"13/764,062",Reactive Orthotropic Lattice Diffuser (ROLD) for Reducing Aerodynamic Noise from Aircraft Flap Tips,03/12/2032
-NASA Langley Research Center,Application,LAR-18179-1,,"13/792,489",Extreme Reduced Instruction Set Computing (xRISC) for High Speed Execution of Computing Algorithms,03/11/2033
-NASA Langley Research Center,Application,LAR-18183-1,,"13/834,294",Height Control and Deposition Measurement for the Electron Beam Free Form Fabrication (EBF3) Process,03/15/2033
-NASA Langley Research Center,Application,LAR-18184-1,,"13/987,706",Conductive Polymer/Carbon Nanotube Structural Materials and Methods for Making Same,08/23/2033
-NASA Langley Research Center,Application,LAR-18186-1,,"12/482,503",Flexible Volumetric Structure,
-NASA Langley Research Center,Application,LAR-18202-1,,"13/713,033",Ground-to-Space Laser Calibration System,12/13/2032
-NASA Langley Research Center,Application,LAR-18204-1,,"13/800,379",Quasi-Static Electric Field Generator,03/13/2033
-NASA Langley Research Center,Application,LAR-18211-1,,"13/781,918",A Statistically Based Approach to Broadband Liner Design and Assessment,03/01/2033
-NASA Langley Research Center,Application,LAR-18217-1,,"13/771,116",A Graphical Acoustic Liner Design and Analysis Tool,02/20/2033
-NASA Langley Research Center,Application,LAR-18246-1,,"13/765,714",Tethered Vehicle Control and Tracking System,02/13/2033
-NASA Langley Research Center,Application,LAR-18266-1,,"14/079,914",Airborne Wind Profiling Algorithm for Doppler Wind Lidar (APOLO),11/14/2033
-NASA Langley Research Center,Application,LAR-18267-1,,"13/838,260",Method and System for Physiologically Modulating Action Role-playing Open World Video Games and Simulations Which Use Gesture and Body Image Sensing Control Input Devices,
-NASA Langley Research Center,Application,LAR-18270-1,,"14/079,965",Airborne Doppler Wind Lidar Post Data Processing Software DAPS-LV,11/14/2033
-NASA Langley Research Center,Application,LAR-18301-1,,"13/838,163",Flap Edge Noise Reduction Fins (FENoRFins),03/15/2033
-NASA Langley Research Center,Application,LAR-18318-1,,"14/191,898",In-Situ Load System (ILS) for Calibrating and Validating Aerodynamic Properties of Scaled Aircraft in Ground-based Aerospace Testing Applications,02/27/2034
-NASA Langley Research Center,Application,LAR-18374-1,,"14/072,019",Modulated Sine Waves for Differential Absorption Measurements Using a CW Laser System,06/23/2031
-NASA Glenn Research Center,Issued,LEW-16183-1,5866518,"08/786,360",PS300 - Self Lubricating Readily Polished High Temperature Composite,01/16/2017
-NASA Glenn Research Center,Issued,LEW-16519-2,6291838,"09/448,406",Gas Sensing Diode,11/15/2019
-NASA Glenn Research Center,Issued,LEW-16901-1,7190741,"10/274,756",A Real-Time Signal-To-Noise Ratio Estimation Technique For BPSK And QPSK Modulation Using The Active Communications Channel,10/21/2022
-NASA Glenn Research Center,Issued,LEW-17153-1,6550696,"09/794,794",Lean Direct Injection Combustor/Multi Point Integrate Module Fuel-Air Mixer,02/27/2021
-NASA Glenn Research Center,Issued,LEW-17157-1,6869480,"10/198,668",Method For Production Of Atomic Scale Step Height Reference Specimens With Atomically Flat Surfaces,07/17/2022
-NASA Glenn Research Center,Issued,LEW-17166-1,7497443,"11/121,850","Resilient, Flexible, Pressure-Activated Seal",05/03/2025
-NASA Glenn Research Center,Issued,LEW-17167-1,6667725,"10/196,391",Radio Frequency (RF) Telemetry System For Sensors And Actuators,07/11/2022
-NASA Glenn Research Center,Issued,LEW-17170-1,6706549,"10/124,689",Common-Layered Architecture For Semiconductor Silicon Carbide (CLASSiC) Bulk Fabrication,04/12/2022
-NASA Glenn Research Center,Issued,LEW-17182-1,7086648,"10/652,088",Acoustic Seal,08/22/2023
-NASA Glenn Research Center,Issued,LEW-17240-1,7427428,"10/601,657",Mechanically Improved Interphase Coating For Silicon-Carbide Fiber-Reinforced Silicon-Carbide Matrix Composites,06/24/2023
-NASA Glenn Research Center,Issued,LEW-17256-1,6845664,"10/263,980",MEMS Direct Chip Attach (MEMS-DCA) Packaging Methodologies For Harsh Environments,10/03/2022
-NASA Glenn Research Center,Issued,LEW-17256-2,7518234,"10/926,206",MEMS Direct Chip Attach Packaging Methodologies And Apparatus For Harsh Environments,08/25/2024
-NASA Glenn Research Center,Issued,LEW-17269-2,8212138,"11/696,441",Reverse-Bias Protected Solar Array With Integrated ByPass Battery,04/04/2027
-NASA Glenn Research Center,Application,LEW-17269-3,0,"13/482,493",Reverse-Bias Protected Solar Array With Integrated ByPass Battery,
-NASA Glenn Research Center,Issued,LEW-17291-1,6784276,"10/202,643",Improved Processing For Polyimdes Via Concentrated Solid Monomer Reactants Approach,07/25/2022
-NASA Glenn Research Center,Issued,LEW-17293-1,7023118,"10/390,256",A Comprehensive C++ Controller For A Magnetically Supported Vertical Rotor: Version 1.0,03/12/2023
-NASA Glenn Research Center,Issued,LEW-17293-2,6809450,"10/729,580",Software For System For Controlling A Magnetically Levitated Rotor,12/04/2023
-NASA Glenn Research Center,Issued,LEW-17299-1,6881820,"10/147,477",Polyimide Rod-Coil Block Copolymers As Membrane Materials For Ion Conduction,05/13/2022
-NASA Glenn Research Center,Issued,LEW-17317-1,7687016,"10/777,630",Process For Improving Properties Of Silicon Carbide (SiC) Fibers And SiC Fiber-Reinforced Ceramic Matrix Composites,02/13/2024
-NASA Glenn Research Center,Application,LEW-17317-2,0,"12/709,086",Process For Improving Properties Of Silicon Carbide (SiC) Fibers And SiC Fiber-Reinforced Ceramic Matrix Composites,
-NASA Glenn Research Center,Issued,LEW-17345-2,7813406,"11/402,997",Temporal Laser Pulse Manipulation Using Multiple Optical Ring Cavities,04/13/2026
-NASA Glenn Research Center,Issued,LEW-17383-1,6967462,"10/455,139",Wireless Consumer Power,06/05/2023
-NASA Glenn Research Center,Application,LEW-17458-2,0,"13/113,458",Compact Solid-state Entangled Photon Source,
-NASA Glenn Research Center,Issued,LEW-17483-1,7191013,"10/983,230",Hand Held Device For Wireless Powering And Interrogation Of BioMEMS Sensors And Actuators,11/08/2024
-NASA Glenn Research Center,Issued,LEW-17484-5,7268939,"11/363,300",Tracking Of Cells With A Compact Microscope Imaging System Using Intelligent Controls,02/24/2026
-NASA Glenn Research Center,Issued,LEW-17494-1,7458221,"10/693,850","Self-Sealing, Smart, Variable Area Nozzle (S3VAN) For Dynamic Flow Control In Gas Turbine Engines",10/23/2023
-NASA Glenn Research Center,Issued,LEW-17498-1,7187835,"11/44,063",Selective Wavelength Filtering,01/28/2025
-NASA Glenn Research Center,Issued,LEW-17510-1,7416062,"10/693,853",Torsional Magnetorheological Fluid Resistant Device (TMRFRD),10/23/2023
-NASA Glenn Research Center,Issued,LEW-17517-1,7326027,"10/856,361",Flow-Field Control-Rods To Stabilize Flow In A Centrifugal Compressor,05/25/2024
-NASA Glenn Research Center,Issued,LEW-17520-1,7259692,"10/931,205",Hybrid Power Management (HPM) Upgrade,09/01/2024
-NASA Glenn Research Center,Issued,LEW-17551-1,7410714,"10/891,599",Unitized Regenerative Fuel Cell System,07/15/2024
-NASA Glenn Research Center,Issued,LEW-17561-1,7400096,"10/894,225",Large Area Permanent Magnet ECR Plasma Source,07/19/2024
-NASA Glenn Research Center,Issued,LEW-17589-1,7305935,"10/925,499",Slotted Antenna Rectangular Waveguide Plasma Source For Ion Beam And Electron Beam Production,08/25/2024
-NASA Glenn Research Center,Issued,LEW-17592-1,7704622,"10/926,457",New Ion Conducting Organic/Inorganic Hybrid Polymers,08/26/2024
-NASA Glenn Research Center,Application,LEW-17595-1,0,"13/018,611",A Method Of Improving The Thermo-Mechanical Properties Of Fiber-Reinforced Silicon Carbide Matrix Composites,
-NASA Glenn Research Center,Issued,LEW-17605-1,8394492,"10/974,991",Skin Modified Aerogel Monoliths For Improved Ruggedness And Lower Hydrophylicity,10/28/2024
-NASA Glenn Research Center,Issued,LEW-17618-1,7015304,"10/897,279",High Tg Polyimides For Resin Transfer Molding (RTM),07/23/2024
-NASA Glenn Research Center,Issued,LEW-17618-1-REIS,"RE43,880","11/429,639",Solvent-Free Low Melt Viscosity Imide Oligomers and Thermosetting Polyimide Composites,05/08/2026
-NASA Glenn Research Center,Application,LEW-17618-3,,"13/952,872",High Tg Polyimides For Resin Transfer Molding (RTM),07/29/2033
-NASA Glenn Research Center,Issued,LEW-17630-1,7534519,"11/228,185",Bi-Electrode Supported Cell For High Power Density Solid Oxide Fuel Cells,09/16/2025
-NASA Glenn Research Center,Application,LEW-17634-1,0,"11/228,184",Solid Oxide Fuel Cell Stack Design With Bi-Electrode Supported Cells,
-NASA Glenn Research Center,Application,LEW-17634-2,0,"12/860,210",Solid Oxide Fuel Cell Stack Design With Bi-Electrode Supported Cells,
-NASA Glenn Research Center,Issued,LEW-17642-2,7308164,"11/398,734",Energetic Atomic And Ionic Oxygen Textured Optical Surfaces For Blood Glucose Monitoring,03/23/2026
-NASA Glenn Research Center,Issued,LEW-17642-4,7305154,"11/483,887",Energetic Atomic And Ionic Oxygen Textured Optical Surfaces For Blood Glucose Monitoring,07/11/2026
-NASA Glenn Research Center,Issued,LEW-17661-1 with LEW-17765-1,7438030,"11/213,604",Method of Fabricating Silicon Carbide Corrugated Diaphragms and Modular Actuator,08/26/2025
-NASA Glenn Research Center,Issued,LEW-17664-1,7500350,"11/44,471",Elimination Of Lifetime Limiting Mechanism Of Hall Thrusters,01/28/2025
-NASA Glenn Research Center,Issued,LEW-17671-1,7493869,"11/311,183",Very Large Area/Volume Microwave ECR Plasma And Ion Source,12/16/2025
-NASA Glenn Research Center,Issued,LEW-17672-1,7261783,"10/946,286",Low Density High Creep Resistant Single Crystal Superalloy For Turbine Airfoils,09/22/2024
-NASA Glenn Research Center,Issued,LEW-17678-1,7624566,"11/40,304",Magnetic Circuit For Hall Effect Plasma Accelerator,01/18/2025
-NASA Glenn Research Center,Issued,LEW-17694-1,7397978,"11/180,990",Carrier Structure For Packaging Microphotonic Millimeter-Wave Receiver Based On Lithium Niobate Electro-Optic Resonator Disk Technology,07/13/2025
-NASA Glenn Research Center,Issued,LEW-17704-1,7250723,"11/16,735",Cathode Luminescence Light Source For Broad Band Application In The Visible,12/21/2024
-NASA Glenn Research Center,Issued,LEW-17765-1 with LEW-17661-1,7438030,"11/213,604",Side Sliding Microactuator,10/21/2025
-NASA Glenn Research Center,Issued,LEW-17786-1,8197249,"11/412,935",Fully-Premixed Low-Emissions High-Pressure Multi-fuel Burner,04/28/2026
-NASA Glenn Research Center,Issued,LEW-17814-1,7574137,"11/418,304",Multi-wavelength Time-coincident Optical Communications System,05/05/2026
-NASA Glenn Research Center,Issued,LEW-17820-1,7755292,"11/625,545",Method For Ultraminiature Fiber Light Source,01/22/2027
-NASA Glenn Research Center,Issued,LEW-17820-2,8264134,"12/795,356",Method For Ultraminiature Fiber Light Source,09/11/2032
-NASA Glenn Research Center,Issued,LEW-17825-1,8163243,"11/517,555",Zero G Condensing Heat Exchanger With Integral Disinfection,09/07/2026
-NASA Glenn Research Center,Issued,LEW-17826-1,7385692,"11/412,924",Method And System For Fiber Optic Determination Of Nitrogen And Oxygen Concentrations In Ullage Of Liquid Fuel Tanks,04/28/2026
-NASA Glenn Research Center,Issued,LEW-17859-1,7389675,"11/434,578",Miniaturized Metal (Metal Alloy)/PdOx/SiC Schottky Diode Gas Sensors For Hydrogen And Hydrocarbons Detection At High Temperatures,05/12/2026
-NASA Glenn Research Center,Issued,LEW-17859-2,8001828,"12/143,139",Miniaturized Metal (Metal Alloy) PdOx/Sic Hydrogen And Hydrocarbon Gas Sensors,06/20/2028
-NASA Glenn Research Center,Issued,LEW-17877-1,7876276,"11/499,982",Antenna Near-Field Probe Station Scanner,08/02/2026
-NASA Glenn Research Center,Application,LEW-17877-2,,"12/857,004",Antenna Near-Field Probe Station Scanner,
-NASA Glenn Research Center,Issued,LEW-17904-1,7425650,"11/378,553",Syntheis Of Asymmetric Dianhydrides,03/15/2026
-NASA Glenn Research Center,Issued,LEW-17904-2,7381849,"11/890,104",Synthesis Of Asymmetrical Benzophenone Dianhydride And Asymmetrical 6F-Dianhydride And Polyimides Therefrom (ALSO See LEW 18236-1),07/19/2027
-NASA Glenn Research Center,Application,LEW-17915-1,0,"12/536,969",Secure Optical Communications Using Quantum Two-Photon Transparency Modulation Spectroscopy,
-NASA Glenn Research Center,Issued,LEW-17916-1,8052854,"11/754,255",Miniature Amperometric Solid Electrolyte Carbon Dioxide Sensor,05/25/2027
-NASA Glenn Research Center,Application,LEW-17916-2,,"13/267,978",Miniature Amperometric Solid Electrolyte Carbon Dioxide Sensor,
-NASA Glenn Research Center,Application,LEW-17945-1,0,"11/677,654",Portable Unit For Metabolic Analysis PUMA,
-NASA Glenn Research Center,Issued,LEW-17951-1,8545786,"10/621,752",Manufacture Of Porous Net-Shaped Materials Comprising Alpha Or Beta Tricalcium Phosphate Or Mixtures Thereof,07/16/2023
-NASA Glenn Research Center,Issued,LEW-17954-1,8016543,"11/695,435",Composite Case Armor,04/02/2027
-NASA Glenn Research Center,Application,LEW-17963-1,0,"11/860,661",Passive Gas/Liquid Separation Within a Fuel Cell or Electrolysis Cell Using A Conductive Porous Separator,
-NASA Glenn Research Center,Issued,LEW-17975-1,7382944,"11/489,813",Aluminization And Hyperthermal Atomic Oxygen Texturing Of Polymethylmethacralate Optical Fibers For Blood Glucose Monitoring,07/14/2026
-NASA Glenn Research Center,Issued,LEW-17991-1,7390161,/0,Toughened Composite Structures,06/24/2025
-NASA Glenn Research Center,Issued,LEW-18003-1,7583169,"11/689,770",RF MEMS Switches Utilizing Non-Metallic Thin Film Cantilevers/Bridges With Controlled Stress And Conductivity,03/22/2027
-NASA Glenn Research Center,Issued,LEW-18042-1,8067478,"11/582,693",A Method of Crosslinking Aerogels Using a One-pot Reaction Scheme,10/16/2026
-NASA Glenn Research Center,Application,LEW-18042-2,0,"13/242,425",A Method of Crosslinking Aerogels Using a One-pot Reaction Scheme,
-NASA Glenn Research Center,Application,LEW-18043-1,7341040,"11/486,460",Supercharged Two-Cycle Engines Employing Novel Single Element Reciprocating Shuttle Inlet Valve Mechanisms And With A Variable Compression Ratio,07/14/2026
-NASA Glenn Research Center,Application,LEW-18048-1,0,"12/285,157",Two And Three Dimensional Near Infrared Subcutaneous Structure Imager Using Adaptive Nonlinear Video Processing,
-NASA Glenn Research Center,Issued,LEW-18049-1,7909897,"11/946,079",Direct Fuel Impingement Planar-Array-Microreactor,11/28/2028
-NASA Glenn Research Center,Issued,LEW-18054-1,7501032,"11/364,283",High Work Output Ni-Ti-Pt High Temperature Shape Memory Alloys And Associated Processing Methods,02/28/2026
-NASA Glenn Research Center,Issued,LEW-18059-1,8242162,"11/956,848",Fluorescent On-Off Chemical Sensors,11/30/2019
-NASA Glenn Research Center,Issued,LEW-18076-1,7999173,"11/689,431",Dust removal from solar cells,03/21/2027
-NASA Glenn Research Center,Application,LEW-18076-2,,"13/198,896",Dust Removal from Solar Cells,
-NASA Glenn Research Center,Issued,LEW-18089-1,8077103,"11/774,574",Cup Cylindrical Waveguide Antenna,07/06/2027
-NASA Glenn Research Center,Issued,LEW-18138-1,7904282,"11/689,874",In-Flight Fault Accommodation Through Automated Control Parameter Changes,03/22/2027
-NASA Glenn Research Center,Application,LEW-18205-1,0,"12/317,232",Branched Rod-Coil Polyimide-poly(ethylene Oxide) (PEO) Copolymers That Are Cured In The Solid State At Ambient Temperatures,
-NASA Glenn Research Center,Application,LEW-18207-1,0,"11/759,570",Circuit For Communication Over DC Power Line Using High Temperature Electronics,
-NASA Glenn Research Center,Issued,LEW-18221-1,7763325,"11/864,607",A Method For Thermal Spraying Of Coatings Using Resonant Pulsed Combustion,09/28/2027
-NASA Glenn Research Center,Application,LEW-18221-2,,"12/835,345",A Method For Thermal Spraying Of Coatings Using Resonant Pulsed Combustion,
-NASA Glenn Research Center,Issued,LEW-18236-1,8093348,"11/894,290",Synthesis Of Asymmetrical Benzophenone Dianhydride And Asymmetrical 6F-Dianhydride And Polyimides Therefrom,08/22/2027
-NASA Glenn Research Center,Application,LEW-18236-2,0,"13/325,626",Synthesis Of Asymmetrical Benzophenone Dianhydride And Asymmetrical 6F-Dianhydride And Polyimides Therefrom,
-NASA Glenn Research Center,Issued,LEW-18248-1,7791552,"11/871,237",Cellular Reflectarray Antenna,10/12/2027
-NASA Glenn Research Center,Issued,LEW-18248-2,7990327,"12/874,370",Cellular Reflectarray Antenna,09/02/2030
-NASA Glenn Research Center,Issued,LEW-18253-1,8191426,"12/133,743",Low TCR Nanocomposite Strain Gages,06/05/2028
-NASA Glenn Research Center,Issued,LEW-18254-1,7876423,"12/163,382",Simultaneous Non-Contact Precision Measurement Of Microstructual And Thickness Variation In Dielectric Materials Using Terahertz Energy,06/27/2028
-NASA Glenn Research Center,Issued,LEW-18255-1,7630736,"11/541,102",Autonomous Wireless Sensor Transceiver,05/09/2028
-NASA Glenn Research Center,Issued,LEW-18256-1,7688117,"12/081,762",An N Channel JFET Based Digital Logic Gate Structure Using Resistive Level Shifters And Having Direct Application To High Temperature Silicon Carbide Electronics,04/21/2028
-NASA Glenn Research Center,Issued,LEW-18261-1,7933027,"12/326,436",A Software Platform For Post-Processing Waveform-Based NDE,12/02/2028
-NASA Glenn Research Center,Application,LEW-18291-1,0,"12/214,114",Adaptive Morphological Feature-Based Object Classifier For A Color Imaging System,
-NASA Glenn Research Center,Application,LEW-18296-1,0,"13/193,160",Modular Battery Charge Controller,
-NASA Glenn Research Center,Issued,LEW-18313-1,7923715,"12/336,503",A Novel Nanoionics-based Switch For Radiofrequency (RF) Applications,12/06/2028
-NASA Glenn Research
Center,Issued,LEW-18313-2,8410469,"13/050,229",A Novel Nanoionics-based Switch For Radiofrequency (RF) Applications,03/17/2031 -NASA Glenn Research Center,Application,LEW-18324-1,0,"12/195,358",Semiconductor Metal Oxide Modified Solid Electrolyte Carbon Dioxide Microsensors With Reduced Operation Temperature, -NASA Glenn Research Center,Issued,LEW-18325-1,8415839,"12/319,617",External Magnetic Field Reduction Techniquie For Advanced Stirling Radioisotope Generator,01/09/2029 -NASA Glenn Research Center,Application,LEW-18325-2,,"13/859,179",External Magnetic Field Reduction Techniquie For Advanced Stirling Radioisotope Generator,01/09/2029 -NASA Glenn Research Center,Issued,LEW-18338-1,8506787,12/533/258,"Advancd Lightweight, High-Strength Electrochemical Cell Design and Structures",07/31/2029 -NASA Glenn Research Center,Issued,LEW-18340-1,8091445,"12/431,456",Offset Compound Gear Inline Two-Speed Drive,04/28/2029 -NASA Glenn Research Center,Issued,LEW-18340-2,8668613,"13/346,959",Offset Compound Gear Inline Two-Speed Drive,01/10/2032 -NASA Glenn Research Center,Issued,LEW-18356-1,8220989,"12/571,215","Device for Measuring the Thermal Conductivity of Small, Highly Insulating Materials",09/30/2029 -NASA Glenn Research Center,Issued,LEW-18356-2,8573835,"13/492,181","Device for Measuring the Thermal Conductivity of Small, Highly Insulating Materials",06/08/2032 -NASA Glenn Research Center,Issued,LEW-18362-1,7872750,"12/285,173",Space Radiation Detector with Spherical Geometry,09/30/2028 -NASA Glenn Research Center,Issued,LEW-18362-2,8159669,"12/972,624",Space Radiation Detector with Spherical Geometry,12/20/2030 -NASA Glenn Research Center,Issued,LEW-18373-1,8353209,"12/570,841",A Radio Frequency Tank Eigenmode Sensor For Propellant Quantity Gauging,02/04/2031 -NASA Glenn Research Center,Issued,LEW-18426-1,8484980,"12/894,346",A Free-Jet Dual-Mode Combustor Concept for Wide Operating Range Ramjet Propulsion,09/30/2030 -NASA Glenn Research Center,Application,LEW-18426-2,0,"13/941,987",A Free-Jet Dual-Mode Combustor Concept for Wide Operating Range Ramjet Propulsion,07/15/2033 -NASA Glenn Research Center,Issued,LEW-18432-1,7935601,"12/584,497",Addendum of Self-Aligned Ion Implant to Design and Processing of SiC High Temperature Transistors for Durable Operation Above 400 C,09/04/2029 -NASA Glenn Research Center,Application,LEW-18432-2,0,"13/078,510",Addendum of Self-Aligned Ion Implant to Design and Processing of SiC High Temperature Transistors for Durable Operation Above 400 C, -NASA Glenn Research Center,Issued,LEW-18458-1,8386121,"12/791,907",Optimal Tuner Selection For Kalman Filter-Based Aircraft Engine Performance Estimation,06/02/2030 -NASA Glenn Research Center,Issued,LEW-18461-1,8159238,"12/570,742",Method and Circuit for In-Situ Health Monitoring of Solar Cells in Space,09/30/2029 -NASA Glenn Research Center,Application,LEW-18461-2,,"13/448,801",Method and Circuit for In-Situ Health Monitoring of Solar Cells in Space, -NASA Glenn Research Center,Application,LEW-18466-1,0,"12/616,952",Spring Tire, -NASA Glenn Research Center,Application,LEW-18473-1,0,"12/879,713",Ka-Band Waveguide 2-Way Hybrid Combiner for MMIC Amplifiers With Unequal and Arbitrary Power Output Ratio, -NASA Glenn Research Center,Issued,LEW-18474-1,8609750,"12/792,380",Selective Clay Placement Within A Silicate Clay-Epoxy Blend Nanocomposite,06/02/2030 -NASA Glenn Research Center,Issued,LEW-18476-1,8182741,"12/544,742",Ball Bearings Comprising Nickel-Titanium And Methods Of Manufacture Thereof,08/20/2029 -NASA Glenn 
Research Center,Application,LEW-18476-2,0,"12/544,674",Ball Bearings Comprising Nickel-Titanium And Methods Of Manufacture Thereof, -NASA Glenn Research Center,Application,LEW-18477-1,0,"13/242,300",Graphene Based Reversible Nano-Switch/Sensor Schottky Diode (nanoSSSD) Device, -NASA Glenn Research Center,Issued,LEW-18483-1,8310671,"12/893,627",Frame-Transfer Gating (FTG) Raman Spectroscopy for Time-Resolved Multiscalar Combustion Diagnostics,09/29/2030 -NASA Glenn Research Center,Application,LEW-18486-2,0,"14/168,830",Polyimide Aerogels With Three Dimensional Cross-Linked Structure,01/30/2034 -NASA Glenn Research Center,Issued,LEW-18491-1,8209976,"12/323,091",Shape Memory Based Actuators and Release Mechanisms,11/25/2028 -NASA Glenn Research Center,Application,LEW-18492-1,0,"13/036,887","Synthesis Methods, Microscopy Characterization and Device Integration of Nanoscale Metal Oxide Semiconductors for Gas Sensing in Aerospace Applications", -NASA Glenn Research Center,Issued,LEW-18496-1,8283172,"12/711,465",Process to Produce Iron Nanoparticles - Lunar Dust Simulant Composite,02/24/2030 -NASA Glenn Research Center,Application,LEW-18500-1,0,"12/848,903",Precision Time Protocol Base Trilateration for Planetary Navigation, -NASA Glenn Research Center,Application,LEW-18516-1,0,"13/542,163",Hybrid Gear, -NASA Glenn Research Center,Issued,LEW-18538-1,8373175,"12/791,276",Ohmic Contact to N- and P-type Silicon Carbide,06/01/2030 -NASA Glenn Research Center,Application,LEW-18542-1,0,"12/870,475",Functionalization of Single Wall Carbon Nanotubes (SWCNTs) by Photooxidation, -NASA Glenn Research Center,Application,LEW-18554-1,0,"12/845,998",Internal Limit Sensor (ILS), -NASA Glenn Research Center,Application,LEW-18561-1,0,"12/726,926",NASA PS400: A New High Temperature Solid Lubricant Coating for High Temperature Wear Applications, -NASA Glenn Research Center,Application,LEW-18565-1,0,"13/646,100",Catalytic Microtube Rocket Igniter,10/05/2032 -NASA Glenn Research Center,Application,LEW-18566-1,0,"12/829,663","Low Density, High Creep Resistant Single Crystal Superalloy with Lower Manufacturing Cost", -NASA Glenn Research Center,Application,LEW-18586-1,,"13/030,342",Shock Sensing Apparatus, -NASA Glenn Research Center,Issued,LEW-18593-1,8653693,"13/014,849",Integrated Exciter/Igniter,01/27/2031 -NASA Glenn Research Center,Issued,LEW-18594-1,8409372,"12/874,523",Thermomechanical Methodology for Stabilizing Shape Memory Alloy (SMA) Response,09/02/2030 -NASA Glenn Research Center,Application,LEW-18594-2,,"13/845,526",Thermomechanical Methodology for Stabilizing Shape Memory Alloy (SMA) Response, -NASA Glenn Research Center,Issued,LEW-18601-1,8577504,"12/954,009",Inductive Power Device (IDP),11/24/2030 -NASA Glenn Research Center,Application,LEW-18604-1,,"12/894,444","Shock Resistant, Debris Tolerant, Lightweight, Corrosion Proof Bearings, Mechanical Components and Mechanisms Made From Hard, Highly Elastic Materials", -NASA Glenn Research Center,Issued,LEW-18605-1,8468794,"12/894,565",Dual-Mode Hybrid-Engine (DMH-Engine): A Next-Generation Electric Propulsion Thruster,09/30/2030 -NASA Glenn Research Center,Application,LEW-18605-2,,"13/713,907",Dual-Mode Hybrid-Engine (DMH-Engine): A Next-Generation Electric Propulsion Thruster, -NASA Glenn Research Center,Application,LEW-18605-3,,"14/152,125",Dual-Mode Hybrid-Engine (DMH-Engine): A Next-Generation Electric Propulsion Thruster, -NASA Glenn Research Center,Application,LEW-18608-1,,"12/892,339",Liquid Tin Electrodes for Directo Conversion of JP-8 Fuel using the 
NASA BSC Solid Oxide Fuel Cell, -NASA Glenn Research Center,Application,LEW-18614-1,,"13/303,292",High-Temperature Thermometer Using Cr-Doped GdAlO3 Broadband Luminescence, -NASA Glenn Research Center,Application,LEW-18615-1,,"12/892,278",Purify Nanomaterials By Dissolving Excess Reactants And Catalysts In Ferric Chloride, -NASA Glenn Research Center,Application,LEW-18629-1,,"13/731,314",Electrospray Collection of Lunar Dust, -NASA Glenn Research Center,Application,LEW-18631-1,,"13/218,847",Circuit for Communication Over Power Lines, -NASA Glenn Research Center,Application,LEW-18632-1,,"13/311,987",Method For Fabricating Diamond-Dispersed Fiber-Reinforced Composite Coating On Low Temperature Sliding Thrust Bearing Interfaces, -NASA Glenn Research Center,Application,LEW-18634-1,,"13/134,959",Multi-Parameter Aerosol Scattering Sensor, -NASA Glenn Research Center,Issued,LEW-18636-1,8416007,"13/098,918",A Source Coupled N Channel JFET Based Digital Logic Gate Structure Using Resistive Level Shifters and Having Direct Application to High Temperature Silicon Carbide Electronics,05/02/2031 -NASA Glenn Research Center,Application,LEW-18639-1,,"13/112,293",Atomic Oxygen Fluence Monitor, -NASA Glenn Research Center,Application,LEW-18649-1,,"12/870,443",Ultracapacitor Based Uninterruptible Power Supply (UPS) System, -NASA Glenn Research Center,Application,LEW-18652-1,,"13/476,470",Polarization Dependent Whispering Gallery Modes in Microspheres, -NASA Glenn Research Center,Application,LEW-18658-1,,"13/250,300",Levitated Ducted Fan (LDF) Aircraft Auxiliary Generator, -NASA Glenn Research Center,Application,LEW-18674-1,,"13/552,760",Polymer Electrolyte Based Ambient Temperature Oxygen Microsensors with Extremely Low Power Consumption for Enviromental Monitoring Applications, -NASA Johnson Space Center,Application,MSC-25349-1,0,13/922036,Robonaut Teleoperation System, -NASA Glenn Research Center,Issued,LEW-18691-1,7588746,"11/431,815",Process and Apparatus for Hydrogen and Carbon Production via Carbon Aerosol-Catalyzed Dissociation of Hydrocarbons,05/10/2026 -NASA Glenn Research Center,Issued,LEW-18692-1,7332146,"11/148,778",Method For Zero Emission Liquid Hydrogen Production From Methane & Landfill Gas,06/08/2025 -NASA Glenn Research Center,Application,LEW-18693-1,,/,Process For Hydrogen Production via Integrated Processing of Landfill Gas and Biomass, -NASA Glenn Research Center,Application,LEW-18694-1,,"13/075,879",Discrete Data Qualification System and Method Comprising Noise Series Fault Detection, -NASA Glenn Research Center,Application,LEW-18704-1,,"13/531,763",A Hybrid Power Management (HPM) Based Vehicle Architecture, -NASA Glenn Research Center,Application,LEW-18714-1,,"13/361,220",High Strength Nanocomposite Glass Fibers, -NASA Glenn Research Center,Issued,LEW-18717-1,8476979,"13/178,101","A Novel Wideband GaN MMIC Distributed Amplifier Based Microwave Power Module for Space Communications, Navigation, and Radar",07/07/2031 -NASA Glenn Research Center,Application,LEW-18717-2,,"13/847,779","A Novel Wideband GaN MMIC Distributed Amplifier Based Microwave Power Module for Space Communications, Navigation, and Radar", -NASA Glenn Research Center,Application,LEW-18724-1,,"13/339,521",VESGEN Software for Mapping and Quantification of Vascular Remodeling in Botanical Plant Leaves, -NASA Glenn Research Center,Application,LEW-18732-1,,"13/514,582","Water Purification by High Voltage, Nanosecond, Non-Equilibrium Plasma: Applications to Human Spaceflight and Terrestrial Point-of-Use",08/16/2032 -NASA 
Glenn Research Center,Application,LEW-18736-1,,"13/534,745",Iridium Interfacial Stack (IrIS) Final, -NASA Glenn Research Center,Application,LEW-18738-1,,"13/474,948",Atmospheric Turbulence Modeling for Aero Vehicles, -NASA Glenn Research Center,Application,LEW-18752-1,,"13/686,000",Large Strain Transparent Magneto-active Polymer Nanocomposites,11/28/2031 -NASA Glenn Research Center,Application,LEW-18754-1,,"13/534,870",Method For Making Measurements Of The Post-Combustion Residence Time In A Gas Turbine Engine, -NASA Glenn Research Center,Application,LEW-18761-1,,"13/247,601",Temperature Sensitive Coating Sensor Based On Hematite, -NASA Glenn Research Center,Application,LEW-18762-1,,13/364691,Selenium Interlayer for High-efficiency Multijunction Solar Cell, -NASA Glenn Research Center,Application,LEW-18768-1,,"13/788,041",Processing of Nanosensors Using a Sacrificial Template Approach,03/23/2032 -NASA Glenn Research Center,Application,LEW-18769-1,,"13/537,816","Compact, Lightweight, CMC (Ceramic Matrix Composite)-Based Acoustic Liner for Subsonic Jet Aircraft Engines--Offering High Temperature Capability, Weight Reduction, and Broadband Acoustic Treatment", -NASA Glenn Research Center,Application,LEW-18771-1,,"13/301,249",Integrated Temperature and Capacitive Ablation Recession Rate Sensors, -NASA Glenn Research Center,Application,LEW-18785-1,,"13/246,440","Method to Pre-Stress Shock Resistant Mechanical Components and Mechanisms made from Hard, Highly Elastic Materials", -NASA Glenn Research Center,Application,LEW-18789-1,,"13/771,833",Method to Increase Performance of Foil Bearings Through Passive Thermal Management,02/27/2032 -NASA Glenn Research Center,Application,LEW-18797-1,,"13/714,906","High Speed, Compliant, Planetary Flywheel Touchdown Bearing",12/16/2031 -NASA Glenn Research Center,Application,LEW-18802-1,,"13/534,804","Alpha-STREAM Convertor - A Stirling Engine with no moving parts, eliminated streaming losses, high efficiency, low cost fabrication, and electronic wave modulation.", -NASA Glenn Research Center,Application,LEW-18809-1,,"13/410,663",Sampling and Control Circuit Board for an Inertial Measurement Unit,08/03/2032 -NASA Glenn Research Center,Application,LEW-18816-1,,"13/749,773",High Speed Edge Detecting Circuit For Use With Linear Image Sensor,06/01/2032 -NASA Glenn Research Center,Application,LEW-18821-1,,"13/561,359",Dopant Selective Reactive Ion Etching of Silicon Carbide,07/30/2032 -NASA Glenn Research Center,Application,LEW-18822-1,,"13/524,327",Planar Modular Package, -NASA Glenn Research Center,Application,LEW-18825-1,0,"13/804,546",Porous Cross-Linked Polyimide-UREA Networks,03/14/2033 -NASA Glenn Research Center,Application,LEW-18837-1,,"13/527,181",In-Situ Solid Particle Generator, -NASA Glenn Research Center,Application,LEW-18844-1,,"13/918,333",Electrospun Nanofiber Coating Of Fiber Materials: A Composite Toughening Approach,06/14/2033 -NASA Glenn Research Center,Application,LEW-18849-1,,"13/906,521",Paired Threaded Film Cooling Holes for Improved Turbine Film Cooling,05/31/2033 -NASA Glenn Research Center,Application,LEW-18858-1,,"13/904,513",V-Cess: A Novel Flow Control Method Using A Shaped Recess,05/29/2033 -NASA Glenn Research Center,Application,LEW-18862-1,,"13/474,972",Cascading TESLA oscillating flow diode for Stirling Engine Gas Bearings, -NASA Glenn Research Center,Application,LEW-18864-1,,"13/756,855",Polyimide Aerogel Thin Films,02/03/2032 -NASA Glenn Research Center,Application,LEW-18873-1,,"13/968,000",High Temperature Single Crystal 
Preloader,08/15/2033 -NASA Glenn Research Center,Application,LEW-18887-1,,"13/756,604",Fuzzy Neuron: Method and Hardware Realization,02/01/2033 -NASA Glenn Research Center,Application,LEW-18889-1,,"13/713,846",High Speed Idle Engine Control Mode,12/13/2032 -NASA Glenn Research Center,Application,LEW-18890-1,,"13/871,114",Suppression Of Unwanted Noise And Howl In A Test Configuration Where A Jet Exhaust Is Discharged Into A Duct, -NASA Glenn Research Center,Application,LEW-18891-1 with LEW-18611-1 and LEW-18895-1,,"13/723,598",G6 Flywheel Design,12/23/2031 -NASA Glenn Research Center,Application,LEW-18893-1,,"13/653,027",Novel Aerogel-Based Antennas (ABA) for Aerospace Applications, -NASA Glenn Research Center,Application,LEW-18900-1,,,"High Efficiency, High Temperature Titanium Heat Pipe Radiator for Space Power and Propulsion Systems", -NASA Glenn Research Center,Application,LEW-18902-1,,"14/094,006",Analog Correlator Based on One Bit Digital Correlator,12/02/2033 -NASA Glenn Research Center,Application,LEW-18903-1,,"13/923,441",Modeling and Simulation of a Solar Electric Propulsion Vehicle in Near-Earth Vicinity Including Solar Array Degradation,06/21/2033 -NASA Glenn Research Center,Application,LEW-18919-1,,"13/645,799",Wireless Controlled Chalcogenide Nanoionic Radio Frequency Switch,04/04/2032 -NASA Glenn Research Center,Application,LEW-18923-1,,"13/963,060",New Power Source For Deep Space Missions- Utilizing The Doubly Exothermic Reaction Between Deuterium And Palladium To Produce Electrical Power,08/09/2033 -NASA Glenn Research Center,Application,LEW-18928-1,,,Pt-Ti-Si Simultaneous Ohmic Contacts to N- and P-Type Silicon Carbide, -NASA Glenn Research Center,Application,LEW-18934-1,,"13/900,642",Conditionally Active Min-Max Limit Regulators,05/23/2033 -NASA Glenn Research Center,Application,LEW-18939-1,,"13/916,797","Magnetostrictive Alternator - Low cost, No moving part, High Efficiency, Oscillating Acoustic Pressure Wave to Electric Power Transducer",06/13/2033 -NASA Glenn Research Center,Application,LEW-18942-1,,"13/771,920",Adaptive Phase Delay Generator,02/20/2033 -NASA Glenn Research Center,Application,LEW-18949-1,,"13/923,450",Advanced High Temperature and Fatigue Resistant Environmental Barrier Coating Bond Coat Systems for SiC/SiC Ceramic Matrix Composites,06/21/2033 -NASA Glenn Research Center,Application,LEW-18952-1,,,A Novel Real Time Adaptive Filter For The Reduction Of Artifacts In Functional Near Infrared Spectroscopy Signals, -NASA Glenn Research Center,Application,LEW-18957-1,,"14/048,895",Dynamic Range Enhancement Of High-Speed Data Acquisition Systems By Reversible Non-Linear Amplitude Compression,10/08/2033 -NASA Glenn Research Center,Application,LEW-18960-1,,"13/891,461",Dry Snorkel Cold Immersion Suit for Hypothermia Prevention,05/11/2032 -NASA Glenn Research Center,Application,LEW-18963-1,,"13/853,308",Flywheel Pulse & Glide System for Vehicles, -NASA Glenn Research Center,Application,LEW-18964-1,,"13/905,333",High Temperature Lightweight Self-Healing Ceramic Composites for Aircraft Engine Applications,05/30/2033 -NASA Glenn Research Center,Application,LEW-18970-1,,"14/158,080",Methods for Intercalating and Exfoliating Hexagonal Boron Nitride,01/17/2034 -NASA Glenn Research Center,Application,LEW-18986-1,,,Generation Of High Pressure Oxygen Via Electrochemical Pumping In A Multi-Stage Electrolysis Stack, -NASA Glenn Research Center,Application,LEW-19013-1,,"14/095,442",Spoked Wheel Assembly With Two Rotational Modes,12/03/2033 -NASA Glenn Research 
Center,Application,LEW-19029-1,,"14/191,708",Superelastic Ternary Ordered Intermetallic Compounds,02/27/2034 -NASA Glenn Research Center,Application,LEW-19040-1,,"14/193,024","Fast, Large Area, Wide Band Gap UV Photodetector for Cherenkov Light Detection",02/28/2034 -NASA Glenn Research Center,Application,LEW-19045-1,,"13/968,531",Multimode Directional Coupler for Measurement and Utilization of Harmonic Frequencies from Traveling Wave Tube Amplifiers,08/16/2033 -NASA Glenn Research Center,Application,LEW-19053-1,,"14/193,719",Process for Preparing Aerogels from Polyamides,02/28/2034 -NASA Glenn Research Center,Application,LEW-19067-1,,,Plasma Spray-Physical Vapor Deposition (PS-PVD) of Advanced Environmental Barrier Coatings, -NASA Glenn Research Center,Application,LEW-19077-1,,,Improved Composite Damage Tolerance and Through Thickness Conductivity By Interleaving Carbon Fiber Veil Nanocomposites, -NASA Glenn Research Center,Application,LEW-19080-1,,,"Crosslinked Polyethylene Aerogels from Low Density Polyethylene, Linear Low Density Polyethylene, and Repurposed Polyethylene", -NASA Glenn Research Center,Application,LEW-19098-1,,"61/866,585","High Temperature, Flexible Composite Seals for Aeronautics and Space Environments Incorporating Aerogel Insulation", -NASA Glenn Research Center,Application,LEW-19171-1,,"61/931,189",Low Power Charged Particle Counter for Space Radiation Monitoring, -NASA Marshall Space Flight Center,Issued,MFS-28402-2,5780594,"08/448,196",Biologically Active Protein Fragments Containing Specific Binding Regions Of Serum Albumin Or Related Proteins,07/14/2015 -NASA Marshall Space Flight Center,Issued,MFS-28985-1,5641681,"08/422,963",Device And Method For Screening Crystallization Conditions In Solution Crystal Growth,04/17/2015 -NASA Marshall Space Flight Center,Issued,MFS-31175-2-CIP,6578851,"09/693,098",Gasket Assembly For Sealing Mating Surfaces,10/16/2020 -NASA Marshall Space Flight Center,Issued,MFS-31243-1,6459822," 09/364,919",Video Image Stabilization And Registration (VISAR),07/26/2019 -NASA Marshall Space Flight Center,Issued,MFS-31243-2-CON,6560375,"10/143,539",Video Image Stabilization And Registration,05/10/2022 -NASA Marshall Space Flight Center,Issued,MFS-31258-1,6135255,"09/207,710",Releasable Conical Roller Clutch,12/09/2018 -NASA Marshall Space Flight Center,Issued,MFS-31294-2-CIP2,6592687,"10/196,389",Aluminum Alloy And Article Cast Therefrom,07/11/2022 -NASA Marshall Space Flight Center,Issued,MFS-31294-5-CIP,6399020,"09/688,729",Aluminum-Silicon Alloy Having Improved Properties At Elevated Temperatures And Articles Cast Therefrom,10/11/2020 -NASA Marshall Space Flight Center,Issued,MFS-31294-6-CIP,6419769,"09/749,503",Aluminum-Silicon Alloy Having Improved Properties At Elevated Temperatures And Process For Producing Cast Articles Therefrom,12/22/2020 -NASA Marshall Space Flight Center,Issued,MFS-31294-7-CIP,6669792,"09/800,312",Process For Producing A Cast Article From A Hypereutectic Aluminum-Silicon Alloy,03/02/2021 -NASA Marshall Space Flight Center,Issued,MFS-31303-1,6748349,"09/313,576",Generalized Fluid System Simulation Program (GFSSP) Version 2.01c,05/07/2019 -NASA Marshall Space Flight Center,Issued,MFS-31387-1,6361961,"09/560,532",GRAVITY RESPONSIVE NADH OXIDASE OF THE PLASMA MEMBRANE,04/25/2020 -NASA Marshall Space Flight Center,Issued,MFS-31399-1,6658329,"10/138,887",Addition Of Rangefinder To The Video Guidance Sensor,06/05/2022 -NASA Marshall Space Flight Center,Issued,MFS-31413-1,6497355,"09/690,035",Precision Penetration Control 
System For The Friction Stir Welding (FSW) Retractable Pin Tool,10/19/2020 -NASA Marshall Space Flight Center,Issued,MFS-31475-1,6424470,"09/616,624",Panoramic Refracting Optic (PRO),07/28/2020 -NASA Marshall Space Flight Center,Issued,MFS-31475-2-DIV,6580567,"10/173,410",Panoramic Refracting Conical Optic,06/17/2022 -NASA Marshall Space Flight Center,Issued,MFS-31488-1,6028693,"09/7,124",Microresonator And Associated Method For Producing And Controlling Photonic Signals With A Photonic Bandgap Delay Apparatus,01/14/2018 -NASA Marshall Space Flight Center,Issued,MFS-31490-1,7118074,"10/690,161",Electrodynamic Tether System Design For Spacecraft Deorbit,10/17/2023 -NASA Marshall Space Flight Center,Issued,MFS-31529-1,7081730,"10/857,375",Micro-Commanding Servo Motor Controller With Greater Than Fifty Million To One Dynamic Rate Range,06/19/2024 -NASA Marshall Space Flight Center,Issued,MFS-31559-1-CON,8127977,"13/157,895",Phase/Matrix Transformation Weld Process And Apparatus,11/27/2021 -NASA Marshall Space Flight Center,Issued,MFS-31559-1-DIV,7980449,"10/385,168",Phase/Matrix Transformation Weld Process And Apparatus,11/27/2021 -NASA Marshall Space Flight Center,Issued,MFS-31559-2-DIV,8225984,13/157988,Phase/Matrix Transformation Weld Process And Apparatus,11/27/2021 -NASA Marshall Space Flight Center,Issued,MFS-31565-1,6885779,"09/877,801","Full-Cycle, Low Loss, Low Distortion Phase Modulation From Multi-Layered Dielectric Stack With Terahertz Optical Bandwidth",08/17/2022 -NASA Marshall Space Flight Center,Issued,MFS-31584-1,6497091,"09/877,800",Hypergolic Ignitor Assembly,06/06/2021 -NASA Marshall Space Flight Center,Issued,MFS-31584-1-CIP,6845605,"10/288,800",Hypergolic Ignitor,01/26/2023 -NASA Marshall Space Flight Center,Issued,MFS-31593-1,6939610,"10/212,564",Smart Thermal Management Coating,09/20/2022 -NASA Marshall Space Flight Center,Issued,MFS-31596-1,6873762,"10/118,626",Fabrication Of Fiber-Optic Gratings Over A Wide Range Of Bragg Wavelength And Bandwidth Using A Single Phase Mask,10/12/2022 -NASA Marshall Space Flight Center,Issued,MFS-31616-1,6540426,"09/949,408",Passive Ball Capture Latch Docking Mechanism,09/04/2021 -NASA Marshall Space Flight Center,Issued,MFS-31646-1,6860099,"10/263,297",Liquid Propellant Tracing Impingement Injector,05/24/2023 -NASA Marshall Space Flight Center,Issued,MFS-31649-1,7446860,"11/527,648","Nonintrusive, Remote, Micron Accuracy, Laser Fresnel Ranging System",10/19/2026 -NASA Marshall Space Flight Center,Issued,MFS-31698-1,6802999,"10/173,536",Method Of Fabricating A Protective Crucible Wall Coating Incorporating Designed Multi-Use Channels,05/02/2023 -NASA Marshall Space Flight Center,Issued,MFS-31706-1,6886392,"10/622,174",Single Ball Bearing Lubricant And Material Evaluator,07/17/2023 -NASA Marshall Space Flight Center,Issued,MFS-31727-1,6953129,"10/231,428",Impact And Fire Resistant Coating For Pressure Vessels,11/07/2022 -NASA Marshall Space Flight Center,Issued,MFS-31761-1,6802488,"10/232,974",Electro-Mechanically Actuated Propellant Valve,01/29/2023 -NASA Marshall Space Flight Center,Issued,MFS-31768-1,6745942,"10/214,482",Magnetic Symbology Reader,08/05/2022 -NASA Marshall Space Flight Center,Issued,MFS-31776-1,7735265,"11/780,610",Foam-Rigidized Inflatable Tubular Space Booms,07/20/2027 -NASA Marshall Space Flight Center,Issued,MFS-31785-1,7006203,"10/646,000",Integrated Rangefinding Measurement In Video Guidance Sensor,08/21/2023 -NASA Marshall Space Flight Center,Issued,MFS-31789-1,7265476,"10/975,121",MEMS- Micro-Translation Stage 
With Indefinite Linear Travel Capability,11/01/2025 -NASA Marshall Space Flight Center,Issued,MFS-31807-1,7050161,"10/637,085",Global Radius Of Curvature Estimation And Control System For Segmented Mirrors (GRoCECS),01/07/2025 -NASA Marshall Space Flight Center,Issued,MFS-31813-1,7802799,"11/527,653",Joining Metallic To Composite Components,07/29/2029 -NASA Marshall Space Flight Center,Issued,MFS-31815-1,7325749,"10/738,352",Distributed Solid State Programmable Thermostat / Power Controller,01/29/2026 -NASA Marshall Space Flight Center,Issued,MFS-31817-1,7515257,"11/14,455",Short-Range / Long-Range Integrated Target (SLIT) For Video Guidance Sensor Rendezvous And Docking,06/07/2027 -NASA Marshall Space Flight Center,Issued,MFS-31823-1-DIV,7095000,"10/943,827",Radio-Frequency Driven Dielectric Heaters For Non-Nuclear Testing In Nuclear Core Development,11/27/2024 -NASA Marshall Space Flight Center,Issued,MFS-31828-1,6918970,"10/120,226",High Strength Aluminum Alloy For High Temperature Applications,04/12/2023 -NASA Marshall Space Flight Center,Issued,MFS-31838-1,7641949,"10/857,379",Improved Pressure Vessel Impact Resistance Utilizing Filament Wound Hybrid Fibers,10/15/2025 -NASA Marshall Space Flight Center,Issued,MFS-31842-1,7347089,"11/215,749","Gas Volume Contents Within A Container, Smart Volume Instrument",11/26/2025 -NASA Marshall Space Flight Center,Issued,MFS-31843-1,7174077,"10/631,220",Fiber-Coupled Laser Diodes With Even Illumination Pattern,07/30/2023 -NASA Marshall Space Flight Center,Issued,MFS-31852-1,7106457,"10/857,372",Achromatic Shearing Phase Sensor For Phase Alignment Of A Segmented Telescope,01/21/2025 -NASA Marshall Space Flight Center,Issued,MFS-31865-1,6888476,"10/615,369",Advanced Video Guidance Sensor Software,07/21/2023 -NASA Marshall Space Flight Center,Issued,MFS-31886-1,6850592,"10/321,873",Digital Equivalent System (DEDS) For X-Ray Flourescent Spectral Output,01/08/2023 -NASA Marshall Space Flight Center,Issued,MFS-31891-1,7375801,"11/108,140",Video Sensor With Range Measurement Capability,11/06/2025 -NASA Marshall Space Flight Center,Issued,MFS-31918-1,7275675,"10/928,876",Optimal Design Geometry For All Friction Stir Weld Tools,01/15/2025 -NASA Marshall Space Flight Center,Issued,MFS-31944-1,7017812,"10/730,191",Variable Distance Angular Symbology Reader,11/26/2023 -NASA Marshall Space Flight Center,Issued,MFS-32024-1,8297468,"10/857,380",Liquefied Natural Gas Fuel Tank,07/13/2021 -NASA Marshall Space Flight Center,Issued,MFS-32031-1,7738084,"11/543,284",Fiber Optic Liquid Mass Flow Sensor - Improved Prototype Design,09/29/2026 -NASA Marshall Space Flight Center,Issued,MFS-32099-1-CON,8561829,"13/544,066",Composite Pressure Vessel Including Crack Arresting Barrier,10/23/2029 -NASA Marshall Space Flight Center,Issued,MFS-32102-1,7540143,"11/172,665",Heated Pressure Balls Monopropellant Thermal Rocket Engine Cycle,12/12/2026 -NASA Marshall Space Flight Center,Issued,MFS-32105-1-DIV,7568608,"11/700,972",Ultrasonic Stir Welding Process And Apparatus,01/29/2027 -NASA Marshall Space Flight Center,Issued,MFS-32115-1,7686202,"11/543,287",Gimbling Shoulder For Friction Stir Welding,06/18/2027 -NASA Marshall Space Flight Center,Issued,MFS-32136-1,7595841,"11/174,210",Video Image Stabilization And Registration - Plus (VISAR+),12/03/2027 -NASA Marshall Space Flight Center,Issued,MFS-32137-1,7177164,"11/376,632",Multi-loop High Voltage Power Supply with Fast Rise/Fall Time,03/10/2026 -NASA Marshall Space Flight Center,Issued,MFS-32175-1,7228241,"11/152,810","An 
Extended Lee-Kesler Equation-of-State (ELK-EoS) For The Volumetric And Thermodynamic Properties Of Propellant Fluids, Including The Non-Polar Quantum And Polar Fluids",06/13/2025 -NASA Marshall Space Flight Center,Issued,MFS-32192-1,7116098,"11/357,454",Absolute Limit Sensor (ALS),02/16/2026 -NASA Marshall Space Flight Center,Issued,MFS-32208-1,7259981,"11/296,719",Analog Nonvolatile Computer Memory,12/14/2025 -NASA Marshall Space Flight Center,Issued,MFS-32214-1,7418814,"11/172,666",Dual Expander Cycle Rocket Engine Cycle with an Intermediate Brayton Cycle Heat Exchanger,12/19/2026 -NASA Marshall Space Flight Center,Issued,MFS-32228-1,8290435,"12/241,322",Short Range Antenna / Close Proximity Transmitter and Receiver,08/17/2031 -NASA Marshall Space Flight Center,Issued,MFS-32253-1,7469878,"11/518,733",Magnetorestrictive Valves,10/17/2026 -NASA Marshall Space Flight Center,Issued,MFS-32307-1,7908079,"11/527,658",Portable Runway Intersection Display And Monitoring System,01/13/2030 -NASA Marshall Space Flight Center,Issued,MFS-32311-1,7623621,"12/47,686",Identification And Authentication System Using Integrated Optical And X-ray Fluorescene Spectral Methods,03/13/2028 -NASA Marshall Space Flight Center,Issued,MFS-32318-1,8098060,"12/173,318",SCAPS(Single Coil Absolute Position Sensor) GAPSYN (Inductive Gap Sensor) Digital Signal Conditioning Electronics,09/29/2030 -NASA Marshall Space Flight Center,Issued,MFS-32323-1,8169620,"12/563,819",Sub-Pixel Spatial Resolution Interferometry With Interlaced Stitching,10/15/2030 -NASA Marshall Space Flight Center,Issued,MFS-32324-1,7594530,"11/942,322",Orbital Foamed Metal Extruder,06/09/2028 -NASA Marshall Space Flight Center,Issued,MFS-32341-1,8550468,"12/210,843",High Load Fully Retained Dynamic Cryogenic Seal,01/09/2032 -NASA Marshall Space Flight Center,Issued,MFS-32364-1,7808353,"11/513,433","Plasmoid Thruster for Electrode-less, High Specific Impulse Propulsion",07/22/2029 -NASA Marshall Space Flight Center,Issued,MFS-32390-1,7867589,"11/780,561",Hybrid composite cryogenic tank structure,10/14/2029 -NASA Marshall Space Flight Center,Issued,MFS-32400-1,7900436,"11/780,626",Gas Generator Augmented Expander Cycle Rocket Engine,01/04/2030 -NASA Marshall Space Flight Center,Issued,MFS-32402-1,7911174,"12/39,506","Inexpensive, Rate Insensitive, Linear, Load Compensating System for Hybrid Stepper Motors",01/25/2029 -NASA Marshall Space Flight Center,Issued,MFS-32429-1,7807097,"12/123,170",Orbital Batch Process Foamed Aluminum Facility,07/11/2028 -NASA Marshall Space Flight Center,Issued,MFS-32438-1,8004364,"11/828,563",16-Kilowatt (KW) 2-30MHz Solid State Power Amplifier using innovative combining methods,11/03/2028 -NASA Marshall Space Flight Center,Issued,MFS-32439-1,7831225,"11/828,590",H2O-NaCl based radio frequency power load,04/07/2029 -NASA Marshall Space Flight Center,Issued,MFS-32497-1,7848606,"12/047,805",Reprocessing Non-Oxide Optical Fiber Preforms Utilizing an Axial Magnetic Field,05/26/2029 -NASA Marshall Space Flight Center,Application,MFS-32518-1-CIP,,"13/452,303",Liquid Propellant Injection Elements with Self-Adjusted Inlet Area for Rocket and Other Combustor-Type Engines Applications,10/03/2028 -NASA Marshall Space Flight Center,Issued,MFS-32521-1,7804600,"12/44,740",Dispersive Filter For Enhancement Of Laser Gyroscopes,06/10/2029 -NASA Marshall Space Flight Center,Issued,MFS-32548-1,7409875,"11/862,793",Optical Hotspot Conductive Fluid Flow Sensor,09/27/2027 -NASA Marshall Space Flight 
Center,Issued,MFS-32558-1,8490470,"12/569,555",True Shear Parallel Plate Viscometer,12/04/2031 -NASA Marshall Space Flight Center,Issued,MFS-32584-1,7929144,"12/336,260",Local Leak Detection and Health Monitoring of Pressurized Tanks in a Space Environment,11/17/2029 -NASA Marshall Space Flight Center,Issued,MFS-32588-1,8052860,"11/957,051",ELECTROCHEMICALLY-ENHANCED MECHANICAL POLISHING OF OPTICS,09/06/2030 -NASA Marshall Space Flight Center,Issued,MFS-32605-1,8309944,"12/240,626",Grazing Incidence Optics for Neutron Analysis and Imaging,12/07/2030 -NASA Marshall Space Flight Center,Application,MFS-32605-1-CIP,0,"12/717,450",Novel Grazing Incidence Neutron Optics,09/29/2028 -NASA Marshall Space Flight Center,Issued,MFS-32605-1-DIV,8575577,"13/534,951",Novel Grazing Incidence Neutron Optics,09/29/2028 -NASA Marshall Space Flight Center,Application,MFS-32612-1-CIP,,"13/796,693",Protective Safety Cover for Pool and Spa Drains,03/24/2029 -NASA Marshall Space Flight Center,Issued,MFS-32614-1,464750,"12/826,887",Magnetostrictive Regulator,04/03/2031 -NASA Marshall Space Flight Center,Issued,MFS-32615-1,8132772,"12/567,451",Avionics/Electronics Box Rail Mount System,11/27/2030 -NASA Marshall Space Flight Center,Issued,MFS-32638-1,8291776,"12/827,515",Magnetostrictive Force-to-Angle Sensor,03/12/2031 -NASA Marshall Space Flight Center,Application,MFS-32642-1,0,"12/827,598",Cryogenic and Non-Cryogenic Optical Liquid Level Instrument for Stratified Conditions,04/05/2031 -NASA Marshall Space Flight Center,Issued,MFS-32651-1,8090484,"12/403,096",A Planar Translation Device for Solar Sail Spacecraft Attitude Control and Maneuvering,07/03/2030 -NASA Marshall Space Flight Center,Application,MFS-32655-1,0,"12/862,510","AEROSPACE LASER IGNITION/ABLATION VARIABLE, HIGH PRECISION THRUSTER", -NASA Marshall Space Flight Center,Issued,MFS-32667-1,8357884,"12/839,848",Extraction of Water from the Soil of Space Bodies Using Microwave processes,04/22/2031 -NASA Marshall Space Flight Center,Issued,MFS-32697-1,8252734,"12/634,502",Multi Layered or Mixed Element Aqueous Ionic Fluids As Fuel or Lubrication Friction Modifiers,08/26/2030 -NASA Marshall Space Flight Center,Issued,MFS-32697-1-CIP,8563487,"13/525,623",Multi Layered or Mixed Element Aqueous Ionic Fluids As Fuel or Lubrication Friction Modifiers,12/09/2029 -NASA Marshall Space Flight Center,Issued,MFS-32715-1,8535440,12/758169,Improvement of Crystalline Quality during Melt Growth of Semiconductors by Mechanically Induced Nucleation,07/18/2032 -NASA Marshall Space Flight Center,Issued,MFS-32719-1,8564770,13/150832,Field-Deployable Spectral Estimator of Trichloroacetic Acid (TCAA) in Plants,05/18/2032 -NASA Marshall Space Flight Center,Issued,MFS-32733-1,7621670,"12/392,867",Unbalanced Flow Distribution Mixer with Flow Metering Capability,02/25/2029 -NASA Marshall Space Flight Center,Issued,MFS-32737-1,8448498,"12/870,468",Hermetic Seal Leak Detection Apparatus,06/06/2031 -NASA Marshall Space Flight Center,Application,MFS-32737-1-CIP,,13/874182,Hermetic Seal Leak Detection Apparatus,08/27/2030 -NASA Marshall Space Flight Center,Issued,MFS-32748-1,8132961,"12/397,973",Optimized Length-to-Diameter Ratio Flow Meter,08/16/2030 -NASA Marshall Space Flight Center,Application,MFS-32757-1,0,13/118086,Compliant Mechanical Motor, -NASA Marshall Space Flight Center,Application,MFS-32761-1-CIP,,"13/673,309","Multi-Channel Flow Plug with Eddy Current Minimization for Metering, Mixing, and Conditioning",07/23/2029 -NASA Marshall Space Flight 
Center,Application,MFS-32761-1-CON,,"13/729,861","Multi-Channel Flow Plug with Eddy Current Minimization for Meeting, Mixing, and Conditioning",07/23/2029 -NASA Marshall Space Flight Center,Issued,MFS-32777-1,8425751,13/020144,Electrodeposited Nickel-Cobalt Alloy Development,05/31/2031 -NASA Marshall Space Flight Center,Issued,MFS-32797-1,8330961,"12/837,173",A compact sensor for in-situ measurements of gas leaks,08/24/2031 -NASA Marshall Space Flight Center,Issued,MFS-32803-1,8133768,"12/560,371","Method of Manufacturing Light Emmitting, Photovoltaic or other Electronic Apparatus",05/31/2027 -NASA Marshall Space Flight Center,Application,MFS-32809-1,0,"13/369,704",Telemetry encoder/decoder, -NASA Marshall Space Flight Center,Issued,MFS-32817-1,8290006,"13/281,025",Variable Power Handheld Laser Torch for Joining Processes,10/25/2031 -NASA Marshall Space Flight Center,Issued,MFS-32826-1,8316884,"12/846,429","Drain System for Pools, Spas, and Tanks. (Reference MFS 32612-1)",03/23/2031 -NASA Marshall Space Flight Center,Application,MFS-33054-1,,"14/020,326",Multi-spacecraft Autonomous Positioning System / Network-Based Navigation,09/06/2033 -NASA Marshall Space Flight Center,Issued,MFS-32830-1,8420582,13/027472,FRICTION MANAGEMENT USING SOLVENT PARTITIONING OF SINGLE ELEMENT AND MULTI-ELEMENT HYDROPHILIC SURFACE-INTERACTIVE CHEMICALS CONTAINED IN HYDROPHILIC TARGETED EMULSIONS,02/15/2031 -NASA Marshall Space Flight Center,Application,MFS-32830-1-CIP,,"13/900,452",Friction and Wear Management Using Solvent Partioning of Hydrophilic Surface-Interactive Chemicals contains in Boundary Layer-Targeted Emulsions,03/07/2033 -NASA Marshall Space Flight Center,Issued,MFS-32840-1,8322685,"12/842,218",Non-collinear Valve Actuator,04/02/2031 -NASA Marshall Space Flight Center,Application,MFS-32841-1,,"13/424,754",DUPLICATE of Telemetry encoder/decoder, -NASA Marshall Space Flight Center,Application,MFS-32853-1,,"14/196,203",Particle Damping for Vibration Mitigation of Circuit Cards,03/04/2034 -NASA Marshall Space Flight Center,Issued,MFS-32857-1,8668168,"13/326,513",Rocket Vent Design with Variable Flow Control and Rain Protection,01/21/2032 -NASA Marshall Space Flight Center,Issued,MFS-32859-1,8393520,"13/240,075",Variably Pulsed High Power Ultrasonic (HPU) Energy for Ultrasonic Stir Welding (USW),11/07/2031 -NASA Marshall Space Flight Center,Issued,MFS-32859-1-DIV,8393523,"13/523,310",Pulsed Ultrasonic Stir Welding Method,09/22/2031 -NASA Marshall Space Flight Center,Application,MFS-32865-1,,"13/302,734","Easily Installed, In-situ Adaptable Flow Measurement Device and Method.", -NASA Marshall Space Flight Center,Issued,MFS-32865-2,8555731,"13/302,773","Easily Installed, In-situ Adaptable Flow Measurement Device and Method.",06/04/2032 -NASA Marshall Space Flight Center,Application,MFS-32865-3,,"13/302,817","Easily Installed, In-situ Adaptable Flow Measurement Device and Method.", -NASA Marshall Space Flight Center,Application,MFS-32865-4,,"13/302,845","Easily Installed, In-situ Adaptable Flow Measurement Device and Method.",08/23/2032 -NASA Marshall Space Flight Center,Issued,MFS-32871-1,8577519,"13/424,898",Low Cost Telemetry System for Small/micro satellites,06/13/2032 -NASA Marshall Space Flight Center,Application,MFS-32873-1,,13/523210,"High-current, high-voltage switch using non-hazardous liquid metals",11/29/2032 -NASA Marshall Space Flight Center,Application,MFS-32889-1,,"13/174,084",Pyrotechnic Pipe Plug and Variable Area Flow Meter, -NASA Marshall Space Flight 
Center,Application,MFS-32895-1,,"13/242,734",High Powered Ultrasonically Assisted Thermal Stir Welding, -NASA Marshall Space Flight Center,Application,MFS-32912-1,,"13/299,930",Salt Water Power Load - Part II, -NASA Marshall Space Flight Center,Application,MFS-32916-1,,13/333283,Improved Impact Toughness and Heat Treatment for Cast Aluminum Wheels, -NASA Marshall Space Flight Center,Application,MFS-32924-1,,"13/312,481",Partial Automated Alignment & Integration System,07/09/2032 -NASA Marshall Space Flight Center,Application,MFS-32934-1,,"12/833,894","Methods, Devices, and Systems Relating to a Sensing Device", -NASA Marshall Space Flight Center,Issued,MFS-32940-1,8657179,"13/430,268",Closed Loop Temperature Control for the Thermal Stir Welding Process,03/26/2032 -NASA Marshall Space Flight Center,Application,MFS-32944-1,,"13/896,137",Mitigation of Sonic Boom from Supersonic Vehicles by means of Long Penetration Mode (LPM) Counter-Flowing Cold Gas Jets,05/16/2033 -NASA Marshall Space Flight Center,Application,MFS-32945-1,,"14/082,956",Piezoelectric Gravity Gradient and Multiple Purpose Sensor Detection System,11/18/2033 -NASA Marshall Space Flight Center,Application,MFS-32986-1,,"13/961,573",Non-Explosively-Actuated Pressurization Start Valve,08/07/2033 -NASA Marshall Space Flight Center,Application,MFS-33007-1,,"14/192,350",Carbon Nanotube Tape Vibrating Gyroscope Update,02/27/2034 -NASA Marshall Space Flight Center,Application,MFS-33022-1,,"14/192,395",A Design Technology to Eliminate Dribble Volume in Rocket Engine Manifolds for Swirl-Coaxial Injectors,02/27/2034 -NASA Marshall Space Flight Center,Application,MFS-33031-1,,"13/949,361",An aerodynamic design concept for rocket nozzle side load reduction,07/24/2033 -NASA Marshall Space Flight Center,Application,MFS-33060-1,,"14/104,881",Carbon Nanotube Tape Single Axis Accelerometer,12/12/2033 -NASA Johnson Space Center,Issued,MSC-21715-2,5869238,"08/390,904",Quantitative Method Of Measuring Cancer Cell Urokinase And Metastatic Potential,02/09/2016 -NASA Johnson Space Center,Issued,MSC-21947-1,7541159,"10/828,531",MOLECULAR SPECIFIC ANTIBODIES AGAINST UROKINASE,08/28/2025 -NASA Johnson Space Center,Issued,MSC-22119-1,5851816,"08/172,962",A PROCESS FOR DEVELOPING HIGH-FIDELITY THREE-DIMENSIONAL TUMOR MODELS OF HUMAN PROSTATE CARCINOMA,12/22/2015 -NASA Johnson Space Center,Issued,MSC-22122-1,6117674,"08/366,065",HORIZONTAL ROTATING-WALL VESSEL PROPAGATION IN IN VITRO HUMAN TISSUE MODELS,09/12/2017 -NASA Johnson Space Center,Issued,MSC-22489-1,5827531,"08/349,169","Multi-Lamellar, Immiscible-Phase Microencapsulation of Drugs",10/27/2015 -NASA Johnson Space Center,Issued,MSC-22616-2,6133036,"09/7,239",Preservation Of Liquid Biological Samples,12/12/2015 -NASA Johnson Space Center,Issued,MSC-22616-3,6716392,"09/630,979",Preservation Of Liquid Biological Samples,01/14/2018 -NASA Johnson Space Center,Issued,MSC-22633-1,6485963,"09/587,028",Electrically Potentiated Growth Of Mammalian Neuronal Tissue Facilitated By Rotating Wall Vessel Culture,06/02/2020 -NASA Johnson Space Center,Issued,MSC-22633-2,6673597,"09/798,854",Growth Stimulation Of Biological Cells And Tissue By Electromagnetic Fields And Uses Thereof,02/28/2021 -NASA Johnson Space Center,Issued,MSC-22695-1,6261844,"09/213,988",A Unique Urine Preservative With Combined Antibacterial And Antioxidant Properties,12/17/2018 -NASA Johnson Space Center,Issued,MSC-22721-2,6254359,"09/354,915",Blood Pump Bearing System,07/09/2019 -NASA Johnson Space 
Center,Issued,MSC-22724-1,6047216,"09/129,832",Millimeter Wave/Microwave Ablation For Treatment Of Atherosclerotic Lesions,08/05/2018 -NASA Johnson Space Center,Issued,MSC-22724-2,6226553,"09/501,150",Endothelium Preserving Microwave Treatment For Atherosclerosis,02/09/2020 -NASA Johnson Space Center,Issued,MSC-22724-3,6223086,"09/504,768",Endothelium Preserving Microwave Treatment For Atherosclerosis,02/09/2020 -NASA Johnson Space Center,Issued,MSC-22724-5,6496736,"09/500,538",Endothelium Preserving Microwave Treatment For Atherosclerosis,02/09/2020 -NASA Johnson Space Center,Issued,MSC-22757-1,5879079,"08/917,581",Automated Propellant Blending Machine,08/20/2017 -NASA Johnson Space Center,Issued,MSC-22797-1,6312398,"08/786,842",A Method Of Applying External Power To Assist In The Operation Of Joints In Pressure Suits And Inflatable Structures2283,12/19/2016 -NASA Johnson Space Center,Issued,MSC-22839-1,6501414,"09/826,402",Locating Concealed Objects Using Spectral Signatures,04/02/2021 -NASA Johnson Space Center,Issued,MSC-22859-1,6730498,"09/56,363","Production Of 1-25diOH Vitamin D3, Erythropoietin And Other Products By Epithelial And Interstitial Cells In Response To Shear Stress",04/08/2017 -NASA Johnson Space Center,Issued,MSC-22859-2,6946246,"09/532,001",Production Of Functional Proteins: Balance Of Shear Stress And Gravity,03/21/2020 -NASA Johnson Space Center,Issued,MSC-22859-3,7198947,"10/734,759",Production Of Functional Proteins: Balance Of Shear Stress And Gravity,12/22/2023 -NASA Johnson Space Center,Issued,MSC-22859-5,7972821,"12/174,221",Production of Functional Proteins: Balance of Shear Stress and Gravity,02/11/2029 -NASA Johnson Space Center,Issued,MSC-22863-1,7122071,"10/263,280",Centrifugal Adsorption Cartridge System (CACS),12/21/2022 -NASA Johnson Space Center,Issued,MSC-22866-1,6099864,"09/79,741",INSITU Activation Of Microcapsules,05/15/2018 -NASA Johnson Space Center,Issued,MSC-22900-1,6231010,"09/236,785",Advanced Structural/Inflatable Hybrid Spacecraft Habitation Module,01/25/2019 -NASA Johnson Space Center,Issued,MSC-23563-2,8039099,"11/848,332",Nanoencapsulated Aerogels Produced By Monomer Vapor Deposition And Polymerization,08/13/2028 -NASA Johnson Space Center,Issued,MSC-22931-1,6354540,"09/405,301","Electro-Mechanically Actuated Magnetic Ring With Load Sensing Feedback And Closed Loop Control Docking/Berthing System For Alignment And Mating Of Multiple Vehicles, Structures, And/or Assemblies",09/20/2019 -NASA Johnson Space Center,Issued,MSC-22936-1,6387399,"09/79,766",Protein Crystal Encapsulation Process,05/15/2018 -NASA Johnson Space Center,Issued,MSC-22936-2,6558698,"09/733,391",Microencapsulated Bioactive Agents And Method Of Making,12/06/2020 -NASA Johnson Space Center,Issued,MSC-22936-3,6676964,"09/774,168",Method For Determining The Three-Dimensional Structure Of A Protein,01/26/2021 -NASA Johnson Space Center,Issued,MSC-22936-4,6599449,"09/774,169",X-Ray Crystallography Reagent,01/24/2021 -NASA Johnson Space Center,Issued,MSC-22937-1,6214300,"09/79,833",Microencapsulation And Electrostatic Processing Device (MEPS),05/15/2018 -NASA Johnson Space Center,Issued,MSC-22938-1,6103271,"09/79,770",Low-Shear Microencapsulation & Electrostatic Coating Process,05/15/2018 -NASA Johnson Space Center,Issued,MSC-22939-4,7968117,"12/100,009",Externally Triggered Microcapsules,07/09/2029 -NASA Johnson Space Center,Issued,MSC-22970-1,6253563,"09/337,208",Solar-Powered Refrigeration System,06/03/2019 -NASA Johnson Space 
Center,Issued,MSC-22970-2,6469487,"09/838,679",Solar Powered Refrigeration System,06/03/2019 -NASA Johnson Space Center,Issued,MSC-22970-3,6453693,"09/838,680",Solar Powered Refrigeration System,06/03/2019 -NASA Johnson Space Center,Issued,MSC-23029-1,6651739,"09/793,817",Medium Frequency Pseudo Noise Geological Radar,07/20/2021 -NASA Johnson Space Center,Issued,MSC-23037-1,6864473,"09/988,855",Variable Shadow Screen For Optical Devices,11/14/2021 -NASA Johnson Space Center,Issued,MSC-23041-1,6334302,"09/351,152",Variable Specific Impulse Magnetoplasma Rocket (VASIMR),06/28/2019 -NASA Johnson Space Center,Issued,MSC-23049-3,6592579,"09/746,542",Method For Selective Thermal Ablation,06/28/2021 -NASA Johnson Space Center,Issued,MSC-23049-4,6675050,"09/746,533",Computer Program For Microwave Antenna,05/07/2021 -NASA Johnson Space Center,Issued,MSC-23076-1,6321746,"09/574,758","Collapsable, Light, Portable Human Hyperbaric Chamber/Airlock System",05/17/2020 -NASA Johnson Space Center,Issued,MSC-23092-1,6547189,"09/826,403","Advanced, Large Volume, Highly Loaded, Hybrid Inflatable Pressure Vessel",05/26/2021 -NASA Johnson Space Center,Issued,MSC-23153-1,6995572,"09/803,613",Coplanar Waveguide Ice Detection Sensor,11/04/2023 -NASA Johnson Space Center,Issued,MSC-23154-1,7113820,"09/906,013","A Real-Time, High Frequency QRS Electrocardiograph.",05/03/2023 -NASA Johnson Space Center,Issued,MSC-23154-2,7539535,"11/345,687","A Real-Time, High Frequency QRS Electrocardiograph",07/13/2027 -NASA Johnson Space Center,Issued,MSC-23178-1,6997637,"10/5,820",Deceleration Limiting Safety Crash Wall,05/19/2022 -NASA Johnson Space Center,Issued,MSC-23193-1,6618010,"09/994,989",Passive Noncoherent Tracking Of A Data-Modulated Signal,11/14/2021 -NASA Johnson Space Center,Issued,MSC-23277-1,7295309,"10/734,753",Microcapsule Flow Sensor,11/12/2024 -NASA Johnson Space Center,Issued,MSC-23303-1,7397774,"10/446,283",Downlink Data Multiplexer,01/16/2026 -NASA Johnson Space Center,Issued,MSC-23307-1,6559645,"10/28,962",Detection Of Subterranean Metal Objects Using Differential Spectral Processing,11/17/2020 -NASA Johnson Space Center,Issued,MSC-23309-1,7040319,"10/87,866",Oxygen Partial Pressure Monitoring Device For Aircraft Oxygen Masks.,04/27/2022 -NASA Johnson Space Center,Issued,MSC-23311-1,6650280,"09/953,612",Mass Measurement During Fluid Flow Using An Integrated Sonic/Microwave Detector.,09/14/2021 -NASA Johnson Space Center,Issued,MSC-23314-1,6899009,"09/892,355",Flexshield (Flexible Multi-Shock Shield Technology),06/26/2021 -NASA Johnson Space Center,Issued,MSC-23349-1,7415005,"10/283,354",MCC Voice Over Internet Protocol (VOIP),08/08/2026 -NASA Johnson Space Center,Application,MSC-23349-2-SB,0,"12/170,614",Ad Hoc Selection of Voice Over Internet Streams, -NASA Johnson Space Center,Issued,MSC-23424-1,6985606,"10/212,579",Global Distribution Of Large Fluvial Fans/Potential Hydrocarbon Exploration Guide,06/12/2024 -NASA Johnson Space Center,Issued,MSC-23427-1,6944504,"10/302,323",Microwave Ablation Of Prostatic Cells Using A Separated Antenna Array,07/23/2023 -NASA Johnson Space Center,Issued,MSC-23436-1,7126553,"10/679,688",Tri-Sector Deployable Array Antenna,08/11/2024 -NASA Johnson Space Center,Issued,MSC-23443-1,6647855,"10/263,293",Method And Apparatus For Deploying A Hypervelocity Shield,09/30/2022 -NASA Johnson Space Center,Issued,MSC-23444-1,6932090,"10/361,046",A Simple Countermeasure For Management Of Motion Sickness And Vestibular/Sensory-Motor Problems Associated With Space Flight And Terrestial 
Motion Sickness,07/01/2023 -NASA Johnson Space Center,Issued,MSC-23449-1,7386340,"10/402,866",Method For Diagnosis Of Coronary Artery Disease And Related Conditions Using 12-Lead High Frequency QRS Electrocardiography,12/30/2025 -NASA Johnson Space Center,Issued,MSC-23510-1,6851647,"10/417,377",Portable Catapult Launcher For Small Aircraft,04/03/2023 -NASA Johnson Space Center,Issued,MSC-23518-1,7168935,"10/637,086",Low Voltage Electron Beam Solid Freeform Fabrication System,09/29/2024 -NASA Johnson Space Center,Issued,MSC-23538-1,6943619,"10/443,233",Practical Active Capacitor Filter,05/21/2023 -NASA Johnson Space Center,Issued,MSC-23539-1,6943621,"10/443,234","Auto-Routable, Configurable, Daisy Chainable Data Acquisition System",08/16/2023 -NASA Johnson Space Center,Issued,MSC-23563-1,7270851,"10/985,081",Nano-Encapsulated Aerogel,05/14/2025 -NASA Johnson Space Center,Issued,MSC-23594-1,7125370,"10/845,608",Articulating Subject Support For Resistive Exercise In The Horizontal Position,02/22/2025 -NASA Johnson Space Center,Issued,MSC-23623-1,7212934,"11/370,379",String Resistance Detector Concept,03/06/2026 -NASA Johnson Space Center,Issued,MSC-23659-1,7094045,"10/734,754",Pulse-Flow Microencapsulation System,06/09/2024 -NASA Johnson Space Center,Issued,MSC-23659-2,7588703,"11/428,465",Microencapsulation System And Method,03/14/2027 -NASA Johnson Space Center,Issued,MSC-23668-1,7250075,"10/874,004",Water Outlet Control Mechanism For Fuel Cell System Operation In Variable Gravity Environments,11/04/2025 -NASA Johnson Space Center,Issued,MSC-23695-1,7249540,"11/177,652",Torquing Tool Attachment For Round Connectors With Attached Cables,08/27/2025 -NASA Johnson Space Center,Issued,MSC-23781-1,7410485,"11/40,613",Directional Microwave Applicator/Antenna,10/16/2026 -NASA Johnson Space Center,Issued,MSC-23805-1,7462141,"11/31,942",Advanced Resistive Exercise Device (ARED),01/10/2027 -NASA Johnson Space Center,Issued,MSC-23881-1,7686529,"11/958,908","Low Friction, Low Profile, High Moment Two-Axis Joint",12/18/2027 -NASA Johnson Space Center,Application,MSC-23882-1,0,12/899654,Analog Strain Gage Conditioning System for Space Environment, -NASA Johnson Space Center,Issued,MSC-23906-1,7295884,"11/158,354",Method for the Design and Analysis of the Primary Load Bearing Layer of an Inflatable Vessel,07/20/2026 -NASA Johnson Space Center,Issued,MSC-23933-1,7543779,"11/625,066","Low Impact Docking System (LIDS) A.k.a, International Berthing Docking Mechanism (IBDM)",02/22/2028 -NASA Johnson Space Center,Issued,MSC-23954-1,7357606,"11/357,461",Self-Advancing Step-Tap Drill,08/14/2026 -NASA Johnson Space Center,Issued,MSC-23988-1,8343740,"12/58,227",Micro-Organ Device,10/31/2031 -NASA Johnson Space Center,Issued,MSC-23988-2,8580546,13/688982,Micro-Organ Device,11/29/2032 -NASA Johnson Space Center,Issued,MSC-23997-2,7815149,"12/388,345",Magnetic Capture Docking Mechanism,04/01/2025 -NASA Johnson Space Center,Issued,MSC-24000-1,8076136,/0,Development And Characterization Of A Three-Dimensional Tissue Culture Model Of Bone,10/31/2021 -NASA Johnson Space Center,Issued,MSC-24042-1,7411198,"11/421,174",New Architecture for Space Radiation Detection,02/01/2027 -NASA Johnson Space Center,Issued,MSC-24106-1,7577482,"11/683,770",Network System Plug And Play Through Positional And Functional Connectivity Identification,04/21/2028 -NASA Johnson Space Center,Issued,MSC-24115-1,8022307,"11/772,999",Method and Apparatus for Fabric Circuits and Antennas,06/19/2030 -NASA Johnson Space 
Center,Issued,MSC-24149-1,8122646,"12/402,986","A Description Of An Improved Method For Folding, Assembling, And Weight Relief Of An Inflatable Shell",02/04/2030 -NASA Johnson Space Center,Issued,MSC-24149-2,8266866,13/346137,"A Description Of An Improved Method For Folding, Assembling, And Weight Relief Of An Inflatable Shell",03/12/2029 -NASA Johnson Space Center,Issued,MSC-24164-1,8338114,"11/789,117",Methods For Growing Tissue-Like 3D Assemblies (TLA) Of Human Broncho-Epithelial Cells,05/04/2030 -NASA Johnson Space Center,Issued,MSC-24169-1,7862946,"11/671,210",Self-Regulating Control of Parasitic Electric Loads in Fuel Cell Power Systems,11/05/2029 -NASA Johnson Space Center,Issued,MSC-24180-1,7935259,"12/167,332","Water Filtering Device, 100% Effective",09/14/2029 -NASA Johnson Space Center,Issued,MSC-24184-1,8116350,"12/353,755",Ultra-Wideband (UWB) Two-Cluster Angle Of Arrival (AOA) Passive Tracking System Design,07/22/2030 -NASA Johnson Space Center,Issued,MSC-24201-1,7509774,"11/610,295",A Description Of An Improved Method For Attaching An Inflatable Shell To A Rigid Interface,06/13/2027 -NASA Johnson Space Center,Issued,MSC-24207-1,7604782,"11/625,670",X-38 Advanced Sublimator,04/12/2028 -NASA Johnson Space Center,Issued,MSC-24215-1,8070105,"11/956,826",A Description Of A Concentric Nested Torroidal Inflatable Habitat,10/04/2030 -NASA Johnson Space Center,Issued,MSC-24216-1,8047473,"12/240,537",A Description Of An Octonode Connecting Node Concept And Method,01/10/2030 -NASA Johnson Space Center,Issued,MSC-24228-1,7521682,"11/421,196",New Architecture For Space Radiation Detection,03/07/2027 -NASA Johnson Space Center,Issued,MSC-24238-1,8388613,12/757657,Microwave Tissue Welding For Wound Closure,11/17/2031 -NASA Johnson Space Center,Issued,MSC-24263-1,7805276,"11/958,937",Impact Detection System,02/12/2029 -NASA Johnson Space Center,Issued,MSC-24273-1,7840387,"11/778,858",Method For The Design And Analysis Of The Primary Load Bearing Layer That Interfaces To The Structural Pass-through Of An Inflatable Vessel,07/31/2029 -NASA Johnson Space Center,Application,MSC-24314-1,0,12/880602,HDSS - High Density Spot Seeding, -NASA Johnson Space Center,Issued,MSC-24346-1,8466776,12/828558,Extended Range RFID and Sensor Tag,09/05/2031 -NASA Johnson Space Center,Issued,MSC-24387-1,8011229,"12/323,912",Artificial Intelligence Algorithm For Assessing Postural Stability During Normal Daily Activities Using Shoe Insert Pressure Sensors,11/26/2028 -NASA Johnson Space Center,Issued,MSC-24441-1,7905946,"12/190,364",A Capillary-based Static Phase Separator For Highly Variable Wetting Conditions,07/02/2029 -NASA Johnson Space Center,Issued,MSC-24444-1,8577120,12/900644,Flash Infrared (IR) Thermography Contrast Computer Simulation And Data Analysis Software,04/22/2031 -NASA Johnson Space Center,Application,MSC-24451-1,0,13/057399,Rapid Detection Of The Varicella Zoster Virus (VZV) In Saliva Samples, -NASA Johnson Space Center,Issued,MSC-24464-1,7859292,"12/502,575",Reconfigurable SEU/SET Tolerance for FPGAs,07/14/2029 -NASA Johnson Space Center,Issued,MSC-24466-1,8183870,"12/370,021",Battery cell voltage sensing and balancing using addressable transformers with electrical isolation and minimal additional connector pins and circuitry.,07/01/2030 -NASA Johnson Space Center,Application,MSC-24490-1,0,"12/612,171",High Altitude Hydration System, -NASA Johnson Space Center,Application,MSC-24506-1,0,12/971919,A Method to Measure and Estimate Normalized contrast In Infrared Flash Thermography,01/08/2030 -NASA 
Johnson Space Center,Issued,MSC-24508-1,8343403,"12/174,380",METHOD FOR MAKING A MICROPOROUS MEMBRANE,12/31/2030 -NASA Johnson Space Center,Issued,MSC-24509-1,8570047,12/855384,Battery Fault Detection with Saturating Transformers,02/02/2032 -NASA Johnson Space Center,Issued,MSC-24525-1,8384614,12/894749,Deployable Fresnel Rings,10/11/2031 -NASA Johnson Space Center,Application,MSC-24541-1,0,12/899815,"Electromagnetic Time-Variance Magnetic Fields (TVMF) to generate, and re-grow Cartilage Cells by a Noninvasive Method", -NASA Johnson Space Center,Issued,MSC-24569-1,8176809,12/331844,Planar Torsion Spring, -NASA Johnson Space Center,Issued,MSC-24570-1,8276958,12/269579,Bidirectional Tendon Terminator, -NASA Johnson Space Center,Issued,MSC-24571-1,8371177,12/241309,Tendon Tension Sensor, -NASA Johnson Space Center,Application,MSC-24685-1,8056423,"12/269,552",Sensing the Tendon Tension through the Conduit Reaction Forces,11/12/2028 -NASA Johnson Space Center,Application,MSC-24686-1,8060250,"12/335,153",Joint Space Impedance Control for Tendon-Driven Manipulators,12/15/2028 -NASA Johnson Space Center,Issued,MSC-24687-1,8170718,12/338697,Multiple Priority Operational Space Impedance Control, -NASA Johnson Space Center,Issued,MSC-24688-1,8280837,12/474068,CONTACT STATE ESTIMATION FOR MULTI-FINGER ROBOT HANDS USING PARTICLE FILTERS, -NASA Johnson Space Center,Issued,MSC-24689-1,7784363,12/241320,PHALANGE TACTILE LOAD CELL,09/30/2028 -NASA Johnson Space Center,Issued,MSC-24732-1,8364314,12/624445,METHOD AND APPARATUS FOR AUTOMATIC CONTROL OF A HUMANOID ROBOT, -NASA Johnson Space Center,Application,MSC-24733-1,0,13/349265,Pyrometer, -NASA Johnson Space Center,Application,MSC-24734-1,8498741,12/564088,Dexterous Humanoid Robotic Wrist, -NASA Johnson Space Center,Application,MSC-24735-1,8467903,12/564086,Tendon Driven Finger Actuation System, -NASA Johnson Space Center,Issued,MSC-24736-1,8291788,12/564090,Rotary Series Elastic Actuator, -NASA Johnson Space Center,Issued,MSC-24737-1,8401700,12/564124,ACTUATOR AND ELECTRONICS PACKAGING FOR EXTRINSIC HUMANOID HAND, -NASA Johnson Space Center,Application,MSC-24738-1,0,12/564094,FRAMEWORK AND METHOD FOR CONTROLLING A ROBOTIC SYSTEM USING A DISTRIBUTED COMPUTER NETWORK, -NASA Johnson Space Center,Application,MSC-24739-1,8511964,12/564084,Dexterous Humanoid Robot, -NASA Johnson Space Center,Application,MSC-24740-1,0,12/564078,Dexterous Humanoid Robotic Finger, -NASA Johnson Space Center,Issued,MSC-24741-1,8255079,12/564095,Human Grasp Assist,09/23/2029 -NASA Johnson Space Center,Application,MSC-24742-1,8442684,12/564076,Integrated High Speed FPGA Based Torque Controller, -NASA Johnson Space Center,Application,MSC-24743-1,8250901,12/564092,Rotary Absolute Position Sensor Calibration, -NASA Johnson Space Center,Application,MSC-24744-1,8369992,12/564083,"Diagnostics, prognostics & health management for humanoid robotics and method thereof", -NASA Johnson Space Center,GM,MSC-24745-1,8424941,12/564085,ROBOTIC THUMB ASSEMBLY, -NASA Johnson Space Center,Application,MSC-24746-1,8260460,12/564096,Interactive Robot Control System, -NASA Johnson Space Center,Issued,MSC-24747-1,8244402,12/564074,VISUAL PERCEPTION SYSTEM AND METHOD FOR A HUMANOID ROBOT, -NASA Johnson Space Center,Issued,MSC-24750-1,8483882,12/686512,HIERARCHICAL ROBOT CONTROL SYSTEM AND METHOD FOR CONTROLLING SELECT DEGREES OF FREEDOM OF AN OBJECT USING MULTIPLE MANIPULATORS, -NASA Johnson Space Center,Issued,MSC-24751-1,8412376,12/720725,TENSION DISTRIBUTION IN A TENDON-DRIVEN ROBOTIC FINGER, -NASA 
Johnson Space Center,Issued,MSC-24752-1,8033876,12/706744,CONNECTOR PIN AND METHOD, -NASA Johnson Space Center,Application,MSC-24753-1,0,12/720727,UNDERACTUATED DESIGN AND CONTROL OF A TENDON-DRIVEN FINGER, -NASA Johnson Space Center,Application,MSC-24755-1,0,12/698832,Architecture For Robust Force and Impedance Control Of Series Elastic Actuators, -NASA Johnson Space Center,Application,MSC-24758-1,0,14/184278,RFID Cavity,03/11/2033 -NASA Johnson Space Center,Application,MSC-24798-1,0,13/789903,Soft Decision Analyzer (SDA),03/08/2033 -NASA Johnson Space Center,Application,MSC-24811-1,0,"13/461,487",Self-enclosed and pipette free DNA/RNA Isolation device, -NASA Johnson Space Center,Application,MSC-24813-1,0,13/791290,Pre-Polymerase Chain Reaction Preparation Kit,08/06/2032 -NASA Johnson Space Center,Application,MSC-24817-1,8265792,12/760954,Method and Apparatus for Calibrating Multi-Axis Load Cells in a Dexterous Robot, -NASA Johnson Space Center,Application,MSC-24837-1,0,12/787479,Applying Workspace Limitations in a Velocity-Controlled Robotic Mechanism, -NASA Johnson Space Center,Application,MSC-24919-1,0,13/790591,"RFID Waveguide, Antenna, and Cavity Sensors",07/13/2032 -NASA Johnson Space Center,Issued,MSC-24926-1,8412378,12/629637,IN-VIVO TENSION CALIBRATION IN TENDON-DRIVEN MANIPULATORS, -NASA Johnson Space Center,Issued,MSC-24930-1,8489239,12/916803,ROBUST OPERATION OF TENDON-DRIVEN ROBOT FINGERS USING FORCE AND POSITION-BASED CONTROL LAWS, -NASA Johnson Space Center,Application,MSC-25026-1,0,13/354552,Battery Charge Equalizer with transformer array, -NASA Johnson Space Center,Issued,MSC-25053-1,"D628,609",29/359105,ROBOT,04/06/2030 -NASA Johnson Space Center,Application,MSC-25056-1,0,13/014901,SYSTEM AND METHOD FOR TENSIONING A ROBOTICALLY ACTUATED TENDON, -NASA Johnson Space Center,Issued,MSC-25084-1,8067909,12/474430,METHOD AND APPARATUS FOR ELECTROMAGNETICALLY BRAKING A MOTOR,05/29/2029 -NASA Johnson Space Center,Application,MSC-25084-DE,0,12/474430,Method and Apparatus for Electromagnetically Braking a Motor, -NASA Johnson Space Center,Application,MSC-25084-JP,0,12/474430,Method and Apparatus for Electromagnetically Braking a Motor, -NASA Johnson Space Center,Application,MSC-25091-1,0,13/199484,"FRET-Aptamer Assays for C-Telopeptide, Creatinine and Vitamin D",08/31/2031 -NASA Johnson Space Center,Issued,MSC-25121-1,8483877,12/875254,WORKSPACE SAFE OPERATION OF A FORCE- OR IMPEDANCE-CONTROLLED ROBOT, -NASA Johnson Space Center,Application,MSC-25149-1,0,13/196252,Controlling Execution Sequence Using Tactile-Classification during manipulation by a humanoid robot, -NASA Johnson Space Center,Application,MSC-25216-1,0,"13/439,546",METHOD AND COMPOSITION FOR AMELIORATING THE EFFECTS FOR A SUBJECT EXPOSED TO RADIATION OR OTHER SOURCES OF OXIDATIVE STRESS, -NASA Johnson Space Center,Application,MSC-25217-1,0,13/272442,METHOD FOR DYNAMIC OPTIMIZATION OF A ROBOT CONTROL INTERFACE, -NASA Johnson Space Center,Application,MSC-25219,0,13/207911,FAST GRASP CONTACT COMPUTATION FOR A SERIAL ROBOT, -NASA Johnson Space Center,Application,MSC-25265-1,0,13/851778,New method and device for digital to analog transformations and reconstructions of multichannel electrocardiograms,10/30/2032 -NASA Johnson Space Center,Application,MSC-25286-1,0,14/252660,A chemical formulation to stabilize urine and minimize the precipitation potential of minerals during distillation of urine,03/11/2033 -NASA Johnson Space Center,Application,MSC-25313-1,0,13/774835,Hydrostatic Hyperbaric Chamber,02/22/2033 -NASA Johnson 
Space Center,Application,MSC-25318,0,13/408668,HUMAN GRASP ASSIST SOFT, -NASA Johnson Space Center,Application,MSC-25319,0,13/408656,HUMAN GRASP ASSIST , -NASA Johnson Space Center,Application,MSC-25320,0,13/408675,HUMAN GRASP ASSIST CONTROLS, -NASA Johnson Space Center,Application,MSC-25327-1,0,13/459557,COMMUNICATION SYSTEM AND METHOD, -NASA Johnson Space Center,Application,MSC-25386-1,0,13/951671,Active Response Gravity Offload System - Vertical Software Release,07/26/2033 -NASA Johnson Space Center,Application,MSC-25590-1,0,13/790927,Systems and Methods for RFID-Enabled Information Collection, -NASA Johnson Space Center,Application,MSC-25604-1,0,13/791584,Systems and Methods for RFID-Enabled Dispenser, -NASA Johnson Space Center,Application,MSC-25605-1,0,13/790721,Switch Using Radio Frequency Identification, -NASA Johnson Space Center,Application,MSC-25626-1,0,"14/200,122",RFID Torque-Sensing Tag System for Fasteners,03/07/2034 -NASA Johnson Space Center,Application,MSC-25632-1,0,13/803017,"ROBOT TASK COMMANDER WITH EXTENSIBLE PROGRAMMING ENVIRONMENT -",03/14/2033 -NASA Johnson Space Center,Application,MSC-25758-1,0,14/184303,"Methods, Systems and Apparatuses for Radio Frequency Identification",03/11/2033 -NASA Johnson Space Center,Application,MSC-25759-1,0,14/184337,"Methods, Systems and Apparatuses for Radio Frequency Identification",03/11/2033 -NASA Johnson Space Center,Application,MSC-25760-1,0,14/184365,"Methods, Systems and Apparatuses for Radio Frequency Identification",03/11/2033 -NASA Jet Propulsion Laboratory,Application,NPO-17734-1,0,"07/700,830",Formation Of Self-Aligned Guard Ring For Silicide Schottky-Barrier Diodes Used For Infrared Detection, -NASA Jet Propulsion Laboratory,Issued,NPO-19289-1,6513023,"09/412,199",On-Chip Learning In VLSI Hardware,10/01/2019 -NASA Jet Propulsion Laboratory,Application,NPO-19769-1,0,"08/868,175",Automated Cargo Inventory Identification Transponder, -NASA Jet Propulsion Laboratory,Issued,NPO-19855-1,6374630,"09/853,931",Champagne Heat Pump,05/09/2021 -NASA Jet Propulsion Laboratory,Issued,NPO-20031-1,6828935,"10/176,761",Receiver Controlled Phased Array Antenna,07/19/2022 -NASA Jet Propulsion Laboratory,Issued,NPO-20837-1,6526556,"09/591,386",MORPHING TECHNIQUE FOR ACCELERATED EVOLUTIONARY SYNTHESIS OF ELECTRONIC CIRCUITS,06/07/2020 -NASA Jet Propulsion Laboratory,Application,NPO-21136-1,0,"10/219,384",A CMOS ACTIVE PIXEL SENSOR (APS) FOR READING COMPACT DISCS, -NASA Jet Propulsion Laboratory,Issued,NPO-30703-1,7240208,"10/424,287",ENCRYPTING DIGITAL CAMERA,04/23/2023 -NASA Jet Propulsion Laboratory,Issued,NPO-40040-1,7480984,"40/863,835",A Concept For Suppressing Sublimation In Advanced Thermoelectric Devices,06/07/2024 -NASA Jet Propulsion Laboratory,Issued,NPO-40407-1,7592747,"11/056,633",Piezoelectrically Enhanced PhotoCathode (PEPC),02/09/2025 -NASA Jet Propulsion Laboratory,Issued,NPO-40827-1,7156189,"11/1,465",SELF-MOUNTABLE AND EXTRACTABLE ULTRASONIC/SONIC ANCHOR (U/S-Anchor),12/01/2024 -NASA Jet Propulsion Laboratory,Issued,NPO-41446-1,8358723,"11/602,440",Architecture Of An Autonomous Radio,09/12/2031 -NASA Jet Propulsion Laboratory,Issued,NPO-41506-2,8492160,"12/720,103",BIOMARKER SENSOR SYSTEM AND METHOD FOR MULTI-COLOR IMAGING AND PROCESSING OF SINGLE-MOLECULE LIFE SIGNATURES,04/09/2031 -NASA Jet Propulsion Laboratory,Issued,NPO-41511-1,7385462,"11/376,638",Wideband (31 To 36 GHz) 24-Way Radial Power Combiner/Divider Fed By A Marie Transducer,03/14/2026 -NASA Jet Propulsion 
Laboratory,Issued,NPO-41982-1,8078309,"12/415,206",Inverse Tomographic Approach To Create Arbitrary Sidewall Geometries In 3D Using LiGA Technologies,03/03/2021 -NASA Jet Propulsion Laboratory,Issued,NPO-42131-1,7824247,"11/756,819",PORTABLE RAPID AND QUIET DRILL (PRAQD),11/02/2027 -NASA Jet Propulsion Laboratory,Issued,NPO-42312-1,7184624,"11/422,147",Slow light in chains of vertically coupled whispering gallery mode resonators,06/05/2026 -NASA Jet Propulsion Laboratory,Issued,NPO-42466-1,7764384,"11/924,766",Swept frequency laser metrology system,10/26/2027 -NASA Jet Propulsion Laboratory,Issued,NPO-42563-1,7353768,"11/456,441",Submersible Vehicle Propulsion and Power Generation,07/10/2026 -NASA Jet Propulsion Laboratory,Issued,NPO-42672-1,7996112,"11/756,793",Micro Robot Explorer (SpiderBot) Mesh Crawler,06/08/2030 -NASA Jet Propulsion Laboratory,Issued,NPO-43213-1,7850861,"11/764,359",Patterning packing materials for Fluidic Channels,10/13/2029 -NASA Jet Propulsion Laboratory,Issued,NPO-43348-1,7809521,"12/40,459",Precise delay measurement circuit on FPGAs,01/31/2029 -NASA Jet Propulsion Laboratory,Issued,NPO-43361-1,7773121,"11/741,213","High Resolution, Continuous Field of View, Non-Rotating Imaging Sensor Head",10/15/2028 -NASA Jet Propulsion Laboratory,Issued,NPO-43524-1,7773362,"11/683,007",Dusty Plasma Thruster,01/03/2029 -NASA Jet Propulsion Laboratory,Issued,NPO-44079-1,8022860,"11/781,022",Enhanced Interference Cancellation and Telemetry Reception with a Single Parabolic Dish Antenna using a Focal Plane Array,04/30/2030 -NASA Jet Propulsion Laboratory,Issued,NPO-44765-1,7740088,"11/928,069",Ultrasonic/Sonic Rotary-Hammer Drill (USRoHD),04/15/2028 -NASA Jet Propulsion Laboratory,Issued,NPO-44914-1,8407979,"11/926,279","Magnetically-Conformed, Variable Area Discharge Chamber for Hall Thruster Plasma Accelerators",06/08/2031 -NASA Jet Propulsion Laboratory,Issued,NPO-45053-1,8057283,"12/119,989",The process of significant improving of optical quality factor of whispering gallery mode resonator.,09/15/2030 -NASA Jet Propulsion Laboratory,Issued,NPO-45911-1,8163094,"12/508,006",Method to Improve Indium Bump Bonding Via Indium Oxide Removal Using a Two Step Plasma Process,08/16/2030 -NASA Jet Propulsion Laboratory,Issued,NPO-45948-1,7843650,"12/490,422",Monolithic Afocal Telescope,06/24/2029 -NASA Jet Propulsion Laboratory,Application,NPO-46253-1,0,"12/237,159",Generation of optical combs in a whispering gallery mode resonator from a bichromatic pump, -NASA Jet Propulsion Laboratory,Issued,NPO-46843-1,8169371,"12/541,725","A single-layer, all-metal patch antenna element with wide bandwidth",09/25/2030 -NASA Jet Propulsion Laboratory,Issued,NPO-46938-1,8026768,"12/691,070",A 201Hg+ co-magnetometer for 199Hg+ trapped ion space atomic clocks,04/03/2030 -NASA Jet Propulsion Laboratory,Application,NPO-47300-1,0,"13/017,174","Textured Si Anode for High Capacity, Rapid Charge Rate Li Ion Batteries", -NASA Jet Propulsion Laboratory,Application,NPO-47300-2,0,"13/895,499","Textured Si Anode for High Capacity, Rapid Charge Rate Li Ion Batteries",01/31/2031 -NASA Jet Propulsion Laboratory,Issued,NPO-47310-1,8502987,"13/018,672",Coherent Detector for Near-Angle Scattering and Polarization Characterization of Telescope Mirror Coatings,03/24/2032 -NASA Jet Propulsion Laboratory,Issued,NPO-47604-1,8649000,"13/277,954",Surface Enhanced Raman Scattering using Silica Whispering-Gallery Mode Resonators,07/10/2032 -NASA Jet Propulsion Laboratory,Application,NPO-47717-1,,"13/281,683",360-Degree Camera 
Head for Unmanned Surface Sea Vehicles, -NASA Jet Propulsion Laboratory,Issued,NPO-47869-1,8649609,"13/071,299",FPGA Vision Data Architecture,04/17/2032 -NASA Jet Propulsion Laboratory,Application,NPO-47881-1,,"14/151,684",Pulsed Plasma Lubricator (PPL) Technology for the In Situ Replenishment of Dry Lubricants in Extreme Environments, -NASA Jet Propulsion Laboratory,Application,NPO-48140-1,,"13/456,451",Probabilistic Surface Characterization for Safe Landing Hazard Detection and Avoidance, -NASA Jet Propulsion Laboratory,Application,NPO-48413-1,,"13/757,929",Simple Laser-Communications Terminal for Downlink from Earth-Orbit at Rates Exceeding 10 Gb/s,02/04/2033 -NASA Jet Propulsion Laboratory,Application,NPO-48539-1,,"13/858,267",Neutral mounting of whispering gallery mode resonators for suppression of acceleration-induced frequency fluctuations,04/08/2033 -NASA Jet Propulsion Laboratory,Application,NPO-49086-1,,"14/101,547",Electride Mediated Surface Enhanced Raman Spectroscopy,12/10/2033 -NASA Stennis Space Center,Issued,SSC-00040,5726632,"08/622,178",HANDHELD HYDROGEN FIRE IMAGER,03/14/2016 -NASA Stennis Space Center,Issued,SSC-00050,6020587,"09/3,212",A HAND HELD PLANT STRESS DETECTION SYSTEM,01/06/2018 -NASA Stennis Space Center,Issued,SSC-00247,8618933,"11/866,042",Valve Health Monitoring System Utilizing Smart Instrumentation for Real Time and Historical Data Tracking,05/03/2032 -NASA Stennis Space Center,Issued,SSC-00264,8336849,12/704193,Conical Seat Shut Off Valve,01/13/2031 -NASA Stennis Space Center,Issued,SSC-00327,8401820,"12/566,111",IN SITU HEALTH MONITORING OF PIEZOELECTRIC SENSORS,07/31/2030 diff --git a/mathesar/tests/data/json_parsing/duplicate_id_table.json b/mathesar/tests/data/json_parsing/duplicate_id_table.json deleted file mode 100644 index 047b0d2b60..0000000000 --- a/mathesar/tests/data/json_parsing/duplicate_id_table.json +++ /dev/null @@ -1,17 +0,0 @@ -[ - { - "id": 1, - "Name": "John", - "Age": 25 - }, - { - "id": 3, - "Name": "Cristine", - "Age": 30 - }, - { - "id": 3, - "Name": "Jane", - "Age": 23 - } -] \ No newline at end of file diff --git a/mathesar/tests/data/json_parsing/missing_keys.json b/mathesar/tests/data/json_parsing/missing_keys.json deleted file mode 100644 index e7398a3252..0000000000 --- a/mathesar/tests/data/json_parsing/missing_keys.json +++ /dev/null @@ -1,29 +0,0 @@ -[ - { - "first_name":"Matt", - "last_name":"Murdock", - "gender":"Male", - "friends": ["Stick", "Foggy"], - "address": { - "street": "210", - "city": "NY" - } - }, - { - "first_name":"John", - "last_name":"Doe", - "email":"jd@example.org", - "gender":"Male", - "friends": ["Mark", "Bill"] - }, - { - "first_name":"Frank", - "last_name":"Castle", - "email":"fc@example.org", - "address": { - "street": "211", - "city": "NY" - } - } - ] - \ No newline at end of file diff --git a/mathesar/tests/data/json_parsing/nested_objects.json b/mathesar/tests/data/json_parsing/nested_objects.json deleted file mode 100644 index 1a172f1dfc..0000000000 --- a/mathesar/tests/data/json_parsing/nested_objects.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "name": "John Doe", - "age": 30, - "email": "john.doe@example.com", - "division": { - "name": "frontend", - "project": { - "name": "Project A", - "status": "In Progress", - "team": { - "lead": "John", - "members": ["Mary", "Mark"] - } - } - } - }, - { - "name": "Jane Smith", - "age": 25, - "email": "jane.smith@example.com", - "division": { - "name": "backend", - "project": { - "name": "Project B", - "status": "In Progress", - "team": { - "lead": 
"Jane", - "members": ["Bob", "Sarah"] - } - } - } - } -] diff --git a/mathesar/tests/data/json_parsing/null_id_table.json b/mathesar/tests/data/json_parsing/null_id_table.json deleted file mode 100644 index 243d5f2169..0000000000 --- a/mathesar/tests/data/json_parsing/null_id_table.json +++ /dev/null @@ -1,17 +0,0 @@ -[ - { - "id": 1, - "Name": "John", - "Age": 25 - }, - { - "id": null, - "Name": "Cristine", - "Age": 30 - }, - { - "id": 3, - "Name": "Jane", - "Age": 23 - } -] \ No newline at end of file diff --git a/mathesar/tests/data/non_unicode_files/cp1250.csv b/mathesar/tests/data/non_unicode_files/cp1250.csv deleted file mode 100644 index 9580fe8212..0000000000 --- a/mathesar/tests/data/non_unicode_files/cp1250.csv +++ /dev/null @@ -1,10 +0,0 @@ -1,"Eldon Base for stackable storage shelf, platinum",Muhammed MacIntyre,3,-213.25,38.94,35,Nunavut,Storage & Organization,0.8 -2,"1.7 Cubic Foot Compact ""Cube"" Office Refrigerators",Barry French,293,457.81,208.16,68.02,Nunavut,Appliances,0.58 -3,"Cardinal Slant-D® Ring Binder, Heavy Gauge Vinyl",Barry French,293,46.71,8.69,2.99,Nunavut,Binders and Binder Accessories,0.39 -4,R380,Clay Rozendal,483,1198.97,195.99,3.99,Nunavut,Telephones and Communication,0.58 -5,Holmes HEPA Air Purifier,Carlos Soltero,515,30.94,21.78,5.94,Nunavut,Appliances,0.5 -6,G.E. Longer-Life Indoor Recessed Floodlight Bulbs,Carlos Soltero,515,4.43,6.64,4.95,Nunavut,Office Furnishings,0.37 -7,"Angle-D Binders with Locking Rings, Label Holders",Carl Jackson,613,-54.04,7.3,7.72,Nunavut,Binders and Binder Accessories,0.38 -8,"SAFCO Mobile Desk Side File, Wire Frame",Carl Jackson,613,127.70,42.76,6.22,Nunavut,Storage & Organization, -9,"SAFCO Commercial Wire Shelving, Black",Monica Federle,643,-695.26,138.14,35,Nunavut,Storage & Organization, -10,Xerox 198,Dorothy Badders,678,-226.36,4.98,8.33,Nunavut,Paper,0.38 From 8b63ec6835cd7a72bf35b83434aca3d7c9666cc9 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:20:03 +0800 Subject: [PATCH 24/70] remove unused utility modules --- mathesar/migrations/0001_initial.py | 3 +- mathesar/utils/models.py | 93 --------- mathesar/utils/prefetch.py | 299 ---------------------------- 3 files changed, 1 insertion(+), 394 deletions(-) delete mode 100644 mathesar/utils/models.py delete mode 100644 mathesar/utils/prefetch.py diff --git a/mathesar/migrations/0001_initial.py b/mathesar/migrations/0001_initial.py index 216d9e4009..cd06fbb803 100644 --- a/mathesar/migrations/0001_initial.py +++ b/mathesar/migrations/0001_initial.py @@ -8,7 +8,6 @@ import django.db.models.deletion import django.db.models.manager import django.utils.timezone -import mathesar.utils.models class Migration(migrations.Migration): @@ -162,7 +161,7 @@ class Migration(migrations.Migration): ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), - ('file', models.FileField(upload_to=mathesar.utils.models.user_directory_path)), + ('file', models.FileField()), ('created_from', models.CharField(choices=[('FILE', 'File'), ('PASTE', 'Paste'), ('URL', 'Url')], max_length=128)), ('base_name', models.CharField(max_length=100)), ('header', models.BooleanField(default=True)), diff --git a/mathesar/utils/models.py b/mathesar/utils/models.py deleted file mode 100644 index 5cb07ce9be..0000000000 --- a/mathesar/utils/models.py +++ /dev/null @@ -1,93 +0,0 @@ -import os - -from sqlalchemy import text -from sqlalchemy.exc 
import OperationalError - -from rest_framework import status -from rest_framework.exceptions import ValidationError - -from db.tables.operations.alter import alter_table, SUPPORTED_TABLE_ALTER_ARGS -from db.schemas.operations.alter import patch_schema_via_sql_alchemy -from db.columns.exceptions import InvalidTypeError - -from mathesar.api.exceptions.error_codes import ErrorCodes -from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions - - -def user_directory_path(instance, filename): - user_identifier = instance.user.username if instance.user else 'anonymous' - # file will be uploaded to MEDIA_ROOT/user_/ - return os.path.join(user_identifier, filename) - - -def update_sa_table(table, validated_data): - errors = [ - base_api_exceptions.ErrorBody( - ErrorCodes.UnsupportedAlter.value, - message=f'Updating {arg} for tables is not supported.' - ) - for arg in set(validated_data) - SUPPORTED_TABLE_ALTER_ARGS - ] - if errors: - raise base_api_exceptions.GenericAPIException(errors, status_code=status.HTTP_400_BAD_REQUEST) - try: - data = _update_columns_side_effector(table, validated_data) - alter_table(table.name, table.oid, table.schema.name, table.schema._sa_engine, data) - # TODO: Catch more specific exceptions - except InvalidTypeError as e: - raise e - except Exception as e: - raise base_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) - - -def update_sa_schema(schema, validated_data): - errors = [base_api_exceptions.ErrorBody( - ErrorCodes.UnsupportedAlter.value, - message=f'Updating {arg} for schema is not supported.' - ) - for arg in set(validated_data) - {'name', 'description'}] - if errors: - raise base_api_exceptions.GenericAPIException(errors, status_code=status.HTTP_400_BAD_REQUEST) - if errors: - raise ValidationError(errors) - patch_schema_via_sql_alchemy(schema.name, schema._sa_engine, validated_data) - - -def ensure_cached_engine_ready(engine): - """ - We must make sure that a cached engine is usable. An engine might become unusable if its - Postgres database is dropped and then recreated. This handles that case, by making a dumb - query, which if it fails, will cause the engine to reestablish a usable connection and the - subsequent queries will work as expected. - - A problem with this is that we have to do this whenever an engine is retrieved from our engine - cache, which degrades the performance benefits of an engine cache. It might be worth eventually - benchmarking whether this is indeed better than not caching engines at all. 
- """ - try: - attempt_dumb_query(engine) - except OperationalError: - pass - - -def attempt_dumb_query(engine): - with engine.connect() as con: - con.execute(text('select 1 as is_alive')) - - -def _update_columns_side_effector(table, validated_data): - data = validated_data.get('columns') - if data is not None: - queryset = table.columns.all() - for column_data in data: - col_id = column_data.pop('id') - dj_col = queryset.get(id=col_id) - column_data['attnum'] = dj_col.attnum - display_options = column_data.pop('display_options', 'NOT PASSED') - if display_options != 'NOT PASSED': - dj_col.display_options = display_options - dj_col.save() - for col in queryset: - if col.attnum not in {column_data['attnum'] for column_data in data}: - data.append({"attnum": col.attnum, "delete": True}) - return validated_data diff --git a/mathesar/utils/prefetch.py b/mathesar/utils/prefetch.py deleted file mode 100644 index 7a3e51ff77..0000000000 --- a/mathesar/utils/prefetch.py +++ /dev/null @@ -1,299 +0,0 @@ -import collections -import time -from logging import getLogger - -import django -from django.db import models -from django.db.models import query -from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor - -__version__ = '1.2.3' - -logger = getLogger(__name__) - - -class PrefetchManagerMixin(models.Manager): - use_for_related_fields = True - prefetch_definitions = {} - - @classmethod - def get_queryset_class(cls): - return PrefetchQuerySet - - def __init__(self): - super(PrefetchManagerMixin, self).__init__() - for name, prefetcher in self.prefetch_definitions.items(): - if prefetcher.__class__ is not Prefetcher and not callable(prefetcher): - raise InvalidPrefetch("Invalid prefetch definition %s. This prefetcher needs to be a class not an instance." % name) - - def get_queryset(self): - qs = self.get_queryset_class()( - self.model, prefetch_definitions=self.prefetch_definitions - ) - - if getattr(self, '_db', None) is not None: - qs = qs.using(self._db) - return qs - - def prefetch(self, *args): - return self.get_queryset().prefetch(*args) - - -class PrefetchManager(PrefetchManagerMixin): - def __init__(self, **kwargs): - self.prefetch_definitions = kwargs - super(PrefetchManager, self).__init__() - - -class InvalidPrefetch(Exception): - pass - - -class PrefetchOption(object): - def __init__(self, name, *args, **kwargs): - self.name = name - self.args = args - self.kwargs = kwargs - - -P = PrefetchOption - - -class PrefetchQuerySet(query.QuerySet): - def __init__(self, model=None, query=None, using=None, - prefetch_definitions=None, **kwargs): - super(PrefetchQuerySet, self).__init__(model, query, using, **kwargs) - self._prefetch = {} - self.prefetch_definitions = prefetch_definitions - - if django.VERSION < (2, 0): - def _clone(self, **kwargs): - return super(PrefetchQuerySet, self). 
\ - _clone(_prefetch=self._prefetch, - prefetch_definitions=self.prefetch_definitions, **kwargs) - else: - def _clone(self): - c = super(PrefetchQuerySet, self)._clone() - c._prefetch = self._prefetch - c.prefetch_definitions = self.prefetch_definitions - return c - - def prefetch(self, *names): - obj = self._clone() - - for opt in names: - if isinstance(opt, PrefetchOption): - name = opt.name - else: - name = opt - opt = None - parts = name.split('__') - forwarders = [] - prefetcher = None - model = self.model - prefetch_definitions = self.prefetch_definitions - - for what in parts: - if not prefetcher: - if what in prefetch_definitions: - prefetcher = prefetch_definitions[what] - continue - descriptor = getattr(model, what, None) - if isinstance(descriptor, ForwardManyToOneDescriptor): - field = descriptor.field - forwarders.append(field.name) - model = field.remote_field.model - manager = model.objects - if not isinstance(manager, PrefetchManagerMixin): - raise InvalidPrefetch('Manager for %s is not a PrefetchManagerMixin instance.' % model) - prefetch_definitions = manager.prefetch_definitions - else: - raise InvalidPrefetch("Invalid part %s in prefetch call for %s on model %s. " - "The name is not a prefetcher nor a forward relation (fk)." % ( - what, name, self.model)) - else: - raise InvalidPrefetch("Invalid part %s in prefetch call for %s on model %s. " - "You cannot have any more relations after the prefetcher." % ( - what, name, self.model)) - if not prefetcher: - raise InvalidPrefetch("Invalid prefetch call with %s for on model %s. " - "The last part isn't a prefetch definition." % (name, self.model)) - if opt: - if prefetcher.__class__ is Prefetcher: - raise InvalidPrefetch("Invalid prefetch call with %s for on model %s. " - "This prefetcher (%s) needs to be a subclass of Prefetcher." % ( - name, self.model, prefetcher)) - - obj._prefetch[name] = forwarders, prefetcher(*opt.args, **opt.kwargs) - else: - obj._prefetch[name] = forwarders, prefetcher if prefetcher.__class__ is Prefetcher else prefetcher() - - for forwarders, prefetcher in obj._prefetch.values(): - if forwarders: - obj = obj.select_related('__'.join(forwarders)) - return obj - - def _fetch_all(self): - # We are storing the prefetch state to call our own prefetch related, - # we store it before calling _fetch_all() as it would end up marking prefetch as true - prefetch_done = self._prefetch_done - super()._fetch_all() - obj_list = self._result_cache - good_objects = True - if not prefetch_done: - for obj in obj_list: - if not hasattr(obj, '_prefetched_objects_cache'): - try: - obj._prefetched_objects_cache = {} - except (AttributeError, TypeError): - # Must be an immutable object from - # values_list(flat=True), for example (TypeError) or - # a QuerySet subclass that isn't returning Model - # instances (AttributeError), either in Django or a 3rd - # party. prefetch_related() doesn't make sense, so quit. - good_objects = False - break - if good_objects: - for name, (forwarders, prefetcher) in self._prefetch.items(): - prefetcher.fetch(obj_list, name, self.model, forwarders) - - -class Prefetcher(object): - """ - Prefetch definitition. For convenience you can either subclass this and - define the methods on the subclass or just pass the functions to the - contructor. 
- - Eg, subclassing:: - - class GroupPrefetcher(Prefetcher): - - @staticmethod - def filter(ids): - return User.groups.through.objects.filter(user__in=ids).select_related('group') - - @staticmethod - def reverse_mapper(user_group_association): - return [user_group_association.user_id] - - @staticmethod - def decorator(user, user_group_associations=()): - setattr(user, 'prefetched_groups', [i.group for i in user_group_associations]) - - Or with contructor:: - - Prefetcher( - filter = lambda ids: User.groups.through.objects.filter(user__in=ids).select_related('group'), - reverse_mapper = lambda user_group_association: [user_group_association.user_id], - decorator = lambda user, user_group_associations=(): setattr(user, 'prefetched_groups', [ - i.group for i in user_group_associations - ]) - ) - - - Glossary: - - * filter(list_of_ids): - - A function that returns a queryset containing all the related data for a given list of keys. - Takes a list of ids as argument. - - * reverse_mapper(related_object): - - A function that takes the related object as argument and returns a list - of keys that maps that related object to the objects in the queryset. - - * mapper(object): - - Optional (defaults to ``lambda obj: obj.pk``). - - A function that returns the key for a given object in your query set. - - * decorator(object, list_of_related_objects): - - A function that will save the related data on each of your objects in - your queryset. Takes the object and a list of related objects as - arguments. Note that you should not override existing attributes on the - model instance here. - - """ - collect = False - - def __init__(self, filter=None, reverse_mapper=None, decorator=None, mapper=None, collect=None): - if filter: - self.filter = filter - elif not hasattr(self, 'filter'): - raise RuntimeError("You must define a filter function") - - if reverse_mapper: - self.reverse_mapper = reverse_mapper - elif not hasattr(self, 'reverse_mapper'): - raise RuntimeError("You must define a reverse_mapper function") - - if decorator: - self.decorator = decorator - elif not hasattr(self, 'decorator'): - raise RuntimeError("You must define a decorator function") - - if mapper: - self.mapper = mapper - - if collect is not None: - self.collect = collect - - @staticmethod - def mapper(obj): - return obj.pk - - def fetch(self, dataset, name, model, forwarders): - collect = self.collect or forwarders - - try: - data_mapping = collections.defaultdict(list) - t1 = time.time() - for obj in dataset: - for field in forwarders: - obj = getattr(obj, field, None) - - if not obj: - continue - - if collect: - data_mapping[self.mapper(obj)].append(obj) - else: - data_mapping[self.mapper(obj)] = obj - - t2 = time.time() - logger.debug("Creating data_mapping for %s query took %.3f secs for the %s prefetcher.", - model.__name__, t2 - t1, name) - t1 = time.time() - related_data = self.filter(data_mapping.keys(), data_mapping.values()) - related_data_len = len(related_data) - t2 = time.time() - logger.debug("Filtering for %s related objects for %s query took %.3f secs for the %s prefetcher.", - related_data_len, model.__name__, t2 - t1, name) - relation_mapping = collections.defaultdict(list) - - t1 = time.time() - if isinstance(related_data, dict): - relation_mapping = related_data - else: - for obj in related_data: - for id_ in self.reverse_mapper(obj): - if id_: - relation_mapping[id_].append(obj) - for id_, related_items in relation_mapping.items(): - if id_ in data_mapping: - if collect: - for item in data_mapping[id_]: - 
self.decorator(item, related_items) - else: - self.decorator(data_mapping[id_], related_items) - - t2 = time.time() - logger.debug("Adding the related objects on the %s query took %.3f secs for the %s prefetcher.", - model.__name__, t2 - t1, name) - return dataset - except Exception: - logger.exception("Prefetch failed for %s prefetch on the %s model:", name, model.__name__) - raise From c82bbe2fb2dd0fcfccb4a746e611396a2dc2e8a3 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:34:39 +0800 Subject: [PATCH 25/70] clean up unused imports --- mathesar/imports/csv.py | 6 ------ mathesar/imports/json.py | 7 ------- mathesar/imports/utils.py | 9 +-------- mathesar/tests/conftest.py | 10 +--------- 4 files changed, 2 insertions(+), 30 deletions(-) diff --git a/mathesar/imports/csv.py b/mathesar/imports/csv.py index 182b106501..dc41289b98 100644 --- a/mathesar/imports/csv.py +++ b/mathesar/imports/csv.py @@ -2,14 +2,8 @@ import clevercsv as csv -from db.tables.operations.alter import update_pk_sequence_to_latest -from db.records.operations.insert import insert_records_from_csv -from db.tables.operations.create import create_string_column_table -from db.tables.operations.drop import drop_table from mathesar.errors import InvalidTableError -from mathesar.imports.utils import get_alternate_column_names, process_column_names from db.constants import COLUMN_NAME_TEMPLATE -from psycopg2.errors import IntegrityError, DataError # The user-facing documentation replicates these delimiter characters. If you # change this variable, please update the documentation as well. diff --git a/mathesar/imports/json.py b/mathesar/imports/json.py index 9c699e139d..c956c554af 100644 --- a/mathesar/imports/json.py +++ b/mathesar/imports/json.py @@ -1,16 +1,9 @@ import json from json.decoder import JSONDecodeError -from db.tables.operations.alter import update_pk_sequence_to_latest -from db.records.operations.insert import insert_records_from_json -from db.tables.operations.create import create_string_column_table -from db.tables.operations.drop import drop_table from mathesar.api.exceptions.database_exceptions import ( exceptions as database_api_exceptions ) -from mathesar.imports.utils import get_alternate_column_names, process_column_names -from psycopg2.errors import IntegrityError, DataError -from sqlalchemy.exc import IntegrityError as sqlalchemy_integrity_error def is_valid_json(data): diff --git a/mathesar/imports/utils.py b/mathesar/imports/utils.py index 19b87cdaf9..a1d683dc9c 100644 --- a/mathesar/imports/utils.py +++ b/mathesar/imports/utils.py @@ -1,5 +1,5 @@ from db.identifiers import truncate_if_necessary -from db.constants import COLUMN_NAME_TEMPLATE, ID, ID_ORIGINAL +from db.constants import COLUMN_NAME_TEMPLATE def process_column_names(column_names): @@ -19,10 +19,3 @@ def process_column_names(column_names): in enumerate(column_names) ) return list(column_names) - - -def get_alternate_column_names(column_names): - return [ - fieldname if fieldname != ID else ID_ORIGINAL - for fieldname in column_names - ] diff --git a/mathesar/tests/conftest.py b/mathesar/tests/conftest.py index c5c1e30a50..ebd522c10f 100644 --- a/mathesar/tests/conftest.py +++ b/mathesar/tests/conftest.py @@ -5,20 +5,12 @@ import responses from copy import deepcopy -from django.core.files import File -from django.core.cache import cache from django.conf import settings -from django.db import connection as dj_connection from rest_framework.test import APIClient -from db.tables.operations.create import 
create_mathesar_table as actual_create_mathesar_table -from db.schemas.utils import get_schema_oid_from_name - -import mathesar.tests.conftest -from mathesar.models.base import DataFile from mathesar.models.users import User -from fixtures.utils import create_scoped_fixtures, get_fixture_value +from fixtures.utils import create_scoped_fixtures @pytest.fixture From a072b434951719a51759b12a3b0263162dc0ba1f Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:40:56 +0800 Subject: [PATCH 26/70] fix import_ import --- db/tables/operations/import_.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/db/tables/operations/import_.py b/db/tables/operations/import_.py index 0748950fa8..a714d6d998 100644 --- a/db/tables/operations/import_.py +++ b/db/tables/operations/import_.py @@ -8,7 +8,8 @@ from db.tables.operations.create import prepare_table_for_import from db.encoding_utils import get_sql_compatible_encoding from mathesar.models.base import DataFile -from mathesar.imports.csv import get_file_encoding, get_sv_reader, process_column_names +from mathesar.imports.csv import get_file_encoding, get_sv_reader +from mathesar.imports.utils import process_column_names def import_csv(data_file_id, table_name, schema_oid, conn, comment=None): From dfb2f8be2248d655b01812924a1914861ef0d13f Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:41:23 +0800 Subject: [PATCH 27/70] remove unused table alter functions --- db/tables/operations/alter.py | 79 ------------------------ db/tests/tables/operations/test_alter.py | 29 --------- 2 files changed, 108 deletions(-) diff --git a/db/tables/operations/alter.py b/db/tables/operations/alter.py index 72c72ca514..84125ea1ba 100644 --- a/db/tables/operations/alter.py +++ b/db/tables/operations/alter.py @@ -1,55 +1,7 @@ """The functions in this module wrap SQL functions that use `ALTER TABLE`.""" import json -from db import constants from db import connection as db_conn -from db.columns.operations.alter import batch_update_columns - -SUPPORTED_TABLE_ALTER_ARGS = {'name', 'columns', 'description'} - - -def rename_table(name, schema, engine, rename_to): - """ - Change a table's name, returning the command executed. - - Args: - name: original table name - schema: schema where the table lives - engine: SQLAlchemy engine object for connecting. - rename_to: new table name - """ - if name == rename_to: - result = None - else: - result = db_conn.execute_msar_func_with_engine( - engine, 'rename_table', schema, name, rename_to - ).fetchone()[0] - return result - - -def comment_on_table(name, schema, engine, comment): - """ - Change the description of a table, returning command executed. - - Args: - name: The name of the table whose comment we will change. - schema: The schema of the table whose comment we will change. - engine: SQLAlchemy engine object for connecting. - comment: The new comment. Any quotes or special characters must - be escaped. 
- """ - return db_conn.execute_msar_func_with_engine( - engine, 'comment_on_table', schema, name, comment - ).fetchone()[0] - - -def alter_table(table_name, table_oid, schema, engine, update_data): - if 'description' in update_data: - comment_on_table(table_name, schema, engine, update_data['description']) - if 'name' in update_data: - rename_table(table_name, schema, engine, update_data['name']) - if 'columns' in update_data: - batch_update_columns(table_oid, engine, update_data['columns']) def alter_table_on_database(table_oid, table_data_dict, conn): @@ -70,34 +22,3 @@ def alter_table_on_database(table_oid, table_data_dict, conn): return db_conn.exec_msar_func( conn, 'alter_table', table_oid, json.dumps(table_data_dict) ).fetchone()[0] - - -def update_pk_sequence_to_latest(engine, table, connection=None): - """ - Update the primary key sequence to the current maximum. - - This way, the next value inserted will use the next value in the - sequence, avoiding collisions. - - Args: - table_id: The OID of the table whose primary key sequence we'll - update. - col_attnum: The attnum of the primary key column. - """ - schema = table.schema or 'public' - name = table.name - column = table.c[constants.ID].name - if connection is not None: - # The quote wrangling here is temporary; due to SQLAlchemy's query - # builder. - db_conn.execute_msar_func_with_psycopg2_conn( - connection, - 'update_pk_sequence_to_latest', - f"'{schema}'", - f"'{name}'", - f"'{column}'", - ).fetchone()[0] - else: - db_conn.execute_msar_func_with_engine( - engine, 'update_pk_sequence_to_latest', schema, name, column - ).fetchone()[0] diff --git a/db/tests/tables/operations/test_alter.py b/db/tests/tables/operations/test_alter.py index af1f0b9b0c..75baea1271 100644 --- a/db/tests/tables/operations/test_alter.py +++ b/db/tests/tables/operations/test_alter.py @@ -4,35 +4,6 @@ import db.tables.operations.alter as tab_alter -def test_rename_table(engine_with_schema): - engine, schema_name = engine_with_schema - with patch.object(tab_alter.db_conn, 'execute_msar_func_with_engine') as mock_exec: - tab_alter.rename_table('rename_me', schema_name, engine, rename_to='renamed') - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "rename_table" - assert call_args[2] == schema_name - assert call_args[3] == "rename_me" - assert call_args[4] == "renamed" - - -def test_comment_on_table(engine_with_schema): - engine, schema_name = engine_with_schema - with patch.object(tab_alter.db_conn, 'execute_msar_func_with_engine') as mock_exec: - tab_alter.comment_on_table( - 'comment_on_me', - schema_name, - engine=engine, - comment='This is a comment' - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "comment_on_table" - assert call_args[2] == schema_name - assert call_args[3] == "comment_on_me" - assert call_args[4] == "This is a comment" - - def test_alter_table(): with patch.object(tab_alter.db_conn, 'exec_msar_func') as mock_exec: tab_alter.alter_table_on_database( From 14b890f1bd16d78e30a9abbb35e706ff4bac5d3e Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 13:44:24 +0800 Subject: [PATCH 28/70] remove unused table merging function --- db/tables/operations/merge.py | 47 --------- db/tests/tables/operations/test_merge.py | 116 ----------------------- 2 files changed, 163 deletions(-) delete mode 100644 db/tables/operations/merge.py delete mode 100644 db/tests/tables/operations/test_merge.py diff --git 
a/db/tables/operations/merge.py b/db/tables/operations/merge.py deleted file mode 100644 index 162f1d36d2..0000000000 --- a/db/tables/operations/merge.py +++ /dev/null @@ -1,47 +0,0 @@ -from sqlalchemy import select - -from db.columns.base import MathesarColumn -from db.tables.operations.create import create_mathesar_table -from db.tables.operations.select import reflect_table -from db.metadata import get_empty_metadata - - -def merge_tables(table_name_one, table_name_two, merged_table_name, schema, engine, drop_original_tables=False): - """ - This specifically undoes the `extract_columns_from_table` (up to - unique rows). It may not work in other contexts (yet). - """ - # TODO reuse metadata - metadata = get_empty_metadata() - table_one = reflect_table(table_name_one, schema, engine, metadata=metadata) - table_two = reflect_table(table_name_two, schema, engine, metadata=metadata) - merge_join = table_one.join(table_two) - referencing_columns = [ - col for col in [merge_join.onclause.left, merge_join.onclause.right] - if col.foreign_keys - ] - merged_columns_all = [ - MathesarColumn.from_column(col) - for col in list(table_one.columns) + list(table_two.columns) - if col not in referencing_columns - ] - merged_columns = [col for col in merged_columns_all if not col.is_default] - with engine.begin() as conn: - merged_table = create_mathesar_table( - merged_table_name, schema, merged_columns, engine, - ) - insert_stmt = merged_table.insert().from_select( - [col.name for col in merged_columns], - select(merged_columns, distinct=True).select_from(merge_join) - ) - conn.execute(insert_stmt) - - if drop_original_tables: - if table_one.foreign_keys: - table_one.drop(bind=engine) - table_two.drop(bind=engine) - else: - table_two.drop(bind=engine) - table_one.drop(bind=engine) - - return merged_table diff --git a/db/tests/tables/operations/test_merge.py b/db/tests/tables/operations/test_merge.py deleted file mode 100644 index d4d9042758..0000000000 --- a/db/tests/tables/operations/test_merge.py +++ /dev/null @@ -1,116 +0,0 @@ -import pytest -from sqlalchemy import MetaData, select - -from db.columns.defaults import DEFAULT_COLUMNS -from db.tables.operations.merge import merge_tables - - -@pytest.mark.skip(reason="Fixture needs to be refactored to return initial column set. " - "Splitting a table modifies the original table, so it is not possible to get the initial columns. 
") -def test_merge_columns_undoes_extract_columns_ddl_rem_ext(extracted_remainder_roster): - extracted, remainder, engine, schema = extracted_remainder_roster - merge_tables( - remainder.name, - extracted.name, - "Merged Roster", - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - merged = metadata.tables[f"{schema}.Merged Roster"] - expect_merged_names = sorted([col.name for col in remainder.columns]) - actual_merged_names = sorted([col.name for col in merged.columns]) - assert expect_merged_names == actual_merged_names - - -@pytest.mark.skip(reason="Fixture needs to be refactored to return initial column set.") -def test_merge_columns_undoes_extract_columns_ddl_ext_rem(extracted_remainder_roster): - extracted, remainder, engine, schema = extracted_remainder_roster - merge_tables( - extracted.name, - remainder.name, - "Merged Roster", - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - merged = metadata.tables[f"{schema}.Merged Roster"] - expect_merged_names = sorted([col.name for col in remainder.columns]) - actual_merged_names = sorted([col.name for col in merged.columns]) - assert expect_merged_names == actual_merged_names - - -@pytest.mark.skip(reason="Fixture needs to be refactored to return initial column set.") -def test_merge_columns_returns_original_data_rem_ext(extracted_remainder_roster): - extracted, remainder, engine, schema = extracted_remainder_roster - merge_tables( - remainder.name, - extracted.name, - "Merged Roster", - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - roster_columns = sorted( - [ - col.name for col in remainder.columns - if col.name not in DEFAULT_COLUMNS - ] - ) - merged = metadata.tables[f"{schema}.Merged Roster"] - merged_columns = sorted( - [ - col.name for col in merged.columns - if col.name not in DEFAULT_COLUMNS - ] - ) - expect_tuple_sel = select( - [remainder.columns[name] for name in roster_columns] - ) - actual_tuple_sel = select( - [merged.columns[name] for name in merged_columns] - ) - with engine.begin() as conn: - expect_tuples = conn.execute(expect_tuple_sel).fetchall() - actual_tuples = conn.execute(actual_tuple_sel).fetchall() - assert sorted(expect_tuples) == sorted(actual_tuples) - - -@pytest.mark.skip(reason="Fixture needs to be refactored to return initial column set.") -def test_merge_columns_returns_original_data_ext_rem(extracted_remainder_roster): - extracted, remainder, engine, schema = extracted_remainder_roster - merge_tables( - extracted.name, - remainder.name, - "Merged Roster", - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - roster_columns = sorted( - [ - col.name for col in remainder.columns - if col.name not in DEFAULT_COLUMNS - ] - ) - merged = metadata.tables[f"{schema}.Merged Roster"] - merged_columns = sorted( - [ - col.name for col in merged.columns - if col.name not in DEFAULT_COLUMNS - ] - ) - expect_tuple_sel = select( - [remainder.columns[name] for name in roster_columns] - ) - actual_tuple_sel = select( - [merged.columns[name] for name in merged_columns] - ) - with engine.begin() as conn: - expect_tuples = conn.execute(expect_tuple_sel).fetchall() - actual_tuples = conn.execute(actual_tuple_sel).fetchall() - assert sorted(expect_tuples) == sorted(actual_tuples) From 73c96c2834e34edc6baf408795c21598f282bc43 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:13:26 +0800 Subject: [PATCH 29/70] remove tests for 
unused functionality --- db/tests/columns/operations/test_alter.py | 104 +----- db/tests/records/operations/test_select.py | 79 ----- db/tests/schemas/operations/test_select.py | 7 - db/tests/tables/operations/test_create.py | 36 -- .../tables/operations/test_infer_types.py | 327 ------------------ 5 files changed, 1 insertion(+), 552 deletions(-) delete mode 100644 db/tests/tables/operations/test_create.py delete mode 100644 db/tests/tables/operations/test_infer_types.py diff --git a/db/tests/columns/operations/test_alter.py b/db/tests/columns/operations/test_alter.py index 78dbeb1abe..bc70a2a5b3 100644 --- a/db/tests/columns/operations/test_alter.py +++ b/db/tests/columns/operations/test_alter.py @@ -4,12 +4,11 @@ from db import constants from db.columns.operations import alter as col_alt -from db.columns.operations.alter import batch_update_columns, rename_column +from db.columns.operations.alter import batch_update_columns from db.columns.operations.select import ( get_column_attnum_from_name, get_column_name_from_attnum, get_columns_attnum_from_names, ) -from db.tables.operations.create import create_mathesar_table from db.tables.operations.select import ( get_oid_from_table, reflect_table, reflect_table_from_oid ) @@ -59,20 +58,6 @@ def test_alter_columns_in_table_basic(): assert json.loads(mock_exec.call_args.args[3]) == expect_json_arg -def _rename_column_and_assert(table, old_col_name, new_col_name, engine): - """ - Renames the colum of a table and assert the change went through - """ - table_oid = get_oid_from_table(table.name, table.schema, engine) - column_attnum = get_column_attnum_from_name(table_oid, old_col_name, engine, metadata=get_empty_metadata()) - with engine.begin() as conn: - rename_column(table_oid, column_attnum, engine, conn, new_col_name) - table = reflect_table(table.name, table.schema, engine, metadata=get_empty_metadata()) - assert new_col_name in table.columns - assert old_col_name not in table.columns - return table - - def _create_pizza_table(engine, schema): table_name = 'Pizzas' cols = [ @@ -109,93 +94,6 @@ def _get_pizza_column_data(table_oid, engine): return column_data -def test_rename_column_and_assert(engine_with_schema): - old_col_name = "col1" - new_col_name = "col2" - table_name = "table_with_columns" - engine, schema = engine_with_schema - metadata = MetaData(bind=engine, schema=schema) - table = Table(table_name, metadata, Column(old_col_name, VARCHAR)) - table.create() - _rename_column_and_assert(table, old_col_name, new_col_name, engine) - - -def test_rename_column_foreign_keys(engine_with_schema): - engine, schema = engine_with_schema - metadata = get_empty_metadata() - table_name = "table_to_split" - columns_list = [ - { - "name": "Filler 1", - "type": {"name": PostgresType.INTEGER.id} - }, - { - "name": "Filler 2", - "type": {"name": PostgresType.INTEGER.id} - } - ] - schema_oid = get_schema_oid_from_name(schema, engine) - create_mathesar_table(engine, table_name, schema_oid, columns_list) - table_oid = get_oid_from_table(table_name, schema, engine) - extracted_cols = ["Filler 1"] - extracted_col_attnums = get_columns_attnum_from_names( - table_oid, extracted_cols, engine, metadata=metadata - ) - extracted_table_oid, remainder_table_oid, fk_attnum = extract_columns_from_table( - table_oid, extracted_col_attnums, "Extracted", schema, engine - ) - remainder = reflect_table_from_oid(remainder_table_oid, engine, metadata) - extracted = reflect_table_from_oid(extracted_table_oid, engine, metadata) - fk_name = 
get_column_name_from_attnum(remainder_table_oid, fk_attnum, engine, metadata) - new_fk_name = "new_" + fk_name - remainder = _rename_column_and_assert(remainder, fk_name, new_fk_name, engine) - - fk = list(remainder.foreign_keys)[0] - assert fk.parent.name == new_fk_name - assert fk.column.table.name == extracted.name - - -def test_rename_column_sequence(engine_with_schema): - old_col_name = constants.ID - new_col_name = "new_" + constants.ID - engine, schema = engine_with_schema - table_name = "table_with_columns" - schema_oid = get_schema_oid_from_name(schema, engine) - table_oid = create_mathesar_table(engine, table_name, schema_oid) - table = reflect_table_from_oid(table_oid, engine, metadata=get_empty_metadata()) - with engine.begin() as conn: - ins = table.insert() - conn.execute(ins) - - table = _rename_column_and_assert(table, old_col_name, new_col_name, engine) - - with engine.begin() as conn: - ins = table.insert() - conn.execute(ins) - slct = select(table) - result = conn.execute(slct) - new_value = result.fetchall()[-1][new_col_name] - assert new_value == 2 - - -def test_rename_column_index(engine_with_schema): - old_col_name = constants.ID - new_col_name = "new_" + constants.ID - engine, schema = engine_with_schema - table_name = "table_with_index" - metadata = MetaData(bind=engine, schema=schema) - table = Table(table_name, metadata, Column(old_col_name, INTEGER, index=True)) - table.create() - - _rename_column_and_assert(table, old_col_name, new_col_name, engine) - - with engine.begin() as conn: - index = engine.dialect.get_indexes(conn, table_name, schema)[0] - index_columns = index["column_names"] - assert old_col_name not in index_columns - assert new_col_name in index_columns - - def test_batch_update_columns_no_changes(engine_with_schema): engine, schema = engine_with_schema table = _create_pizza_table(engine, schema) diff --git a/db/tests/records/operations/test_select.py b/db/tests/records/operations/test_select.py index 8b58fae0b7..df121135df 100644 --- a/db/tests/records/operations/test_select.py +++ b/db/tests/records/operations/test_select.py @@ -1,7 +1,6 @@ from decimal import Decimal from collections import Counter from db.records.operations.select import get_records, get_column_cast_records -from db.tables.operations.create import create_mathesar_table from db.types.base import PostgresType from db.schemas.utils import get_schema_oid_from_name from db.metadata import get_empty_metadata @@ -27,84 +26,6 @@ def test_get_records_gets_limited_offset_records(roster_table_obj): assert len(offset_records) == 10 and offset_records[0] == base_records[5] -def test_get_column_cast_records(engine_with_schema): - COL1 = "col1" - COL2 = "col2" - col1 = { - "name": COL1, - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - col2 = { - "name": COL2, - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - column_list = [col1, col2] - engine, schema = engine_with_schema - table_name = "table_with_columns" - schema_oid = get_schema_oid_from_name(schema, engine) - table_oid = create_mathesar_table( - engine, table_name, schema_oid, column_list - ) - table = reflect_table_from_oid(table_oid, engine, metadata=get_empty_metadata()) - ins = table.insert().values( - [{COL1: 'one', COL2: 1}, {COL1: 'two', COL2: 2}] - ) - with engine.begin() as conn: - conn.execute(ins) - COL1_MOD = COL1 + "_mod" - COL2_MOD = COL2 + "_mod" - column_definitions = [ - {"name": "id", "type": PostgresType.INTEGER.id}, - {"name": COL1_MOD, "type": PostgresType.CHARACTER_VARYING.id}, - {"name": 
COL2_MOD, "type": PostgresType.NUMERIC.id}, - ] - records = get_column_cast_records(engine, table, column_definitions) - for record in records: - assert ( - type(record[COL1 + "_mod"]) is str - and type(record[COL2 + "_mod"]) is Decimal - ) - - -def test_get_column_cast_records_options(engine_with_schema): - COL1 = "col1" - COL2 = "col2" - col1 = { - "name": COL1, - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - col2 = { - "name": COL2, - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - column_list = [col1, col2] - engine, schema = engine_with_schema - table_name = "table_with_columns" - schema_oid = get_schema_oid_from_name(schema, engine) - table_oid = create_mathesar_table( - engine, table_name, schema_oid, column_list - ) - table = reflect_table_from_oid(table_oid, engine, metadata=get_empty_metadata()) - ins = table.insert().values( - [{COL1: 'one', COL2: 1}, {COL1: 'two', COL2: 2}] - ) - with engine.begin() as conn: - conn.execute(ins) - COL1_MOD = COL1 + "_mod" - COL2_MOD = COL2 + "_mod" - column_definitions = [ - {"name": "id", "type": PostgresType.INTEGER.id}, - {"name": COL1_MOD, "type": PostgresType.CHARACTER_VARYING.id}, - {"name": COL2_MOD, "type": PostgresType.NUMERIC.id, "type_options": {"precision": 5, "scale": 2}}, - ] - records = get_column_cast_records(engine, table, column_definitions) - for record in records: - assert ( - type(record[COL1 + "_mod"]) is str - and type(record[COL2 + "_mod"]) is Decimal - ) - - def test_get_records_duplicate_only(roster_table_obj): roster, engine = roster_table_obj duplicate_only = ["Grade", "Subject"] diff --git a/db/tests/schemas/operations/test_select.py b/db/tests/schemas/operations/test_select.py index 35854d66ea..ea0efeec8f 100644 --- a/db/tests/schemas/operations/test_select.py +++ b/db/tests/schemas/operations/test_select.py @@ -2,7 +2,6 @@ from sqlalchemy import select, Table, MetaData, text from db.constants import TYPES_SCHEMA -from db.tables.operations import infer_types from db.schemas.operations import select as ssel @@ -30,12 +29,6 @@ def test_get_mathesar_schemas_with_oids_avoids_types_schema(engine_with_schema): assert all([schema != TYPES_SCHEMA for schema, _ in actual_schemas]) -def test_get_mathesar_schemas_with_oids_avoids_temp_schema(engine_with_schema): - engine, schema = engine_with_schema - actual_schemas = ssel.get_mathesar_schemas_with_oids(engine) - assert all([schema != infer_types.TEMP_SCHEMA for schema, _ in actual_schemas]) - - def test_get_mathesar_schemas_with_oids_gets_correct_oid(engine_with_schema): engine, schema = engine_with_schema metadata = MetaData() diff --git a/db/tests/tables/operations/test_create.py b/db/tests/tables/operations/test_create.py deleted file mode 100644 index ba086f4981..0000000000 --- a/db/tests/tables/operations/test_create.py +++ /dev/null @@ -1,36 +0,0 @@ -from sqlalchemy import select, func -from db.tables.operations.create import create_mathesar_table -from db.tables.operations.select import reflect_table_from_oid -from db.schemas.utils import get_schema_oid_from_name -from db.metadata import get_empty_metadata - - -def test_table_creation_doesnt_reuse_defaults(engine_with_schema): - column_list = [] - engine, schema = engine_with_schema - schema_oid = get_schema_oid_from_name(schema, engine) - t1_oid = create_mathesar_table(engine, "t1", schema_oid, column_list) - t2_oid = create_mathesar_table(engine, "t2", schema_oid, column_list) - t1 = reflect_table_from_oid(t1_oid, engine, metadata=get_empty_metadata()) - t2 = reflect_table_from_oid(t2_oid, 
engine, metadata=get_empty_metadata()) - assert all( - [ - c1.name == c2.name and c1 != c2 - for c1, c2 in zip(t1.columns, t2.columns) - ] - ) - - -def test_table_creation_adds_comment(engine_with_schema): - engine, schema = engine_with_schema - column_list = [] - expect_comment = 'mytable comment goes here!!' - schema_oid = get_schema_oid_from_name(schema, engine) - table_oid = create_mathesar_table( - engine, 'mytable', schema_oid, column_list, comment=expect_comment, - ) - with engine.begin() as conn: - res = conn.execute(select(func.obj_description(table_oid, 'pg_class'))) - actual_comment = res.fetchone()[0] - - assert actual_comment == expect_comment diff --git a/db/tests/tables/operations/test_infer_types.py b/db/tests/tables/operations/test_infer_types.py deleted file mode 100644 index 99434b0725..0000000000 --- a/db/tests/tables/operations/test_infer_types.py +++ /dev/null @@ -1,327 +0,0 @@ -import pytest -from unittest.mock import call, patch -from sqlalchemy import Column, MetaData, Table, select - -from db.columns.operations.infer_types import infer_column_type -from db.tables.operations import infer_types as infer_operations -from db.tables.operations.create import create_mathesar_table -from db.types.base import PostgresType, MathesarCustomType -from db.types.operations.convert import get_db_type_enum_from_class -from db.schemas.utils import get_schema_oid_from_name - - -type_data_list = [ - ( - PostgresType.TEXT, - [ - "3.14", - "1,41", - "149,600,000.00", - "4.543.000.000,005", - "13 800 000 000,00", - "7,53,00,00,000.0", - "140'004'453.0", - "-3.14", - "-1,41", - "-149,600,000.00", - "-4.543.000.000,005", - "-13 800 000 000,00", - "-7,53,00,00,000.0", - "-140'004'453.0" - ], - PostgresType.NUMERIC - ), - ( - PostgresType.TEXT, - [], - PostgresType.TEXT, - ), - ( - PostgresType.TEXT, - ["1.0"], - PostgresType.NUMERIC - ), - ( - PostgresType.TEXT, - ["1"], - PostgresType.BOOLEAN - ), - ( - PostgresType.TEXT, - ["0"], - PostgresType.BOOLEAN - ), - ( - PostgresType.TEXT, - ["1", "0"], - PostgresType.BOOLEAN - ), - ( - PostgresType.NUMERIC, - [0, 2, 1, 0], - PostgresType.NUMERIC - ), - ( - PostgresType.NUMERIC, - [0, 1, 1, 0], - PostgresType.NUMERIC, - ), - ( - PostgresType.TEXT, - ["t", "false", "true", "f", "f"], - PostgresType.BOOLEAN - ), - ( - PostgresType.TEXT, - ["t", "false", "2", "0"], - PostgresType.TEXT - ), - ( - PostgresType.TEXT, - ["a", "cat", "mat", "bat"], - PostgresType.TEXT - ), - ( - PostgresType.TEXT, - ["2", "1", "0", "0"], - PostgresType.NUMERIC - ), - ( - PostgresType.TEXT, - ["$2", "$1", "$0"], - MathesarCustomType.MATHESAR_MONEY - ), - ( - PostgresType.TEXT, - ["2000-01-12", "6/23/2004", "May-2007-29", "May-2007-29 00:00:00+0", "20200909"], - PostgresType.DATE - ), - ( - PostgresType.TEXT, - ["9:24+01", "23:12", "03:04:05", "3:4:5"], - PostgresType.TIME_WITHOUT_TIME_ZONE - ), - ( - PostgresType.TEXT, - ["2000-01-12 9:24", "6/23/2004 23:12", "May-2007-29 03:04:05", "May-2007-29 5:00:00+0", "May-2007-29", "20200909 3:4:5"], - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE - ), - ( - PostgresType.TEXT, - ["2000-01-12 9:24-3", "6/23/2004 23:12+01", "May-2007-29 03:04:05", "May-2007-29", "20200909 3:4:5+01:30"], - PostgresType.TIMESTAMP_WITH_TIME_ZONE - ), - ( - PostgresType.TEXT, - ["alice@example.com", "bob@example.com", "jon.doe@example.ca"], - MathesarCustomType.EMAIL - ), - ( - PostgresType.TEXT, - [ - "https://centerofci.org", - "ldap://[2001:db8::7]/c=GB?objectClass?one" - "mailto:John.Doe@example.com", - "news:comp.infosystems.www.servers.unix", - 
"tel:+1-816-555-1212", - "telnet://192.0.2.16:80/", - "urn:oasis:names:specification:docbook:dtd:xml:4.1.2", - "centerofci.org", - "nasa.gov", - "lwn.net", - "github.com", - ], - MathesarCustomType.URI - ), -] - - -def create_test_table(engine, schema, table_name, column_name, column_type, values): - metadata = MetaData(bind=engine) - column_sa_type = column_type.get_sa_class(engine) - input_table = Table( - table_name, - metadata, - Column(column_name, column_sa_type), - schema=schema - ) - input_table.create() - for value in values: - ins = input_table.insert(values=(value,)) - with engine.begin() as conn: - conn.execute(ins) - return input_table - - -@pytest.mark.parametrize("initial_type,value_list,expected_type", type_data_list) -def test_type_inference(engine_with_schema, initial_type, value_list, expected_type): - engine, schema = engine_with_schema - TEST_TABLE = "test_table" - TEST_COLUMN = "test_column" - create_test_table( - engine, schema, TEST_TABLE, TEST_COLUMN, initial_type, value_list - ) - - infer_column_type( - schema, - TEST_TABLE, - TEST_COLUMN, - engine - ) - - with engine.begin(): - metadata = MetaData(bind=engine, schema=schema) - reflected_type_sa_class = Table( - TEST_TABLE, metadata, schema=schema, autoload_with=engine, - ).columns[TEST_COLUMN].type.__class__ - reflected_type = get_db_type_enum_from_class(reflected_type_sa_class) - assert reflected_type == expected_type - - -@pytest.mark.parametrize("initial_type,value_list,expected_type", type_data_list) -def test_table_inference(engine_with_schema, initial_type, value_list, expected_type): - engine, schema = engine_with_schema - test_table = "test_table" - test_column = "test_column" - input_table = create_test_table( - engine, schema, test_table, test_column, initial_type, value_list - ) - - with engine.begin() as conn: - results = conn.execute(select(input_table)) - original_table = results.fetchall() - - inferred_types = infer_operations.infer_table_column_types( - schema, - test_table, - engine - ) - assert inferred_types == (expected_type,) - - # Ensure the original table is untouced - with engine.begin() as conn: - results = conn.execute(select(input_table)) - new_table = results.fetchall() - assert original_table == new_table - - -def test_table_inference_drop_temp(engine_with_schema): - engine, schema = engine_with_schema - test_table = "test_table" - test_column = "test_column" - db_type = PostgresType.NUMERIC - values = [0, 1, 2, 3, 4] - create_test_table(engine, schema, test_table, test_column, db_type, values) - - # Ensure that the temp table is deleted even when the function errors - with patch.object(infer_operations, "infer_column_type") as mock_infer: - mock_infer.side_effect = Exception() - with pytest.raises(Exception): - infer_operations.infer_table_column_types(schema, test_table, engine) - infer_operations.infer_table_column_types(schema, test_table, engine) - - -def test_table_inference_same_name(engine_with_schema): - engine, schema = engine_with_schema - test_table = "temp_table" - test_column = "test_column" - db_type = PostgresType.NUMERIC - values = [0, 1, 2, 3, 4] - table = create_test_table(engine, schema, test_table, test_column, db_type, values) - with engine.begin() as conn: - results = conn.execute(select(table)) - original_table = results.fetchall() - infer_operations.infer_table_column_types(schema, test_table, engine) - with engine.begin() as conn: - results = conn.execute(select(table)) - new_table = results.fetchall() - assert original_table == new_table - - -def 
test_infer_table_column_types_doesnt_touch_defaults(engine_with_schema): - column_list = [] - engine, schema = engine_with_schema - table_name = "t1" - schema_oid = get_schema_oid_from_name(schema, engine) - create_mathesar_table( - engine, table_name, schema_oid, column_list - ) - with patch.object(infer_operations, "infer_column_type") as mock_infer: - infer_operations.update_table_column_types( - schema, - table_name, - engine - ) - mock_infer.assert_not_called() - - -def test_update_table_column_types_infers_non_default_types(engine_with_schema): - col1 = { - "name": "col1", - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - col2 = { - "name": "col2", - "type": {"name": PostgresType.CHARACTER_VARYING.id} - } - column_list = [col1, col2] - engine, schema = engine_with_schema - metadata = MetaData() - table_name = "table_with_columns" - schema_oid = get_schema_oid_from_name(schema, engine) - create_mathesar_table( - engine, table_name, schema_oid, column_list - ) - with patch.object(infer_operations, "infer_column_type") as mock_infer: - infer_operations.update_table_column_types( - schema, - table_name, - engine, - metadata - ) - expect_calls = [ - call( - schema, - table_name, - col1["name"], - engine, - metadata=metadata, - columns_might_have_defaults=True, - ), - call( - schema, - table_name, - col2["name"], - engine, - metadata=metadata, - columns_might_have_defaults=True, - ), - ] - mock_infer.assert_has_calls(expect_calls) - - -def test_update_table_column_types_skips_pkey_columns(engine_with_schema): - engine, schema = engine_with_schema - table_name = "t1" - schema_oid = get_schema_oid_from_name(schema, engine) - create_mathesar_table( - engine, table_name, schema_oid - ) - with patch.object(infer_operations, "infer_column_type") as mock_infer: - infer_operations.update_table_column_types( - schema, - table_name, - engine - ) - mock_infer.assert_not_called() - - -def test_update_table_column_types_skips_fkey_columns(extracted_remainder_roster, roster_fkey_col): - _, remainder, engine, schema = extracted_remainder_roster - with patch.object(infer_operations, "infer_column_type") as mock_infer: - infer_operations.update_table_column_types( - schema, - remainder.name, - engine - ) - assert all([call_[1][2] != roster_fkey_col for call_ in mock_infer.mock_calls]) From 3c4ef3d1c5d2a8e9477ccb4d1e9fd53873206b05 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:13:55 +0800 Subject: [PATCH 30/70] remove python-layer type inference --- db/columns/operations/infer_types.py | 168 --------------------------- db/tables/operations/infer_types.py | 67 ----------- 2 files changed, 235 deletions(-) delete mode 100644 db/columns/operations/infer_types.py diff --git a/db/columns/operations/infer_types.py b/db/columns/operations/infer_types.py deleted file mode 100644 index 6f15396b95..0000000000 --- a/db/columns/operations/infer_types.py +++ /dev/null @@ -1,168 +0,0 @@ -import logging - -from sqlalchemy import VARCHAR, TEXT, Text -from sqlalchemy.exc import DatabaseError - -from db.columns.exceptions import DagCycleError -from db.columns.operations.alter import alter_column_type -from db.columns.operations.select import ( - determine_whether_column_contains_data, - get_column_attnum_from_name -) -from db.tables.operations.select import get_oid_from_table, reflect_table -from db.types.base import PostgresType, MathesarCustomType, get_available_known_db_types -from db.metadata import get_empty_metadata - - -logger = logging.getLogger(__name__) - -MAX_INFERENCE_DAG_DEPTH = 
100 - -TYPE_INFERENCE_DAG = { - PostgresType.BOOLEAN: [], - MathesarCustomType.EMAIL: [], - PostgresType.INTERVAL: [], - PostgresType.NUMERIC: [], - PostgresType.TEXT: [ - PostgresType.BOOLEAN, - PostgresType.DATE, - PostgresType.NUMERIC, - MathesarCustomType.MATHESAR_MONEY, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, - PostgresType.TIMESTAMP_WITH_TIME_ZONE, - # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much - # sense without additional date information. See postgres documentation - # for further details: - # https://www.postgresql.org/docs/13/datatype-datetime.html - PostgresType.TIME_WITHOUT_TIME_ZONE, - PostgresType.INTERVAL, - MathesarCustomType.EMAIL, - MathesarCustomType.MATHESAR_JSON_ARRAY, - MathesarCustomType.MATHESAR_JSON_OBJECT, - MathesarCustomType.URI, - ], -} - - -# TODO This logic should be moved to the DB ASAP for speed and clarity. -def infer_column_type( - schema, - table_name, - column_name, - engine, - depth=0, - type_inference_dag=None, - metadata=None, - columns_might_have_defaults=True, -): - """ - Attempt to cast the column to the best type for it. - - Returns the resulting column type's class. - - Algorithm: - 1. Check for any data in the column. - - If the column is empty, return the column's current type - class. - 2. reflect the column's type class. - 3. Use _get_type_classes_mapped_to_dag_nodes to map it to a - TYPE_INFERENCE_DAG key. - 4. Look up the sequence of types referred to by that key on the - TYPE_INFERENCE_DAG. - - If there's no such key on the TYPE_INFERENCE_DAG dict, or if - its value is an empty list, return the current column's type - class. - 5. Iterate through that sequence of types trying to alter the - column's type to them. - - If the column's type is altered successfully, break - iteration and return the output of running infer_column_type - again (trigger tail recursion). - - If none of the column type alterations succeed, return the - current column's type class. - """ - metadata = metadata if metadata else get_empty_metadata() - - if type_inference_dag is None: - type_inference_dag = TYPE_INFERENCE_DAG - if depth > MAX_INFERENCE_DAG_DEPTH: - raise DagCycleError("The type_inference_dag likely has a cycle") - type_classes_to_dag_nodes = _get_type_classes_mapped_to_dag_nodes(engine) - column_type_class = _get_column_class( - engine=engine, - schema=schema, - table_name=table_name, - column_name=column_name, - metadata=metadata, - ) - table_oid = get_oid_from_table(table_name, schema, engine) - column_contains_data = determine_whether_column_contains_data( - table_oid, column_name, engine, metadata - ) - # We short-circuit in this case since we can't infer type without data. 
- if not column_contains_data: - return column_type_class - - # a DAG node will be a DatabaseType Enum - dag_node = type_classes_to_dag_nodes.get(column_type_class) - logger.debug(f"dag_node: {dag_node}") - types_to_cast_to = type_inference_dag.get(dag_node, []) - column_attnum = get_column_attnum_from_name(table_oid, column_name, engine, metadata) - for db_type in types_to_cast_to: - try: - with engine.begin() as conn: - alter_column_type( - table_oid, - column_attnum, - engine, - conn, - db_type, - ) - logger.info(f"Column {column_name} altered to type {db_type.id}") - column_type_class = infer_column_type( - schema, - table_name, - column_name, - engine, - depth=depth + 1, - type_inference_dag=type_inference_dag, - metadata=metadata - ) - break - # It's expected we catch this error when the test to see whether - # a type is appropriate for a column fails. - except DatabaseError: - logger.info( - f"Cannot alter column {column_name} to type {db_type.id}" - ) - return column_type_class - - -def _get_column_class(engine, schema, table_name, column_name, metadata): - # Metadata can be reused because reflect_table fetches the table details again - table = reflect_table(table_name, schema, engine, metadata=metadata) - column_type_class = table.columns[column_name].type.__class__ - return column_type_class - - -def _get_type_classes_mapped_to_dag_nodes(engine): - """ - Returns SA type classes mapped to TYPE_INFERENCE_DAG nodes. - - Purpose of this mapping is to find the wanted position on the TYPE_INFERENCE_DAG, given a - column's SA type class. - """ - type_classes_to_enums = { - db_type.get_sa_class(engine): db_type - for db_type - in get_available_known_db_types(engine) - } - # NOTE: below dict merge sets some keys to PostgresType.TEXT, which, in infer_column_type, - # maps these classes to the types grouped under TYPE_INFERENCE_DAG[PostgresType.TEXT]. - type_classes_to_dag_nodes = ( - type_classes_to_enums | { - Text: PostgresType.TEXT, - TEXT: PostgresType.TEXT, - VARCHAR: PostgresType.TEXT, - } - ) - return type_classes_to_dag_nodes diff --git a/db/tables/operations/infer_types.py b/db/tables/operations/infer_types.py index 54cc2b5b59..a77a848c0c 100644 --- a/db/tables/operations/infer_types.py +++ b/db/tables/operations/infer_types.py @@ -4,7 +4,6 @@ from db import constants from db.columns.base import MathesarColumn -from db.columns.operations.infer_types import infer_column_type from db.connection import exec_msar_func from db.schemas.operations.create import create_schema_if_not_exists_via_sql_alchemy from db.tables.operations.create import CreateTableAs @@ -13,10 +12,6 @@ from db.metadata import get_empty_metadata -TEMP_SCHEMA = constants.INFERENCE_SCHEMA -TEMP_TABLE = f"{constants.MATHESAR_PREFIX}temp_table_%s" - - def infer_table_column_data_types(conn, table_oid): """ Infer the best type for each column in the table. 
@@ -34,65 +29,3 @@ def infer_table_column_data_types(conn, table_oid): return exec_msar_func( conn, 'infer_table_column_data_types', table_oid ).fetchone()[0] - - -def update_table_column_types(schema, table_name, engine, metadata=None, columns_might_have_defaults=True): - metadata = metadata if metadata else get_empty_metadata() - table = reflect_table(table_name, schema, engine, metadata=metadata) - # we only want to infer (modify) the type of non-default columns - inferable_column_names = ( - col.name for col in table.columns - if not MathesarColumn.from_column(col).is_default - and not col.primary_key - and not col.foreign_keys - ) - for column_name in inferable_column_names: - infer_column_type( - schema, - table_name, - column_name, - engine, - metadata=metadata, - columns_might_have_defaults=columns_might_have_defaults, - ) - - -# TODO consider returning a mapping of column identifiers to types -def infer_table_column_types(schema, table_name, engine, metadata=None, columns_might_have_defaults=True): - metadata = metadata if metadata else get_empty_metadata() - table = reflect_table(table_name, schema, engine, metadata=metadata) - - temp_name = TEMP_TABLE % (int(time())) - create_schema_if_not_exists_via_sql_alchemy(TEMP_SCHEMA, engine) - with engine.begin() as conn: - while engine.dialect.has_table(conn, temp_name, schema=TEMP_SCHEMA): - temp_name = TEMP_TABLE.format(int(time())) - - full_temp_name = f"{TEMP_SCHEMA}.{temp_name}" - - select_table = select(table) - with engine.begin() as conn: - conn.execute(CreateTableAs(full_temp_name, select_table)) - temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine, metadata=metadata) - - try: - update_table_column_types( - TEMP_SCHEMA, - temp_table.name, - engine=engine, - metadata=metadata, - columns_might_have_defaults=columns_might_have_defaults, - ) - except Exception as e: - # Ensure the temp table is deleted - temp_table.drop(bind=engine) - raise e - else: - temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine, metadata=metadata) - types = tuple( - get_db_type_enum_from_class(c.type.__class__) - for c - in temp_table.columns - ) - temp_table.drop(bind=engine) - return types From 87c1ddc83b2afbf39ee95fe42a8e9354cd4f6c5d Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:22:36 +0800 Subject: [PATCH 31/70] remove unused imports --- db/tables/operations/infer_types.py | 11 ----------- db/tests/columns/operations/test_alter.py | 12 +++--------- db/tests/records/operations/test_select.py | 7 +------ 3 files changed, 4 insertions(+), 26 deletions(-) diff --git a/db/tables/operations/infer_types.py b/db/tables/operations/infer_types.py index a77a848c0c..db40563984 100644 --- a/db/tables/operations/infer_types.py +++ b/db/tables/operations/infer_types.py @@ -1,15 +1,4 @@ -from time import time - -from sqlalchemy import select - -from db import constants -from db.columns.base import MathesarColumn from db.connection import exec_msar_func -from db.schemas.operations.create import create_schema_if_not_exists_via_sql_alchemy -from db.tables.operations.create import CreateTableAs -from db.tables.operations.select import reflect_table -from db.types.operations.convert import get_db_type_enum_from_class -from db.metadata import get_empty_metadata def infer_table_column_data_types(conn, table_oid): diff --git a/db/tests/columns/operations/test_alter.py b/db/tests/columns/operations/test_alter.py index bc70a2a5b3..993fdafe2a 100644 --- a/db/tests/columns/operations/test_alter.py +++ b/db/tests/columns/operations/test_alter.py 
@@ -1,23 +1,17 @@ import json from unittest.mock import patch -from sqlalchemy import Column, select, Table, MetaData, VARCHAR, INTEGER +from sqlalchemy import Column, VARCHAR -from db import constants from db.columns.operations import alter as col_alt from db.columns.operations.alter import batch_update_columns -from db.columns.operations.select import ( - get_column_attnum_from_name, get_column_name_from_attnum, - get_columns_attnum_from_names, -) +from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import ( - get_oid_from_table, reflect_table, reflect_table_from_oid + get_oid_from_table, reflect_table ) -from db.tables.operations.split import extract_columns_from_table from db.tests.columns.utils import create_test_table from db.types.base import PostgresType from db.types.operations.convert import get_db_type_enum_from_class from db.metadata import get_empty_metadata -from db.schemas.utils import get_schema_oid_from_name def test_alter_columns_in_table_basic(): diff --git a/db/tests/records/operations/test_select.py b/db/tests/records/operations/test_select.py index df121135df..ac9bc71f02 100644 --- a/db/tests/records/operations/test_select.py +++ b/db/tests/records/operations/test_select.py @@ -1,10 +1,5 @@ -from decimal import Decimal from collections import Counter -from db.records.operations.select import get_records, get_column_cast_records -from db.types.base import PostgresType -from db.schemas.utils import get_schema_oid_from_name -from db.metadata import get_empty_metadata -from db.tables.operations.select import reflect_table_from_oid +from db.records.operations.select import get_records def test_get_records_gets_all_records(roster_table_obj): From 36f580baf1b6556309988e7c3daa11e87f82590a Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:25:16 +0800 Subject: [PATCH 32/70] remove unused SQLAlchemy table creators --- db/tables/operations/create.py | 65 +--------------------------------- 1 file changed, 1 insertion(+), 64 deletions(-) diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py index 13fa5802a7..36a962d598 100644 --- a/db/tables/operations/create.py +++ b/db/tables/operations/create.py @@ -1,37 +1,6 @@ -from sqlalchemy.ext import compiler -from sqlalchemy.schema import DDLElement import json -from db.connection import execute_msar_func_with_engine, exec_msar_func +from db.connection import exec_msar_func from db.types.base import PostgresType -from db.tables.operations.select import reflect_table_from_oid -from db.metadata import get_empty_metadata - - -def create_mathesar_table(engine, table_name, schema_oid, columns=[], constraints=[], comment=None): - """ - Creates a table with a default id column. - - Args: - engine: SQLAlchemy engine object for connecting. - table_name: Name of the table to be created. - schema_oid: The OID of the schema where the table will be created. - columns: The columns dict for the new table, in order. (optional) - constraints: The constraints dict for the new table. (optional) - comment: The comment for the new table. (optional) - - Returns: - Returns the OID of the created table. - """ - return execute_msar_func_with_engine( - engine, - 'add_mathesar_table', - schema_oid, - table_name, - json.dumps(columns), - json.dumps(constraints), - None, - comment - ).fetchone()[0]["oid"] def create_table_on_database( @@ -69,24 +38,6 @@ def create_table_on_database( ).fetchone()[0] -# TODO stop relying on reflections, instead return oid of the created table. 
-# TODO remove this function -def create_string_column_table(name, schema_oid, column_names, engine, comment=None): - """ - This method creates a Postgres table in the specified schema, with all - columns being String type. - """ - columns_ = [ - { - "name": column_name, - "type": {"name": PostgresType.TEXT.id} - } for column_name in column_names - ] - table_oid = create_mathesar_table(engine, name, schema_oid, columns_, comment=comment) - table = reflect_table_from_oid(table_oid, engine, metadata=get_empty_metadata()) - return table - - def prepare_table_for_import( table_name, schema_oid, @@ -129,17 +80,3 @@ def prepare_table_for_import( import_info['table_oid'], import_info['table_name'] ) - - -class CreateTableAs(DDLElement): - def __init__(self, name, selectable): - self.name = name - self.selectable = selectable - - -@compiler.compiles(CreateTableAs) -def compile_create_table_as(element, compiler, **_): - return "CREATE TABLE %s AS (%s)" % ( - element.name, - compiler.sql_compiler.process(element.selectable, literal_binds=True), - ) From acea97e2d9772236f53ce6758b8e29b9b9b89e40 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:27:23 +0800 Subject: [PATCH 33/70] remove unused SQLAlchemy table dropper --- db/tables/operations/drop.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/db/tables/operations/drop.py b/db/tables/operations/drop.py index 9ffeb170d2..f7fc0523b2 100644 --- a/db/tables/operations/drop.py +++ b/db/tables/operations/drop.py @@ -1,8 +1,4 @@ -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def drop_table(name, schema, engine, cascade=False, if_exists=False): - execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists) +from db.connection import exec_msar_func def drop_table_from_database(table_oid, conn, cascade=False): From d0440f1b1b0aade5ba94e033098cba2a29ef279b Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:31:59 +0800 Subject: [PATCH 34/70] remove unused SQLAlchemy column moving function --- db/tables/operations/move_columns.py | 187 ------------------ .../tables/operations/test_move_columns.py | 154 --------------- 2 files changed, 341 deletions(-) delete mode 100644 db/tests/tables/operations/test_move_columns.py diff --git a/db/tables/operations/move_columns.py b/db/tables/operations/move_columns.py index 714b7fd9be..c6586da757 100644 --- a/db/tables/operations/move_columns.py +++ b/db/tables/operations/move_columns.py @@ -1,14 +1,4 @@ -from sqlalchemy import exists, func, literal, select -from sqlalchemy.dialects.postgresql import insert - -from db import constants from db.connection import exec_msar_func -from db.columns.base import MathesarColumn -from db.columns.operations.alter import batch_alter_table_drop_columns -from db.columns.operations.create import bulk_create_mathesar_column -from db.columns.operations.select import get_column_names_from_attnums -from db.tables.operations.select import reflect_table_from_oid -from db.metadata import get_empty_metadata def move_columns_to_referenced_table(conn, source_table_oid, target_table_oid, move_column_attnums): @@ -19,180 +9,3 @@ def move_columns_to_referenced_table(conn, source_table_oid, target_table_oid, m target_table_oid, move_column_attnums ) - - -def move_columns_between_related_tables( - source_table_oid, - target_table_oid, - column_attnums_to_move, - schema, - engine -): - # TODO reuse metadata - metadata = get_empty_metadata() - source_table = 
reflect_table_from_oid(source_table_oid, engine, metadata=metadata) - target_table = reflect_table_from_oid(target_table_oid, engine, metadata=metadata) - relationship = _find_table_relationship(source_table, target_table) - column_names_to_move = get_column_names_from_attnums(source_table_oid, column_attnums_to_move, engine, metadata=metadata) - moving_columns = [source_table.columns[name] for name in column_names_to_move] - assert _check_columns(relationship, moving_columns) - source_table_reference_column, target_table_reference_column = _get_table_connecting_columns( - relationship, - target_table - ) - extracted_columns = [MathesarColumn.from_column(col) for col in moving_columns] - bulk_create_mathesar_column(engine, target_table_oid, extracted_columns, schema) - # TODO reuse metadata - # Re reflect the target table as we have added a new column - # Metadata needs to be shared so that target table is the same object as the table returned from the relation finding function - target_table = reflect_table_from_oid(target_table_oid, engine, metadata=metadata) - if relationship["referenced"] == target_table: - extracted_columns_update_stmt = _create_move_referrer_table_columns_update_stmt( - source_table, - target_table, - moving_columns, - source_table_reference_column, - ) - else: - extracted_columns_update_stmt = _create_move_referent_table_columns_update_stmt( - source_table, - target_table, - moving_columns, - source_table_reference_column, - target_table_reference_column, - ) - with engine.begin() as conn: - conn.execute(extracted_columns_update_stmt) - deletion_column_data = [ - {'attnum': column_attnum, 'delete': True} - for column_attnum in column_attnums_to_move - ] - batch_alter_table_drop_columns(source_table_oid, deletion_column_data, conn, engine) - # TODO reuse metadata - source_table = reflect_table_from_oid(source_table_oid, engine, metadata=get_empty_metadata()) - return target_table, source_table - - -def _create_move_referent_table_columns_update_stmt( - source_table, - target_table, - columns_to_move, - source_table_reference_column, - target_table_reference_column -): - moved_column_names = [col.name for col in columns_to_move] - extract_cte = select( - source_table - ).cte() - extracted_columns_update_dict = {column_name: extract_cte.c[column_name] for column_name in moved_column_names} - extract_ins = ( - target_table - .update().values(**extracted_columns_update_dict) - .where(target_table.c[target_table_reference_column.name] == extract_cte.c[source_table_reference_column.name]) - ) - - return extract_ins - - -def _create_move_referrer_table_columns_update_stmt( - source_table, - target_table, - columns_to_move, - source_table_reference_column, -): - SPLIT_ID = f"{constants.MATHESAR_PREFIX}_move_column_alias" - moved_column_names = [col.name for col in columns_to_move] - moved_columns = [ - source_table.c[moved_column_name] - for moved_column_name in moved_column_names - ] - # New rank is computed using Existing ranking column + set of columns being moved - ranking_columns = [source_table.c[source_table_reference_column.name]] + moved_columns - target_table_existing_data_column_names = [ - column.name - for column in target_table.columns - if column.name != constants.ID and column.name not in moved_column_names - ] - ranked_target_table_columns = [ - target_table.c[column_name] - for column_name in target_table_existing_data_column_names - ] - ranked_columns = [source_table.c[constants.ID]] + moved_columns + ranked_target_table_columns - # Compute new rank to be 
used as the new foreign key value - rank_cte = select( - [ - *ranked_columns, - func.dense_rank().over(order_by=ranking_columns).label(SPLIT_ID) - ] - ).join(target_table).cte() - new_target_table_data_columns = target_table_existing_data_column_names + moved_column_names - cte_extraction_columns = ( - [rank_cte.columns[SPLIT_ID]] + [rank_cte.columns[n] for n in new_target_table_data_columns] - ) - extract_sel = select( - cte_extraction_columns, - distinct=True - ) - extract_ins_stmt = ( - insert(target_table) - .from_select([constants.ID] + new_target_table_data_columns, extract_sel) - .returning(literal(1)) - ) - extract_ins_cte = extract_ins_stmt.on_conflict_do_update( - index_elements=[target_table.c[constants.ID]], - set_={ - name: extract_ins_stmt.excluded[name] - for name in new_target_table_data_columns} - ).cte() - fk_update_dict = {source_table_reference_column.name: rank_cte.c[SPLIT_ID]} - split_ins = ( - source_table - .update().values(**fk_update_dict). - where( - source_table.c[constants.ID] == rank_cte.c[constants.ID], - exists(extract_ins_cte.select()) - ) - ) - return split_ins - - -def _find_table_relationship(table_one, table_two): - """ - This function takes two tables, and returns a dict defining the direction - of the foreign key constraint relating the tables (if one exists) - """ - one_referencing_two = [ - fkey_constraint for fkey_constraint in table_one.foreign_key_constraints - if fkey_constraint.referred_table == table_two - ] - two_referencing_one = [ - fkey_constraint for fkey_constraint in table_two.foreign_key_constraints - if fkey_constraint.referred_table == table_one - ] - if one_referencing_two and not two_referencing_one: - relationship = {"referencing": table_one, "referenced": table_two, "constraint": one_referencing_two[0]} - elif two_referencing_one and not one_referencing_two: - relationship = {"referencing": table_two, "referenced": table_one, "constraint": two_referencing_one[0]} - else: - relationship = None - return relationship - - -def _check_columns(relationship, moving_columns): - return ( - relationship is not None - and all([not c.foreign_keys for c in moving_columns]) - ) - - -def _get_table_connecting_columns(relationship, target_table): - constraint = relationship['constraint'] - referrer_column = constraint.columns[0] - referent_column = constraint.elements[0].column - if relationship["referenced"] == target_table: - source_table_reference_column = referrer_column - target_table_reference_column = referent_column - else: - source_table_reference_column = referent_column - target_table_reference_column = referrer_column - return source_table_reference_column, target_table_reference_column diff --git a/db/tests/tables/operations/test_move_columns.py b/db/tests/tables/operations/test_move_columns.py deleted file mode 100644 index 8b7f22df8b..0000000000 --- a/db/tests/tables/operations/test_move_columns.py +++ /dev/null @@ -1,154 +0,0 @@ -from sqlalchemy import MetaData, select - -from db.columns.operations.select import get_columns_attnum_from_names -from db.tables.operations.move_columns import move_columns_between_related_tables -from db.tables.operations.select import get_oid_from_table -from db.metadata import get_empty_metadata - - -def test_move_columns_moves_column_from_ext_to_rem(extracted_remainder_roster, roster_extracted_cols): - extracted, remainder, engine, schema = extracted_remainder_roster - moving_col = roster_extracted_cols[0] - extracted_cols = [col.name for col in extracted.columns] - remainder_cols = [col.name for 
col in remainder.columns] - expect_extracted_cols = [ - name for name in extracted_cols if name != moving_col - ] - expect_remainder_cols = remainder_cols + [moving_col] - extracted_name = extracted.name - remainder_name = remainder.name - extracted_oid = get_oid_from_table(extracted_name, schema, engine) - remainder_oid = get_oid_from_table(remainder_name, schema, engine) - column_attnums_to_move = get_columns_attnum_from_names(extracted_oid, [moving_col], engine, metadata=get_empty_metadata()) - move_columns_between_related_tables( - extracted_oid, - remainder_oid, - column_attnums_to_move, - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - new_extracted = metadata.tables[f"{schema}.{extracted_name}"] - new_remainder = metadata.tables[f"{schema}.{remainder_name}"] - actual_extracted_cols = [col.name for col in new_extracted.columns] - actual_remainder_cols = [col.name for col in new_remainder.columns] - assert sorted(actual_extracted_cols) == sorted(expect_extracted_cols) - assert sorted(actual_remainder_cols) == sorted(expect_remainder_cols) - - -def test_move_columns_moves_column_from_rem_to_ext(extracted_remainder_roster): - extracted, remainder, engine, schema = extracted_remainder_roster - extracted_cols = [col.name for col in extracted.columns] - remainder_cols = [col.name for col in remainder.columns] - moving_col = "Grade" - expect_remainder_cols = [ - name for name in remainder_cols if name != moving_col - ] - expect_extracted_cols = extracted_cols + [moving_col] - extracted_name = extracted.name - remainder_name = remainder.name - extracted_oid = get_oid_from_table(extracted_name, schema, engine) - remainder_oid = get_oid_from_table(remainder_name, schema, engine) - column_attnums_to_move = get_columns_attnum_from_names(remainder_oid, [moving_col], engine, metadata=get_empty_metadata()) - move_columns_between_related_tables( - remainder_oid, - extracted_oid, - column_attnums_to_move, - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - new_extracted = metadata.tables[f"{schema}.{extracted_name}"] - new_remainder = metadata.tables[f"{schema}.{remainder_name}"] - actual_extracted_cols = [col.name for col in new_extracted.columns] - actual_remainder_cols = [col.name for col in new_remainder.columns] - assert sorted(actual_extracted_cols) == sorted(expect_extracted_cols) - assert sorted(actual_remainder_cols) == sorted(expect_remainder_cols) - - -def test_move_columns_moves_correct_data_from_ext_to_rem(extracted_remainder_roster, roster_extracted_cols): - extracted, remainder, engine, schema = extracted_remainder_roster - moving_col = roster_extracted_cols[0] - extracted_name = extracted.name - remainder_name = remainder.name - expect_tuple_sel = ( - select(extracted.columns[moving_col]) - .distinct() - ) - with engine.begin() as conn: - expect_tuples = conn.execute(expect_tuple_sel).fetchall() - extracted_oid = get_oid_from_table(extracted_name, schema, engine) - remainder_oid = get_oid_from_table(remainder_name, schema, engine) - column_attnums_to_move = get_columns_attnum_from_names(extracted_oid, [moving_col], engine, metadata=get_empty_metadata()) - move_columns_between_related_tables( - extracted_oid, - remainder_oid, - column_attnums_to_move, - schema, - engine, - ) - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - new_remainder = metadata.tables[f"{schema}.{remainder_name}"] - actual_tuple_sel = select( - [new_remainder.columns[moving_col]], - distinct=True - ) - 
with engine.begin() as conn: - actual_tuples = conn.execute(actual_tuple_sel).fetchall() - assert sorted(expect_tuples) == sorted(actual_tuples) - - -def test_move_columns_moves_correct_data_from_rem_to_extract(extracted_remainder_roster, roster_extracted_cols): - extracted, remainder, engine, schema = extracted_remainder_roster - moving_col = "Grade" - existing_extracted_table_column_names = ['Teacher', 'Teacher Email'] - - # build expected tuple table - existing_extracted_table_columns = [ - extracted.columns[existing_extracted_table_column_name] - for existing_extracted_table_column_name in existing_extracted_table_column_names - ] - expect_tuple_sel = ( - select([*existing_extracted_table_columns, remainder.columns[moving_col]]).join(extracted) - .distinct() - ) - with engine.begin() as conn: - expect_tuples = conn.execute(expect_tuple_sel).fetchall() - - # move columns from "remainder" to "extracted" table - extracted_name = extracted.name - remainder_name = remainder.name - extracted_oid = get_oid_from_table(extracted_name, schema, engine) - remainder_oid = get_oid_from_table(remainder_name, schema, engine) - column_attnums_to_move = get_columns_attnum_from_names(remainder_oid, [moving_col], engine, metadata=get_empty_metadata()) - move_columns_between_related_tables( - remainder_oid, - extracted_oid, - column_attnums_to_move, - schema, - engine, - ) - - # reflect records in "extracted" table after move - metadata = MetaData(bind=engine, schema=schema) - metadata.reflect() - new_extracted = metadata.tables[f"{schema}.{extracted_name}"] - new_existing_extracted_table_columns = [ - new_extracted.columns[existing_extracted_table_column_name] - for existing_extracted_table_column_name in existing_extracted_table_column_names - ] - actual_tuple_sel = select( - [ - *new_existing_extracted_table_columns, - new_extracted.columns[moving_col] - - ], - ) - with engine.begin() as conn: - actual_tuples = conn.execute(actual_tuple_sel).fetchall() - - # check that expected and actual tuple tables match - assert sorted(expect_tuples) == sorted(actual_tuples) From 54ad8e6ea42aa06af9cce425411f207fd8f5fd94 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:36:36 +0800 Subject: [PATCH 35/70] remove unused column extraction function --- db/tables/operations/split.py | 17 +---------------- db/tests/conftest.py | 19 ------------------- 2 files changed, 1 insertion(+), 35 deletions(-) diff --git a/db/tables/operations/split.py b/db/tables/operations/split.py index e3a710d8bb..7ab08f6cbe 100644 --- a/db/tables/operations/split.py +++ b/db/tables/operations/split.py @@ -1,19 +1,4 @@ -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def extract_columns_from_table( - old_table_oid, extracted_column_attnums, extracted_table_name, schema, - engine, relationship_fk_column_name=None -): - curr = execute_msar_func_with_engine( - engine, 'extract_columns_from_table', - old_table_oid, - extracted_column_attnums, - extracted_table_name, - relationship_fk_column_name - ) - extracted_table_oid, new_fkey_attnum = curr.fetchone()[0] - return extracted_table_oid, old_table_oid, new_fkey_attnum +from db.connection import exec_msar_func def split_table( diff --git a/db/tests/conftest.py b/db/tests/conftest.py index 753469977c..0d839b72f4 100644 --- a/db/tests/conftest.py +++ b/db/tests/conftest.py @@ -7,7 +7,6 @@ from db.columns.operations.select import ( get_columns_attnum_from_names, get_column_attnum_from_name ) -from db.tables.operations.split import extract_columns_from_table 
from db.tables.operations.select import get_oid_from_table from db.types.base import MathesarCustomType from db.columns.operations.alter import alter_column_type @@ -187,24 +186,6 @@ def books_import_target_table_name(): return "books_target" -@pytest.fixture -def extracted_remainder_roster(engine_with_roster, roster_table_name, roster_extracted_cols, teachers_table_name): - engine, schema = engine_with_roster - roster_table_oid = get_oid_from_table(roster_table_name, schema, engine) - roster_extracted_col_attnums = get_columns_attnum_from_names(roster_table_oid, roster_extracted_cols, engine, metadata=get_empty_metadata()) - extract_columns_from_table( - roster_table_oid, - roster_extracted_col_attnums, - teachers_table_name, - schema, - engine, - ) - metadata = get_empty_metadata() - extracted = Table(teachers_table_name, metadata, schema=schema, autoload_with=engine) - remainder = Table(roster_table_name, metadata, schema=schema, autoload_with=engine) - return extracted, remainder, engine, schema - - @pytest.fixture def times_table_obj(engine_with_times): engine, schema = engine_with_times From 5d0c176a5a216feeb393722331ce350945543c60 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:44:45 +0800 Subject: [PATCH 36/70] remove unused SQLAlchemy column deletion functions --- db/records/operations/delete.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/db/records/operations/delete.py b/db/records/operations/delete.py index ff1f656514..ddbb200607 100644 --- a/db/records/operations/delete.py +++ b/db/records/operations/delete.py @@ -1,8 +1,6 @@ import json -from sqlalchemy import delete from db import connection as db_conn -from db.tables.utils import get_primary_key_column def delete_records_from_table(conn, record_ids, table_oid): @@ -21,17 +19,3 @@ def delete_records_from_table(conn, record_ids, table_oid): table_oid, json.dumps(record_ids), ).fetchone()[0] - - -def delete_record(table, engine, id_value): - primary_key_column = get_primary_key_column(table) - query = delete(table).where(primary_key_column == id_value) - with engine.begin() as conn: - return conn.execute(query) - - -def bulk_delete_records(table, engine, id_values): - primary_key_column = get_primary_key_column(table) - query = delete(table).where(primary_key_column.in_(id_values)) - with engine.begin() as conn: - return conn.execute(query) From ed6f39a3a371ef257d9444d9213b543ec3afbd67 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 14:52:11 +0800 Subject: [PATCH 37/70] remove unused SQLAlchemy record insert functions --- db/records/operations/insert.py | 196 --------------------- db/tests/conftest.py | 5 +- db/tests/records/operations/test_insert.py | 41 ----- 3 files changed, 1 insertion(+), 241 deletions(-) delete mode 100644 db/tests/records/operations/test_insert.py diff --git a/db/records/operations/insert.py b/db/records/operations/insert.py index bec20df1e8..14ab640170 100644 --- a/db/records/operations/insert.py +++ b/db/records/operations/insert.py @@ -1,19 +1,6 @@ import json -import pandas -import tempfile -from psycopg2 import sql -from sqlalchemy.exc import IntegrityError, ProgrammingError -from psycopg2.errors import NotNullViolation, ForeignKeyViolation, DatatypeMismatch, UniqueViolation, ExclusionViolation from db import connection as db_conn -from db.columns.exceptions import NotNullError, ForeignKeyError, TypeMismatchError, UniqueValueError, ExclusionError -from db.columns.base import MathesarColumn -from db.constants import ID, ID_ORIGINAL -from 
db.encoding_utils import get_sql_compatible_encoding -from db.records.operations.select import get_record -from sqlalchemy import select - -READ_SIZE = 20000 def add_record_to_table(conn, record_def, table_oid, return_record_summaries=False): @@ -26,186 +13,3 @@ def add_record_to_table(conn, record_def, table_oid, return_record_summaries=Fal return_record_summaries ).fetchone()[0] return result - - -def insert_record_or_records(table, engine, record_data): - """ - record_data can be a dictionary, tuple, or list of dictionaries or tuples. - if record_data is a list, it creates multiple records. - """ - id_value = None - with engine.begin() as connection: - result = connection.execute(table.insert(), record_data) - # If there was only a single record created, return the record. - if result.rowcount == 1: - # We need to manually commit insertion so that we can retrieve the record. - connection.commit() - id_value = result.inserted_primary_key[0] - if id_value is not None: - return get_record(table, engine, id_value) - # Do not return any records if multiple rows were added. - return None - - -def get_records_from_dataframe(df): - """ - We convert the dataframe to JSON using to_json() method and then to a Python object. - This method replaces 'NaN' values in the dataframe with 'None' values in Python - object. The reason behind not using df.to_dict() method is beacuse it stringifies - 'NaN' values rather than converting them to a 'None' value. - We pass 'records' as the orientation parameter because we want each record to contain - data of a single row and not of a single column (which is the default behaviour). - """ - return json.loads(df.to_json(orient='records')) - - -def insert_records_from_json(table, engine, json_filepath, column_names, max_level): - """ - Normalizes JSON data and inserts it into a table. - - Args: - table: Table. The table to insert JSON data into. - engine: MockConnection. The SQLAlchemy engine. - json_filepath: str. The path to the stored JSON data file. - column_names: List[str]. List of column names. - max_level: int. The depth upto which JSON dict should be flattened. - - Algorithm: - 1. We convert JSON data into Python object using json.load(). - 2. We normalize data into a pandas dataframe using pandas.json_normalize() method. - The method takes column names as meta. We provide all possible keys as column - names, hence it adds missing keys to JSON objects and marks their values as NaN. - 3. We get records from the dataframe using the method get_records_from_dataframe(). - 4. The processed data is now a list of dict objects. Each dict has same keys, that are - the column names of the table. We loop through each dict object, and if any value is - a dict or a list, we stringify them before inserting them into the table. This way, - our type inference logic kicks in later on converting them into - 'MathesarCustomType.MATHESAR_JSON_OBJECT' and 'MathesarCustomType.MATHESAR_JSON_ARRAY' - respectively. - 5. We pass data (a list of dicts) to 'insert_record_or_records()' method which inserts - them into the table. - """ - - with open(json_filepath, 'r') as json_file: - data = json.load(json_file) - - """ - data: JSON object. The data we want to normalize. - max_level: int. Max number of levels(depth of dict) to normalize. - Normalizing a dict involes flattening it and if max_level is None, - pandas normalizes all levels. Default max_level is kept 0. - meta: Fields to use as metadata for each record in resulting table. 
Without meta, - the method chooses keys from the first JSON object it encounters as column names. - We provide column names as meta, because we want all possible keys as columns in - our table and not just the keys from the first JSON object. - """ - df = pandas.json_normalize(data, max_level=max_level, meta=column_names) - records = get_records_from_dataframe(df) - - for i, row in enumerate(records): - if ID in row and ID_ORIGINAL in column_names: - row[ID_ORIGINAL] = row.pop("id") - records[i] = { - k: json.dumps(v) - if (isinstance(v, dict) or isinstance(v, list)) - else v - for k, v in row.items() - } - insert_record_or_records(table, engine, records) - - -def insert_records_from_excel(table, engine, dataframe): - records = get_records_from_dataframe(dataframe) - insert_record_or_records(table, engine, records) - - -def insert_records_from_csv(table, engine, csv_filepath, column_names, header, delimiter=None, escape=None, quote=None, encoding=None): - with open(csv_filepath, "r", encoding=encoding) as csv_file: - with engine.begin() as conn: - cursor = conn.connection.cursor() - # We should convert our entire query to sql.SQL class in order to keep its original header's name - # When we call sql.Indentifier which will return a Identifier class (based on sql.Composable) - # instead of a String. So we have to convert our punctuations to sql.Composable using sql.SQL - relation = sql.SQL(".").join( - sql.Identifier(part) for part in (table.schema, table.name) - ) - formatted_columns = sql.SQL(",").join( - sql.Identifier(column_name) for column_name in column_names - ) - conversion_encoding, sql_encoding = get_sql_compatible_encoding(encoding) - copy_sql = sql.SQL( - "COPY {relation} ({formatted_columns}) FROM STDIN CSV {header} {delimiter} {escape} {quote} {encoding}" - ).format( - relation=relation, - formatted_columns=formatted_columns, - # If HEADER is not None, we'll pass its value to our entire SQL query - header=sql.SQL("HEADER" if header else ""), - # If DELIMITER is not None, we'll pass its value to our entire SQL query - delimiter=sql.SQL(f"DELIMITER E'{delimiter}'" if delimiter else ""), - # If ESCAPE is not None, we'll pass its value to our entire SQL query - escape=sql.SQL(f"ESCAPE '{escape}'" if escape else ""), - quote=sql.SQL( - ("QUOTE ''''" if quote == "'" else f"QUOTE '{quote}'") - if quote - else "" - ), - encoding=sql.SQL(f"ENCODING '{sql_encoding}'" if sql_encoding else ""), - ) - if conversion_encoding == encoding: - cursor.copy_expert(copy_sql, csv_file) - else: - # File needs to be converted to compatible database supported encoding - with tempfile.SpooledTemporaryFile(mode='wb+', encoding=conversion_encoding) as temp_file: - while True: - # TODO: Raise an exception instead of silently replacing the characters - contents = csv_file.read(READ_SIZE).encode(conversion_encoding, "replace") - if not contents: - break - temp_file.write(contents) - temp_file.seek(0) - cursor.copy_expert(copy_sql, temp_file) - - -def insert_from_select(from_table, target_table, engine, col_mappings=None): - if col_mappings: - from_table_col_list, target_table_col_list = zip( - *[ - (from_table.c[from_col], target_table.c[target_col]) - for from_col, target_col in col_mappings - ] - ) - else: - from_table_col_list = [ - col for col in from_table.c - if not MathesarColumn.from_column(col).is_default - ] - target_table_col_list = [ - col for col in target_table.c - if not MathesarColumn.from_column(col).is_default - ] - with engine.begin() as conn: - sel = select(from_table_col_list) - ins = 
target_table.insert().from_select(target_table_col_list, sel) - try: - result = conn.execute(ins) - except IntegrityError as e: - if type(e.orig) is NotNullViolation: - raise NotNullError - elif type(e.orig) is ForeignKeyViolation: - raise ForeignKeyError - elif type(e.orig) is UniqueViolation: - # ToDo: Try to differentiate between the types of unique violations - # Scenario 1: Adding a duplicate value into a column with uniqueness constraint in the target table. - # Scenario 2: Adding a non existing value twice in a column with uniqueness constraint in the target table. - # Both the scenarios currently result in the same exception being thrown. - raise UniqueValueError - elif type(e.orig) is ExclusionViolation: - raise ExclusionError - else: - raise e - except ProgrammingError as e: - if type(e.orig) is DatatypeMismatch: - raise TypeMismatchError - else: - raise e - return target_table, result diff --git a/db/tests/conftest.py b/db/tests/conftest.py index 0d839b72f4..8cfbf367bd 100644 --- a/db/tests/conftest.py +++ b/db/tests/conftest.py @@ -4,13 +4,10 @@ from sqlalchemy import MetaData, text, Table from db import constants -from db.columns.operations.select import ( - get_columns_attnum_from_names, get_column_attnum_from_name -) +from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import get_oid_from_table from db.types.base import MathesarCustomType from db.columns.operations.alter import alter_column_type -from db.metadata import get_empty_metadata FILE_DIR = os.path.abspath(os.path.dirname(__file__)) RESOURCES = os.path.join(FILE_DIR, "resources") diff --git a/db/tests/records/operations/test_insert.py b/db/tests/records/operations/test_insert.py deleted file mode 100644 index 40f5ee174b..0000000000 --- a/db/tests/records/operations/test_insert.py +++ /dev/null @@ -1,41 +0,0 @@ -from db.records.operations.insert import insert_from_select -from db.records.operations.select import get_records - - -def test_insert_from_select_without_mappings(books_table_import_from_obj, books_table_import_target_obj): - # We need engine of the from_table since insert_from_select() is set-up from the 'from_table' side - from_table, engine = books_table_import_from_obj - target_table, _ = books_table_import_target_obj - records_without_mappings = [ - (1, 'Steve Jobs', 'Walter Issacson'), - (2, 'The Idiot', 'Fyodor Dostevsky'), - (3, 'David Copperfield', 'Charles Darwin'), - (4, 'Fyodor Dostoevsky', 'Crime and Punishment'), - (5, 'Cervantes', 'Don Quixote') - ] - res_table, _ = insert_from_select(from_table, target_table, engine, col_mappings=None) - records = get_records(res_table, engine) - assert res_table.c['id'] == target_table.c[0] - assert res_table.c['title'] == target_table.c[1] - assert res_table.c['author'] == target_table.c[2] - assert records == records_without_mappings - - -def test_insert_from_select_with_mappings(books_table_import_from_obj, books_table_import_target_obj): - # We need engine of the from_table since insert_from_select() is set-up from the 'from_table' side - from_table, engine = books_table_import_from_obj - target_table, _ = books_table_import_target_obj - records_with_mappings = [ - (1, 'Steve Jobs', 'Walter Issacson'), - (2, 'The Idiot', 'Fyodor Dostevsky'), - (3, 'David Copperfield', 'Charles Darwin'), - (4, 'Crime and Punishment', 'Fyodor Dostoevsky'), - (5, 'Don Quixote', 'Cervantes') - ] - col_mappings = [['book_title', 'title'], ['author_name', 'author']] - res_table, _ = insert_from_select(from_table, target_table, 
engine, col_mappings) - records = get_records(res_table, engine) - assert res_table.c['id'] == target_table.c[0] - assert res_table.c['title'] == target_table.c[1] - assert res_table.c['author'] == target_table.c[2] - assert records == records_with_mappings From 564e4b7eac0722cd028f18d93fec69b5159e2c7c Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 15:01:31 +0800 Subject: [PATCH 38/70] remove unused record search transform --- db/records/operations/relevance.py | 52 ------- db/tests/records/operations/test_relevance.py | 127 ------------------ db/transforms/base.py | 22 +-- 3 files changed, 1 insertion(+), 200 deletions(-) delete mode 100644 db/records/operations/relevance.py delete mode 100644 db/tests/records/operations/test_relevance.py diff --git a/db/records/operations/relevance.py b/db/records/operations/relevance.py deleted file mode 100644 index f20daa3c23..0000000000 --- a/db/records/operations/relevance.py +++ /dev/null @@ -1,52 +0,0 @@ -from sqlalchemy import case, select, desc -from db.types import categories -from db.types.base import MathesarCustomType -from db.types.operations.convert import get_db_type_enum_from_class - -WEIGHT_4 = 4 -WEIGHT_3 = 3 -WEIGHT_2 = 2 -WEIGHT_1 = 1 -WEIGHT_0 = 0 -SCORE_COL = '__mathesar_relevance_score' - - -def get_rank_and_filter_rows_query(relation, parameters_dict, limit=10): - """ - Given a relation, we use a score-assignment algorithm to rank rows of - the relation by the strength of their match with the various - parameters given in parameters_dict. - """ - rank_cte = _get_scored_selectable(relation, parameters_dict) - filtered_ordered_cte = select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0).order_by(desc(SCORE_COL)).cte() - return select( - *[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]] - ).limit(limit) - - -def _get_scored_selectable(relation, parameters_dict): - return select( - relation, - sum( - [ - _get_col_score_expr(relation.columns[col_name], val) - for col_name, val in parameters_dict.items() - ] - ).label(SCORE_COL) - ).cte() - - -def _get_col_score_expr(col, param_val): - col_type = get_db_type_enum_from_class(col.type.__class__) - searchable_string_types = categories.STRING_LIKE_TYPES | frozenset([MathesarCustomType.URI, MathesarCustomType.EMAIL]) - if col_type in searchable_string_types: - score_expr = case( - (col.ilike(param_val), WEIGHT_4), - (col.ilike(param_val + '%'), WEIGHT_3), - (col.ilike('%' + param_val + '%'), WEIGHT_2), - else_=WEIGHT_0 - ) - else: - score_expr = case((col == param_val, WEIGHT_4), else_=WEIGHT_0) - - return score_expr diff --git a/db/tests/records/operations/test_relevance.py b/db/tests/records/operations/test_relevance.py deleted file mode 100644 index 63996dd03d..0000000000 --- a/db/tests/records/operations/test_relevance.py +++ /dev/null @@ -1,127 +0,0 @@ -from sqlalchemy import select, desc -from db.records.operations import relevance - - -def test_rank_and_filter_rows(roster_table_obj): - roster, engine = roster_table_obj - sel = relevance.get_rank_and_filter_rows_query( - relation=roster, - parameters_dict={'Student Name': 'John'} - ) - - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - - assert len(res) == 10 and all( - ['John' in row['Student Name'] for row in res] - ) - - -def test_get_scored_selectable_text_exact(roster_table_obj): - roster, engine = roster_table_obj - sel = select( - relevance._get_scored_selectable( - relation=roster, - parameters_dict={'Student Name': 'John Jones'} - ) - 
).order_by(desc(relevance.SCORE_COL)) - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - - matches = [ - row for row in res if row[relevance.SCORE_COL] > 0 - ] - - assert len(matches) == 5 and all( - [ - row['Student Name'] == 'John Jones' - and row[relevance.SCORE_COL] == 4 - for row in matches - ] - ) - - -def test_get_scored_selectable_text_begin_and_mid(roster_table_obj): - roster, engine = roster_table_obj - sel = select( - relevance._get_scored_selectable( - relation=roster, - parameters_dict={'Student Name': 'John'} - ) - ).order_by(desc(relevance.SCORE_COL)) - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - - matches = [ - row for row in res if row[relevance.SCORE_COL] > 0 - ] - - assert len(matches) == 40 and all( - [ - (row['Student Name'][:4] == 'John' and row[relevance.SCORE_COL] == 3) - or ('John' in row['Student Name'] and row[relevance.SCORE_COL] == 2) - for row in matches - ] - ) - - -def test_get_scored_selectable_multicol(roster_table_obj): - roster, engine = roster_table_obj - sel = select( - relevance._get_scored_selectable( - relation=roster, - parameters_dict={'Student Name': 'John', 'Subject': 'Math'} - ) - ).order_by(desc(relevance.SCORE_COL), 'Student Number') - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - - matches = [ - row for row in res if row[relevance.SCORE_COL] > 0 - ] - - assert len(matches) == 124 and all( - [ - ( - row['Student Name'][:4] == 'John' - and row['Subject'] == 'Math' - and row[relevance.SCORE_COL] == 7 - ) or ( - row['Student Name'][:4] == 'John' - and row['Subject'] != 'Math' - and row[relevance.SCORE_COL] == 3 - ) or ( - 'John' in row['Student Name'] - and row['Subject'] == 'Math' - and row[relevance.SCORE_COL] == 6 - ) or ( - 'John' in row['Student Name'] - and row['Subject'] != 'Math' - and row[relevance.SCORE_COL] == 2 - ) or ( - 'John' not in row['Student Name'] - and row['Subject'] == 'Math' - and row[relevance.SCORE_COL] == 4 - ) - for row in matches - ] - ) - - -def test_get_scored_selectable_nontext(roster_table_obj): - roster, engine = roster_table_obj - sel = select( - relevance._get_scored_selectable( - relation=roster, - parameters_dict={'Grade': 100}) - ).order_by(desc(relevance.SCORE_COL)) - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - - matches = [ - row for row in res if row[relevance.SCORE_COL] > 0 - ] - - assert len(matches) == 7 and all( - [row['Grade'] == 100 and row[relevance.SCORE_COL] == 4 for row in matches] - ) diff --git a/db/transforms/base.py b/db/transforms/base.py index 3eb10406af..548fdf13fc 100644 --- a/db/transforms/base.py +++ b/db/transforms/base.py @@ -7,7 +7,7 @@ from db.functions.operations.apply import apply_db_function_by_id, apply_db_function_spec_as_filter from db.functions.packed import DistinctArrayAgg -from db.records.operations import group, relevance, sort as rec_sort +from db.records.operations import group, sort as rec_sort class UniqueConstraintMapping: @@ -178,26 +178,6 @@ def apply_to_relation(self, relation): return _to_non_executable(executable) -class Search(Transform): - type = "search" - spec = [] - - @property - def search_spec(self): - return self.spec[0] - - @property - def limit_spec(self): - return self.spec[1] - - def apply_to_relation(self, relation): - search = self.search_spec - limit = self.limit_spec - search_params = {search_obj['column']: search_obj['literal'] for search_obj in search} - executable = relevance.get_rank_and_filter_rows_query(relation, search_params, limit) - return 
_to_non_executable(executable) - - class Group(Transform): type = "group" From 3164a6fb8dcdd14145820f93e185430fe586ff57 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 15:11:22 +0800 Subject: [PATCH 39/70] remove unused SQLAlchemy record update function --- db/records/operations/update.py | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/db/records/operations/update.py b/db/records/operations/update.py index 1535251966..5f413a2b89 100644 --- a/db/records/operations/update.py +++ b/db/records/operations/update.py @@ -1,10 +1,5 @@ import json from db import connection as db_conn -from db.records.operations.select import get_record -from db.tables.utils import get_primary_key_column -from sqlalchemy.exc import DataError -from psycopg2.errors import DatetimeFieldOverflow, InvalidDatetimeFormat -from db.records.exceptions import InvalidDate, InvalidDateFormat def patch_record_in_table(conn, record_def, record_id, table_oid, return_record_summaries=False): @@ -18,20 +13,3 @@ def patch_record_in_table(conn, record_def, record_id, table_oid, return_record_ return_record_summaries ).fetchone()[0] return result - - -def update_record(table, engine, id_value, record_data): - primary_key_column = get_primary_key_column(table) - with engine.begin() as connection: - try: - connection.execute( - table.update().where(primary_key_column == id_value).values(record_data) - ) - except DataError as e: - if type(e.orig) is DatetimeFieldOverflow: - raise InvalidDate - elif type(e.orig) is InvalidDatetimeFormat: - raise InvalidDateFormat - else: - raise e - return get_record(table, engine, id_value) From d8f83a470aac89e6c8bb2cc7623ec71cf8447ab6 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 15:21:08 +0800 Subject: [PATCH 40/70] remove some schema utils --- db/schemas/utils.py | 16 +--------------- db/tests/schemas/test_utils.py | 11 ----------- 2 files changed, 1 insertion(+), 26 deletions(-) delete mode 100644 db/tests/schemas/test_utils.py diff --git a/db/schemas/utils.py b/db/schemas/utils.py index cc3766b0ab..5b9feef37c 100644 --- a/db/schemas/utils.py +++ b/db/schemas/utils.py @@ -1,6 +1,4 @@ -from sqlalchemy import inspect - -from db.schemas.operations.select import reflect_schema, get_mathesar_schemas_with_oids +from db.schemas.operations.select import reflect_schema def get_schema_name_from_oid(oid, engine, metadata=None): @@ -13,15 +11,3 @@ def get_schema_oid_from_name(name, engine): schema_info = reflect_schema(engine, name=name) if schema_info: return schema_info["oid"] - - -def get_mathesar_schemas(engine): - return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)] - - -def get_all_schemas(engine): - inspector = inspect(engine) - # We don't need to exclude system schemas (i.e., starting with "pg_") - # since Inspector.get_schema_names already excludes them. Thus, this - # function actually gets all non-pg-reserved schemas. 
- return inspector.get_schema_names() diff --git a/db/tests/schemas/test_utils.py b/db/tests/schemas/test_utils.py deleted file mode 100644 index 69866c75df..0000000000 --- a/db/tests/schemas/test_utils.py +++ /dev/null @@ -1,11 +0,0 @@ -from unittest.mock import patch - -from db.engine import create_engine -from db.schemas import utils as schema_utils - - -def test_get_mathesar_schemas(): - engine = create_engine("postgresql://") - with patch.object(schema_utils, "get_mathesar_schemas_with_oids") as mock_schemas: - schema_utils.get_mathesar_schemas(engine) - mock_schemas.assert_called_once_with(engine) From 2218d5f0c235adef5125d0ea53061bd370ba08b0 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 15:28:55 +0800 Subject: [PATCH 41/70] remove first layer of schema functions --- db/schemas/operations/alter.py | 16 +--------------- db/schemas/operations/create.py | 20 +------------------- db/schemas/operations/drop.py | 1 + db/schemas/operations/select.py | 9 +-------- db/tests/schemas/operations/test_create.py | 16 ---------------- db/tests/schemas/operations/test_select.py | 13 +------------ 6 files changed, 5 insertions(+), 70 deletions(-) delete mode 100644 db/tests/schemas/operations/test_create.py diff --git a/db/schemas/operations/alter.py b/db/schemas/operations/alter.py index 03f933b491..01367949af 100644 --- a/db/schemas/operations/alter.py +++ b/db/schemas/operations/alter.py @@ -1,20 +1,6 @@ import json -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def patch_schema_via_sql_alchemy(schema_name, engine, patch): - """ - Patch a schema using a SQLAlchemy engine. - - Args: - schema_name: Name of the schema to change. - engine: SQLAlchemy engine object for connecting. - patch: A dict mapping the following fields to new values: - - 'name' (optional): New name for the schema. - - 'description' (optional): New description for the schema. - """ - execute_msar_func_with_engine(engine, "patch_schema", schema_name, json.dumps(patch)) +from db.connection import exec_msar_func def patch_schema(schema_oid, conn, patch): diff --git a/db/schemas/operations/create.py b/db/schemas/operations/create.py index 07e6a3f28a..0fed24c7ed 100644 --- a/db/schemas/operations/create.py +++ b/db/schemas/operations/create.py @@ -1,25 +1,7 @@ from db.connection import execute_msar_func_with_engine, exec_msar_func -def create_schema_via_sql_alchemy(schema_name, engine, description=None): - """ - Creates a schema using a SQLAlchemy engine. - - Args: - schema_name: Name of the schema to create. - engine: SQLAlchemy engine object for connecting. - description: A new description to set on the schema. - - If a schema already exists with the given name, this function will raise an error. - - Returns: - The integer oid of the newly created schema. - """ - return execute_msar_func_with_engine( - engine, 'create_schema', schema_name, None, description - ).fetchone()[0] - - +# TODO Remove (only used in testing) def create_schema_if_not_exists_via_sql_alchemy(schema_name, engine): """ Ensure that a schema exists using a SQLAlchemy engine. diff --git a/db/schemas/operations/drop.py b/db/schemas/operations/drop.py index 919ce352bb..c659061b4d 100644 --- a/db/schemas/operations/drop.py +++ b/db/schemas/operations/drop.py @@ -1,6 +1,7 @@ from db.connection import execute_msar_func_with_engine, exec_msar_func +# TODO Remove (only used in testing) def drop_schema_via_name(engine, name, cascade=False): """ Drop a schema by its name. 
diff --git a/db/schemas/operations/select.py b/db/schemas/operations/select.py index 973876909a..2680ada4a4 100644 --- a/db/schemas/operations/select.py +++ b/db/schemas/operations/select.py @@ -1,4 +1,4 @@ -from sqlalchemy import select, and_, not_, or_, func +from sqlalchemy import select, and_, not_, or_ from db.constants import INTERNAL_SCHEMAS from db.utils import get_pg_catalog_table @@ -49,10 +49,3 @@ def get_mathesar_schemas_with_oids(engine): with engine.begin() as conn: result = conn.execute(sel).fetchall() return result - - -def get_schema_description(oid, engine): - with engine.begin() as conn: - res = conn.execute(select(func.obj_description(oid, 'pg_namespace'))) - - return res.fetchone()[0] diff --git a/db/tests/schemas/operations/test_create.py b/db/tests/schemas/operations/test_create.py deleted file mode 100644 index 9452b95e1a..0000000000 --- a/db/tests/schemas/operations/test_create.py +++ /dev/null @@ -1,16 +0,0 @@ -from unittest.mock import patch -import db.schemas.operations.create as sch_create - - -def test_create_schema_via_sql_alchemy(engine_with_schema): - engine = engine_with_schema - with patch.object(sch_create, 'execute_msar_func_with_engine') as mock_exec: - sch_create.create_schema_via_sql_alchemy( - schema_name='new_schema', - engine=engine, - description=None, - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "create_schema" - assert call_args[2] == "new_schema" diff --git a/db/tests/schemas/operations/test_select.py b/db/tests/schemas/operations/test_select.py index ea0efeec8f..82a0495351 100644 --- a/db/tests/schemas/operations/test_select.py +++ b/db/tests/schemas/operations/test_select.py @@ -1,5 +1,5 @@ import warnings -from sqlalchemy import select, Table, MetaData, text +from sqlalchemy import select, Table, MetaData from db.constants import TYPES_SCHEMA from db.schemas.operations import select as ssel @@ -41,14 +41,3 @@ def test_get_mathesar_schemas_with_oids_gets_correct_oid(engine_with_schema): actual_schemata = ssel.get_mathesar_schemas_with_oids(engine) actual_oid = [oid for schm, oid in actual_schemata if schm == schema][0] assert actual_oid == expect_oid - - -def test_get_schema_description(engine_with_schema): - engine, schema = engine_with_schema - schema_oid = ssel.reflect_schema(engine, name=schema)['oid'] - expect_description = 'test schema description' - with engine.begin() as conn: - conn.execute(text(f'''COMMENT ON SCHEMA "{schema}" IS '{expect_description}';''')) - actual_description = ssel.get_schema_description(schema_oid, engine) - - assert actual_description == expect_description From 56e5f43edf7afb35f9dc5dcfc8001527deedd816 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 15:56:02 +0800 Subject: [PATCH 42/70] move schema funcs only used in testing to conftest --- conftest.py | 56 ++++++++++++++++++---- db/schemas/operations/create.py | 19 +------- db/schemas/operations/drop.py | 21 +------- db/schemas/operations/select.py | 42 ---------------- db/schemas/utils.py | 13 ----- db/tests/schemas/operations/test_drop.py | 15 ------ db/tests/schemas/operations/test_select.py | 43 ----------------- 7 files changed, 50 insertions(+), 159 deletions(-) delete mode 100644 db/schemas/utils.py delete mode 100644 db/tests/schemas/operations/test_drop.py delete mode 100644 db/tests/schemas/operations/test_select.py diff --git a/conftest.py b/conftest.py index a452c313e2..f815bc290f 100644 --- a/conftest.py +++ b/conftest.py @@ -7,15 +7,15 @@ # These imports come from the 
mathesar namespace, because our DB setup logic depends on it. from django.db import connection as dj_connection -from sqlalchemy import MetaData, text, Table +from sqlalchemy import MetaData, text, Table, select, or_ from sqlalchemy.exc import OperationalError from sqlalchemy_utils import database_exists, create_database, drop_database +from db.connection import execute_msar_func_with_engine from db.engine import add_custom_types_to_ischema_names, create_engine as sa_create_engine from db.sql import install as sql_install -from db.schemas.operations.drop import drop_schema_via_name as drop_sa_schema -from db.schemas.operations.create import create_schema_if_not_exists_via_sql_alchemy -from db.schemas.utils import get_schema_oid_from_name, get_schema_name_from_oid +from db.utils import get_pg_catalog_table +from db.metadata import get_empty_metadata from fixtures.utils import create_scoped_fixtures @@ -210,8 +210,8 @@ def _create_schema(schema_name, engine, schema_mustnt_exist=True): if schema_mustnt_exist: assert schema_name not in created_schemas logger.debug(f'creating {schema_name}') - create_schema_if_not_exists_via_sql_alchemy(schema_name, engine) - schema_oid = get_schema_oid_from_name(schema_name, engine) + _create_schema_if_not_exists_via_sql_alchemy(schema_name, engine) + schema_oid = _get_schema_oid_from_name(schema_name, engine) db_name = engine.url.database created_schemas_in_this_engine = created_schemas.setdefault(db_name, {}) created_schemas_in_this_engine[schema_name] = schema_oid @@ -223,15 +223,55 @@ def _create_schema(schema_name, engine, schema_mustnt_exist=True): try: for _, schema_oid in created_schemas_in_this_engine.items(): # Handle schemas being renamed during test - schema_name = get_schema_name_from_oid(schema_oid, engine) + schema_name = _get_schema_name_from_oid(schema_oid, engine) if schema_name: - drop_sa_schema(engine, schema_name, cascade=True) + _drop_schema_via_name(engine, schema_name, cascade=True) logger.debug(f'dropping {schema_name}') except OperationalError as e: logger.debug(f'ignoring operational error: {e}') logger.debug('exit') +def _create_schema_if_not_exists_via_sql_alchemy(schema_name, engine): + return execute_msar_func_with_engine( + engine, 'create_schema_if_not_exists', schema_name + ).fetchone()[0] + + +def _get_schema_name_from_oid(oid, engine, metadata=None): + schema_info = _reflect_schema(engine, oid=oid, metadata=metadata) + if schema_info: + return schema_info["name"] + + +def _get_schema_oid_from_name(name, engine): + schema_info = _reflect_schema(engine, name=name) + if schema_info: + return schema_info["oid"] + + +def _reflect_schema(engine, name=None, oid=None, metadata=None): + # If we have both arguments, the behavior is undefined. 
+ try: + assert name is None or oid is None + except AssertionError as e: + raise e + # TODO reuse metadata + metadata = metadata if metadata else get_empty_metadata() + pg_namespace = get_pg_catalog_table("pg_namespace", engine, metadata=metadata) + sel = ( + select(pg_namespace.c.oid, pg_namespace.c.nspname.label("name")) + .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid)) + ) + with engine.begin() as conn: + schema_info = conn.execute(sel).fetchone() + return schema_info + + +def _drop_schema_via_name(engine, name, cascade=False): + execute_msar_func_with_engine(engine, 'drop_schema', name, cascade).fetchone() + + # Seems to be roughly equivalent to mathesar/database/base.py::create_mathesar_engine # TODO consider fixing this seeming duplication # either way, both depend on Django configuration. can that be resolved? diff --git a/db/schemas/operations/create.py b/db/schemas/operations/create.py index 0fed24c7ed..2524f8aeca 100644 --- a/db/schemas/operations/create.py +++ b/db/schemas/operations/create.py @@ -1,21 +1,4 @@ -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -# TODO Remove (only used in testing) -def create_schema_if_not_exists_via_sql_alchemy(schema_name, engine): - """ - Ensure that a schema exists using a SQLAlchemy engine. - - Args: - schema_name: Name of the schema to create. - engine: SQLAlchemy engine object for connecting. - - Returns: - The integer oid of the newly created schema. - """ - return execute_msar_func_with_engine( - engine, 'create_schema_if_not_exists', schema_name - ).fetchone()[0] +from db.connection import exec_msar_func def create_schema(schema_name, conn, owner_oid, description=None): diff --git a/db/schemas/operations/drop.py b/db/schemas/operations/drop.py index c659061b4d..9ff87c2fb2 100644 --- a/db/schemas/operations/drop.py +++ b/db/schemas/operations/drop.py @@ -1,23 +1,4 @@ -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -# TODO Remove (only used in testing) -def drop_schema_via_name(engine, name, cascade=False): - """ - Drop a schema by its name. - - If no schema exists with the given name, an exception will be raised. - - Deprecated: - Use drop_schema_via_oid instead. This function is deprecated because we - are phasing out name-based operations in favor of OID-based operations - and we are phasing out SQLAlchemy in favor of psycopg. - - Args: - engine: SQLAlchemy engine object for connecting. name: Name of the - schema to drop. cascade: Whether to drop the dependent objects. - """ - execute_msar_func_with_engine(engine, 'drop_schema', name, cascade).fetchone() +from db.connection import exec_msar_func def drop_schema_via_oid(conn, id, cascade=False): diff --git a/db/schemas/operations/select.py b/db/schemas/operations/select.py index 2680ada4a4..8e61cee0a8 100644 --- a/db/schemas/operations/select.py +++ b/db/schemas/operations/select.py @@ -1,8 +1,3 @@ -from sqlalchemy import select, and_, not_, or_ - -from db.constants import INTERNAL_SCHEMAS -from db.utils import get_pg_catalog_table -from db.metadata import get_empty_metadata from db.connection import exec_msar_func @@ -12,40 +7,3 @@ def list_schemas(conn): def get_schema(schema_oid, conn): return exec_msar_func(conn, 'get_schema').fetchone()[0] - - -def reflect_schema(engine, name=None, oid=None, metadata=None): - # If we have both arguments, the behavior is undefined. 
- try: - assert name is None or oid is None - except AssertionError as e: - raise e - # TODO reuse metadata - metadata = metadata if metadata else get_empty_metadata() - pg_namespace = get_pg_catalog_table("pg_namespace", engine, metadata=metadata) - sel = ( - select(pg_namespace.c.oid, pg_namespace.c.nspname.label("name")) - .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid)) - ) - with engine.begin() as conn: - schema_info = conn.execute(sel).fetchone() - return schema_info - - -def get_mathesar_schemas_with_oids(engine): - # TODO reuse metadata - metadata = get_empty_metadata() - pg_namespace = get_pg_catalog_table("pg_namespace", engine, metadata=metadata) - sel = ( - select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid) - .where( - and_( - *[pg_namespace.c.nspname != schema for schema in INTERNAL_SCHEMAS], - pg_namespace.c.nspname != "information_schema", - not_(pg_namespace.c.nspname.like("pg_%")) - ) - ) - ) - with engine.begin() as conn: - result = conn.execute(sel).fetchall() - return result diff --git a/db/schemas/utils.py b/db/schemas/utils.py deleted file mode 100644 index 5b9feef37c..0000000000 --- a/db/schemas/utils.py +++ /dev/null @@ -1,13 +0,0 @@ -from db.schemas.operations.select import reflect_schema - - -def get_schema_name_from_oid(oid, engine, metadata=None): - schema_info = reflect_schema(engine, oid=oid, metadata=metadata) - if schema_info: - return schema_info["name"] - - -def get_schema_oid_from_name(name, engine): - schema_info = reflect_schema(engine, name=name) - if schema_info: - return schema_info["oid"] diff --git a/db/tests/schemas/operations/test_drop.py b/db/tests/schemas/operations/test_drop.py deleted file mode 100644 index 243ec4fbd6..0000000000 --- a/db/tests/schemas/operations/test_drop.py +++ /dev/null @@ -1,15 +0,0 @@ -import pytest -from unittest.mock import patch -import db.schemas.operations.drop as sch_drop - - -@pytest.mark.parametrize("cascade", [True, False]) -def test_drop_schema(engine_with_schema, cascade): - engine = engine_with_schema - with patch.object(sch_drop, 'execute_msar_func_with_engine') as mock_exec: - sch_drop.drop_schema_via_name(engine, 'drop_test_schema', cascade) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "drop_schema" - assert call_args[2] == "drop_test_schema" - assert call_args[3] == cascade diff --git a/db/tests/schemas/operations/test_select.py b/db/tests/schemas/operations/test_select.py deleted file mode 100644 index 82a0495351..0000000000 --- a/db/tests/schemas/operations/test_select.py +++ /dev/null @@ -1,43 +0,0 @@ -import warnings -from sqlalchemy import select, Table, MetaData - -from db.constants import TYPES_SCHEMA -from db.schemas.operations import select as ssel - - -def test_get_mathesar_schemas_with_oids_gets_added_schema(engine_with_schema): - engine, schema = engine_with_schema - actual_schemas = ssel.get_mathesar_schemas_with_oids(engine) - assert schema in [schema for schema, oid in actual_schemas] - - -def test_get_mathesar_schemas_with_oids_avoids_pg_schemas(engine_with_schema): - engine, schema = engine_with_schema - actual_schemas = ssel.get_mathesar_schemas_with_oids(engine) - assert all([schema[:3] != "pg_" for schema, oid in actual_schemas]) - - -def test_get_mathesar_schemas_with_oids_avoids_information_schema(engine_with_schema): - engine, schema = engine_with_schema - actual_schemas = ssel.get_mathesar_schemas_with_oids(engine) - assert all([schema != "information_schema" for schema, _ in 
actual_schemas]) - - -def test_get_mathesar_schemas_with_oids_avoids_types_schema(engine_with_schema): - engine, schema = engine_with_schema - actual_schemas = ssel.get_mathesar_schemas_with_oids(engine) - assert all([schema != TYPES_SCHEMA for schema, _ in actual_schemas]) - - -def test_get_mathesar_schemas_with_oids_gets_correct_oid(engine_with_schema): - engine, schema = engine_with_schema - metadata = MetaData() - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="Did not recognize type") - pg_namespace = Table("pg_namespace", metadata, autoload_with=engine) - sel = select(pg_namespace.c.oid).where(pg_namespace.c.nspname == schema) - with engine.begin() as conn: - expect_oid = conn.execute(sel).fetchone()[0] - actual_schemata = ssel.get_mathesar_schemas_with_oids(engine) - actual_oid = [oid for schm, oid in actual_schemata if schm == schema][0] - assert actual_oid == expect_oid From 425d692a53fe51b2984d075c1f6b89c2b017a1e5 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:01:24 +0800 Subject: [PATCH 43/70] remove unused dependents code --- db/dependents/__init__.py | 0 db/dependents/dependents_utils.py | 302 ------------------------- db/tests/dependents/__init__.py | 0 db/tests/dependents/conftest.py | 24 -- db/tests/dependents/test_dependents.py | 223 ------------------ 5 files changed, 549 deletions(-) delete mode 100644 db/dependents/__init__.py delete mode 100644 db/dependents/dependents_utils.py delete mode 100644 db/tests/dependents/__init__.py delete mode 100644 db/tests/dependents/conftest.py delete mode 100644 db/tests/dependents/test_dependents.py diff --git a/db/dependents/__init__.py b/db/dependents/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/dependents/dependents_utils.py b/db/dependents/dependents_utils.py deleted file mode 100644 index 8894b8bf6a..0000000000 --- a/db/dependents/dependents_utils.py +++ /dev/null @@ -1,302 +0,0 @@ -from sqlalchemy import MetaData, any_, column, exists, func, literal, select, text, true, union, and_, collate -import warnings -from sqlalchemy.dialects.postgresql import array - -from db.utils import get_pg_catalog_table - -# OIDs assigned during normal database operation are constrained to be 16384 or higher. 
-USER_DEFINED_OBJECTS_MIN_OID = 16384 -# automatic and normal dependents -PG_DEPENDENT_TYPES = ['a', 'n'] -DEFAULT_NON_COLUMN_OBJSUBID = 0 -PG_CLASS_CATALOGUE_NAME = '\'pg_class\'' -START_LEVEL = 1 -MAX_LEVEL = 10 - - -def get_dependents_graph(referenced_object_id, engine, exclude_types, attnum=None): - dependency_pairs = _get_typed_dependency_pairs_stmt(engine, exclude_types) - dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte') - - pg_identify_refobject = _get_pg_identify_object_lateral_stmt( - dependency_pairs_cte.c.refclassid, - dependency_pairs_cte.c.refobjid, - (DEFAULT_NON_COLUMN_OBJSUBID if attnum is None else attnum) - ) - - # anchor member which includes all dependents of a requested object - anchor = ( - select( - dependency_pairs_cte, - pg_identify_refobject.c.name.label('refobjname'), - pg_identify_refobject.c.type.label('refobjtype'), - literal(START_LEVEL).label('level'), - array([dependency_pairs_cte.c.refobjid]).label('dependency_chain') - ) - .join(pg_identify_refobject, true()) - .where(dependency_pairs_cte.c.refobjid == referenced_object_id) - .where(dependency_pairs_cte.c.objid != referenced_object_id) - ) - - if (attnum is not None): - anchor = anchor.where(dependency_pairs_cte.c.refobjsubid == attnum) - - anchor = anchor.cte('cte') - - # recursive member which includes dependents for each object of the previous level - recursive = ( - select( - dependency_pairs_cte, - anchor.c.objname.label('refobjname'), - anchor.c.objtype.label('refobjtype'), - (anchor.c.level + 1), - anchor.c.dependency_chain + array([anchor.c.objid]) - ) - .where(anchor.c.level < MAX_LEVEL) - .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) - .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid) - ) - - recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid) - - stmt = select(anchor.union(recursive)) - - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="SELECT statement has a cartesian product") - with engine.connect() as conn: - result = conn.execute(stmt) - - return _get_structured_result(result) - - -def _get_constraint_dependents(pg_identify_object, dependency_pairs): - return dependency_pairs.where(pg_identify_object.c.type == 'table constraint') - - -def _get_index_dependents(pg_identify_object, dependency_pairs): - return dependency_pairs.where(pg_identify_object.c.type == 'index') - - -def _get_rule_dependents(pg_identify_object, dependency_pairs): - return dependency_pairs.where(pg_identify_object.c.type == 'rule') - - -def _get_trigger_dependents(pg_depend, pg_identify_object, pg_trigger): - return ( - select( - pg_depend, - # for some reason, tgname column is in C collation which collides with other columns collations - collate(pg_trigger.c.tgname, 'default').label('objname'), - pg_identify_object.c.type.label('objtype') - ) - .select_from(pg_depend) - .join(pg_identify_object, true()) - .join(pg_trigger, pg_trigger.c.oid == pg_depend.c.objid) - .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) - .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) - .where(pg_identify_object.c.type == 'trigger') - .group_by( - pg_depend, - pg_trigger.c.tgname, - pg_identify_object.c.type) - ) - - -def _get_sequence_dependents(pg_identify_object, dependency_pairs): - return dependency_pairs.where(pg_identify_object.c.type == 'sequence') - - -def _get_view_dependents(pg_identify_object, pg_rewrite_table, rule_dependents): - pg_identify_object = 
_get_pg_identify_object_lateral_stmt( - text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_rewrite_table.c.ev_class, DEFAULT_NON_COLUMN_OBJSUBID) - - return select( - rule_dependents.c.classid, - pg_rewrite_table.c.ev_class.label('objid'), - rule_dependents.c.objsubid, - rule_dependents.c.refclassid, - rule_dependents.c.refobjid, - rule_dependents.c.refobjsubid, - rule_dependents.c.deptype, - pg_identify_object.c.name.label('objname'), - pg_identify_object.c.type.label('objtype')) \ - .select_from(rule_dependents) \ - .join(pg_rewrite_table, rule_dependents.c.objid == pg_rewrite_table.c.oid) \ - .join(pg_identify_object, true()) \ - .group_by( - rule_dependents, - pg_rewrite_table.c.ev_class, - pg_identify_object.c.type, - pg_identify_object.c.name) - - -def _get_table_dependents(pg_identify_object, base): - return base.where(pg_identify_object.c.type == 'table') - - -def _get_function_dependents(pg_depend, pg_identify_object, pg_proc): - return ( - select( - pg_depend, - # the same as with pg_trigger table - collate(pg_proc.c.proname, 'default').label('objname'), - pg_identify_object.c.type.label('objtype') - ) - .select_from(pg_depend) - .join(pg_identify_object, true()) - .join(pg_proc, pg_proc.c.oid == pg_depend.c.objid) - .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) - .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) - .where(pg_identify_object.c.type == 'function') - .group_by( - pg_depend, - pg_proc.c.proname, - pg_identify_object.c.type) - ) - - -# stmt for getting a full list of dependents and identifying them -def _get_dependency_pairs_stmt(pg_depend, pg_identify_object): - result = ( - select( - pg_depend, - pg_identify_object.c.name.label('objname'), - pg_identify_object.c.type.label('objtype') - ) - .select_from(pg_depend) - .join(pg_identify_object, true()) - .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) - .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) - .group_by( - pg_depend, - pg_identify_object.c.name, - pg_identify_object.c.type) - ) - - return result - - -def _get_pg_depend_table(engine, metadata): - return get_pg_catalog_table("pg_depend", engine, metadata=metadata) - - -def _get_pg_rewrite(engine, metadata): - return get_pg_catalog_table("pg_rewrite", engine, metadata=metadata) - - -def _get_pg_trigger(engine, metadata): - return get_pg_catalog_table('pg_trigger', engine, metadata=metadata) - - -def _get_pg_proc(engine, metadata): - return get_pg_catalog_table('pg_proc', engine, metadata=metadata) - - -def _get_pg_identify_object_lateral_stmt(classid, objid, objsubid): - return ( - select( - column("name"), - column("type") - ) - .select_from(func.pg_identify_object( - classid, - objid, - objsubid)) - .lateral() - ) - - -def _get_typed_dependency_pairs_stmt(engine, exclude_types): - metadata = MetaData() - - pg_depend = _get_pg_depend_table(engine, metadata) - pg_identify_object = _get_pg_identify_object_lateral_stmt( - pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid) - pg_rewrite = _get_pg_rewrite(engine, metadata) - pg_trigger = _get_pg_trigger(engine, metadata) - pg_proc = _get_pg_proc(engine, metadata) - - type_dependents = {} - # each statement filters the base statement extracting dependents of a specific type - # so it's easy to exclude particular types or add new - dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object) - constraint_dependents = _get_constraint_dependents(pg_identify_object, dependency_pairs).cte('constraint_dependents') - type_dependents['table 
constraint'] = constraint_dependents - - table_dependents = _get_table_dependents(pg_identify_object, dependency_pairs).cte('table_dependents') - type_dependents['table'] = table_dependents - - # should not be returned directly, used for getting views - # this relation is required because views in PostgreSQL are implemented using the rule system - # views don't depend on tables directly but through rules, that are mapped one-to-one - rule_dependents = _get_rule_dependents(pg_identify_object, dependency_pairs).cte('rule_dependents') - view_dependents = _get_view_dependents(pg_identify_object, pg_rewrite, rule_dependents).cte('view_dependents') - type_dependents['view'] = view_dependents - - index_dependents = _get_index_dependents(pg_identify_object, dependency_pairs).cte('index_dependents') - type_dependents['index'] = index_dependents - - trigger_dependents = _get_trigger_dependents(pg_depend, pg_identify_object, pg_trigger).cte('trigger_dependents') - type_dependents['trigger'] = [trigger_dependents] - - sequence_dependents = _get_sequence_dependents(pg_identify_object, dependency_pairs).cte('sequence_dependents') - type_dependents['sequence'] = [sequence_dependents] - - # only schemas' function dependents - function_dependents = _get_function_dependents(pg_depend, pg_identify_object, pg_proc).cte('function_dependents') - type_dependents['function'] = [function_dependents] - - dependent_selects = [ - select(dependent) - for type, dependent in type_dependents.items() - if type not in exclude_types] - - return union(*dependent_selects) - - -def has_dependents(referenced_object_id, engine, attnum=None): - metadata = MetaData() - - pg_depend = _get_pg_depend_table(engine, metadata) - - conditions = [ - pg_depend.c.refobjid == referenced_object_id, - pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)), - pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID - ] - - if attnum is not None: - conditions.append(pg_depend.c.refobjsubid == (0 if attnum is None else attnum)) - - stmt = select( - exists( - select().select_from(pg_depend) - .where(and_(*conditions)) - ) - ) - - with engine.connect() as conn: - result = conn.execute(stmt).scalar() - - return result - - -def _get_structured_result(dependency_graph_result): - result = [] - for dependency_pair in dependency_graph_result: - d = {} - d['level'] = dependency_pair.level - d['obj'] = { - 'objid': dependency_pair.objid, - 'type': dependency_pair.objtype, - 'name': dependency_pair.objname - } - d['parent_obj'] = { - 'objid': dependency_pair.refobjid, - 'type': dependency_pair.refobjtype, - 'objsubid': (dependency_pair.refobjsubid if dependency_pair.refobjsubid != 0 else None), - 'name': dependency_pair.refobjname - } - result.append(d) - - return result diff --git a/db/tests/dependents/__init__.py b/db/tests/dependents/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/dependents/conftest.py b/db/tests/dependents/conftest.py deleted file mode 100644 index 2255df1600..0000000000 --- a/db/tests/dependents/conftest.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -from db.tables.operations.select import get_oid_from_table - - -@pytest.fixture -def library_tables_oids(engine_with_library): - engine, schema = engine_with_library - - authors_oid = get_oid_from_table('Authors', schema, engine) - checkouts_oid = get_oid_from_table('Checkouts', schema, engine) - items_oid = get_oid_from_table('Items', schema, engine) - patrons_oid = get_oid_from_table('Patrons', schema, engine) - publications_oid = 
get_oid_from_table('Publications', schema, engine) - publishers_oid = get_oid_from_table('Publishers', schema, engine) - - return { - 'Authors': authors_oid, - 'Checkouts': checkouts_oid, - 'Items': items_oid, - 'Patrons': patrons_oid, - 'Publications': publications_oid, - 'Publishers': publishers_oid - } diff --git a/db/tests/dependents/test_dependents.py b/db/tests/dependents/test_dependents.py deleted file mode 100644 index fc838bec23..0000000000 --- a/db/tests/dependents/test_dependents.py +++ /dev/null @@ -1,223 +0,0 @@ -import pytest -from sqlalchemy import MetaData, select, Index -from sqlalchemy_utils import create_view -from db.constraints.operations.create import add_constraint_via_sql_alchemy -from db.constraints.base import ForeignKeyConstraint -from db.dependents.dependents_utils import get_dependents_graph -from db.constraints.operations.select import get_constraint_oid_by_name_and_table_oid -from db.columns.operations.create import create_column -from db.columns.operations.select import get_column_attnum_from_name -from db.types.base import PostgresType -from db.metadata import get_empty_metadata - - -def _get_object_dependents(dependents_graph, object_oid): - return list(filter(lambda x: x['parent_obj']['objid'] == object_oid, dependents_graph)) - - -def _get_object_dependents_oids(dependents_graph, object_oid): - return [dependent['obj']['objid'] for dependent in _get_object_dependents(dependents_graph, object_oid)] - - -def _get_object_dependents_by_name(dependents_graph, object_oid, name): - return [dependent['obj'] for dependent in _get_object_dependents(dependents_graph, object_oid) if dependent['obj']['name'] == name] - - -def test_correct_dependents_amount_and_level(engine, library_tables_oids): - publishers_dependents_graph = get_dependents_graph(library_tables_oids['Publishers'], engine, []) - - publishers_dependents = _get_object_dependents(publishers_dependents_graph, library_tables_oids['Publishers']) - - assert len(publishers_dependents) == 3 - assert all( - [ - r['level'] == 1 - for r in publishers_dependents - ] - ) - - -def test_response_format(engine, library_tables_oids): - publishers_dependents_graph = get_dependents_graph(library_tables_oids['Publishers'], engine, []) - - dependent_expected_attrs = ['obj', 'parent_obj', 'level'] - obj_expected_attrs = ['objid', 'type'] - assert all( - [ - all(attr in dependent for attr in dependent_expected_attrs) - for dependent in publishers_dependents_graph - ] - ) - assert all( - [ - all(attr in dependent['obj'] for attr in obj_expected_attrs) - for dependent in publishers_dependents_graph - ] - ) - assert all( - [ - all(attr in dependent['parent_obj'] for attr in obj_expected_attrs) - for dependent in publishers_dependents_graph - ] - ) - - -def test_constrains_as_dependents(engine, library_tables_oids, library_db_tables): - items_oid = library_tables_oids['Items'] - items_dependents_graph = get_dependents_graph(items_oid, engine, []) - items_dependents_oids = _get_object_dependents_oids(items_dependents_graph, items_oid) - - items_constraint_oids = [ - get_constraint_oid_by_name_and_table_oid(constraint.name, items_oid, engine) - for constraint in library_db_tables['Items'].constraints] - - checkouts_items_fk_oid = get_constraint_oid_by_name_and_table_oid( - 'Checkouts_Item id_fkey', library_tables_oids['Checkouts'], engine - ) - - assert all( - [ - oid in items_dependents_oids - for oid in items_constraint_oids + [checkouts_items_fk_oid] - ] - ) - - -# if a table contains a foreign key referencing itself, it 
shouldn't be treated as a dependent -def test_self_reference(engine_with_schema, library_tables_oids): - engine, schema = engine_with_schema - - publishers_oid = library_tables_oids['Publishers'] - - # remove when library_without_checkouts.sql is updated and includes self-reference case - fk_column_attnum = create_column(engine, publishers_oid, {'name': 'Parent Publisher', 'type': PostgresType.INTEGER.id})[0] - pk_column_attnum = get_column_attnum_from_name(publishers_oid, 'id', engine, metadata=get_empty_metadata()) - fk_constraint = ForeignKeyConstraint('Publishers_Publisher_fkey', publishers_oid, [fk_column_attnum], publishers_oid, [pk_column_attnum], {}) - add_constraint_via_sql_alchemy(fk_constraint, engine) - - publishers_oid = library_tables_oids['Publishers'] - publishers_dependents_graph = get_dependents_graph(publishers_oid, engine, []) - - publishers_dependents_oids = _get_object_dependents_oids(publishers_dependents_graph, publishers_oid) - assert publishers_oid not in publishers_dependents_oids - - -# if two tables depend on each other, we should return dependence only for the topmost object in the graph -# excluding the possibility of circulal reference -def test_circular_reference(engine_with_schema, library_tables_oids): - engine, schema = engine_with_schema - - publishers_oid = library_tables_oids['Publishers'] - publications_oid = library_tables_oids['Publications'] - - # remove when library_without_checkouts.sql is updated and includes circular reference case - fk_column_attnum = create_column(engine, publishers_oid, {'name': 'Top Publication', 'type': PostgresType.INTEGER.id})[0] - publications_pk_column_attnum = get_column_attnum_from_name(publications_oid, 'id', engine, metadata=get_empty_metadata()) - fk_constraint = ForeignKeyConstraint('Publishers_Publications_fkey', publishers_oid, [fk_column_attnum], publications_oid, [publications_pk_column_attnum], {}) - add_constraint_via_sql_alchemy(fk_constraint, engine) - - publishers_dependents_graph = get_dependents_graph(publishers_oid, engine, []) - publications_dependents_oids = _get_object_dependents_oids(publishers_dependents_graph, publications_oid) - - assert publishers_oid not in publications_dependents_oids - - -def test_dependents_graph_max_level(engine_with_schema, library_db_tables, library_tables_oids): - engine, schema = engine_with_schema - metadata = MetaData(schema=schema, bind=engine) - source = library_db_tables['Checkouts'].c.id - - for i in range(15): - view_name = str(i) - source = create_view(view_name, select(source), metadata) - metadata.create_all(engine) - - checkouts_dependents_graph = get_dependents_graph(library_tables_oids['Checkouts'], engine, []) - - # by default, dependents graph max level is 10 - dependents_by_level = sorted(checkouts_dependents_graph, key=lambda x: x['level']) - assert dependents_by_level[0]['level'] == 1 - assert dependents_by_level[-1]['level'] == 10 - - -def test_column_dependents(engine, library_tables_oids): - publications_oid = library_tables_oids['Publications'] - items_oid = library_tables_oids['Items'] - publications_id_column_attnum = get_column_attnum_from_name(publications_oid, 'id', engine, metadata=get_empty_metadata()) - publications_id_column_dependents_graph = get_dependents_graph(publications_oid, engine, [], publications_id_column_attnum) - - publications_pk_oid = get_constraint_oid_by_name_and_table_oid('Publications_pkey', publications_oid, engine) - items_publications_fk_oid = get_constraint_oid_by_name_and_table_oid('Items_Publications_id_fkey', 
items_oid, engine) - - publications_dependents = _get_object_dependents(publications_id_column_dependents_graph, publications_oid) - publications_dependent_oids = _get_object_dependents_oids(publications_id_column_dependents_graph, publications_oid) - assert all( - [ - r['parent_obj']['objsubid'] == 1 - for r in publications_dependents - ] - ) - assert all( - [ - oid in publications_dependent_oids - for oid in [publications_pk_oid, items_publications_fk_oid] - ] - ) - - -def test_views_as_dependents(engine_with_schema, library_db_tables, library_tables_oids): - engine, schema = engine_with_schema - metadata = MetaData(schema=schema, bind=engine) - - publications = library_db_tables['Publications'] - new_publications_view = select(publications).where(publications.c['Publication Year'] >= 2000) - view_name = 'new_publications' - create_view(view_name, new_publications_view, metadata) - metadata.create_all(engine) - - publications_oid = library_tables_oids['Publications'] - publications_dependents_graph = get_dependents_graph(publications_oid, engine, []) - publications_view_dependent = _get_object_dependents_by_name(publications_dependents_graph, publications_oid, view_name)[0] - - assert publications_view_dependent['name'] == view_name - - -def test_indexes_as_dependents(engine, library_db_tables, library_tables_oids): - index_name = 'index' - index = Index(index_name, library_db_tables['Publishers'].c.id) - index.create(engine) - - publishers_dependents_graph = get_dependents_graph(library_tables_oids['Publishers'], engine, []) - publishers_index_dependent = _get_object_dependents_by_name(publishers_dependents_graph, library_tables_oids['Publishers'], index_name)[0] - - assert publishers_index_dependent['name'] == index_name - - -types = [ - ['table'], - ['table constraint'], - ['table', 'table constraint'], -] - - -@pytest.mark.parametrize("exclude_types", types) -def test_filter(engine, library_tables_oids, exclude_types): - publishers_oid = library_tables_oids['Publishers'] - - publishers_dependents_graph = get_dependents_graph(publishers_oid, engine, exclude_types) - dependents_types = [dependent['obj']['type'] for dependent in publishers_dependents_graph] - - assert all( - [ - type not in dependents_types for type in exclude_types - ] - ) - - -def test_sequences_as_dependents(engine, library_tables_oids): - publishers_oid = library_tables_oids['Publishers'] - publishers_sequence_name = '"Publishers_id_seq"' - publishers_dependents_graph = get_dependents_graph(publishers_oid, engine, []) - publishers_sequence_dependent = _get_object_dependents_by_name(publishers_dependents_graph, publishers_oid, publishers_sequence_name)[0] - - assert publishers_sequence_dependent['name'] == publishers_sequence_name From 5a471d28c7f71c319c0c7d0fe5d248a8184759f7 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:17:36 +0800 Subject: [PATCH 44/70] remove unused constraint code --- db/constraints/base.py | 66 ---------- db/constraints/operations/create.py | 21 +-- db/constraints/operations/drop.py | 20 +-- db/constraints/operations/select.py | 88 ------------- db/constraints/utils.py | 124 ------------------ db/metadata.py | 3 +- db/tests/constraints/operations/__init__.py | 0 .../constraints/operations/test_create.py | 34 ----- db/tests/constraints/operations/test_drop.py | 19 --- db/tests/constraints/utils.py | 20 --- 10 files changed, 3 insertions(+), 392 deletions(-) delete mode 100644 db/constraints/base.py delete mode 100644 db/constraints/utils.py delete mode 100644 
db/tests/constraints/operations/__init__.py delete mode 100644 db/tests/constraints/operations/test_create.py delete mode 100644 db/tests/constraints/operations/test_drop.py delete mode 100644 db/tests/constraints/utils.py diff --git a/db/constraints/base.py b/db/constraints/base.py deleted file mode 100644 index e8ee8d1ff4..0000000000 --- a/db/constraints/base.py +++ /dev/null @@ -1,66 +0,0 @@ -import json -from abc import ABC, abstractmethod -from db.constraints.utils import ( - get_constraint_match_char_from_type, get_constraint_char_from_action -) - - -class Constraint(ABC): - @abstractmethod - def get_constraint_def_json(self): - pass - - -class UniqueConstraint(Constraint): - def __init__(self, name, table_oid, columns_attnum): - self.name = name - self.table_oid = table_oid - self.columns_attnum = columns_attnum - - def get_constraint_def_json(self): - return json.dumps( - [ - { - 'name': self.name, - 'type': 'u', - 'columns': self.columns_attnum - } - ], - ) - - -class ForeignKeyConstraint(Constraint): - def __init__( - self, name, - table_oid, - columns_attnum, - referent_table_oid, - referent_columns_attnum, - options - ): - self.name = name - self.table_oid = table_oid - self.columns_attnum = columns_attnum - self.referent_table_oid = referent_table_oid - self.referent_columns = referent_columns_attnum - self.options = options - - def get_constraint_def_json(self): - match_type = get_constraint_match_char_from_type(self.options.get('match')) - on_update = get_constraint_char_from_action(self.options.get('onupdate')) - on_delete = get_constraint_char_from_action(self.options.get('ondelete')) - return json.dumps( - [ - { - 'name': self.name, - 'type': 'f', - 'columns': self.columns_attnum, - 'deferrable': self.options.get('deferrable'), - 'fkey_relation_id': self.referent_table_oid, - 'fkey_columns': self.referent_columns, - 'fkey_update_action': on_update, - 'fkey_delete_action': on_delete, - 'fkey_match_type': match_type, - } - ] - ) diff --git a/db/constraints/operations/create.py b/db/constraints/operations/create.py index 45b9b4600f..e6810026a3 100644 --- a/db/constraints/operations/create.py +++ b/db/constraints/operations/create.py @@ -1,25 +1,6 @@ import json -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def add_constraint_via_sql_alchemy(constraint_obj, engine): - """ - Add a constraint. - - Args: - constraint_obj: (See __msar.process_con_def_jsonb for details) - engine: SQLAlchemy engine object for connecting. - - Returns: - Returns a list of oid(s) of constraints for a given table. - """ - return execute_msar_func_with_engine( - engine, - 'add_constraints', - constraint_obj.table_oid, - constraint_obj.get_constraint_def_json() - ).fetchone()[0] +from db.connection import exec_msar_func def create_constraint(table_oid, constraint_obj_list, conn): diff --git a/db/constraints/operations/drop.py b/db/constraints/operations/drop.py index 5faeb1bba9..7f3aa5397a 100644 --- a/db/constraints/operations/drop.py +++ b/db/constraints/operations/drop.py @@ -1,22 +1,4 @@ -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def drop_constraint(table_name, schema_name, engine, constraint_name): - """ - Drop a constraint. - - Args: - table_name: The name of the table that has the constraint to be dropped. - schema_name: The name of the schema where the table with constraint to be dropped resides. - engine: SQLAlchemy engine object for connecting. - constraint_name: The name of constraint to be dropped. 
- - Returns: - Returns a string giving the command that was run. - """ - return execute_msar_func_with_engine( - engine, 'drop_constraint', schema_name, table_name, constraint_name - ).fetchone()[0] +from db.connection import exec_msar_func def drop_constraint_via_oid(table_oid, constraint_oid, conn): diff --git a/db/constraints/operations/select.py b/db/constraints/operations/select.py index de47bacb1d..0156c71f47 100644 --- a/db/constraints/operations/select.py +++ b/db/constraints/operations/select.py @@ -1,93 +1,5 @@ -from sqlalchemy import select, and_ - from db.connection import select_from_msar_func -from db.utils import get_pg_catalog_table -from db.metadata import get_empty_metadata def get_constraints_for_table(table_oid, conn): return select_from_msar_func(conn, 'get_constraints_for_table', table_oid) - - -def get_constraints_with_oids(engine, table_oid=None): - # TODO reuse metadata - metadata = get_empty_metadata() - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - # conrelid is the table's OID. - if table_oid: - where_clause = pg_constraint.c.conrelid == table_oid - else: - # We only want to select constraints attached to a table. - where_clause = pg_constraint.c.conrelid != 0 - query = select(pg_constraint).where(where_clause) - with engine.begin() as conn: - result = conn.execute(query).fetchall() - return result - - -def get_constraint_from_oid(oid, engine, table): - constraint_record = get_constraint_record_from_oid(oid, engine) - for constraint in table.constraints: - if constraint.name == constraint_record['conname']: - return constraint - return None - - -def get_constraint_record_from_oid(oid, engine, metadata=None): - metadata = metadata if metadata else get_empty_metadata() - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - # conrelid is the table's OID. - query = select(pg_constraint).where(pg_constraint.c.oid == oid) - with engine.begin() as conn: - constraint_record = conn.execute(query).first() - return constraint_record - - -def get_constraint_oid_by_name_and_table_oid(name, table_oid, engine): - # TODO reuse metadata - metadata = get_empty_metadata() - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - # We only want to select constraints attached to a table. - # conrelid is the table's OID. - query = select(pg_constraint).where( - and_(pg_constraint.c.conrelid == table_oid, pg_constraint.c.conname == name) - ) - with engine.begin() as conn: - result = conn.execute(query).first() - return result['oid'] - - -def get_fkey_constraint_oid_by_name_and_referent_table_oid(name, table_oid, engine): - """ - Sometimes, we need to find a foreign key by the referent table OID. - """ - # TODO reuse metadata - metadata = get_empty_metadata() - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - # We only want to select constraints attached to a table. - # confrelid is the referent table's OID. 
- query = select(pg_constraint).where( - and_(pg_constraint.c.confrelid == table_oid, pg_constraint.c.conname == name) - ) - with engine.begin() as conn: - result = conn.execute(query).first() - return result['oid'] - - -def get_column_constraints(column_attnum, table_oid, engine): - # TODO reuse metadata - metadata = get_empty_metadata() - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - query = ( - select(pg_constraint) - .where(and_( - # 'conrelid' contains the table oid - pg_constraint.c.conrelid == table_oid, - # 'conkey' contains a list of the constrained column's attnum - # Here, we check if the column attnum appears in the conkey list - pg_constraint.c.conkey.bool_op("&&")(f"{{{column_attnum}}}") - )) - ) - with engine.begin() as conn: - result = conn.execute(query).fetchall() - return result diff --git a/db/constraints/utils.py b/db/constraints/utils.py deleted file mode 100644 index 473558fdaf..0000000000 --- a/db/constraints/utils.py +++ /dev/null @@ -1,124 +0,0 @@ -"""Utilities for database constraints.""" -from enum import Enum - - -class ConstraintType(Enum): - FOREIGN_KEY = 'foreignkey' - PRIMARY_KEY = 'primary' - UNIQUE = 'unique' - CHECK = 'check' - EXCLUDE = 'exclude' - - -class ConstraintAction(Enum): - RESTRICT = 'RESTRICT' - CASCADE = 'CASCADE' - SET_NULL = 'SET NULL' - NO_ACTION = 'NO ACTION' - SET_DEFAULT = 'SET DEFAULT' - - -class ConstraintMatch(Enum): - FULL = 'FULL' - PARTIAL = 'PARTIAL' - SIMPLE = 'SIMPLE' - - -# TODO Remove this. It's incorrect, and not robust. -# Naming conventions for constraints follow standard Postgres conventions -# described in https://stackoverflow.com/a/4108266 -naming_convention = { - "ix": '%(table_name)s_%(column_0_name)s_idx', - "uq": '%(table_name)s_%(column_0_name)s_key', - "ck": '%(table_name)s_%(column_0_name)s_check', - "fk": '%(table_name)s_%(column_0_name)s_fkey', - "pk": '%(table_name)s_%(column_0_name)s_pkey' -} - - -def get_constraint_type_from_char(constraint_char): - """ - Map the char for a constraint to the string used for creating it in SQL. - - Args: - constraint_char: Single character, matching pg_constraints. - - Returns: - The string used for creating the constraint in SQL. - """ - char_type_map = { - "c": ConstraintType.CHECK.value, - "f": ConstraintType.FOREIGN_KEY.value, - "p": ConstraintType.PRIMARY_KEY.value, - "u": ConstraintType.UNIQUE.value, - "x": ConstraintType.EXCLUDE.value - } - return char_type_map.get(constraint_char) - - -def _get_char_action_map(reverse=False): - action_map = { - "a": ConstraintAction.NO_ACTION.value, - "r": ConstraintAction.RESTRICT.value, - "c": ConstraintAction.CASCADE.value, - "n": ConstraintAction.SET_NULL.value, - "d": ConstraintAction.SET_DEFAULT.value, - } - if reverse: - action_map = {v: k for k, v in action_map.items()} - return action_map - - -def get_constraint_action_from_char(action_char): - """ - Map the action_char to a string giving the on update or on delecte action. - - Args: - action_char: Single character, matching pg_constraints. - """ - action_map = _get_char_action_map() - return action_map.get(action_char) - - -def get_constraint_char_from_action(action): - """ - Map the on update or on delete action to a single character. - - Args: - action: Single character, matching pg_constraints. 
- """ - action_map = _get_char_action_map(reverse=True) - return action_map.get(action) - - -def _get_char_match_map(reverse=False): - match_map = { - "f": ConstraintMatch.FULL.value, - "p": ConstraintMatch.PARTIAL.value, - "s": ConstraintMatch.SIMPLE.value, - } - if reverse: - match_map = {v: k for k, v in match_map.items()} - return match_map - - -def get_constraint_match_type_from_char(match_char): - """ - Map the match_char to a string giving the match type. - - Args: - match_char: Single character, matching pg_constraints. - """ - match_map = _get_char_match_map() - return match_map.get(match_char) - - -def get_constraint_match_char_from_type(match_type): - """ - Map the match_type to a single character. - - Args: - match_type: Single character, matching pg_constraints. - """ - match_map = _get_char_match_map(reverse=True) - return match_map.get(match_type) diff --git a/db/metadata.py b/db/metadata.py index 399bae5ea1..78ba16f139 100644 --- a/db/metadata.py +++ b/db/metadata.py @@ -1,5 +1,4 @@ from sqlalchemy import MetaData -from db.constraints.utils import naming_convention def get_empty_metadata(): @@ -8,4 +7,4 @@ def get_empty_metadata(): This is probably the only way you'll want to instantiate MetaData in this codebase. """ - return MetaData(naming_convention=naming_convention) + return MetaData() diff --git a/db/tests/constraints/operations/__init__.py b/db/tests/constraints/operations/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/constraints/operations/test_create.py b/db/tests/constraints/operations/test_create.py deleted file mode 100644 index 476ef1c74b..0000000000 --- a/db/tests/constraints/operations/test_create.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest -from unittest.mock import patch -import db.constraints.operations.create as con_create -from db.constraints.base import UniqueConstraint, ForeignKeyConstraint - - -@pytest.mark.parametrize( - "constraint_obj", [ - (UniqueConstraint( - name='test_uq_con', - table_oid=12345, - columns_attnum=[80085, 53301] - )), - (ForeignKeyConstraint( - name='test_fk_con', - table_oid=12345, - columns_attnum=[80085], - referent_table_oid=54321, - referent_columns_attnum=[53301], - options={'match_type': 'f', 'on_update': 'r', 'on_delete': 'c'} - ))] -) -def test_add_constraint_db(engine_with_schema, constraint_obj): - engine = engine_with_schema - with patch.object(con_create, 'execute_msar_func_with_engine') as mock_exec: - con_create.add_constraint_via_sql_alchemy( - engine=engine, - constraint_obj=constraint_obj - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "add_constraints" - assert call_args[2] == constraint_obj.table_oid - assert call_args[3] == constraint_obj.get_constraint_def_json() diff --git a/db/tests/constraints/operations/test_drop.py b/db/tests/constraints/operations/test_drop.py deleted file mode 100644 index 87674bf158..0000000000 --- a/db/tests/constraints/operations/test_drop.py +++ /dev/null @@ -1,19 +0,0 @@ -from unittest.mock import patch -import db.constraints.operations.drop as con_drop - - -def test_drop_constraint_db(engine_with_schema): - engine, schema_name = engine_with_schema - with patch.object(con_drop, 'execute_msar_func_with_engine') as mock_exec: - con_drop.drop_constraint( - engine=engine, - schema_name=schema_name, - table_name='test_table_name', - constraint_name='test_constraint_name' - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == 
"drop_constraint" - assert call_args[2] == schema_name - assert call_args[3] == "test_table_name" - assert call_args[4] == "test_constraint_name" diff --git a/db/tests/constraints/utils.py b/db/tests/constraints/utils.py deleted file mode 100644 index 55a94b14b8..0000000000 --- a/db/tests/constraints/utils.py +++ /dev/null @@ -1,20 +0,0 @@ -from sqlalchemy import PrimaryKeyConstraint, UniqueConstraint - - -def get_first_unique_constraint(table): - constraint_list = list(table.constraints) - for item in constraint_list: - if type(item) is UniqueConstraint: - return item - - -def assert_only_primary_key_present(table): - constraint_list = list(table.constraints) - assert len(constraint_list) == 1 - assert type(constraint_list[0]) is PrimaryKeyConstraint - - -def assert_primary_key_and_unique_present(table): - constraint_list = list(table.constraints) - assert len(constraint_list) == 2 - assert set([PrimaryKeyConstraint, UniqueConstraint]) == set([type(constraint) for constraint in table.constraints]) From f125eda97d96f9063a5c0c597442d6f528a60e78 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:21:51 +0800 Subject: [PATCH 45/70] remove unused SQLAlchemy-based link functions --- db/links/operations/create.py | 67 +----------------------- db/tests/links/operations/test_create.py | 47 ----------------- 2 files changed, 1 insertion(+), 113 deletions(-) delete mode 100644 db/tests/links/operations/test_create.py diff --git a/db/links/operations/create.py b/db/links/operations/create.py index 1ffc89a738..75f116fa88 100644 --- a/db/links/operations/create.py +++ b/db/links/operations/create.py @@ -1,38 +1,6 @@ import json -from db.connection import execute_msar_func_with_engine, exec_msar_func - - -def create_foreign_key_link( - engine, - referrer_column_name, - referrer_table_oid, - referent_table_oid, - unique_link=False -): - """ - Creates a Many-to-One or One-to-One link. - - Args: - engine: SQLAlchemy engine object for connecting. - referrer_column_name: Name of the new column to be created - in the referrer table. - referrer_table_oid: The OID of the referrer table. - referent_table_oid: The OID of the referent table. - unique_link: Whether to make the link one-to-one - instead of many-to-one. - - Returns: - Returns the attnum of the newly created column. - """ - return execute_msar_func_with_engine( - engine, - 'add_foreign_key_column', - referrer_column_name, - referrer_table_oid, - referent_table_oid, - unique_link - ).fetchone()[0] +from db.connection import exec_msar_func def add_foreign_key_column( @@ -64,39 +32,6 @@ def add_foreign_key_column( ) -def create_many_to_many_link(engine, schema_oid, map_table_name, referents_dict): - """ - Creates a Many-to-Many link. - - Args: - engine: SQLAlchemy engine object for connecting. - schema_oid: The OID of the schema in - which new referrer table is to be created. - map_table_name: Name of the referrer table to be created. - referents_dict: A python dict that contain 2 keys - 'referent_table_oids' & 'column_names' with values as - ordered lists of table_oids & col_names respectively - - Returns: - Returns the OID of the newly created table. 
- """ - return execute_msar_func_with_engine( - engine, - 'add_mapping_table', - schema_oid, - map_table_name, - json.dumps( - [ - {"column_name": c, "referent_table_oid": i} - for c, i in zip( - referents_dict['column_names'], - referents_dict['referent_table_oids'], - ) - ] - ) - ).fetchone()[0] - - def add_mapping_table( conn, schema_oid, diff --git a/db/tests/links/operations/test_create.py b/db/tests/links/operations/test_create.py deleted file mode 100644 index 6172e78371..0000000000 --- a/db/tests/links/operations/test_create.py +++ /dev/null @@ -1,47 +0,0 @@ -import json -import pytest -from unittest.mock import patch -import db.links.operations.create as link_create - - -@pytest.mark.parametrize( - "unique_link", [(True), (False), (None)] -) -def test_create_foreign_key_link(engine_with_schema, unique_link): - engine = engine_with_schema - with patch.object(link_create, 'execute_msar_func_with_engine') as mock_exec: - link_create.create_foreign_key_link( - engine=engine, - referent_table_oid=12345, - referrer_table_oid=54321, - referrer_column_name='actor_id', - unique_link=unique_link - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "add_foreign_key_column" - assert call_args[2] == "actor_id" - assert call_args[3] == 54321 - assert call_args[4] == 12345 - assert call_args[5] == unique_link or False - - -def test_many_to_many_link(engine_with_schema): - engine = engine_with_schema - referents = {'referent_table_oids': [12345, 54321], 'column_names': ['movie_id', 'actor_id']} - with patch.object(link_create, 'execute_msar_func_with_engine') as mock_exec: - link_create.create_many_to_many_link( - engine=engine, - schema_oid=2200, - referents_dict=referents, - map_table_name='movies_actors' - ) - call_args = mock_exec.call_args_list[0][0] - assert call_args[0] == engine - assert call_args[1] == "add_mapping_table" - assert call_args[2] == 2200 - assert call_args[3] == "movies_actors" - assert json.loads(call_args[4]) == [ - {"column_name": "movie_id", "referent_table_oid": 12345}, - {"column_name": "actor_id", "referent_table_oid": 54321} - ] From 522dc39e0371f318bf96413bcedf9d1399983523 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:29:17 +0800 Subject: [PATCH 46/70] remove obsolete cast function testing --- db/tests/types/operations/test_cast.py | 1566 ------------------------ 1 file changed, 1566 deletions(-) delete mode 100644 db/tests/types/operations/test_cast.py diff --git a/db/tests/types/operations/test_cast.py b/db/tests/types/operations/test_cast.py deleted file mode 100644 index d76c78683c..0000000000 --- a/db/tests/types/operations/test_cast.py +++ /dev/null @@ -1,1566 +0,0 @@ -from decimal import Decimal - -import pytest -from psycopg2.errors import InvalidParameterValue -from sqlalchemy import Table, Column, MetaData, select, cast, text -from sqlalchemy import VARCHAR, NUMERIC -from sqlalchemy.exc import DataError -import json - -from db.types.custom.base import CUSTOM_DB_TYPE_TO_SA_CLASS -from db.columns.operations.select import get_column_attnum_from_name, get_column_default -from db.columns.operations.alter import alter_column_type -from db.tables.operations.select import get_oid_from_table -from db.types.custom import multicurrency -from db.types.operations import cast as cast_operations -from db.types.operations.convert import get_db_type_enum_from_class -from db.types.base import ( - DatabaseType, PostgresType, MathesarCustomType, get_available_known_db_types, -) -from db.metadata 
import get_empty_metadata - - -TARGET_DICT = "target_dict" -VALID = "valid" -INVALID = "invalid" - - -MASTER_DB_TYPE_MAP_SPEC = { - # This dict specifies the full map of what types can be cast to what - # target types in Mathesar. Format of each top-level key, val pair is: - # : { - # TARGET_DICT: { - # : { - # VALID: [(in_val, out_val), (in_val, out_val)], - # INVALID: [in_val, in_val] - # }, - # : { - # INVALID: [(in_val, out_val), (in_val, out_val)] - # INVALID: [in_val, in_val] - # }, - # } - # } - # - # The TARGET_DICT is a dict with keys giving a valid target type for - # alteration of a column of the given type, and values giving a dict - # of valid and invalid casting values. VALID value list is a list of - # tuples representing the input and expected output, whereas INVALID - # value list only needs input (since it should break, giving no output) - PostgresType.BIGINT: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500), (500000000000, 500000000000)]}, - PostgresType.BOOLEAN: {VALID: [(1, True), (0, False)], INVALID: [3]}, - PostgresType.CHARACTER: {VALID: [(3, "3")]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(3, 3.0)]}, - PostgresType.INTEGER: {VALID: [(500, 500)]}, - MathesarCustomType.MATHESAR_MONEY: { - VALID: [(1234, Decimal('1234.0'))], - }, - PostgresType.MONEY: { - VALID: [(1234, "$1,234.00")], - }, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - 1234123412341234, - { - multicurrency.VALUE: 1234123412341234, - multicurrency.CURRENCY: "USD" - } - ) - ], - }, - PostgresType.NUMERIC: {VALID: [(1, Decimal('1.0'))]}, - PostgresType.REAL: {VALID: [(5, 5.0)]}, - PostgresType.SMALLINT: {VALID: [(500, 500)]}, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - PostgresType.BOOLEAN: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(True, 1), (False, 0)]}, - PostgresType.BOOLEAN: {VALID: [(True, True), (False, False)]}, - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DOUBLE_PRECISION: {VALID: [(True, 1.0), (False, 0.0)]}, - PostgresType.INTEGER: {VALID: [(True, 1), (False, 0)]}, - PostgresType.NUMERIC: {VALID: [(True, Decimal('1.0')), (False, Decimal('0'))]}, - PostgresType.REAL: {VALID: [(True, 1.0), (False, 0.0)]}, - PostgresType.SMALLINT: {VALID: [(True, 1), (False, 0)]}, - PostgresType.TEXT: {VALID: [(True, 'true'), (False, 'false')]}, - PostgresType.CHARACTER_VARYING: {VALID: [(True, 'true'), (False, 'false')]}, - } - }, - PostgresType.JSON: { - TARGET_DICT: { - PostgresType.JSONB: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}), - ({"key2": "val2"}, {"key2": "val2"}) - ], - INVALID: [], - }, - PostgresType.JSON: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}), - ({"key2": "val2"}, {"key2": "val2"}) - ], - INVALID: [], - }, - PostgresType.TEXT: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ({"key2": "val2"}, '{"key2": "val2"}') - ], - INVALID: [], - }, - PostgresType.CHARACTER: { - VALID: [], - INVALID: [], - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ({"key2": "val2"}, '{"key2": "val2"}') - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [ - ({'key1': 'val1'}, json.dumps({'key1': 'val1'})) - ], - INVALID: [[1, 2, 3]], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [ - ([1, 2, 3], "[1, 2, 3]") - ], - INVALID: ["{'key1': 'val1'}"], - }, - }, - }, - PostgresType.JSONB: { - TARGET_DICT: { - PostgresType.JSONB: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}), - ({"key2": "val2"}, 
{"key2": "val2"}) - ], - INVALID: [], - }, - PostgresType.JSON: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}), - ({"key2": "val2"}, {"key2": "val2"}) - ], - INVALID: [], - }, - PostgresType.TEXT: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ({"key2": "val2"}, '{"key2": "val2"}') - ], - INVALID: [], - }, - PostgresType.CHARACTER: { - VALID: [], - INVALID: [], - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ({"key2": "val2"}, '{"key2": "val2"}') - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [ - ({'key1': 'val1'}, json.dumps({'key1': 'val1'})) - ], - INVALID: [[1, 2, 3]], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [ - ([1, 2, 3], "[1, 2, 3]") - ], - INVALID: [], - }, - }, - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - TARGET_DICT: { - PostgresType.JSONB: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}) - ], - INVALID: [], - }, - PostgresType.JSON: { - VALID: [ - ({"key1": "val1"}, {"key1": "val1"}), - ], - INVALID: [], - }, - PostgresType.TEXT: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ], - INVALID: [], - }, - PostgresType.CHARACTER: { - VALID: [], - INVALID: [], - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ({"key1": "val1"}, '{"key1": "val1"}'), - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [], - INVALID: [], - }, - }, - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - TARGET_DICT: { - PostgresType.JSONB: { - VALID: [ - ([1, 2, 3], [1, 2, 3]) - ], - INVALID: [], - }, - PostgresType.JSON: { - VALID: [ - ([1, 2, 3], [1, 2, 3]) - ], - INVALID: [], - }, - PostgresType.TEXT: { - VALID: [ - ([1, 2, 3], '[1, 2, 3]') - ], - INVALID: [], - }, - PostgresType.CHARACTER: { - VALID: [], - INVALID: [], - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ([1, 2, 3], '[1, 2, 3]') - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [], - INVALID: [], - }, - }, - }, - PostgresType.CHARACTER: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [("4", 4)], INVALID: ["c"]}, - PostgresType.BOOLEAN: {VALID: [("t", True), ("f", False)], INVALID: ["c"]}, - PostgresType.CHARACTER: {VALID: [("a", "a")]}, - PostgresType.DOUBLE_PRECISION: {VALID: [("1", 1)], INVALID: ["b"]}, - MathesarCustomType.EMAIL: {VALID: [], INVALID: ["a"]}, - PostgresType.INTEGER: {VALID: [("4", 4)], INVALID: ["j"]}, - PostgresType.INTERVAL: {VALID: []}, - MathesarCustomType.MATHESAR_MONEY: {VALID: []}, - PostgresType.MONEY: {VALID: []}, - PostgresType.JSON: { - VALID: [], - INVALID: [], - }, - PostgresType.JSONB: { - VALID: [], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [], - INVALID: [], - }, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - "1", - {multicurrency.VALUE: 1, multicurrency.CURRENCY: "USD"} - ) - ], - INVALID: ["n"], - }, - PostgresType.NUMERIC: {VALID: [("1", Decimal("1"))], INVALID: ["a"]}, - PostgresType.REAL: {VALID: [("1", 1.0)], INVALID: ["b"]}, - PostgresType.SMALLINT: {VALID: [("4", 4)], INVALID: ["j"]}, - PostgresType.DATE: {VALID: [], INVALID: ["n"]}, - PostgresType.TIMESTAMP_WITH_TIME_ZONE: {VALID: [], INVALID: ["n"]}, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: {VALID: [], INVALID: ["n"]}, - PostgresType.TEXT: {VALID: [("a", "a")]}, - 
MathesarCustomType.URI: {VALID: [], INVALID: ["a"]}, - PostgresType.CHARACTER_VARYING: {VALID: [("a", "a")]}, - } - }, - PostgresType.DATE: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DATE: {VALID: [("1999-01-18 AD", "1999-01-18 AD")]}, - PostgresType.TEXT: {VALID: [("1999-01-18 AD", "1999-01-18")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("1999-01-18 AD", "1999-01-18")]}, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - VALID: [("1999-01-18 AD", "1999-01-18T00:00:00.0 AD")] - }, - }, - }, - PostgresType.DOUBLE_PRECISION: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500)]}, - PostgresType.BOOLEAN: {VALID: [(1.0, True), (0.0, False)]}, - PostgresType.CHARACTER: {VALID: [(3, "3")]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.INTEGER: {VALID: [(500, 500)]}, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(12.12, Decimal('12.12'))]}, - PostgresType.MONEY: {VALID: [(12.12, "$12.12")]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - 12.12, - { - multicurrency.VALUE: 12.12, - multicurrency.CURRENCY: "USD" - } - ) - ] - }, - PostgresType.NUMERIC: {VALID: [(1, 1.0)]}, - PostgresType.REAL: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.SMALLINT: {VALID: [(500, 500)]}, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - MathesarCustomType.EMAIL: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - MathesarCustomType.EMAIL: {VALID: [("alice@example.com", "alice@example.com")]}, - PostgresType.TEXT: {VALID: [("bob@example.com", "bob@example.com")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("bob@example.com", "bob@example.com")]}, - } - }, - PostgresType.INTEGER: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500)]}, - PostgresType.BOOLEAN: {VALID: [(1, True), (0, False)], INVALID: [3]}, - PostgresType.CHARACTER: {VALID: [(3, "3")]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(3, 3.0)]}, - PostgresType.INTEGER: {VALID: [(500, 500)]}, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(500, Decimal('500.0'))]}, - PostgresType.MONEY: {VALID: [(12, "$12.00")]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - (12, {multicurrency.VALUE: 12, multicurrency.CURRENCY: "USD"}) - ] - }, - PostgresType.NUMERIC: {VALID: [(1, Decimal('1.0'))]}, - PostgresType.REAL: {VALID: [(5, 5.0)]}, - PostgresType.SMALLINT: {VALID: [(500, 500)]}, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - PostgresType.INTERVAL: { - TARGET_DICT: { - PostgresType.CHARACTER: { - VALID: [] - }, - PostgresType.INTERVAL: { - VALID: [ - ("P0Y0M3DT3H5M30S", "P0Y0M3DT3H5M30S") - ] - }, - PostgresType.TEXT: { - VALID: [] - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ("P0Y0M3DT3H5M30S", "3 days 03:05:30") - ] - }, - } - }, - MathesarCustomType.MATHESAR_MONEY: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(12341234, 12341234)]}, - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DOUBLE_PRECISION: {VALID: [(12.12, 12.12)]}, - PostgresType.INTEGER: {VALID: [(123412, 123412)]}, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(12.12, Decimal('12.12'))]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - 12.12, - { - multicurrency.VALUE: 12.12, - multicurrency.CURRENCY: 'USD' - } - ) - ] - }, - PostgresType.MONEY: {VALID: [(12.12, "$12.12")]}, - PostgresType.NUMERIC: {VALID: [(12.12, Decimal('12.12'))]}, - PostgresType.REAL: {VALID: [(12.12, 12.12)]}, - PostgresType.SMALLINT: {VALID: [(1234, 1234)]}, - 
PostgresType.TEXT: {VALID: [(12.12, "12.12")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(12.12, "12.12")]}, - } - }, - # TODO resolve all PostgresType.MONEY to number type casts are failing. - PostgresType.MONEY: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [ - # TODO Following case is failing for some reason. - # ("$12341234.00", 12341234) - ]}, - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DOUBLE_PRECISION: {VALID: [ - # TODO Following case is failing for some reason. - # ("$12.12", 12.12) - ]}, - PostgresType.INTEGER: {VALID: [ - # TODO Following case is failing for some reason. - # ("$123412.00", 123412) - ]}, - MathesarCustomType.MATHESAR_MONEY: {VALID: [("$20.00", Decimal(20.0))]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - "$12.12", - { - multicurrency.VALUE: 12.12, - multicurrency.CURRENCY: 'USD' - } - ) - ] - }, - PostgresType.MONEY: {VALID: [("$12.12", "$12.12")]}, - PostgresType.REAL: {VALID: [ - # TODO Following case is failing for some reason. - # ("$12.12", 12.12) - ]}, - PostgresType.SMALLINT: {VALID: [ - # TODO Following case is failing for some reason. - # ("$1234.00", 1234) - ]}, - PostgresType.TEXT: {VALID: [("$12.12", "$12.12")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("$12.12", "$12.12")]}, - PostgresType.NUMERIC: {VALID: [ - # TODO Following case is failing for some reason. - # ("$12.34", 12.34) - ]}, - } - }, - MathesarCustomType.MULTICURRENCY_MONEY: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - { - multicurrency.VALUE: 1234.12, - multicurrency.CURRENCY: 'XYZ' - }, - { - multicurrency.VALUE: 1234.12, - multicurrency.CURRENCY: 'XYZ' - } - ) - ] - }, - PostgresType.TEXT: { - VALID: [ - ( - { - multicurrency.VALUE: 1234.12, - multicurrency.CURRENCY: 'XYZ' - }, - '(1234.12,XYZ)' - ) - ] - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ( - { - multicurrency.VALUE: 1234.12, - multicurrency.CURRENCY: 'XYZ' - }, - '(1234.12,XYZ)' - ) - ] - }, - } - }, - PostgresType.NUMERIC: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500)]}, - PostgresType.BOOLEAN: { - VALID: [(1, True), (0, False), (1.0, True), (0.0, False)], - INVALID: [42, -1] - }, - PostgresType.CHARACTER: {VALID: [(3, "3")], INVALID: [1234, 1.2]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.INTEGER: { - VALID: [(500, 500)], - INVALID: [1.234, 1234123412341234] - }, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(12.12, Decimal('12.12'))]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - (1, {multicurrency.VALUE: 1, multicurrency.CURRENCY: "USD"}) - ] - }, - PostgresType.MONEY: {VALID: [(12.12, "$12.12")]}, - PostgresType.NUMERIC: {VALID: [(1, 1.0)]}, - PostgresType.REAL: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.SMALLINT: { - VALID: [(500, 500)], - INVALID: [1.234, 12341234] - }, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - PostgresType.REAL: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500)]}, - PostgresType.BOOLEAN: { - VALID: [(1.0, True), (0.0, False)], - INVALID: [42, -1] - }, - PostgresType.CHARACTER: {VALID: [(3, "3")], INVALID: [234, 5.78]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.INTEGER: { - VALID: [(500, 500)], - INVALID: [3.345] - }, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(12.12, Decimal('12.12'))]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - 1.2, - {multicurrency.VALUE: 1.2, 
multicurrency.CURRENCY: "USD"} - ) - ] - }, - PostgresType.MONEY: {VALID: [(12.12, "$12.12")]}, - PostgresType.NUMERIC: {VALID: [(1, 1.0)]}, - PostgresType.REAL: {VALID: [(1, 1.0), (1.5, 1.5)]}, - PostgresType.SMALLINT: { - VALID: [(500, 500)], - INVALID: [3.345] - }, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - PostgresType.SMALLINT: { - TARGET_DICT: { - PostgresType.BIGINT: {VALID: [(500, 500)]}, - PostgresType.BOOLEAN: {VALID: [(1, True), (0, False)], INVALID: [3]}, - PostgresType.CHARACTER: {VALID: [(3, "3")]}, - PostgresType.DOUBLE_PRECISION: {VALID: [(3, 3.0)]}, - PostgresType.INTEGER: {VALID: [(500, 500)]}, - MathesarCustomType.MATHESAR_MONEY: {VALID: [(12, 12)]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - (1, {multicurrency.VALUE: 1, multicurrency.CURRENCY: "USD"}) - ] - }, - PostgresType.MONEY: {VALID: [(12, "$12.00")]}, - PostgresType.NUMERIC: {VALID: [(1, Decimal('1.0'))]}, - PostgresType.REAL: {VALID: [(5, 5.0)]}, - PostgresType.SMALLINT: {VALID: [(500, 500)]}, - PostgresType.TEXT: {VALID: [(3, "3")]}, - PostgresType.CHARACTER_VARYING: {VALID: [(3, "3")]}, - } - }, - PostgresType.TIME_WITHOUT_TIME_ZONE: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.TIME_WITHOUT_TIME_ZONE: {VALID: [("12:30:45", "12:30:45.0")]}, - PostgresType.TIME_WITH_TIME_ZONE: {VALID: [("12:30:45", "12:30:45.0Z")]}, - PostgresType.TEXT: {VALID: [("12:30:45", "12:30:45")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("12:30:45", "12:30:45")]}, - }, - }, - PostgresType.TIME_WITH_TIME_ZONE: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.TIME_WITH_TIME_ZONE: { - VALID: [("12:30:45+01:00", "12:30:45.0+01:00")] - }, - PostgresType.TIME_WITHOUT_TIME_ZONE: {VALID: [("12:30:45+01:00", "12:30:45.0")]}, - PostgresType.TEXT: {VALID: [("12:30:45+01:00", "12:30:45+01")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("12:30:45+01:00", "12:30:45+01")]}, - }, - }, - PostgresType.TIMESTAMP_WITH_TIME_ZONE: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DATE: { - VALID: [("1999-01-18T00:00:00.0Z AD", "1999-01-18 AD")], - INVALID: [ - "1999-01-18T12:30:45.0Z AD", - "1999-01-18T00:00:00.0+01:00 AD", - ] - }, - PostgresType.TIMESTAMP_WITH_TIME_ZONE: { - VALID: [ - ( - "1999-01-18T12:30:45.0+01:00 AD", - "1999-01-18T11:30:45.0Z AD", - ), - ] - }, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - VALID: [ - ( - "1999-01-18T12:30:45.0+01:00 AD", - "1999-01-18T11:30:45.0 AD", - ) - ], - }, - PostgresType.TEXT: { - VALID: [ - ("1999-01-18T12:30:45.0+01:00 AD", "1999-01-18 11:30:45+00") - ] - }, - PostgresType.CHARACTER_VARYING: { - VALID: [ - ("1999-01-18T12:30:45.0+01:00 AD", "1999-01-18 11:30:45+00") - ] - }, - }, - }, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.DATE: { - VALID: [("1999-01-18T00:00:00.0 AD", "1999-01-18 AD")], - INVALID: ["1999-01-18T00:10:00.0 AD"] - }, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - VALID: [("1999-01-18T12:30:45", "1999-01-18T12:30:45.0 AD")] - }, - PostgresType.TIMESTAMP_WITH_TIME_ZONE: { - VALID: [("1999-01-18T12:30:45", "1999-01-18T12:30:45.0Z AD")] - }, - PostgresType.TEXT: {VALID: [("1999-01-18T12:30:45.0 AD", "1999-01-18 12:30:45")]}, - PostgresType.CHARACTER_VARYING: { - VALID: [("1999-01-18T12:30:45.0 AD", "1999-01-18 12:30:45")] - }, - }, - }, - PostgresType.TEXT: { - TARGET_DICT: { - PostgresType.BIGINT: { - VALID: [("432", 432), ("1234123412341234", 
1234123412341234)], - INVALID: ["1.2234"] - }, - PostgresType.BOOLEAN: { - VALID: [ - ("true", True), ("false", False), ("t", True), ("f", False), - ("yes", True), ("y", True), ("no", False), ("n", False), - ("on", True), ("off", False), - ], - INVALID: ["cat"], - }, - PostgresType.CHARACTER: {VALID: [("a", "a")]}, - PostgresType.JSON: { - VALID: [ - ('{"key1": "val1"}', json.loads('{"key1": "val1"}')), - ('{"key2": "val2"}', json.loads('{"key2": "val2"}')) - ], - INVALID: [], - }, - PostgresType.JSONB: { - VALID: [ - ('{"key1": "val1"}', json.loads('{"key1": "val1"}')), - ('{"key2": "val2"}', json.loads('{"key2": "val2"}')) - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [ - ('{"key1": "val1"}', json.dumps({"key1": "val1"})) - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [ - ('[1, 2, 3]', '[1, 2, 3]') - ], - INVALID: [], - }, - PostgresType.DOUBLE_PRECISION: { - VALID: [("1.234", 1.234)], - INVALID: ["bat"], - }, - MathesarCustomType.EMAIL: { - VALID: [("alice@example.com", "alice@example.com")], - INVALID: ["alice-example.com"] - }, - PostgresType.INTEGER: { - VALID: [("432", 432)], - INVALID: ["1.2234"] - }, - PostgresType.INTERVAL: { - VALID: [ - ("1 day", "P0Y0M1DT0H0M0S"), - ("1 week", "P0Y0M7DT0H0M0S"), - ("3:30", "P0Y0M0DT3H30M0S"), - ("00:03:30", "P0Y0M0DT0H3M30S"), - ], - INVALID: ["1 potato", "3"], - }, - MathesarCustomType.MATHESAR_MONEY: { - VALID: [ - ("$1234", 1234), - ("$1234 HK", 1234), - ("$1234.00", 1234), - ("$1,234.00", 1234), - ("1234 USD", 1234), - ("$1,234,567.1234", Decimal('1234567.1234')), - ], - INVALID: ["nanumb"], - }, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - "1234", - {multicurrency.VALUE: 1234, multicurrency.CURRENCY: "USD"} - ) - ], - INVALID: ["nanumb"], - }, - PostgresType.MONEY: { - VALID: [("$1234", "$1,234.00")], - INVALID: ["nanumb"], - }, - PostgresType.NUMERIC: { - VALID: [ - ("3.14", Decimal("3.14")), - ("123,456.7", Decimal("123456.7")), - ("123.456,7", Decimal("123456.7")), - ("123 456,7", Decimal("123456.7")), - ("1,23,456.7", Decimal("123456.7")), - ("123'456.7", Decimal("123456.7")), - ("-3.14", Decimal("-3.14")), - ("-123,456.7", Decimal("-123456.7")), - ("-123.456,7", Decimal("-123456.7")), - ("-123 456,7", Decimal("-123456.7")), - ("-1,23,456.7", Decimal("-123456.7")), - ("-123'456.7", Decimal("-123456.7")) - ], - INVALID: ["not a number"], - }, - PostgresType.REAL: { - VALID: [("1.234", 1.234)], - INVALID: ["real"] - }, - PostgresType.SMALLINT: { - VALID: [("432", 432)], - INVALID: ["1.2234"] - }, - PostgresType.DATE: { - VALID: [ - ("1999-01-18", "1999-01-18 AD"), - ("1/18/1999", "1999-01-18 AD"), - ("jan-1999-18", "1999-01-18 AD"), - ("19990118", "1999-01-18 AD"), - ], - INVALID: [ - "18/1/1999", - "not a date", - "1234", - ] - }, - MathesarCustomType.URI: { - VALID: [ - ("https://centerofci.org", "https://centerofci.org"), - ("http://centerofci.org", "http://centerofci.org"), - ("centerofci.org", "http://centerofci.org"), - ("nasa.gov", "http://nasa.gov"), - ("museumoflondon.org.uk", "http://museumoflondon.org.uk"), - ], - INVALID: ["/sdf/", "localhost", "$123.45", "154.23USD"] - }, - PostgresType.TEXT: {VALID: [("a string", "a string")]}, - PostgresType.TIME_WITHOUT_TIME_ZONE: { - VALID: [("04:05:06", "04:05:06.0"), ("04:05", "04:05:00.0")], - INVALID: ["not a time"] - }, - PostgresType.TIME_WITH_TIME_ZONE: { - VALID: [ - ("04:05:06", "04:05:06.0Z"), - ("04:05+01", "04:05:00.0+01:00") - ], - INVALID: ["not a time"] - }, - 
PostgresType.TIMESTAMP_WITH_TIME_ZONE: { - VALID: [("1999-01-18 12:30:45+00", "1999-01-18T12:30:45.0Z AD")], - INVALID: ["not a timestamp"] - }, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - VALID: [("1999-01-18 12:30:45", "1999-01-18T12:30:45.0 AD")], - INVALID: ["not a timestamp"] - }, - PostgresType.CHARACTER_VARYING: {VALID: [("a string", "a string")]}, - } - }, - MathesarCustomType.URI: { - TARGET_DICT: { - PostgresType.CHARACTER: {VALID: []}, - PostgresType.TEXT: {VALID: [("https://centerofci.org", "https://centerofci.org")]}, - MathesarCustomType.URI: {VALID: [("https://centerofci.org", "https://centerofci.org")]}, - PostgresType.CHARACTER_VARYING: {VALID: [("https://centerofci.org", "https://centerofci.org")]}, - } - }, - PostgresType.CHARACTER_VARYING: { - TARGET_DICT: { - PostgresType.BIGINT: { - VALID: [("432", 432), ("1234123412341234", 1234123412341234)], - INVALID: ["1.2234"] - }, - PostgresType.BOOLEAN: { - VALID: [ - ("true", True), ("false", False), ("t", True), ("f", False), - ("yes", True), ("y", True), ("no", False), ("n", False), - ("on", True), ("off", False), - ], - INVALID: ["cat"], - }, - PostgresType.CHARACTER: {VALID: [("a", "a")]}, - PostgresType.DATE: { - VALID: [ - ("1999-01-18", "1999-01-18 AD"), - ("1/18/1999", "1999-01-18 AD"), - ("jan-1999-18", "1999-01-18 AD"), - ("19990118", "1999-01-18 AD"), - ], - INVALID: [ - "18/1/1999", - "not a date", - "1234", - ] - }, - PostgresType.JSON: { - VALID: [ - ('{"key1": "val1"}', json.loads('{"key1": "val1"}')), - ('{"key2": "val2"}', json.loads('{"key2": "val2"}')) - ], - INVALID: [], - }, - PostgresType.JSONB: { - VALID: [ - ('{"key1": "val1"}', json.loads('{"key1": "val1"}')), - ('{"key2": "val2"}', json.loads('{"key2": "val2"}')) - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_OBJECT: { - VALID: [ - ('{"key1": "val1"}', json.dumps({"key1": "val1"})) - ], - INVALID: [], - }, - MathesarCustomType.MATHESAR_JSON_ARRAY: { - VALID: [ - ('[1, 2, 3]', '[1, 2, 3]') - ], - INVALID: [], - }, - PostgresType.DOUBLE_PRECISION: { - VALID: [("1.234", 1.234)], - INVALID: ["bat"], - }, - MathesarCustomType.EMAIL: { - VALID: [("alice@example.com", "alice@example.com")], - INVALID: ["alice-example.com"] - }, - PostgresType.INTEGER: { - VALID: [("432", 432)], - INVALID: ["1.2234"] - }, - PostgresType.INTERVAL: { - VALID: [ - ("1 day", "P0Y0M1DT0H0M0S"), - ("1 week", "P0Y0M7DT0H0M0S"), - ("3:30", "P0Y0M0DT3H30M0S"), - ("00:03:30", "P0Y0M0DT0H3M30S"), - ], - INVALID: ["1 potato", "3"], - }, - MathesarCustomType.MATHESAR_MONEY: { - VALID: [ - ("$1234", 1234), - ("-$$ 1,234,567", Decimal('-1234567')), - ], - INVALID: ["nanumb"], - }, - PostgresType.MONEY: {VALID: [("$12.12", "$12.12")]}, - MathesarCustomType.MULTICURRENCY_MONEY: { - VALID: [ - ( - "1234", - {multicurrency.VALUE: 1234, multicurrency.CURRENCY: "USD"} - ) - ], - INVALID: ["nanumb"], - }, - PostgresType.NUMERIC: { - VALID: [ - ("3.14", Decimal("3.14")), - ("123,456.7", Decimal("123456.7")), - ("123.456,7", Decimal("123456.7")), - ("123 456,7", Decimal("123456.7")), - ("1,23,456.7", Decimal("123456.7")), - ("123'456.7", Decimal("123456.7")), - ("-3.14", Decimal("-3.14")), - ("-123,456.7", Decimal("-123456.7")), - ("-123.456,7", Decimal("-123456.7")), - ("-123 456,7", Decimal("-123456.7")), - ("-1,23,456.7", Decimal("-123456.7")), - ("-123'456.7", Decimal("-123456.7")) - ], - INVALID: ["not a number"], - }, - PostgresType.REAL: { - VALID: [("1.234", 1.234)], - INVALID: ["real"] - }, - PostgresType.SMALLINT: { - VALID: [("432", 432)], - INVALID: ["1.2234"] - }, - 
PostgresType.TEXT: {VALID: [("a string", "a string")]}, - PostgresType.TIME_WITHOUT_TIME_ZONE: { - VALID: [("04:05:06", "04:05:06.0"), ("04:05", "04:05:00.0")], - INVALID: ["not a time"] - }, - PostgresType.TIME_WITH_TIME_ZONE: { - VALID: [ - ("04:05:06", "04:05:06.0Z"), - ("04:05+01", "04:05:00.0+01:00") - ], - INVALID: [ - "not a time", - ] - }, - PostgresType.TIMESTAMP_WITH_TIME_ZONE: { - VALID: [("1999-01-18 12:30:45+00", "1999-01-18T12:30:45.0Z AD")], - INVALID: ["not a timestamp"] - }, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: { - VALID: [("1999-01-18 12:30:45+00", "1999-01-18T12:30:45.0 AD")], - INVALID: ["not a timestamp"] - }, - MathesarCustomType.URI: { - VALID: [("https://centerofci.org", "https://centerofci.org")], - INVALID: ["/sdf/"] - }, - PostgresType.CHARACTER_VARYING: {VALID: [("a string", "a string")]}, - } - } -} - - -# TODO move to a more fundamental db type test suite -def test_get_alter_column_types_with_custom_engine(engine): - available_known_db_types = get_available_known_db_types(engine) - custom_db_types = CUSTOM_DB_TYPE_TO_SA_CLASS.keys() - for custom_db_type in custom_db_types: - assert custom_db_type in available_known_db_types - - -# TODO move to a more fundamental db type test suite -def test_db_type_juggling_consistency(engine): - """ - A db type should remain constant after being reflected from its SA class. - """ - available_known_db_types = get_available_known_db_types(engine) - for db_type in available_known_db_types: - sa_class = db_type.get_sa_class(engine) - db_type_from_sa_class = get_db_type_enum_from_class(sa_class) - assert db_type == db_type_from_sa_class - - -# This list is assembled by taking all source and target type pairs without type options and then -# appending some of those pairs again with type options specified. 
-type_test_list = [ - ( - source_type, - target_type, - {}, - ) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() - for target_type in val[TARGET_DICT] -] + [ - (source_type, PostgresType.NUMERIC, {"precision": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.NUMERIC in val[TARGET_DICT] -] + [ - (source_type, PostgresType.NUMERIC, {"precision": 5, "scale": 3}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.NUMERIC in val[TARGET_DICT] -] + [ - (source_type, PostgresType.TIME_WITHOUT_TIME_ZONE, {"precision": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.TIME_WITHOUT_TIME_ZONE in val[TARGET_DICT] -] + [ - (source_type, PostgresType.TIME_WITH_TIME_ZONE, {"precision": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.TIME_WITH_TIME_ZONE in val[TARGET_DICT] -] + [ - (source_type, PostgresType.TIMESTAMP_WITH_TIME_ZONE, {"precision": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.TIMESTAMP_WITH_TIME_ZONE in val[TARGET_DICT] -] + [ - (source_type, PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, {"precision": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE in val[TARGET_DICT] -] + [ - (source_type, PostgresType.CHARACTER, {"length": 5}) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() if PostgresType.CHARACTER in val[TARGET_DICT] -] - - -@pytest.mark.parametrize( - "source_type,target_type,options", type_test_list -) -def test_alter_column_type_alters_column_type( - engine_with_schema, source_type, target_type, options -): - """ - The massive number of cases make sure all type casting functions at - least pass a smoke test for each type mapping defined in - MASTER_DB_TYPE_MAP_SPEC above. 
- """ - engine, schema = engine_with_schema - TABLE_NAME = "testtable" - COLUMN_NAME = "testcol" - metadata = MetaData(bind=engine) - source_sa_type = source_type.get_sa_class(engine) - input_table = Table( - TABLE_NAME, - metadata, - Column(COLUMN_NAME, source_sa_type), - schema=schema - ) - input_table.create() - with engine.begin() as conn: - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - alter_column_type( - table_oid, - get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata), - engine, - conn, - target_type, - options - ) - metadata = MetaData(bind=engine) - metadata.reflect() - actual_column = Table( - TABLE_NAME, - metadata, - schema=schema, - autoload_with=engine - ).columns[COLUMN_NAME] - actual_type = get_db_type_enum_from_class(actual_column.type.__class__) - assert actual_type == target_type - - -type_test_data_args_list = [ - ( - NUMERIC(precision=5), - PostgresType.NUMERIC, - {}, - 1, - 1.0, - ), - ( - NUMERIC(precision=5, scale=2), - PostgresType.NUMERIC, - {}, - 1, - 1.0, - ), - ( - PostgresType.NUMERIC, - PostgresType.NUMERIC, - {"precision": 5, "scale": 2}, - 1.234, - Decimal("1.23"), - ), - # test that rounding is as intended - ( - PostgresType.NUMERIC, - PostgresType.NUMERIC, - {"precision": 5, "scale": 2}, - 1.235, - Decimal("1.24"), - ), - ( - PostgresType.CHARACTER_VARYING, - PostgresType.NUMERIC, - {"precision": 6, "scale": 2}, - "5000.134", - Decimal("5000.13"), - ), - ( - PostgresType.TIME_WITHOUT_TIME_ZONE, - PostgresType.TIME_WITHOUT_TIME_ZONE, - {"precision": 0}, - "00:00:00.1234", - "00:00:00.0", - ), - ( - PostgresType.TIME_WITH_TIME_ZONE, - PostgresType.TIME_WITH_TIME_ZONE, - {"precision": 0}, - "00:00:00.1234-04:30", - "00:00:00.0-04:30", - ), - ( - PostgresType.TIMESTAMP_WITH_TIME_ZONE, - PostgresType.TIMESTAMP_WITH_TIME_ZONE, - {"precision": 0}, - "1999-01-01 00:00:00", - "1999-01-01T00:00:00.0Z AD", - ), - ( - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, - {"precision": 0}, - "1999-01-01 00:00:00", - "1999-01-01T00:00:00.0 AD", - ), - ( - PostgresType.CHARACTER_VARYING, - PostgresType.CHARACTER, - {"length": 5}, - "abcde", - "abcde", - ), -] - - -@pytest.mark.parametrize( - "source_type,target_type,options,value,expect_value", type_test_data_args_list -) -def test_alter_column_type_casts_column_data_args( - engine_with_schema, source_type, target_type, options, value, expect_value, -): - engine, schema = engine_with_schema - TABLE_NAME = "testtable" - COLUMN_NAME = "testcol" - metadata = MetaData(bind=engine) - # Sometimes source_type is a DatabaseType enum and other times an SA type instance. 
- source_sa_type = ( - source_type.get_sa_class(engine) - if isinstance(source_type, DatabaseType) - else source_type - ) - input_table = Table( - TABLE_NAME, - metadata, - Column(COLUMN_NAME, source_sa_type), - schema=schema - ) - input_table.create() - ins = input_table.insert(values=(value,)) - with engine.begin() as conn: - conn.execute(ins) - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - alter_column_type( - table_oid, - get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata), - engine, - conn, - target_type, - options - ) - metadata = MetaData(bind=engine) - metadata.reflect() - actual_table = Table( - TABLE_NAME, - metadata, - schema=schema, - autoload_with=engine - ) - sel = actual_table.select() - with engine.connect() as conn: - res = conn.execute(sel).fetchall() - actual_value = res[0][0] - assert actual_value == expect_value - - -type_test_data_gen_list = [ - ( - source_type, - target_type, - in_val, - out_val, - ) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() - for target_type in val[TARGET_DICT] - for in_val, out_val in val[TARGET_DICT][target_type].get(VALID, []) -] - - -@pytest.mark.parametrize( - "source_type,target_type,in_val,out_val", type_test_data_gen_list -) -def test_alter_column_casts_data_gen( - engine_with_schema, source_type, target_type, in_val, out_val -): - engine, schema = engine_with_schema - TABLE_NAME = "testtable" - COLUMN_NAME = "testcol" - metadata = MetaData(bind=engine) - source_sa_type = source_type.get_sa_class(engine) - default_unsupported = [ - MathesarCustomType.MULTICURRENCY_MONEY, - PostgresType.CHARACTER, - PostgresType.JSON, - PostgresType.JSONB, - MathesarCustomType.MATHESAR_JSON_ARRAY, - MathesarCustomType.MATHESAR_JSON_OBJECT, - ] - if source_type not in default_unsupported and target_type not in default_unsupported: - in_sel = select(cast(cast(in_val, source_sa_type), VARCHAR)) - with engine.begin() as conn: - processed_in_val = conn.execute(in_sel).scalar() - else: - processed_in_val = None - - input_table = Table( - TABLE_NAME, - metadata, - Column( - COLUMN_NAME, - source_sa_type, - server_default=processed_in_val - ), - schema=schema - ) - input_table.create() - ins = input_table.insert().values(testcol=in_val) - with engine.begin() as conn: - conn.execute(ins) - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - alter_column_type( - table_oid, - get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata), - engine, - conn, - target_type - ) - metadata = MetaData(bind=engine) - metadata.reflect() - actual_table = Table(TABLE_NAME, metadata, schema=schema, autoload_with=engine) - sel = actual_table.select() - with engine.connect() as conn: - res = conn.execute(sel).fetchall() - actual_value = res[0][0] - assert actual_value == out_val - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - metadata = get_empty_metadata() - column_attnum = get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata=metadata) - actual_default = get_column_default(table_oid, column_attnum, engine, metadata=metadata) - # TODO This needs to be sorted out by fixing how server_default is set. 
- if source_type not in default_unsupported and target_type not in default_unsupported: - assert actual_default == out_val - - -type_test_bad_data_gen_list = [ - ( - source_type, - target_type, - data, - ) - for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items() - for target_type in val[TARGET_DICT] - for data in val[TARGET_DICT][target_type].get(INVALID, []) -] - - -@pytest.mark.parametrize( - "source_type,target_type,value", type_test_bad_data_gen_list -) -def test_alter_column_type_raises_on_bad_column_data( - engine_with_schema, source_type, target_type, value, -): - engine, schema = engine_with_schema - TABLE_NAME = "testtable" - COLUMN_NAME = "testcol" - metadata = MetaData(bind=engine) - source_sa_type = source_type.get_sa_class(engine) - input_table = Table( - TABLE_NAME, - metadata, - Column(COLUMN_NAME, source_sa_type), - schema=schema - ) - input_table.create() - ins = input_table.insert(values=(value,)) - with engine.begin() as conn: - conn.execute(ins) - with pytest.raises(Exception): - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - alter_column_type( - table_oid, - get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata), - engine, - conn, - target_type, - None - ) - - -def test_alter_column_type_raises_on_bad_parameters( - engine_with_schema, -): - engine, schema = engine_with_schema - TABLE_NAME = "testtable" - COLUMN_NAME = "testcol" - metadata = MetaData(bind=engine) - input_table = Table( - TABLE_NAME, - metadata, - Column(COLUMN_NAME, NUMERIC), - schema=schema - ) - input_table.create() - ins = input_table.insert(values=(5.3,)) - bad_options = {"precision": 3, "scale": 4} # scale must be smaller than precision - with engine.begin() as conn: - conn.execute(ins) - with pytest.raises(DataError) as e: - table_oid = get_oid_from_table(TABLE_NAME, schema, engine) - alter_column_type( - table_oid, - get_column_attnum_from_name(table_oid, COLUMN_NAME, engine, metadata), - engine, - conn, - PostgresType.NUMERIC, - bad_options - ) - assert e.orig == InvalidParameterValue - - -def test_get_column_cast_expression_unchanged(engine_with_schema): - engine, _ = engine_with_schema - target_type = PostgresType.NUMERIC - col_name = "my_column" - column = Column(col_name, NUMERIC) - cast_expr = cast_operations.get_column_cast_expression( - column, target_type, engine - ) - assert cast_expr == column - - -def test_get_column_cast_expression_change(engine_with_schema): - engine, _ = engine_with_schema - target_type = PostgresType.BOOLEAN - col_name = "my_column" - column = Column(col_name, NUMERIC) - cast_expr = cast_operations.get_column_cast_expression( - column, target_type, engine - ) - assert str(cast_expr) == f"mathesar_types.cast_to_boolean({col_name})" - - -def test_get_column_cast_expression_change_quotes(engine_with_schema): - engine, _ = engine_with_schema - target_type = PostgresType.BOOLEAN - col_name = "A Column Needing Quotes" - column = Column(col_name, NUMERIC) - cast_expr = cast_operations.get_column_cast_expression( - column, target_type, engine - ) - assert str(cast_expr) == f'mathesar_types.cast_to_boolean("{col_name}")' - - -def test_get_column_cast_expression_unsupported(engine_without_ischema_names_updated): - engine = engine_without_ischema_names_updated - target_type = MathesarCustomType.URI - column = Column("colname", NUMERIC) - with pytest.raises(cast_operations.UnsupportedTypeException): - cast_operations.get_column_cast_expression( - column, target_type, engine - ) - - -cast_expr_numeric_option_list = [ - ( - PostgresType.NUMERIC, - 
PostgresType.NUMERIC,
-        {"precision": 3},
-        'CAST(colname AS NUMERIC(3))',
-    ),
-    (
-        PostgresType.NUMERIC,
-        PostgresType.NUMERIC,
-        {"precision": 3, "scale": 2},
-        'CAST(colname AS NUMERIC(3, 2))',
-    ),
-    (
-        PostgresType.NUMERIC,
-        PostgresType.NUMERIC,
-        {"precision": 3, "scale": 2},
-        'CAST(colname AS NUMERIC(3, 2))',
-    ),
-    (
-        PostgresType.CHARACTER_VARYING,
-        PostgresType.NUMERIC,
-        {"precision": 3, "scale": 2},
-        'CAST(mathesar_types.cast_to_numeric(colname) AS NUMERIC(3, 2))',
-    ),
-    (
-        PostgresType.INTERVAL,
-        PostgresType.INTERVAL,
-        {"fields": "YEAR"},
-        "CAST(colname AS INTERVAL YEAR)",
-    ),
-    (
-        PostgresType.INTERVAL,
-        PostgresType.INTERVAL,
-        {"precision": 2},
-        "CAST(colname AS INTERVAL (2))",
-    ),
-    (
-        PostgresType.INTERVAL,
-        PostgresType.INTERVAL,
-        {"precision": 3, "fields": "SECOND"},
-        "CAST(colname AS INTERVAL SECOND (3))",
-    ),
-    (
-        PostgresType.CHARACTER_VARYING,
-        PostgresType.INTERVAL,
-        {"precision": 3, "fields": "SECOND"},
-        "CAST(mathesar_types.cast_to_interval(colname) AS INTERVAL SECOND (3))",
-    )
-]
-
-
-@pytest.mark.parametrize(
-    "source_type,target_type,options,expect_cast_expr", cast_expr_numeric_option_list
-)
-def test_get_column_cast_expression_type_options(
-    engine_with_schema, source_type, target_type, options, expect_cast_expr
-):
-    engine, _ = engine_with_schema
-    source_sa_type = source_type.get_sa_class(engine)
-    column = Column("colname", source_sa_type)
-    cast_expr = cast_operations.get_column_cast_expression(
-        column, target_type, engine, type_options=options,
-    )
-    actual_cast_expr = str(cast_expr.compile(engine))
-    assert actual_cast_expr == expect_cast_expr
-
-
-expect_cast_tuples = [
-    (source_type, [target_type for target_type in val[TARGET_DICT]])
-    for source_type, val in MASTER_DB_TYPE_MAP_SPEC.items()
-]
-
-
-@pytest.mark.parametrize("source_type,expect_target_types", expect_cast_tuples)
-def test_get_full_cast_map(engine_with_schema, source_type, expect_target_types):
-    engine, _ = engine_with_schema
-    actual_cast_map = cast_operations.get_full_cast_map(engine)
-    actual_target_types = actual_cast_map[source_type]
-    assert set(actual_target_types) == set(expect_target_types)
-
-
-money_array_examples = [
-    ('$1,000.00', ['1,000.00', ',', '.', '$']),
-    ('1,000.00$', ['1,000.00', ',', '.', '$']),
-    ('$1', ['1', None, None, '$']),
-    ('1$', ['1', None, None, '$']),
-    ('$ 1', ['1', None, None, '$ ']),
-    ('1', None),
-    ('1,000', None),
-    ('1,000.00', None),
-    ('$1,000', None),
-    ('$1,000,000', ['1,000,000', ',', None, '$']),
-    ('$1,234,567.1234', ['1,234,567.1234', ',', '.', '$']),
-    ('1,000,000$', ['1,000,000', ',', None, '$']),
-    ('$1 000,000', ['1 000,000', ' ', ',', '$']),
-    ('1 000,000$', ['1 000,000', ' ', ',', '$']),
-    ('1.000,000$', ['1.000,000', '.', ',', '$']),
-    ('$1.000,00 HK', ['1.000,00', '.', ',', '$ HK']),
-    ('EUR 1.000,00', ['1.000,00', '.', ',', 'EUR ']),
-    ('€1.000,00', ['1.000,00', '.', ',', '€']),
-    ('1.000,00€', ['1.000,00', '.', ',', '€']),
-    ('€1 000', ['1 000', ' ', None, '€']),
-    ('1 000€', ['1 000', ' ', None, '€']),
-    ('₿1,324.23466 BTC', ['1,324.23466', ',', '.', '₿ BTC']),
-    ('12₿1,324.23466 BTC', None),
-    ('₿1,324.23466 BTC12', None),
-    ('₹1,00,000', ['1,00,000', ',', None, '₹']),
-    ('1,00,000₹', ['1,00,000', ',', None, '₹']),
-    ('₹1,00,000.00', ['1,00,000.00', ',', '.', '₹']),
-    ('1,00,000.00₹', ['1,00,000.00', ',', '.', '₹']),
-    ('10,00,000.00₹', ['10,00,000.00', ',', '.', '₹']),
-    ('₹10,00,00,000.00', ['10,00,00,000.00', ',', '.', '₹']),
-    ('10,00,00,000.00₹', ['10,00,00,000.00', ',', '.',
'₹']), -] - - -@pytest.mark.parametrize("source_str,expect_arr", money_array_examples) -def test_mathesar_money_array_sql(engine_with_schema, source_str, expect_arr): - engine, _ = engine_with_schema - with engine.begin() as conn: - res = conn.execute( - select( - text(f"mathesar_types.get_mathesar_money_array('{source_str}'::text)") - ) - ).scalar() - assert res == expect_arr - - -numeric_array_examples = [ - ('3.14', ['3.14', None, '.']), - ('331,209.00', ['331,209.00', ',', '.']), - ('1,234,567.8910', ['1,234,567.8910', ',', '.']), - ('-1,234,567.8910', ['1,234,567.8910', ',', '.']), - ('3,14', ['3,14', None, ',']), - ('331.293,00', ['331.293,00', '.', ',']), - ('1.234.567,8910', ['1.234.567,8910', '.', ',']), - ('331 293,00', ['331 293,00', ' ', ',']), - ('1 234 567,8910', ['1 234 567,8910', ' ', ',']), - ('-1 234 567,8910', ['1 234 567,8910', ' ', ',']), - ('1,23,45,678.910', ['1,23,45,678.910', ',', '.']), - ('1\'\'234\'\'567.8910', ['1\'234\'567.8910', '\'', '.']), -] - - -@pytest.mark.parametrize("source_str,expect_arr", numeric_array_examples) -def test_numeric_array_sql(engine_with_schema, source_str, expect_arr): - engine, _ = engine_with_schema - with engine.begin() as conn: - res = conn.execute( - select( - text(f"mathesar_types.get_numeric_array('{source_str}'::text)") - ) - ).scalar() - assert res == expect_arr From 12a82bc3c7a97d2dc747db988da61b2b0e2fe736 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:38:58 +0800 Subject: [PATCH 47/70] remove unused col alter functions --- db/columns/operations/alter.py | 92 +---------- db/tests/columns/operations/test_alter.py | 178 ---------------------- 2 files changed, 2 insertions(+), 268 deletions(-) diff --git a/db/columns/operations/alter.py b/db/columns/operations/alter.py index bb0740c767..de21b9961e 100644 --- a/db/columns/operations/alter.py +++ b/db/columns/operations/alter.py @@ -8,6 +8,7 @@ from db.columns.exceptions import InvalidDefaultError, InvalidTypeError, InvalidTypeOptionError +# TODO Remove; only used in testing def alter_column(engine, table_oid, column_attnum, column_data, connection=None): """ Alter a column of the a table. @@ -67,6 +68,7 @@ def alter_column(engine, table_oid, column_attnum, column_data, connection=None) ) +# TODO Remove; only used in testing def alter_column_type( table_oid, column_attnum, engine, connection, target_type, type_options=None ): @@ -90,96 +92,6 @@ def alter_column_type( ) -# TODO remove once table splitting logic is moved to SQL. -def rename_column(table_oid, column_attnum, engine, connection, new_name): - """ - Rename a single column. - - Args: - table_oid: integer giving the OID of the table with the column. - column_attnum: integer giving the attnum of the column. - engine: SQLAlchemy engine defining the connection string for the DB. - connection: psycopg2 connection object. - new_name: string giving the new name for the column. - """ - alter_column( - engine, - table_oid, - column_attnum, - {"name": new_name}, - connection=connection - ) - - -def _validate_columns_for_batch_update(column_data): - ALLOWED_KEYS = ['attnum', 'name', 'type', 'type_options', 'delete'] - for single_column_data in column_data: - if 'attnum' not in single_column_data.keys(): - raise ValueError('Key "attnum" is required') - for key in single_column_data.keys(): - if key not in ALLOWED_KEYS: - allowed_key_list = ', '.join(ALLOWED_KEYS) - raise ValueError(f'Key "{key}" found in columns. 
Keys allowed are: {allowed_key_list}') - - -def batch_alter_table_drop_columns(table_oid, column_data_list, connection, engine): - """ - Drop the given columns from the given table. - - Args: - table_oid: OID of the table whose columns we'll drop. - column_data_list: List of dictionaries describing columns to alter. - connection: the connection (if any) to use with the database. - engine: the SQLAlchemy engine to use with the database. - - Returns: - A string of the command that was executed. - """ - columns_to_drop = [ - int(col['attnum']) for col in column_data_list - if col.get('attnum') is not None and col.get('delete') is not None - ] - - if connection is not None and columns_to_drop: - return db_conn.execute_msar_func_with_psycopg2_conn( - connection, 'drop_columns', int(table_oid), *columns_to_drop - ) - elif columns_to_drop: - return db_conn.execute_msar_func_with_engine( - engine, 'drop_columns', int(table_oid), *columns_to_drop - ) - - -def batch_update_columns(table_oid, engine, column_data_list): - """ - Alter the given columns of the table. - - For details on the column_data_list format, see _process_column_alter_dict_dep. - - Args: - table_oid: the OID of the table whose columns we'll alter. - engine: The SQLAlchemy engine to use with the database. - column_data_list: A list of dictionaries describing alterations. - """ - _validate_columns_for_batch_update(column_data_list) - try: - db_conn.execute_msar_func_with_engine( - engine, 'alter_columns', - table_oid, - json.dumps( - [_process_column_alter_dict_dep(column) for column in column_data_list] - ) - ) - except InvalidParameterValue: - raise InvalidTypeOptionError - except InvalidTextRepresentation: - raise InvalidTypeError(None, None) - except RaiseException: - raise InvalidTypeError(None, None) - except SyntaxError: - raise InvalidTypeOptionError - - def alter_columns_in_table(table_oid, column_data_list, conn): """ Alter columns of the given table in bulk. diff --git a/db/tests/columns/operations/test_alter.py b/db/tests/columns/operations/test_alter.py index 993fdafe2a..17f26245ea 100644 --- a/db/tests/columns/operations/test_alter.py +++ b/db/tests/columns/operations/test_alter.py @@ -1,17 +1,7 @@ import json from unittest.mock import patch -from sqlalchemy import Column, VARCHAR from db.columns.operations import alter as col_alt -from db.columns.operations.alter import batch_update_columns -from db.columns.operations.select import get_column_attnum_from_name -from db.tables.operations.select import ( - get_oid_from_table, reflect_table -) -from db.tests.columns.utils import create_test_table -from db.types.base import PostgresType -from db.types.operations.convert import get_db_type_enum_from_class -from db.metadata import get_empty_metadata def test_alter_columns_in_table_basic(): @@ -50,171 +40,3 @@ def test_alter_columns_in_table_basic(): assert mock_exec.call_args.args[:3] == ('conn', 'alter_columns', 123) # Necessary since `json.dumps` mangles dict ordering, but we don't care. 
assert json.loads(mock_exec.call_args.args[3]) == expect_json_arg - - -def _create_pizza_table(engine, schema): - table_name = 'Pizzas' - cols = [ - Column('ID', VARCHAR), - Column('Pizza', VARCHAR), - Column('Checkbox', VARCHAR), - Column('Rating', VARCHAR) - ] - insert_data = [ - ('1', 'Pepperoni', 'true', '4.0'), - ('2', 'Supreme', 'false', '5.0'), - ('3', 'Hawaiian', 'true', '3.5') - ] - return create_test_table(table_name, cols, insert_data, schema, engine) - - -def _get_pizza_column_data(table_oid, engine): - column_data = [{ - 'name': 'ID', - 'type': PostgresType.CHARACTER_VARYING.id - }, { - 'name': 'Pizza', - 'type': PostgresType.CHARACTER_VARYING.id - }, { - 'name': 'Checkbox', - 'type': PostgresType.CHARACTER_VARYING.id - }, { - 'name': 'Rating', - 'type': PostgresType.CHARACTER_VARYING.id - }] - for data in column_data: - name = data['name'] - data['attnum'] = get_column_attnum_from_name(table_oid, name, engine, metadata=get_empty_metadata()) - return column_data - - -def test_batch_update_columns_no_changes(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(table.columns) == len(updated_table.columns) - for index, _ in enumerate(table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == PostgresType.CHARACTER_VARYING.id - assert updated_table.columns[index].name == table.columns[index].name - - -def test_batch_update_column_names(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - column_data[1]['name'] = 'Pizza Style' - column_data[2]['name'] = 'Eaten Recently?' 
- - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(table.columns) == len(updated_table.columns) - for index, _ in enumerate(table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == column_data[index]['type'] - assert updated_table.columns[index].name == column_data[index]['name'] - - -def test_batch_update_column_types(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - column_data[0]['type'] = PostgresType.DOUBLE_PRECISION.id - column_data[2]['type'] = PostgresType.BOOLEAN.id - - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(table.columns) == len(updated_table.columns) - for index, _ in enumerate(table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == column_data[index]['type'] - assert updated_table.columns[index].name == column_data[index]['name'] - - -def test_batch_update_column_names_and_types(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - column_data[0]['name'] = 'Pizza ID' - column_data[0]['type'] = PostgresType.INTEGER.id - column_data[1]['name'] = 'Pizza Style' - column_data[2]['type'] = PostgresType.BOOLEAN.id - - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(table.columns) == len(updated_table.columns) - for index, _ in enumerate(table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == column_data[index]['type'] - assert updated_table.columns[index].name == column_data[index]['name'] - - -def test_batch_update_column_drop_columns(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - metadata = get_empty_metadata() - column_data[0] = { - 'attnum': get_column_attnum_from_name(table_oid, column_data[0]['name'], engine, metadata=metadata), - 'delete': True - } - column_data[1] = { - 'attnum': get_column_attnum_from_name(table_oid, column_data[1]['name'], engine, metadata=metadata), - 'delete': True - } - - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(updated_table.columns) == len(table.columns) - 2 - for index, _ in enumerate(updated_table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == column_data[index - 2]['type'] - assert updated_table.columns[index].name == column_data[index - 2]['name'] - - 
-def test_batch_update_column_all_operations(engine_with_schema): - engine, schema = engine_with_schema - table = _create_pizza_table(engine, schema) - table_oid = get_oid_from_table(table.name, schema, engine) - - column_data = _get_pizza_column_data(table_oid, engine) - column_data[0]['name'] = 'Pizza ID' - column_data[0]['type'] = PostgresType.INTEGER.id - column_data[1]['name'] = 'Pizza Style' - column_data[2]['type'] = PostgresType.BOOLEAN.id - column_data[3] = { - 'attnum': get_column_attnum_from_name(table_oid, column_data[3]['name'], engine, metadata=get_empty_metadata()), - 'delete': True - } - - batch_update_columns(table_oid, engine, column_data) - updated_table = reflect_table(table.name, schema, engine, metadata=get_empty_metadata()) - - assert len(updated_table.columns) == len(table.columns) - 1 - for index, _ in enumerate(updated_table.columns): - new_column_type_class = updated_table.columns[index].type.__class__ - new_column_type = get_db_type_enum_from_class(new_column_type_class).id - assert new_column_type == column_data[index]['type'] - assert updated_table.columns[index].name == column_data[index]['name'] From d392abafac11749949db0cde0666bf2a05aabaf4 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 16:43:40 +0800 Subject: [PATCH 48/70] remove unused SQLAlchemy-based column create functions --- db/columns/operations/create.py | 52 ---------------------- db/tests/columns/operations/test_create.py | 17 ------- 2 files changed, 69 deletions(-) diff --git a/db/columns/operations/create.py b/db/columns/operations/create.py index ddaf29d4b3..f7f58883c4 100644 --- a/db/columns/operations/create.py +++ b/db/columns/operations/create.py @@ -1,31 +1,9 @@ """Python functions to add columns to preexisting tables.""" import json -from alembic.migration import MigrationContext -from alembic.operations import Operations -from psycopg.errors import InvalidTextRepresentation, InvalidParameterValue - from db import connection as db_conn from db.columns.defaults import DEFAULT, NAME, NULLABLE, DESCRIPTION -from db.columns.exceptions import InvalidDefaultError, InvalidTypeOptionError -from db.tables.operations.select import reflect_table_from_oid from db.types.base import PostgresType -from db.metadata import get_empty_metadata - - -def create_column(engine, table_oid, column_data): - col_create_def = [_transform_column_create_dict(column_data)] - try: - curr = db_conn.execute_msar_func_with_engine( - engine, 'add_columns', - table_oid, - json.dumps(col_create_def) - ) - except InvalidTextRepresentation: - raise InvalidDefaultError - except InvalidParameterValue: - raise InvalidTypeOptionError - return curr.fetchone()[0] def add_columns_to_table(table_oid, column_data_list, conn): @@ -84,33 +62,3 @@ def _transform_column_create_dict(data): "default": data.get(DEFAULT, {}).get('value'), "description": data.get(DESCRIPTION), } - - -def bulk_create_mathesar_column(engine, table_oid, columns, schema): - # TODO reuse metadata - table = reflect_table_from_oid(table_oid, engine, metadata=get_empty_metadata()) - with engine.begin() as conn: - ctx = MigrationContext.configure(conn) - op = Operations(ctx) - for column in columns: - op.add_column(table.name, column, schema=schema) - - -def duplicate_column( - table_oid, - copy_from_attnum, - engine, - new_column_name=None, - copy_data=True, - copy_constraints=True -): - curr = db_conn.execute_msar_func_with_engine( - engine, - 'copy_column', - table_oid, - copy_from_attnum, - new_column_name, - copy_data, - copy_constraints - ) - 
return curr.fetchone()[0] diff --git a/db/tests/columns/operations/test_create.py b/db/tests/columns/operations/test_create.py index a248fd5d49..fa8d4849b1 100644 --- a/db/tests/columns/operations/test_create.py +++ b/db/tests/columns/operations/test_create.py @@ -73,20 +73,3 @@ def test_add_columns_type_options(in_options, out_options): assert call_args[2] == 123 assert json.loads(call_args[3])[0]["type"]["name"] == "character varying" assert json.loads(call_args[3])[0]["type"]["options"] == out_options - - -def test_duplicate_column_smoke(engine_with_schema): - """This is just a smoke test, since the underlying function is trivial.""" - engine, schema = engine_with_schema - with patch.object(col_create.db_conn, "execute_msar_func_with_engine") as mock_exec: - col_create.duplicate_column( - 12345, - 4, - engine, - new_column_name='newcol', - copy_data=False, - copy_constraints=True - ) - mock_exec.assert_called_once_with( - engine, 'copy_column', 12345, 4, 'newcol', False, True - ) From 15aaa5c7a47ed4609cc6ca8593b69c5d7c21f231 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 17:00:18 +0800 Subject: [PATCH 49/70] remove unused SQLAlchemy-based column functions --- db/columns/base.py | 29 +--- db/columns/operations/drop.py | 17 --- db/columns/operations/select.py | 151 +-------------------- db/tests/columns/operations/test_select.py | 99 +------------- 4 files changed, 6 insertions(+), 290 deletions(-) diff --git a/db/columns/base.py b/db/columns/base.py index ca710e0c1c..6a4138fe78 100644 --- a/db/columns/base.py +++ b/db/columns/base.py @@ -1,9 +1,7 @@ from sqlalchemy import Column, ForeignKey, inspect from db.columns.defaults import TYPE, PRIMARY_KEY, NULLABLE, DEFAULT_COLUMNS -from db.columns.operations.select import ( - get_column_attnum_from_name, get_column_default, get_column_default_dict, -) +from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import get_oid_from_table from db.types.operations.cast import get_full_cast_map from db.types.operations.convert import get_db_type_enum_from_class @@ -197,31 +195,6 @@ def column_attnum(self): metadata=metadata, ) - @property - def column_default_dict(self): - if self.table_ is None: - return - metadata = self.table_.metadata - default_dict = get_column_default_dict( - self.table_oid, self.column_attnum, self.engine, metadata=metadata, - ) - if default_dict: - return { - 'is_dynamic': default_dict['is_dynamic'], - 'value': default_dict['value'] - } - - @property - def default_value(self): - if self.table_ is not None: - metadata = self.table_.metadata - return get_column_default( - self.table_oid, - self.column_attnum, - self.engine, - metadata=metadata, - ) - @property def db_type(self): """ diff --git a/db/columns/operations/drop.py b/db/columns/operations/drop.py index a9227af358..1f35cd3e74 100644 --- a/db/columns/operations/drop.py +++ b/db/columns/operations/drop.py @@ -2,23 +2,6 @@ from db import connection as db_conn -def drop_column(table_oid, column_attnum, engine): - """ - Drop the given column from the given table. - - Args: - table_oid: OID of the table whose column we'll drop. - column_attnum: The attnum of the column to drop. - engine: SQLAlchemy engine object for connecting. - - Returns: - Returns a string giving the command that was run. 
- """ - return db_conn.execute_msar_func_with_engine( - engine, 'drop_columns', table_oid, column_attnum - ).fetchone()[0] - - def drop_columns_from_table(table_oid, column_attnums, conn): """ Drop the given columns from the given table. diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py index 5ef5f1f508..18b1206041 100644 --- a/db/columns/operations/select.py +++ b/db/columns/operations/select.py @@ -1,9 +1,6 @@ -import warnings +from sqlalchemy import and_, asc, select -from sqlalchemy import and_, asc, cast, select, text, exists, Identity - -from db.columns.exceptions import DynamicDefaultWarning -from db.connection import execute_msar_func_with_engine, exec_msar_func +from db.connection import exec_msar_func from db.tables.operations.select import reflect_table_from_oid from db.utils import execute_statement, get_pg_catalog_table @@ -46,31 +43,6 @@ def get_column_info_for_table(table, conn): return exec_msar_func(conn, 'get_column_info', table).fetchone()[0] -def get_column_description(oid, attnum, engine): - cursor = execute_msar_func_with_engine(engine, 'col_description', oid, attnum) - row = cursor.fetchone() - description = row[0] - return description - - -def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None): - statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata) - attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall() - name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple} - return name_attnum_map - - -def get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None): - """ - Returns the respective list of attnum of the column names passed. - The order is based on the column order in the table and not by the order of the column names argument. - """ - statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata) - attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall() - attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple] - return attnums - - def get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None): statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata) return execute_statement(engine, statement, connection_to_use).scalar() @@ -87,125 +59,6 @@ def _get_columns_attnum_from_names(table_oid, column_names, engine, metadata): return sel -def get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None): - pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata) - sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where( - and_( - pg_attribute.c.attrelid.in_(table_oids), - # Ignore system columns - pg_attribute.c.attnum > 0, - # Ignore removed columns - pg_attribute.c.attisdropped.is_(False) - ) - ) - results = execute_statement(engine, sel, connection_to_use).fetchall() - return results - - -def get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None): - """ - Order determined by the column order in the table. 
- """ - triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid( - table_oids, None, engine, metadata, connection_to_use - ) - return { - (attnum, table_oid): column_name - for column_name, attnum, table_oid - in triples_of_col_info - } - - -def get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None): - return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values()) - - -def get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None): - """ - Order determined by the column order in the table. - """ - triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid( - [table_oid], attnums, engine, metadata, connection_to_use - ) - return { - attnum: column_name - for column_name, attnum, _ - in triples_of_col_info - } - - -def _get_triples_of_column_name_and_attnum_and_table_oid( - table_oids, attnums, engine, metadata, connection_to_use -): - statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid( - table_oids, attnums, engine, metadata - ) - return execute_statement(engine, statement, connection_to_use).fetchall() - - -def get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None): - default_dict = get_column_default_dict( - table_oid, - attnum, - engine, - metadata=metadata, - connection_to_use=connection_to_use, - ) - if default_dict is not None: - return default_dict['value'] - - -def get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None): - column = get_column_from_oid_and_attnum( - table_oid=table_oid, - attnum=attnum, - engine=engine, - metadata=metadata, - connection_to_use=connection_to_use, - ) - default = column.server_default - - if default is None: - return - - is_dynamic = execute_msar_func_with_engine( - engine, 'is_default_possibly_dynamic', table_oid, attnum - ).fetchone()[0] - - sql_text = str(default.arg) if not isinstance(default, Identity) else 'identity' - - if is_dynamic: - warnings.warn( - "Dynamic column defaults are read only", DynamicDefaultWarning - ) - default_value = sql_text - else: - # Defaults are often stored as text with SQL casts appended - # Ex: "'test default string'::character varying" or "'2020-01-01'::date" - # Here, we execute the cast to get the proper python value - default_value = execute_statement( - engine, - select(cast(text(sql_text), column.type)), - connection_to_use - ).scalar() - - return {"value": default_value, "is_dynamic": is_dynamic} - - -def determine_whether_column_contains_data( - table_oid, column_name, engine, metadata, connection_to_use=None -): - """ - Given a column, return True if it contains data, False otherwise. 
- """ - sa_table = reflect_table_from_oid( - table_oid, engine, metadata=metadata, connection_to_use=connection_to_use, - ) - sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa - contains_data = execute_statement(engine, sel, connection_to_use).scalar() - return contains_data - - def get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None): sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use) column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use) diff --git a/db/tests/columns/operations/test_select.py b/db/tests/columns/operations/test_select.py index 44c995766f..d062e53a7f 100644 --- a/db/tests/columns/operations/test_select.py +++ b/db/tests/columns/operations/test_select.py @@ -1,17 +1,11 @@ from unittest.mock import patch -import warnings -import pytest -from sqlalchemy import ( - String, Integer, Column, Table, MetaData, DateTime, func -) -from db.columns.exceptions import DynamicDefaultWarning +from sqlalchemy import String, Integer, Column, Table, MetaData from db.columns.operations import select as col_select from db.columns.operations.select import ( - get_column_attnum_from_name, get_column_default, - get_column_name_from_attnum, get_columns_attnum_from_names, + get_column_attnum_from_name, + get_column_name_from_attnum ) from db.tables.operations.select import get_oid_from_table -from db.tests.columns.utils import column_test_dict, get_default from db.metadata import get_empty_metadata @@ -41,90 +35,3 @@ def test_get_attnum_from_name(engine_with_schema): column_one_attnum = get_column_attnum_from_name(table_oid, one_name, engine, metadata=metadata) assert get_column_name_from_attnum(table_oid, column_zero_attnum, engine, metadata=metadata) == zero_name assert get_column_name_from_attnum(table_oid, column_one_attnum, engine, metadata=metadata) == one_name - - -def test_get_attnum_from_names(engine_with_schema): - engine, schema = engine_with_schema - table_name = "table_with_columns" - zero_name = "colzero" - one_name = "colone" - table = Table( - table_name, - MetaData(bind=engine, schema=schema), - Column(zero_name, Integer), - Column(one_name, String), - ) - table.create() - table_oid = get_oid_from_table(table_name, schema, engine) - metadata = get_empty_metadata() - columns_attnum = get_columns_attnum_from_names(table_oid, [zero_name, one_name], engine, metadata=metadata) - assert get_column_name_from_attnum(table_oid, columns_attnum[0], engine, metadata=metadata) == zero_name - assert get_column_name_from_attnum(table_oid, columns_attnum[1], engine, metadata=metadata) == one_name - - -@pytest.mark.parametrize("filler", [True, False]) -@pytest.mark.parametrize("col_type", column_test_dict.keys()) -def test_get_column_default(engine_with_schema, filler, col_type): - engine, schema = engine_with_schema - table_name = "get_column_default_table" - column_name = "get_column_default_column" - _, set_default, expt_default = column_test_dict[col_type].values() - - # Ensure we test one and multiple defaults in a table - # There _was_ a bug associated with multiple defaults - cols = [Column(column_name, col_type, server_default=set_default)] - if filler: - cols.append(Column("Filler", Integer, server_default="0")) - table = Table( - table_name, - MetaData(bind=engine, schema=schema), - *cols - ) - table.create() - table_oid = get_oid_from_table(table_name, schema, engine) - metadata = 
get_empty_metadata() - column_attnum = get_column_attnum_from_name(table_oid, column_name, engine, metadata=metadata) - default = get_column_default(table_oid, column_attnum, engine, metadata=metadata) - created_default = get_default(engine, table) - assert default == expt_default - assert default == created_default - - -get_column_generated_default_test_list = [ - Column("generated_default_col", Integer, primary_key=True), - Column("generated_default_col", DateTime, server_default=func.now()), - Column("generated_default_col", DateTime, server_default=func.current_timestamp()) -] - - -@pytest.mark.parametrize("col", get_column_generated_default_test_list) -def test_get_column_generated_default(engine_with_schema, col): - engine, schema = engine_with_schema - table_name = "get_column_generated_default_table" - table = Table( - table_name, - MetaData(bind=engine, schema=schema), - col, - ) - table.create() - table_oid = get_oid_from_table(table_name, schema, engine) - metadata = get_empty_metadata() - column_attnum = get_column_attnum_from_name(table_oid, col.name, engine, metadata=metadata) - with warnings.catch_warnings(), pytest.raises(DynamicDefaultWarning): - warnings.filterwarnings("error", category=DynamicDefaultWarning) - get_column_default(table_oid, column_attnum, engine, metadata=metadata) - - -default_expression_test_list = [ - ("CURRENT_TIMESTAMP", True), - ("CURRENT_TIMESTAMP::CHAR(64)", True), - ("NOW()", True), - ("myfunc()", True), - ("now()", True), - ("now()::VARCHAR", True), - ("'now()'::VARCHAR", False), - ("'3'::NUMERIC", False), - ("'3'::CHAR", False), - ("'abcde'::CHAR(3)", False), - ("'abcde'::CHAR(5)", False), -] From 7322aea5793a5a8b13b8db52f07665912ee4a382 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Wed, 16 Oct 2024 20:53:02 +0800 Subject: [PATCH 50/70] remove unused column utilites functions --- db/columns/utils.py | 127 -------------------- db/tests/columns/operations/test_mapping.py | 113 ----------------- db/tests/columns/test_base.py | 6 - db/tests/test_utils.py | 18 --- 4 files changed, 264 deletions(-) delete mode 100644 db/tests/columns/operations/test_mapping.py delete mode 100644 db/tests/test_utils.py diff --git a/db/columns/utils.py b/db/columns/utils.py index fa291cbf1c..9743e55553 100644 --- a/db/columns/utils.py +++ b/db/columns/utils.py @@ -7,39 +7,6 @@ from db import constants -def get_default_mathesar_column_list(): - return [MathesarColumn(col_name, **DEFAULT_COLUMNS[col_name]) for col_name in DEFAULT_COLUMNS] - - -def to_mathesar_column_with_engine(col, engine): - new_column = MathesarColumn.from_column(col) - new_column.add_engine(engine) - return new_column - - -def get_type_options(column): - return MathesarColumn.from_column(column).type_options - - -def get_enriched_column_table(table, metadata, engine=None): - table_columns = [MathesarColumn.from_column(c) for c in table.columns] - if engine is not None: - for col in table_columns: - col.add_engine(engine) - return Table( - table.name, - metadata, - *table_columns, - schema=table.schema, - ) - - -def init_mathesar_table_column_list_with_defaults(column_list): - default_columns = get_default_mathesar_column_list() - given_columns = [MathesarColumn.from_column(c) for c in column_list if c.name != constants.ID] - return default_columns + given_columns - - def get_column_obj_from_relation(relation, column): """ This function can look for anything that's reasonably referred to as @@ -84,97 +51,3 @@ def get_primary_key_column_collection_from_relation(relation): pkey = getattr(relation, 
'primary_key', None) pk_cols = getattr(pkey, 'columns', pkey) return pk_cols - - -def perfect_map(temp_col_list, target_col_list, engine): - """ - Returns a list of tuples which contain index of temp table column and its equivalent - target table column. - - e.g. - temp_col_list = [('A', PostgresType.INTEGER), ('B', PostgresType.TEXT), - ('C', PostgresType.DATE)] - - target_col_list = [('B', PostgresType.TEXT), ('A', PostgresType.INTEGER), - ('C', PostgresType.DATE)] - - perfect_map will return [(0, 1), (1, 0), (2, 2)] - """ - match = list(zip(sorted(temp_col_list), sorted(target_col_list))) - if all(temp_col[0] == target_col[0] for temp_col, target_col in match): - result = _build_match_tuple(temp_col_list, target_col_list, match) - if result and is_type_casting_valid(match, engine): - return result - - -def _build_match_tuple(tmp_col_list, trgt_col_list, match): - return [ - (tmp_col_list.index(temp_col), trgt_col_list.index(target_col)) - for temp_col, target_col in match - ] - - -def find_match(temp_col_list, target_col_list, engine): - """ - Suggests column mappings based on the columns of the temp table and the target table. - - How are column mappings suggested: - - First it check if only the order of the columns are wrong. - - Otherwise check if one of the following transforms on the column names return a mapping: - - Making the column names case insensitive. - - Replacing an '_'(underscore) between words of column names to a ' '(space). - - Making the column names case insensitive and replacing an underscore with a space. - - If none of the above return a column mapping it raises ColumnMappingsNotFound exception. - """ - if perfect_match := perfect_map(temp_col_list, target_col_list, engine): - return perfect_match - else: - def lowercase(*col_lists): - """ - Transforms the column names to lowercase. - - e.g. - lowercase(col_list = [('A', ...), ('B', ...), ('C', ...)], [...]) - - returns [[('a', ...), ('b', ...), ('c', ...)], [...]] - """ - return [ - [(col[0].lower(), *col[1:]) for col in col_list] - for col_list in col_lists - ] - - def replace_(*col_lists): - """ - Transforms the column names by replacing '_'(underscore) with a ' '(space) - - e.g. - replace_(col_lists = [('A_a', ...), ('B_b', ...), ('C_c', ...)], [...]) - - returns [[('A a', ...), ('B b', ...), ('C c', ...)], [...]] - """ - return [ - [(col[0].replace('_', ' '), *col[1:]) for col in col_list] - for col_list in col_lists - ] - - if case_insensitive_match := perfect_map( - *lowercase(temp_col_list, target_col_list), engine - ): - return case_insensitive_match - elif space_switched_match := perfect_map( - *replace_(temp_col_list, target_col_list), engine - ): - return space_switched_match - elif space_switched_case_insensetive_match := perfect_map( - *lowercase(*replace_(temp_col_list, target_col_list)), engine - ): - return space_switched_case_insensetive_match - else: - raise ColumnMappingsNotFound - - -def is_type_casting_valid(match, engine): - """ Checks if the column of the temporary table can be type casted - to that of a target table if a valid match is found between them. 
""" - cast_map = get_full_cast_map(engine) - return all(temp[1] in cast_map.get(target[1]) for temp, target in match) diff --git a/db/tests/columns/operations/test_mapping.py b/db/tests/columns/operations/test_mapping.py deleted file mode 100644 index e77b40713f..0000000000 --- a/db/tests/columns/operations/test_mapping.py +++ /dev/null @@ -1,113 +0,0 @@ -from db.columns.exceptions import ColumnMappingsNotFound -from db.columns.utils import find_match, is_type_casting_valid -from db.types.base import PostgresType -import pytest - - -def test_mapping_suggestions_perfect_match(engine): - temp_table_col_list = [('Case Number', PostgresType.INTEGER), - ('Center', PostgresType.TEXT), - ('Patent Expiration Date', PostgresType.DATE) - ] - - target_table_col_list = [('Center', PostgresType.TEXT), - ('Case Number', PostgresType.INTEGER), - ('Patent Expiration Date', PostgresType.DATE) - ] - - match = find_match(temp_table_col_list, target_table_col_list, engine) - expected_match = [(0, 1), (1, 0), (2, 2)] - assert match == expected_match - - -def test_mapping_suggestions_case_insensitive(engine): - temp_table_col_list = [('Case number', PostgresType.INTEGER), - ('center', PostgresType.TEXT), - ('Patent expiration Date', PostgresType.DATE) - ] - - target_table_col_list = [('Center', PostgresType.TEXT), - ('patent Expiration date', PostgresType.DATE), - ('case Number', PostgresType.INTEGER) - ] - - match = find_match(temp_table_col_list, target_table_col_list, engine) - expected_match = [(0, 2), (1, 0), (2, 1)] - assert match == expected_match - - -def test_mapping_suggestions_space_switched(engine): - temp_table_col_list = [('Case_Number', PostgresType.INTEGER), - ('Patent_Expiration Date', PostgresType.DATE), - ('Center', PostgresType.TEXT), - ] - - target_table_col_list = [('Center', PostgresType.TEXT), - ('Case Number', PostgresType.INTEGER), - ('Patent Expiration_Date', PostgresType.DATE) - ] - - match = find_match(temp_table_col_list, target_table_col_list, engine) - expected_match = [(0, 1), (2, 0), (1, 2)] - assert match == expected_match - - -def test_mapping_suggestions_space_switched_case_insensitive(engine): - temp_table_col_list = [('Case_number', PostgresType.INTEGER), - ('center', PostgresType.TEXT), - ('Patent Expiration Date', PostgresType.DATE) - ] - - target_table_col_list = [('Patent_expiration_date', PostgresType.DATE), - ('Center', PostgresType.TEXT), - ('case Number', PostgresType.INTEGER) - ] - - match = find_match(temp_table_col_list, target_table_col_list, engine) - expected_match = [(0, 2), (1, 1), (2, 0)] - assert match == expected_match - - -def test_mappings_suggestions_no_match(engine): - temp_table_col_list = [('Case #', PostgresType.INTEGER), - ('Center', PostgresType.TEXT), - ('Patent-Expiration-Date', PostgresType.DATE) - ] - - target_table_col_list = [('Center', PostgresType.TEXT), - ('Case Number', PostgresType.INTEGER), - ('Patent Expiration Date', PostgresType.DATE) - ] - - with pytest.raises(ColumnMappingsNotFound): - find_match(temp_table_col_list, target_table_col_list, engine) - - -def test_type_cast_validator_valid_castings(engine): - temp_table_col_list = [('Case Number', PostgresType.INTEGER), - ('Center', PostgresType.CHARACTER), - ('Patent Expiration Date', PostgresType.DATE) - ] - - target_table_col_list = [('Center', PostgresType.JSON), - ('Case Number', PostgresType.NUMERIC), - ('Patent Expiration Date', PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE) - ] - sorted_zip = list(zip(sorted(temp_table_col_list), sorted(target_table_col_list))) - is_valid = 
is_type_casting_valid(sorted_zip, engine) - assert is_valid is True - - -def test_type_cast_validator_invalid_castings(engine): - temp_table_col_list = [('Case Number', PostgresType.JSON), - ('Center', PostgresType.TEXT), - ('Patent Expiration Date', PostgresType.DATE) - ] - - target_table_col_list = [('Center', PostgresType.TEXT), - ('Case Number', PostgresType.REAL), - ('Patent Expiration Date', PostgresType.DATERANGE) - ] - sorted_zip = list(zip(sorted(temp_table_col_list), sorted(target_table_col_list))) - is_valid = is_type_casting_valid(sorted_zip, engine) - assert is_valid is False diff --git a/db/tests/columns/test_base.py b/db/tests/columns/test_base.py index 7acbcc9eda..6ce078c7aa 100644 --- a/db/tests/columns/test_base.py +++ b/db/tests/columns/test_base.py @@ -9,7 +9,6 @@ from db.columns.base import MathesarColumn from db.columns.defaults import DEFAULT_COLUMNS -from db.columns.utils import get_default_mathesar_column_list from db.types.custom import email, datetime from db.types.base import MathesarCustomType, PostgresType, UnknownType @@ -110,11 +109,6 @@ def test_MC_inits_with_engine_empty(column_builder): assert col.server_default is None -def test_MC_is_default_when_true(): - for default_col in get_default_mathesar_column_list(): - assert default_col.is_default - - def test_MC_is_default_with_uuid_col(): col = MathesarColumn('id', UUID, primary_key=True, nullable=False) assert not col.is_default diff --git a/db/tests/test_utils.py b/db/tests/test_utils.py deleted file mode 100644 index ca5a202369..0000000000 --- a/db/tests/test_utils.py +++ /dev/null @@ -1,18 +0,0 @@ -from sqlalchemy import MetaData, Column, String, Table - -from db.columns.utils import get_enriched_column_table -from db.metadata import get_empty_metadata - - -def test_get_enriched_column_table(engine): - abc = "abc" - table = Table("testtable", MetaData(), Column(abc, String), Column('def', String)) - enriched_table = get_enriched_column_table(table, engine=engine, metadata=get_empty_metadata()) - assert enriched_table.columns[abc].engine == engine - - -def test_get_enriched_column_table_no_engine(): - abc = "abc" - table = Table("testtable", MetaData(), Column(abc, String), Column('def', String)) - enriched_table = get_enriched_column_table(table, metadata=get_empty_metadata()) - assert enriched_table.columns[abc].engine is None From b2a325bb607668ab05070cb7e5fe1c3363b6ffb1 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Thu, 17 Oct 2024 13:21:02 +0800 Subject: [PATCH 51/70] remove unneeded MathesarColumn properties --- db/columns/base.py | 49 ------------------------- db/columns/defaults.py | 20 ---------- db/columns/utils.py | 9 ----- db/records/operations/select.py | 41 --------------------- db/tests/columns/test_base.py | 65 +-------------------------------- 5 files changed, 1 insertion(+), 183 deletions(-) diff --git a/db/columns/base.py b/db/columns/base.py index 6a4138fe78..fe72e78832 100644 --- a/db/columns/base.py +++ b/db/columns/base.py @@ -1,9 +1,7 @@ from sqlalchemy import Column, ForeignKey, inspect -from db.columns.defaults import TYPE, PRIMARY_KEY, NULLABLE, DEFAULT_COLUMNS from db.columns.operations.select import get_column_attnum_from_name from db.tables.operations.select import get_oid_from_table -from db.types.operations.cast import get_full_cast_map from db.types.operations.convert import get_db_type_enum_from_class @@ -100,17 +98,6 @@ def from_column(cls, column, engine=None): ) return new_column - def to_sa_column(self): - """ - MathesarColumn sometimes is not interchangeable with 
SQLAlchemy's Column. - For use in those situations, this method attempts to recreate an SA Column. - - NOTE: this method is incomplete: it does not account for all properties of MathesarColumn. - """ - sa_column = Column(name=self.name, type_=self.type) - sa_column.table = self.table_ - return sa_column - @property def table_(self): """ @@ -133,45 +120,9 @@ def table_oid(self): oid = None return oid - @property - def is_default(self): - default_def = DEFAULT_COLUMNS.get(self.name, False) - try: - self.type.python_type - except NotImplementedError: - return False - return ( - default_def - and self.type.python_type == default_def[TYPE]().python_type - and self.primary_key == default_def.get(PRIMARY_KEY, False) - and self.nullable == default_def.get(NULLABLE, True) - ) - def add_engine(self, engine): self.engine = engine - @property - def valid_target_types(self): - """ - Returns a set of valid types to which the type of the column can be - altered. - """ - if ( - self.engine is not None - and not self.is_default - and self.db_type is not None - ): - db_type = self.db_type - valid_target_types = sorted( - list( - set( - get_full_cast_map(self.engine).get(db_type, []) - ) - ), - key=lambda db_type: db_type.id - ) - return valid_target_types if valid_target_types else [] - @property def column_attnum(self): """ diff --git a/db/columns/defaults.py b/db/columns/defaults.py index bb5153b30c..6880a0b010 100644 --- a/db/columns/defaults.py +++ b/db/columns/defaults.py @@ -1,24 +1,4 @@ -from sqlalchemy import Integer - -from db import constants - - NAME = "name" DESCRIPTION = "description" NULLABLE = "nullable" -PRIMARY_KEY = "primary_key" -TYPE = "sa_type" DEFAULT = "default" -AUTOINCREMENT = "autoincrement" - -ID_TYPE = Integer - - -DEFAULT_COLUMNS = { - constants.ID: { - TYPE: ID_TYPE, - PRIMARY_KEY: True, - NULLABLE: False, - AUTOINCREMENT: True - } -} diff --git a/db/columns/utils.py b/db/columns/utils.py index 9743e55553..210f981a64 100644 --- a/db/columns/utils.py +++ b/db/columns/utils.py @@ -1,12 +1,3 @@ -from sqlalchemy import Table - -from db.columns.base import MathesarColumn -from db.columns.defaults import DEFAULT_COLUMNS -from db.columns.exceptions import ColumnMappingsNotFound -from db.types.operations.cast import get_full_cast_map -from db import constants - - def get_column_obj_from_relation(relation, column): """ This function can look for anything that's reasonably referred to as diff --git a/db/records/operations/select.py b/db/records/operations/select.py index 9ef568d79e..be407473b2 100644 --- a/db/records/operations/select.py +++ b/db/records/operations/select.py @@ -3,10 +3,7 @@ from sqlalchemy.sql.functions import count from db import connection as db_conn -from db.columns.base import MathesarColumn from db.tables.utils import get_primary_key_column -from db.types.operations.cast import get_column_cast_expression -from db.types.operations.convert import get_db_type_enum_from_id from db.utils import execute_pg_query from db.transforms.operations.apply import apply_transformations_deprecated @@ -180,41 +177,3 @@ def get_count(table, engine, filter=None, search=None): search=search, ) return execute_pg_query(engine, relation)[0][col_name] - - -def get_column_cast_records(engine, table, column_definitions, num_records=20): - assert len(column_definitions) == len(table.columns) - cast_expression_list = [ - _get_column_cast_expression_or_column(column, col_def, engine) - for column, col_def in zip(table.columns, column_definitions) - ] - sel = 
select(cast_expression_list).limit(num_records) - with engine.begin() as conn: - result = conn.execute(sel) - return result.fetchall() - - -def _get_column_cast_expression_or_column(column, col_def, engine): - """ - Will return a cast expression for column, unless it's a default column, in which case the - unchaged column will be returned. - """ - target_type = get_db_type_enum_from_id(col_def["type"]) - if target_type is None: - raise Exception( - "Unknown db type id encountered. This should be handled in the request " - + "validation phase. Something is wrong." - ) - type_options = col_def.get("type_options", {}) - if not MathesarColumn.from_column(column).is_default: - return ( - get_column_cast_expression( - column=column, - target_type=target_type, - engine=engine, - type_options=type_options, - ) - .label(col_def["name"]) - ) - else: - return column diff --git a/db/tests/columns/test_base.py b/db/tests/columns/test_base.py index 6ce078c7aa..1b5811bce9 100644 --- a/db/tests/columns/test_base.py +++ b/db/tests/columns/test_base.py @@ -4,11 +4,10 @@ from sqlalchemy import ( INTEGER, ForeignKey, VARCHAR, CHAR, NUMERIC ) -from sqlalchemy.dialects.postgresql import UUID, ARRAY, JSON +from sqlalchemy.dialects.postgresql import ARRAY, JSON from sqlalchemy.sql.sqltypes import NullType from db.columns.base import MathesarColumn -from db.columns.defaults import DEFAULT_COLUMNS from db.types.custom import email, datetime from db.types.base import MathesarCustomType, PostgresType, UnknownType @@ -109,68 +108,6 @@ def test_MC_inits_with_engine_empty(column_builder): assert col.server_default is None -def test_MC_is_default_with_uuid_col(): - col = MathesarColumn('id', UUID, primary_key=True, nullable=False) - assert not col.is_default - - -def test_MC_is_default_when_false_for_name(): - for default_col in DEFAULT_COLUMNS: - dc_definition = DEFAULT_COLUMNS[default_col] - col = MathesarColumn( - "definitely_not_a_default", - dc_definition["sa_type"], - primary_key=dc_definition.get("primary_key", False), - nullable=dc_definition.get("nullable", True), - ) - assert not col.is_default - - -def test_MC_is_default_when_false_for_type(): - for default_col in DEFAULT_COLUMNS: - dc_definition = DEFAULT_COLUMNS[default_col] - changed_type = INTEGER if dc_definition["sa_type"] == VARCHAR else VARCHAR - col = MathesarColumn( - default_col, - changed_type, - primary_key=dc_definition.get("primary_key", False), - nullable=dc_definition.get("nullable", True), - ) - assert not col.is_default - - -def test_MC_is_default_when_false_for_pk(): - for default_col in DEFAULT_COLUMNS: - dc_definition = DEFAULT_COLUMNS[default_col] - not_pk = not dc_definition.get("primary_key", False), - col = MathesarColumn( - default_col, - dc_definition["sa_type"], - primary_key=not_pk, - nullable=dc_definition.get("nullable", True), - ) - assert not col.is_default - - -def test_MC_valid_target_types_no_engine(): - mc = MathesarColumn('testable_col', VARCHAR) - assert mc.valid_target_types is None - - -def test_MC_valid_target_types_default_engine(engine): - mc = MathesarColumn('testable_col', PostgresType.CHARACTER_VARYING.get_sa_class(engine)) - mc.add_engine(engine) - assert mc.valid_target_types is not None - assert PostgresType.CHARACTER_VARYING in mc.valid_target_types - - -def test_MC_valid_target_types_custom_engine(engine): - mc = MathesarColumn('testable_col', VARCHAR) - mc.add_engine(engine) - assert mc.valid_target_types is not None - assert MathesarCustomType.EMAIL in mc.valid_target_types - - def 
test_MC_type_no_opts(engine): mc = MathesarColumn('acolumn', VARCHAR) mc.add_engine(engine) From 025ff728c53a7362813202e81370e5840f313098 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Thu, 17 Oct 2024 14:03:11 +0800 Subject: [PATCH 52/70] remove unused SA type code --- db/types/categories.py | 101 ---- db/types/hintsets.py | 60 -- db/types/install.py | 10 - db/types/operations/cast.py | 1031 ----------------------------------- 4 files changed, 1202 deletions(-) delete mode 100644 db/types/categories.py delete mode 100644 db/types/hintsets.py delete mode 100644 db/types/install.py delete mode 100644 db/types/operations/cast.py diff --git a/db/types/categories.py b/db/types/categories.py deleted file mode 100644 index 5110d0a49d..0000000000 --- a/db/types/categories.py +++ /dev/null @@ -1,101 +0,0 @@ -from db.types.base import PostgresType, MathesarCustomType - -STRING_TYPES = frozenset({ - PostgresType.CHARACTER, - PostgresType.CHARACTER_VARYING, - PostgresType.TEXT, -}) - -STRING_LIKE_TYPES = frozenset({ - *STRING_TYPES, - PostgresType.CHAR, - PostgresType.NAME, -}) - -INTEGER_TYPES = frozenset({ - PostgresType.BIGINT, - PostgresType.INTEGER, - PostgresType.SMALLINT, -}) - -DECIMAL_TYPES = frozenset({ - PostgresType.DOUBLE_PRECISION, - PostgresType.REAL, -}) - -TIME_OF_DAY_TYPES = frozenset({ - PostgresType.TIME_WITH_TIME_ZONE, - PostgresType.TIME_WITHOUT_TIME_ZONE, -}) - -DATE_TYPES = frozenset({ - PostgresType.DATE, -}) - -DATETIME_TYPES = frozenset({ - PostgresType.TIMESTAMP_WITH_TIME_ZONE, - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, -}) - -POINT_IN_TIME_TYPES = frozenset({ - *TIME_OF_DAY_TYPES, - *DATETIME_TYPES, - *DATE_TYPES, -}) - -DURATION_TYPES = frozenset({ - PostgresType.INTERVAL, -}) - -TIME_RELATED_TYPES = frozenset({ - *POINT_IN_TIME_TYPES, - *DURATION_TYPES, -}) - -MONEY_WITH_CURRENCY_TYPES = frozenset({ - MathesarCustomType.MULTICURRENCY_MONEY, -}) - -MONEY_WITHOUT_CURRENCY_TYPES = frozenset({ - PostgresType.MONEY, - MathesarCustomType.MATHESAR_MONEY, -}) - -MONEY_TYPES = frozenset({ - *MONEY_WITH_CURRENCY_TYPES, - *MONEY_WITHOUT_CURRENCY_TYPES, -}) - -NUMERIC_TYPES = frozenset({ - *INTEGER_TYPES, - *DECIMAL_TYPES, - PostgresType.NUMERIC -}) - -# Comparable types are those that should support greater, lesser, equal comparisons amongst -# members of the same type (at least). -COMPARABLE_TYPES = frozenset({ - *NUMERIC_TYPES, - *MONEY_TYPES, - *TIME_RELATED_TYPES, -}) - -ARRAY = frozenset({ - PostgresType._ARRAY -}) - -JSON_TYPES = frozenset({ - PostgresType.JSON, - PostgresType.JSONB, - MathesarCustomType.MATHESAR_JSON_OBJECT, - MathesarCustomType.MATHESAR_JSON_ARRAY -}) - -# TODO is JSON_ARRAY useful being separate from ARRAY? -JSON_ARRAY = frozenset({ - MathesarCustomType.MATHESAR_JSON_ARRAY, -}) - -JSON_OBJECT = frozenset({ - MathesarCustomType.MATHESAR_JSON_OBJECT, -}) diff --git a/db/types/hintsets.py b/db/types/hintsets.py deleted file mode 100644 index 240914521c..0000000000 --- a/db/types/hintsets.py +++ /dev/null @@ -1,60 +0,0 @@ -from frozendict import frozendict - -from db.functions import hints -from db.types import categories -from db.types.base import PostgresType, MathesarCustomType, known_db_types - - -# TODO switch from using tuples for hintsets to using frozensets -def _build_db_types_hinted(): - """ - Builds up a map of db types to hintsets. - """ - # Start out by defining some hints manually. 
- db_types_hinted = { - PostgresType.BOOLEAN: tuple([ - hints.boolean - ]), - MathesarCustomType.URI: tuple([ - hints.uri - ]), - MathesarCustomType.EMAIL: tuple([ - hints.email - ]), - } - - # Then, start adding hints automatically. - # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the - # hintsets of multiple db types. - def _add_to_db_type_hintsets(db_types, hints): - """ - Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`. - """ - for db_type in db_types: - if db_type in db_types_hinted: - updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints))) - db_types_hinted[db_type] = updated_hintset - else: - db_types_hinted[db_type] = tuple(hints) - - # all types get the "any" hint - all_db_types = known_db_types - _add_to_db_type_hintsets(all_db_types, (hints.any,)) - - _add_to_db_type_hintsets(categories.STRING_LIKE_TYPES, (hints.string_like,)) - _add_to_db_type_hintsets(categories.TIME_OF_DAY_TYPES, (hints.time,)) - _add_to_db_type_hintsets(categories.POINT_IN_TIME_TYPES, (hints.point_in_time,)) - _add_to_db_type_hintsets(categories.DATE_TYPES, (hints.date,)) - _add_to_db_type_hintsets(categories.DATETIME_TYPES, (hints.date, hints.time,)) - _add_to_db_type_hintsets(categories.DURATION_TYPES, (hints.duration,)) - _add_to_db_type_hintsets(categories.COMPARABLE_TYPES, (hints.comparable,)) - _add_to_db_type_hintsets(categories.NUMERIC_TYPES, (hints.numeric,)) - - # TODO do we want JSON_ARRAY and ARRAY distinct here? - _add_to_db_type_hintsets(categories.JSON_ARRAY, (hints.json_array,)) - _add_to_db_type_hintsets(categories.ARRAY, (hints.array,)) - - return frozendict(db_types_hinted) - - -db_types_hinted = _build_db_types_hinted() diff --git a/db/types/install.py b/db/types/install.py deleted file mode 100644 index 27c5db696e..0000000000 --- a/db/types/install.py +++ /dev/null @@ -1,10 +0,0 @@ -from db.constants import TYPES_SCHEMA -import psycopg - - -def uninstall_mathesar_from_database(engine): - conn_str = str(engine.url) - with psycopg.connect(conn_str) as conn: - # TODO: Clean up this code so that it references all the schemas in our - # `INTERNAL_SCHEMAS` constant. - conn.execute(f"DROP SCHEMA IF EXISTS __msar, msar, {TYPES_SCHEMA} CASCADE") diff --git a/db/types/operations/cast.py b/db/types/operations/cast.py deleted file mode 100644 index dc46191395..0000000000 --- a/db/types/operations/cast.py +++ /dev/null @@ -1,1031 +0,0 @@ -from frozendict import frozendict - -from sqlalchemy import text -from sqlalchemy.sql import quoted_name -from sqlalchemy.sql.functions import Function - -from db.types.custom import uri -from db.types.exceptions import UnsupportedTypeException -from db.types.base import PostgresType, MathesarCustomType, get_available_known_db_types, get_qualified_name -from db.types.operations.convert import get_db_type_enum_from_class -from db.types import categories -from db.types.custom.money import MONEY_ARR_FUNC_NAME - -NUMERIC_ARR_FUNC_NAME = "get_numeric_array" - - -def get_column_cast_expression(column, target_type, engine, type_options=None): - """ - Given a Column, we get the correct SQL selectable for selecting the - results of a Mathesar cast_to_ function on that column, where - is derived from the target_type. - """ - if type_options is None: - type_options = {} - target_type_class = target_type.get_sa_class(engine) - if target_type_class is None: - raise UnsupportedTypeException( - f"Target Type '{target_type.id}' is not supported." 
- ) - column_type = get_db_type_enum_from_class(column.type.__class__) - if target_type == column_type: - cast_expr = column - else: - qualified_function_name = get_cast_function_name(target_type) - cast_expr = Function( - quoted_name(qualified_function_name, False), - column - ) - if type_options: - type_with_options = target_type_class(**type_options) - cast_expr = cast_expr.cast(type_with_options) - return cast_expr - - -def install_all_casts(engine): - create_boolean_casts(engine) - create_decimal_number_casts(engine) - create_email_casts(engine) - create_integer_casts(engine) - create_interval_casts(engine) - create_datetime_casts(engine) - create_mathesar_money_casts(engine) - create_money_casts(engine) - create_multicurrency_money_casts(engine) - create_textual_casts(engine) - create_uri_casts(engine) - create_numeric_casts(engine) - create_json_casts(engine) - - -def create_boolean_casts(engine): - type_body_map = _get_boolean_type_body_map() - create_cast_functions(PostgresType.BOOLEAN, type_body_map, engine) - - -def create_json_casts(engine): - json_types = categories.JSON_TYPES - for db_type in json_types: - type_body_map = _get_json_type_body_map(db_type) - create_cast_functions(db_type, type_body_map, engine) - - -def create_decimal_number_casts(engine): - decimal_number_types = categories.DECIMAL_TYPES - for db_type in decimal_number_types: - type_body_map = _get_decimal_number_type_body_map(target_type=db_type) - create_cast_functions(db_type, type_body_map, engine) - - -def create_email_casts(engine): - type_body_map = _get_email_type_body_map() - create_cast_functions(MathesarCustomType.EMAIL, type_body_map, engine) - - -def create_integer_casts(engine): - integer_types = categories.INTEGER_TYPES - for db_type in integer_types: - type_body_map = _get_integer_type_body_map(target_type=db_type) - create_cast_functions(db_type, type_body_map, engine) - - -def create_interval_casts(engine): - type_body_map = _get_interval_type_body_map() - create_cast_functions(PostgresType.INTERVAL, type_body_map, engine) - - -def create_datetime_casts(engine): - time_types = [PostgresType.TIME_WITHOUT_TIME_ZONE, PostgresType.TIME_WITH_TIME_ZONE] - for time_type in time_types: - type_body_map = _get_time_type_body_map(time_type) - create_cast_functions(time_type, type_body_map, engine) - - type_body_map = _get_timestamp_with_timezone_type_body_map(PostgresType.TIMESTAMP_WITH_TIME_ZONE) - create_cast_functions(PostgresType.TIMESTAMP_WITH_TIME_ZONE, type_body_map, engine) - - type_body_map = _get_timestamp_without_timezone_type_body_map() - create_cast_functions(PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, type_body_map, engine) - - type_body_map = _get_date_type_body_map() - create_cast_functions(PostgresType.DATE, type_body_map, engine) - - -def create_mathesar_money_casts(engine): - mathesar_money_array_create = _build_mathesar_money_array_function() - with engine.begin() as conn: - conn.execute(text(mathesar_money_array_create)) - type_body_map = _get_mathesar_money_type_body_map() - create_cast_functions(MathesarCustomType.MATHESAR_MONEY, type_body_map, engine) - - -def create_money_casts(engine): - type_body_map = _get_money_type_body_map() - create_cast_functions(PostgresType.MONEY, type_body_map, engine) - - -def create_multicurrency_money_casts(engine): - type_body_map = _get_multicurrency_money_type_body_map() - create_cast_functions(MathesarCustomType.MULTICURRENCY_MONEY, type_body_map, engine) - - -def create_textual_casts(engine): - textual_types = categories.STRING_LIKE_TYPES - for 
db_type in textual_types: - type_body_map = _get_textual_type_body_map(engine) - create_cast_functions(db_type, type_body_map, engine) - - -def create_uri_casts(engine): - type_body_map = _get_uri_type_body_map() - create_cast_functions(MathesarCustomType.URI, type_body_map, engine) - - -def create_numeric_casts(engine): - numeric_array_create = _build_numeric_array_function() - with engine.begin() as conn: - conn.execute(text(numeric_array_create)) - type_body_map = _get_numeric_type_body_map() - create_cast_functions(PostgresType.NUMERIC, type_body_map, engine) - - -# TODO find more descriptive name -def get_full_cast_map(engine): - """ - Returns a mapping of source types to target type sets. - """ - target_to_source_maps = { - PostgresType.BIGINT: _get_integer_type_body_map(target_type=PostgresType.BIGINT), - PostgresType.BOOLEAN: _get_boolean_type_body_map(), - PostgresType.CHARACTER: _get_textual_type_body_map(engine), - PostgresType.CHARACTER_VARYING: _get_textual_type_body_map(engine), - PostgresType.DATE: _get_date_type_body_map(), - PostgresType.JSON: _get_json_type_body_map(target_type=PostgresType.JSON), - PostgresType.JSONB: _get_json_type_body_map(target_type=PostgresType.JSONB), - MathesarCustomType.MATHESAR_JSON_ARRAY: _get_json_type_body_map(target_type=MathesarCustomType.MATHESAR_JSON_ARRAY), - MathesarCustomType.MATHESAR_JSON_OBJECT: _get_json_type_body_map(target_type=MathesarCustomType.MATHESAR_JSON_OBJECT), - PostgresType.DOUBLE_PRECISION: _get_decimal_number_type_body_map(target_type=PostgresType.DOUBLE_PRECISION), - MathesarCustomType.EMAIL: _get_email_type_body_map(), - PostgresType.INTEGER: _get_integer_type_body_map(target_type=PostgresType.INTEGER), - MathesarCustomType.MATHESAR_MONEY: _get_mathesar_money_type_body_map(), - PostgresType.MONEY: _get_money_type_body_map(), - MathesarCustomType.MULTICURRENCY_MONEY: _get_multicurrency_money_type_body_map(), - PostgresType.INTERVAL: _get_interval_type_body_map(), - PostgresType.NUMERIC: _get_decimal_number_type_body_map(target_type=PostgresType.NUMERIC), - PostgresType.REAL: _get_decimal_number_type_body_map(target_type=PostgresType.REAL), - PostgresType.SMALLINT: _get_integer_type_body_map(target_type=PostgresType.SMALLINT), - PostgresType.TIME_WITHOUT_TIME_ZONE: _get_time_type_body_map(PostgresType.TIME_WITHOUT_TIME_ZONE), - PostgresType.TIME_WITH_TIME_ZONE: _get_time_type_body_map(PostgresType.TIME_WITH_TIME_ZONE), - PostgresType.TIMESTAMP_WITH_TIME_ZONE: _get_timestamp_with_timezone_type_body_map(PostgresType.TIMESTAMP_WITH_TIME_ZONE), - PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE: _get_timestamp_without_timezone_type_body_map(), - PostgresType.TEXT: _get_textual_type_body_map(engine), - MathesarCustomType.URI: _get_uri_type_body_map(), - } - # invert the map - source_to_target_tuples = ( - (source, target) - for target in target_to_source_maps - for source in target_to_source_maps[target] - ) - # reduce (source, target) tuples to a dictionary of sets - source_to_target_sets = {} - for source, target in source_to_target_tuples: - source_to_target_sets.setdefault(source, set()).add(target) - # freeze the collections - return frozendict( - { - source: frozenset(target_set) - for source, target_set - in source_to_target_sets.items() - } - ) - - -def create_cast_functions(target_type, type_body_map, engine): - """ - This python function writes a number of PL/pgSQL functions that cast - between types supported by Mathesar, and installs them on the DB - using the given engine. 
Each generated PL/pgSQL function has the - name `cast_to_`. We utilize the function overloading of - PL/pgSQL to use the correct function body corresponding to a given - input (source) type. - - Args: - target_type: Enum corresponding to the target type of the - cast function. - type_body_map: dictionary that gives a map between source types - and the body of a PL/pgSQL function to cast a - given source type to the target type. - engine: an SQLAlchemy engine. - """ - for type_, body in type_body_map.items(): - query = assemble_function_creation_sql(type_, target_type, body) - with engine.begin() as conn: - conn.execute(text(query)) - - -def assemble_function_creation_sql(argument_type, target_type, function_body): - function_name = get_cast_function_name(target_type) - return f""" - CREATE OR REPLACE FUNCTION {function_name}({argument_type.id}) - RETURNS {target_type.id} - AS $$ - {function_body} - $$ LANGUAGE plpgsql RETURNS NULL ON NULL INPUT; - """ - - -# TODO Replace with SQL version (msar.get_cast_function_name) when refactoring. -def get_cast_function_name(target_type): - """ - Some casting functions change postgres config parameters for the - transaction they are run on like cast function for casting different - data type to timestamp with timezone, So they used be in an isolated - transaction - """ - unqualified_type_name = target_type.id.split('.')[-1].lower() - if '(' in unqualified_type_name: - bare_type_name = unqualified_type_name[:unqualified_type_name.find('(')] - if unqualified_type_name[-1] != ')': - bare_type_name += unqualified_type_name[unqualified_type_name.find(')') + 1:] - else: - bare_type_name = unqualified_type_name - function_type_name = '_'.join(bare_type_name.split()) - bare_function_name = f"cast_to_{function_type_name}" - escaped_bare_function_name = _escape_illegal_characters(bare_function_name) - qualified_escaped_bare_function_name = get_qualified_name(escaped_bare_function_name) - return qualified_escaped_bare_function_name - - -def _escape_illegal_characters(sql_name): - replacement_mapping = { - '"': '_double_quote_' - } - resulting_string = sql_name - for old, new in replacement_mapping.items(): - resulting_string = resulting_string.replace(old, new) - return resulting_string - - -def _get_json_type_body_map(target_type): - """ - Allow casting from text, primitive json types and Mathesar custom json types. - Target types include primitive json, jsonb, Mathesar json object and Mathesar json array - """ - default_behavior_source_types = categories.STRING_TYPES | frozenset([PostgresType.JSON, PostgresType.JSONB, MathesarCustomType.MATHESAR_JSON_ARRAY, MathesarCustomType.MATHESAR_JSON_OBJECT]) - type_body_map = _get_default_type_body_map( - default_behavior_source_types, target_type - ) - - return type_body_map - - -def _get_boolean_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to booleans. - - boolean -> boolean: Identity. No remarks - varchar -> boolean: We only cast 't', 'f', 'true', or 'false' - all others raise a custom exception. - number type -> boolean: We only cast numbers 1 -> true, 0 -> false - (this is not default behavior for - PostgreSQL). Others raise a custom - exception. 
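An informal sketch of what these boolean cast bodies accept, assuming install_all_casts(engine) has been run against an SQLAlchemy engine and that get_qualified_name() prefixes the functions with a mathesar_types schema (both are assumptions, not shown in this patch):

    # Sketch only: exercising the generated cast; `engine` and the
    # "mathesar_types" qualification are assumptions.
    from sqlalchemy import text

    with engine.connect() as conn:
        # accepted text spellings return booleans
        assert conn.execute(text("SELECT mathesar_types.cast_to_boolean('true'::text)")).scalar() is True
        assert conn.execute(text("SELECT mathesar_types.cast_to_boolean('f'::text)")).scalar() is False
        # numbers: only 0 and 1 are allowed; anything else raises the
        # custom "% is not a boolean" exception
        assert conn.execute(text("SELECT mathesar_types.cast_to_boolean(1::integer)")).scalar() is True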
- """ - source_number_types = categories.NUMERIC_TYPES - source_text_types = categories.STRING_TYPES - default_behavior_source_types = frozenset([PostgresType.BOOLEAN]) - - not_bool_exception_str = f"RAISE EXCEPTION '% is not a {PostgresType.BOOLEAN.id}', $1;" - - def _get_number_to_boolean_cast_str(): - return f""" - BEGIN - IF $1<>0 AND $1<>1 THEN - {not_bool_exception_str} END IF; - RETURN $1<>0; - END; - """ - - def _get_text_to_boolean_cast_str(): - return f""" - DECLARE - istrue {PostgresType.BOOLEAN.id}; - BEGIN - SELECT - $1='1' OR lower($1) = 'on' - OR lower($1)='t' OR lower($1)='true' - OR lower($1)='y' OR lower($1)='yes' - INTO istrue; - IF istrue - OR $1='0' OR lower($1) = 'off' - OR lower($1)='f' OR lower($1)='false' - OR lower($1)='n' OR lower($1)='no' - THEN - RETURN istrue; - END IF; - {not_bool_exception_str} - END; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, PostgresType.BOOLEAN, - ) - type_body_map.update( - { - number_type: _get_number_to_boolean_cast_str() - for number_type in source_number_types - } - ) - type_body_map.update( - { - text_type: _get_text_to_boolean_cast_str() - for text_type in source_text_types - } - ) - return type_body_map - - -def _get_email_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to email. - - email -> email: Identity. No remarks - varchar -> email: We use the default PostgreSQL behavior (this will - just check that the VARCHAR object satisfies the email - DOMAIN). - """ - identity_set = {MathesarCustomType.EMAIL} - default_behavior_source_types = categories.STRING_TYPES - source_types = default_behavior_source_types.union(identity_set) - return _get_default_type_body_map( - source_types, MathesarCustomType.EMAIL, - ) - - -def _get_interval_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to interval. - - interval -> interval: Identity. No remarks - text_type -> interval: We first check that the varchar *cannot* be cast - to a numeric, and then try to cast the varchar - to an interval. - """ - source_text_types = categories.STRING_TYPES - - def _get_text_interval_type_body_map(): - # We need to check that a string isn't a valid number before - # casting to intervals (since a number is more likely) - return f""" BEGIN - PERFORM $1::{PostgresType.NUMERIC.id}; - RAISE EXCEPTION '% is a {PostgresType.NUMERIC.id}', $1; - EXCEPTION - WHEN sqlstate '22P02' THEN - RETURN $1::{PostgresType.INTERVAL.id}; - END; - """ - - type_body_map = { - PostgresType.INTERVAL: """ - BEGIN - RETURN $1; - END; - """ - } - type_body_map.update( - { - text_type: _get_text_interval_type_body_map() - for text_type in source_text_types - } - ) - return type_body_map - - -def _get_integer_type_body_map(target_type=PostgresType.INTEGER): - """ - We use default behavior for identity and casts from TEXT types. - We specifically disallow rounding or truncating when casting from numerics, - etc. 
- """ - default_behavior_source_types = categories.INTEGER_TYPES | categories.STRING_TYPES - no_rounding_source_types = categories.DECIMAL_TYPES | categories.MONEY_WITHOUT_CURRENCY_TYPES | frozenset([PostgresType.NUMERIC]) - target_type_str = target_type.id - cast_loss_exception_str = ( - f"RAISE EXCEPTION '% cannot be cast to {target_type_str} without loss', $1;" - ) - - def _get_no_rounding_cast_to_integer(): - return f""" - DECLARE integer_res {target_type_str}; - BEGIN - SELECT $1::{target_type_str} INTO integer_res; - IF integer_res = $1 THEN - RETURN integer_res; - END IF; - {cast_loss_exception_str} - END; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, target_type, - ) - type_body_map.update( - { - type_name: _get_no_rounding_cast_to_integer() - for type_name in no_rounding_source_types - } - ) - type_body_map.update({PostgresType.BOOLEAN: _get_boolean_to_number_cast(target_type)}) - return type_body_map - - -def _get_decimal_number_type_body_map(target_type=PostgresType.NUMERIC): - """ - Get SQL strings that create various functions for casting different - types to number types including DECIMAL, DOUBLE PRECISION, FLOAT, - NUMERIC, and REAL. - - The only notable non-default cast is from boolean: - boolean -> number: We cast TRUE -> 1, FALSE -> 0 - """ - - default_behavior_source_types = ( - categories.NUMERIC_TYPES | categories.STRING_TYPES | categories.MONEY_WITHOUT_CURRENCY_TYPES - ) - type_body_map = _get_default_type_body_map( - default_behavior_source_types, target_type, - ) - type_body_map.update({PostgresType.BOOLEAN: _get_boolean_to_number_cast(target_type)}) - return type_body_map - - -def _get_boolean_to_number_cast(target_type): - target_type_str = target_type.id - return f""" - BEGIN - IF $1 THEN - RETURN 1::{target_type_str}; - END IF; - RETURN 0::{target_type_str}; - END; - """ - - -def _get_time_type_body_map(target_type): - default_behavior_source_types = [ - PostgresType.TEXT, PostgresType.CHARACTER_VARYING, PostgresType.TIME_WITHOUT_TIME_ZONE, PostgresType.TIME_WITH_TIME_ZONE - ] - return _get_default_type_body_map( - default_behavior_source_types, target_type, - ) - - -def get_text_and_datetime_to_datetime_cast_str(type_condition, exception_string): - return f""" - DECLARE - timestamp_value_with_tz NUMERIC; - timestamp_value NUMERIC; - date_value NUMERIC; - BEGIN - SET LOCAL TIME ZONE 'UTC'; - SELECT EXTRACT(EPOCH FROM $1::TIMESTAMP WITH TIME ZONE ) INTO timestamp_value_with_tz; - SELECT EXTRACT(EPOCH FROM $1::TIMESTAMP WITHOUT TIME ZONE) INTO timestamp_value; - SELECT EXTRACT(EPOCH FROM $1::DATE ) INTO date_value; - {type_condition} - - {exception_string} - END; - """ - - -def _get_timestamp_with_timezone_type_body_map(target_type): - default_behavior_source_types = categories.DATETIME_TYPES | categories.STRING_TYPES - return _get_default_type_body_map(default_behavior_source_types, target_type) - - -def _get_timestamp_without_timezone_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to timestamp without timezone. - We allow casting any text, timezone and date type to be cast into a - timestamp without timezone, provided it does not any timezone - information as this could lead to a information loss - - The cast function changes the timezone to `utc` for the transaction - is called on. 
So this function call should be used in a isolated - transaction to avoid timezone change causing unintended side effect - """ - source_text_types = categories.STRING_TYPES - source_datetime_types = frozenset([PostgresType.TIMESTAMP_WITH_TIME_ZONE, PostgresType.DATE]) - default_behavior_source_types = frozenset([PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE]) - - not_timestamp_without_tz_exception_str = ( - f"RAISE EXCEPTION '% is not a {PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.id}', $1;" - ) - # Check if the value is missing timezone by casting it to a timestamp - # with timezone and comparing if the value is equal to a timestamp - # without timezone. - timestamp_without_tz_condition_str = f""" - IF (timestamp_value_with_tz = timestamp_value) THEN - RETURN $1::{PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.id}; - END IF; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE, - ) - type_body_map.update( - { - text_type: get_text_and_datetime_to_datetime_cast_str( - timestamp_without_tz_condition_str, - not_timestamp_without_tz_exception_str - ) - for text_type in source_text_types - } - ) - type_body_map.update( - { - datetime_type: get_text_and_datetime_to_datetime_cast_str( - timestamp_without_tz_condition_str, - not_timestamp_without_tz_exception_str - ) - for datetime_type in source_datetime_types - } - ) - return type_body_map - - -def _get_mathesar_money_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to money. - We allow casting any number type to our custom money. - We allow casting the default money type to our custom money. - We allow casting any textual type to money with the text prefixed or - suffixed with a currency. - """ - money_array_function = get_qualified_name(MONEY_ARR_FUNC_NAME) - default_behavior_source_types = frozenset([MathesarCustomType.MATHESAR_MONEY]) - number_types = categories.NUMERIC_TYPES - textual_types = categories.STRING_TYPES | frozenset([PostgresType.MONEY]) - cast_exception_str = ( - f"RAISE EXCEPTION '% cannot be cast to {MathesarCustomType.MATHESAR_MONEY.id}', $1;" - ) - - def _get_number_cast_to_money(): - return f""" - BEGIN - RETURN $1::numeric::{MathesarCustomType.MATHESAR_MONEY.id}; - END; - """ - - def _get_base_textual_cast_to_money(): - return rf""" - DECLARE decimal_point {PostgresType.TEXT.id}; - DECLARE is_negative {PostgresType.BOOLEAN.id}; - DECLARE money_arr {PostgresType.TEXT.id}[]; - DECLARE money_num {PostgresType.TEXT.id}; - BEGIN - SELECT {money_array_function}($1::{PostgresType.TEXT.id}) INTO money_arr; - IF money_arr IS NULL THEN - {cast_exception_str} - END IF; - - SELECT money_arr[1] INTO money_num; - SELECT ltrim(to_char(1, 'D'), ' ') INTO decimal_point; - SELECT $1::text ~ '^.*(-|\(.+\)).*$' INTO is_negative; - - IF money_arr[2] IS NOT NULL THEN - SELECT regexp_replace(money_num, money_arr[2], '', 'gq') INTO money_num; - END IF; - IF money_arr[3] IS NOT NULL THEN - SELECT regexp_replace(money_num, money_arr[3], decimal_point, 'q') INTO money_num; - END IF; - IF is_negative THEN - RETURN ('-' || money_num)::{MathesarCustomType.MATHESAR_MONEY.id}; - END IF; - RETURN money_num::{MathesarCustomType.MATHESAR_MONEY.id}; - END; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, MathesarCustomType.MATHESAR_MONEY, - ) - type_body_map.update( - { - type_name: _get_number_cast_to_money() - for type_name in number_types - } - ) - type_body_map.update( - { - type_name: 
_get_base_textual_cast_to_money() - for type_name in textual_types - } - ) - return type_body_map - - -def _build_mathesar_money_array_function(): - """ - The main reason for this function to be separate is for testing. This - does have some performance impact; we should consider inlining later. - """ - qualified_function_name = get_qualified_name(MONEY_ARR_FUNC_NAME) - - # An attempt to separate pieces into logical bits for easier - # understanding and modification - non_numeric = r"(?:[^.,0-9]+)" - no_separator_big = r"[0-9]{4,}(?:([,.])[0-9]+)?" - no_separator_small = r"[0-9]{1,3}(?:([,.])[0-9]{1,2}|[0-9]{4,})?" - comma_separator_req_decimal = r"[0-9]{1,3}(,)[0-9]{3}(\.)[0-9]+" - period_separator_req_decimal = r"[0-9]{1,3}(\.)[0-9]{3}(,)[0-9]+" - comma_separator_opt_decimal = r"[0-9]{1,3}(?:(,)[0-9]{3}){2,}(?:(\.)[0-9]+)?" - period_separator_opt_decimal = r"[0-9]{1,3}(?:(\.)[0-9]{3}){2,}(?:(,)[0-9]+)?" - space_separator_opt_decimal = r"[0-9]{1,3}(?:( )[0-9]{3})+(?:([,.])[0-9]+)?" - comma_separator_lakh_system = r"[0-9]{1,2}(?:(,)[0-9]{2})+,[0-9]{3}(?:(\.)[0-9]+)?" - - inner_number_tree = "|".join( - [ - no_separator_big, - no_separator_small, - comma_separator_req_decimal, - period_separator_req_decimal, - comma_separator_opt_decimal, - period_separator_opt_decimal, - space_separator_opt_decimal, - comma_separator_lakh_system, - ] - ) - inner_number_group = f"({inner_number_tree})" - required_currency_beginning = f"{non_numeric}{inner_number_group}{non_numeric}?" - required_currency_ending = f"{non_numeric}?{inner_number_group}{non_numeric}" - money_finding_regex = f"^(?:{required_currency_beginning}|{required_currency_ending})$" - - actual_number_indices = [1, 16] - group_divider_indices = [4, 6, 8, 10, 12, 14, 19, 21, 23, 25, 27, 29] - decimal_point_indices = [2, 3, 5, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 28, 30] - actual_numbers_str = ','.join([f'raw_arr[{idx}]' for idx in actual_number_indices]) - group_dividers_str = ','.join([f'raw_arr[{idx}]' for idx in group_divider_indices]) - decimal_points_str = ','.join([f'raw_arr[{idx}]' for idx in decimal_point_indices]) - - text_db_type_id = PostgresType.TEXT.id - return rf""" - CREATE OR REPLACE FUNCTION {qualified_function_name}({text_db_type_id}) RETURNS {text_db_type_id}[] - AS $$ - DECLARE - raw_arr {text_db_type_id}[]; - actual_number_arr {text_db_type_id}[]; - group_divider_arr {text_db_type_id}[]; - decimal_point_arr {text_db_type_id}[]; - actual_number {text_db_type_id}; - group_divider {text_db_type_id}; - decimal_point {text_db_type_id}; - BEGIN - SELECT regexp_matches($1, '{money_finding_regex}') INTO raw_arr; - IF raw_arr IS NULL THEN - RETURN NULL; - END IF; - SELECT array_remove(ARRAY[{actual_numbers_str}], null) INTO actual_number_arr; - SELECT array_remove(ARRAY[{group_dividers_str}], null) INTO group_divider_arr; - SELECT array_remove(ARRAY[{decimal_points_str}], null) INTO decimal_point_arr; - SELECT actual_number_arr[1] INTO actual_number; - SELECT group_divider_arr[1] INTO group_divider; - SELECT decimal_point_arr[1] INTO decimal_point; - RETURN ARRAY[actual_number, group_divider, decimal_point, replace($1, actual_number, '')]; - END; - $$ LANGUAGE plpgsql; - """ - - -def _get_money_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to money. - We allow casting any number type to money, assuming currency is the - locale currency. - We allow casting our custom money type to money assuming currency is the - locale currency. 
- We allow casting any textual type to money with the text prefixed or - suffixed with the locale currency. - """ - default_behavior_source_types = frozenset([PostgresType.MONEY, MathesarCustomType.MATHESAR_MONEY]) - number_types = categories.NUMERIC_TYPES - textual_types = categories.STRING_TYPES - cast_loss_exception_str = ( - f"RAISE EXCEPTION '% cannot be cast to {PostgresType.MONEY.id} as currency symbol is missing', $1;" - ) - - def _get_number_cast_to_money(): - return f""" - BEGIN - RETURN $1::numeric::{PostgresType.MONEY.id}; - END; - """ - - def _get_base_textual_cast_to_money(): - return f""" - DECLARE currency {PostgresType.TEXT.id}; - BEGIN - SELECT to_char(1, 'L') INTO currency; - IF ($1 LIKE '%' || currency) OR ($1 LIKE currency || '%') THEN - RETURN $1::{PostgresType.MONEY.id}; - END IF; - {cast_loss_exception_str} - END; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, PostgresType.MONEY, - ) - type_body_map.update( - { - db_type: _get_number_cast_to_money() - for db_type in number_types - } - ) - type_body_map.update( - { - db_type: _get_base_textual_cast_to_money() - for db_type in textual_types - } - ) - return type_body_map - - -def _get_multicurrency_money_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to money. - We allow casting any number type to money, assuming currency is USD. - We allow casting any textual type to money, assuming currency is USD - and that the type can be cast through a numeric. - """ - default_behavior_source_types = [MathesarCustomType.MULTICURRENCY_MONEY] - number_types = categories.NUMERIC_TYPES | frozenset([MathesarCustomType.MATHESAR_MONEY]) - textual_types = categories.STRING_TYPES | frozenset([PostgresType.MONEY]) - - def _get_number_cast_to_money(): - return f""" - BEGIN - RETURN ROW($1, 'USD')::{MathesarCustomType.MULTICURRENCY_MONEY.id}; - END; - """ - - def _get_base_textual_cast_to_money(): - return f""" - BEGIN - RETURN ROW($1::numeric, 'USD')::{MathesarCustomType.MULTICURRENCY_MONEY.id}; - END; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, MathesarCustomType.MULTICURRENCY_MONEY, - ) - type_body_map.update( - { - db_type: _get_number_cast_to_money() - for db_type in number_types - } - ) - type_body_map.update( - { - db_type: _get_base_textual_cast_to_money() - for db_type in textual_types - } - ) - return type_body_map - - -def _get_textual_type_body_map(engine): - """ - Get SQL strings that create various functions for casting different - types to text types through the TEXT type. - - All casts to varchar use default PostgreSQL behavior. - All types in get_supported_alter_column_types are supported. - """ - supported_types = get_available_known_db_types(engine) - # We cast everything through TEXT so that formatting is done correctly - # for CHAR. - text_cast_str = f""" - BEGIN - RETURN $1::{PostgresType.TEXT.id}; - END; - """ - return {type_: text_cast_str for type_ in supported_types} - - -def _get_date_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to date. - - We allow casting any text, timezone and date type to be cast into a - timestamp without timezone, provided it does not any timezone - information as this could lead to a information loss. - - The cast function changes the timezone to `utc` for the transaction - is called on. 
So this function call should be used in a isolated - transaction to avoid timezone change causing unintended side effect. - """ - # Note that default postgres conversion for dates depends on the - # `DateStyle` option set on the server, which can be one of DMY, MDY, - # or YMD. Defaults to MDY. - source_text_types = categories.STRING_TYPES - source_datetime_types = frozenset([PostgresType.TIMESTAMP_WITH_TIME_ZONE, PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE]) - default_behavior_source_types = frozenset([PostgresType.DATE]) - - not_date_exception_str = f"RAISE EXCEPTION '% is not a {PostgresType.DATE.id}', $1;" - date_condition_str = f""" - IF (timestamp_value_with_tz = date_value) THEN - RETURN $1::{PostgresType.DATE.id}; - END IF; - """ - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, PostgresType.TIMESTAMP_WITH_TIME_ZONE - ) - type_body_map.update( - { - text_type: get_text_and_datetime_to_datetime_cast_str(date_condition_str, not_date_exception_str) - for text_type in source_text_types - } - ) - type_body_map.update( - { - datetime_type: get_text_and_datetime_to_datetime_cast_str(date_condition_str, not_date_exception_str) - for datetime_type in source_datetime_types - } - ) - return type_body_map - - -def _get_uri_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to URIs. - """ - - def _get_text_uri_type_body_map(): - # We need to check that a string isn't a valid number before - # casting to intervals (since a number is more likely) - auth_func = uri.URIFunction.AUTHORITY.value - tld_regex = r"'(?<=\.)(?:.(?!\.))+$'" - not_uri_exception_str = f"RAISE EXCEPTION '% is not a {MathesarCustomType.URI.id}', $1;" - return f""" - DECLARE uri_res {MathesarCustomType.URI.id} := 'https://centerofci.org'; - DECLARE uri_tld {PostgresType.TEXT.id}; - BEGIN - RETURN $1::{MathesarCustomType.URI.id}; - EXCEPTION WHEN SQLSTATE '23514' THEN - SELECT lower(('http://' || $1)::{MathesarCustomType.URI.id}) INTO uri_res; - SELECT (regexp_match({auth_func}(uri_res), {tld_regex}))[1] - INTO uri_tld; - IF EXISTS(SELECT 1 FROM {uri.QUALIFIED_TLDS} WHERE tld = uri_tld) THEN - RETURN uri_res; - END IF; - {not_uri_exception_str} - END; - """ - - source_types = frozenset([MathesarCustomType.URI]) | categories.STRING_TYPES - return {type_: _get_text_uri_type_body_map() for type_ in source_types} - - -def _get_numeric_type_body_map(): - """ - Get SQL strings that create various functions for casting different - types to numeric. - We allow casting any textual type to locale-agnostic numeric. 
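A rough sketch of the locale-agnostic text-to-numeric parsing described above, assuming the same mathesar_types naming and a C/English lc_numeric (so to_char(1, 'D') yields '.'):

    # Sketch only; the function name and locale are assumptions.
    from decimal import Decimal
    from sqlalchemy import text

    with engine.connect() as conn:
        # period grouping with a comma decimal separator is normalised
        assert conn.execute(
            text("SELECT mathesar_types.cast_to_numeric('1.234,56'::text)")
        ).scalar() == Decimal("1234.56")
        # lakh-style grouping (see comma_separator_lakh_system below) is also recognised
        assert conn.execute(
            text("SELECT mathesar_types.cast_to_numeric('1,23,456.78'::text)")
        ).scalar() == Decimal("123456.78")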
- """ - default_behavior_source_types = categories.NUMERIC_TYPES | frozenset([PostgresType.MONEY]) - text_source_types = categories.STRING_TYPES - - type_body_map = _get_default_type_body_map( - default_behavior_source_types, PostgresType.NUMERIC - ) - type_body_map.update( - { - text_type: _get_text_to_numeric_cast() - for text_type in text_source_types - } - ) - type_body_map.update({PostgresType.BOOLEAN: _get_boolean_to_number_cast(PostgresType.NUMERIC)}) - return type_body_map - - -def _get_text_to_numeric_cast(): - text_db_type_id = PostgresType.TEXT.id - numeric_db_type_id = PostgresType.NUMERIC.id - - numeric_array_function = get_qualified_name(NUMERIC_ARR_FUNC_NAME) - cast_exception_str = ( - f"RAISE EXCEPTION '% cannot be cast to {PostgresType.NUMERIC}', $1;" - ) - return rf""" - DECLARE decimal_point {text_db_type_id}; - DECLARE is_negative {PostgresType.BOOLEAN.id}; - DECLARE numeric_arr {text_db_type_id}[]; - DECLARE numeric {text_db_type_id}; - BEGIN - SELECT {numeric_array_function}($1::{text_db_type_id}) INTO numeric_arr; - IF numeric_arr IS NULL THEN - {cast_exception_str} - END IF; - - SELECT numeric_arr[1] INTO numeric; - SELECT ltrim(to_char(1, 'D'), ' ') INTO decimal_point; - SELECT $1::text ~ '^-.*$' INTO is_negative; - - IF numeric_arr[2] IS NOT NULL THEN - SELECT regexp_replace(numeric, numeric_arr[2], '', 'gq') INTO numeric; - END IF; - IF numeric_arr[3] IS NOT NULL THEN - SELECT regexp_replace(numeric, numeric_arr[3], decimal_point, 'q') INTO numeric; - END IF; - IF is_negative THEN - RETURN ('-' || numeric)::{numeric_db_type_id}; - END IF; - RETURN numeric::{numeric_db_type_id}; - END; - """ - - -def _build_numeric_array_function(): - """ - The main reason for this function to be separate is for testing. This - does have some performance impact; we should consider inlining later. - """ - qualified_function_name = get_qualified_name(NUMERIC_ARR_FUNC_NAME) - - no_separator_big = r"[0-9]{4,}(?:([,.])[0-9]+)?" - no_separator_small = r"[0-9]{1,3}(?:([,.])[0-9]{1,2}|[0-9]{4,})?" - comma_separator_req_decimal = r"[0-9]{1,3}(,)[0-9]{3}(\.)[0-9]+" - period_separator_req_decimal = r"[0-9]{1,3}(\.)[0-9]{3}(,)[0-9]+" - comma_separator_opt_decimal = r"[0-9]{1,3}(?:(,)[0-9]{3}){2,}(?:(\.)[0-9]+)?" - period_separator_opt_decimal = r"[0-9]{1,3}(?:(\.)[0-9]{3}){2,}(?:(,)[0-9]+)?" - space_separator_opt_decimal = r"[0-9]{1,3}(?:( )[0-9]{3})+(?:([,.])[0-9]+)?" - comma_separator_lakh_system = r"[0-9]{1,2}(?:(,)[0-9]{2})+,[0-9]{3}(?:(\.)[0-9]+)?" - single_quote_separator_opt_decimal = r"[0-9]{1,3}(?:(\'')[0-9]{3})+(?:([.])[0-9]+)?" 
- - inner_number_tree = "|".join( - [ - no_separator_big, - no_separator_small, - comma_separator_req_decimal, - period_separator_req_decimal, - comma_separator_opt_decimal, - period_separator_opt_decimal, - space_separator_opt_decimal, - comma_separator_lakh_system, - single_quote_separator_opt_decimal - ]) - numeric_finding_regex = f"^(?:[+-]?({inner_number_tree}))$" - - actual_number_indices = [1] - group_divider_indices = [4, 6, 8, 10, 12, 14, 16] - decimal_point_indices = [2, 3, 5, 7, 9, 11, 13, 15, 17] - actual_numbers_str = ','.join([f'raw_arr[{idx}]' for idx in actual_number_indices]) - group_dividers_str = ','.join([f'raw_arr[{idx}]' for idx in group_divider_indices]) - decimal_points_str = ','.join([f'raw_arr[{idx}]' for idx in decimal_point_indices]) - - text_db_type_id = PostgresType.TEXT.id - return rf""" - CREATE OR REPLACE FUNCTION {qualified_function_name}({text_db_type_id}) RETURNS {text_db_type_id}[] - AS $$ - DECLARE - raw_arr {text_db_type_id}[]; - actual_number_arr {text_db_type_id}[]; - group_divider_arr {text_db_type_id}[]; - decimal_point_arr {text_db_type_id}[]; - actual_number {text_db_type_id}; - group_divider {text_db_type_id}; - decimal_point {text_db_type_id}; - BEGIN - SELECT regexp_matches($1, '{numeric_finding_regex}') INTO raw_arr; - IF raw_arr IS NULL THEN - RETURN NULL; - END IF; - SELECT array_remove(ARRAY[{actual_numbers_str}], null) INTO actual_number_arr; - SELECT array_remove(ARRAY[{group_dividers_str}], null) INTO group_divider_arr; - SELECT array_remove(ARRAY[{decimal_points_str}], null) INTO decimal_point_arr; - SELECT actual_number_arr[1] INTO actual_number; - SELECT group_divider_arr[1] INTO group_divider; - SELECT decimal_point_arr[1] INTO decimal_point; - RETURN ARRAY[actual_number, group_divider, decimal_point]; - END; - $$ LANGUAGE plpgsql; - """ - - -def _get_default_type_body_map(source_types, target_type): - default_cast_str = f""" - BEGIN - RETURN $1::{target_type.id}; - END; - """ - return {db_type: default_cast_str for db_type in source_types} From e8fce620ec6cd296f8486e3a22b1a4f605420539 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Thu, 17 Oct 2024 14:27:41 +0800 Subject: [PATCH 53/70] remove unused column testing utils --- db/tests/columns/utils.py | 75 --------------------------------------- 1 file changed, 75 deletions(-) delete mode 100644 db/tests/columns/utils.py diff --git a/db/tests/columns/utils.py b/db/tests/columns/utils.py deleted file mode 100644 index 72036f7342..0000000000 --- a/db/tests/columns/utils.py +++ /dev/null @@ -1,75 +0,0 @@ -import decimal - -from sqlalchemy import ( - FLOAT, SMALLINT, String, Integer, BOOLEAN, TEXT, VARCHAR, select, Table, MetaData, NUMERIC, BIGINT, DECIMAL, - REAL -) -from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, MONEY - -from db.types.custom.datetime import ( - DATE, Interval, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE, - TIME_WITHOUT_TIME_ZONE, TIME_WITH_TIME_ZONE, -) -from db.types.custom.email import Email -from db.types.custom.money import MathesarMoney -from db.types.custom import multicurrency -from db.types.custom.uri import URI - -column_test_dict = { - BIGINT: {"start": "499999999999", "set": "500000000000", "expt": 500000000000}, - BOOLEAN: {"start": "false", "set": "true", "expt": True}, - # Disabled, since there's a bug in server_default that makes the associated - # test buggy. WE are setting the default using `server_default` on the - # column, but we *never actually do that in the codebase* (and shouldn't. 
- # It's broken) - # CHAR: {"start": "a", "set": "b", "expt": "'b'::bpchar"}, - DECIMAL: {"start": "111.01111", "set": "111.01112", "expt": decimal.Decimal('111.01112')}, - DOUBLE_PRECISION: {"start": "111.01111", "set": "111.01112", "expt": 111.01112}, - DATE: {"start": "1999-01-15 AD", "set": "1999-01-18 AD", "expt": "1999-01-18 AD"}, - Email: {"start": "alice@example.com", "set": "ob@example.com", "expt": "ob@example.com"}, - Interval: { - "start": "1 year 2 months 3 days 4 hours 5 minutes 6 seconds", - "set": "1 year 2 months 3 days 4:05:06", - "expt": "P1Y2M3DT4H5M6S" - }, - - FLOAT: {"start": "111.01111", "set": "111.01112", "expt": 111.01112}, - Integer: {"start": "0", "set": "5", "expt": 5}, - # Rounds to 2 digits - MONEY: {"start": "$12,312.23", "set": "$12,312.24", "expt": "$12,312.24"}, - MathesarMoney: {"start": "12312.23", "set": "12312.24", "expt": decimal.Decimal("12312.24")}, - multicurrency.MulticurrencyMoney: { - "start": "(1234.12,XYZ)", "set": "(1234.12,XYZ)", - "expt": {multicurrency.CURRENCY: 'XYZ', multicurrency.VALUE: 1234.12} - }, - NUMERIC: {"start": "111.01111", "set": "111.01112", "expt": decimal.Decimal('111.01112')}, - REAL: {"start": "111.01111", "set": "111.01112", "expt": 111.01112}, - SMALLINT: {"start": "500", "set": "500", "expt": 500}, - TIME_WITH_TIME_ZONE: {"start": "12:30:45.0Z", "set": "12:30:45.0+05:30", "expt": '12:30:45.0+05:30'}, - TIME_WITHOUT_TIME_ZONE: {"start": "12:31:00.0", "set": "12:30:00.0", "expt": '12:30:00.0'}, - TIMESTAMP_WITH_TIME_ZONE: {"start": "10000-01-01T00:00:00.0Z AD", "set": "2000-07-30T19:15:03.65Z AD", "expt": '2000-07-30T19:15:03.65Z AD'}, - TIMESTAMP_WITHOUT_TIME_ZONE: {"start": "10000-01-01T00:00:00.0 AD", "set": "2000-07-30T19:15:03.65 AD", "expt": '2000-07-30T19:15:03.65 AD'}, - TEXT: {"start": "default", "set": "test", "expt": "test"}, - String: {"start": "default", "set": "test", "expt": "test"}, - URI: {"start": "https://centerofci.com", "set": "https://centerofci.org", "expt": "https://centerofci.org"}, - VARCHAR: {"start": "default", "set": "test", "expt": "test"}, -} - - -def create_test_table(table_name, cols, insert_data, schema, engine): - table = Table( - table_name, - MetaData(bind=engine, schema=schema), - *cols - ) - table.create() - with engine.begin() as conn: - for data in insert_data: - conn.execute(table.insert().values(data)) - return table - - -def get_default(engine, table): - with engine.begin() as conn: - conn.execute(table.insert()) - return conn.execute(select(table)).fetchall()[0][0] From a411e2521a5a71dd02099d519d70c69c0d8ccbc9 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Thu, 17 Oct 2024 14:34:03 +0800 Subject: [PATCH 54/70] remove unused code from module --- db/functions/base.py | 179 +---------------------- db/functions/hints.py | 167 --------------------- db/functions/operations/apply.py | 5 - db/functions/operations/check_support.py | 69 --------- db/functions/packed.py | 148 +------------------ 5 files changed, 5 insertions(+), 563 deletions(-) delete mode 100644 db/functions/hints.py delete mode 100644 db/functions/operations/check_support.py diff --git a/db/functions/base.py b/db/functions/base.py index bb1ab9fd2b..898571a0ac 100644 --- a/db/functions/base.py +++ b/db/functions/base.py @@ -1,15 +1,8 @@ """ -This namespace defines the DBFunction abstract class and its subclasses. These subclasses -represent functions that have identifiers, display names and hints, and their instances -hold parameters. 
Each DBFunction subclass defines how its instance can be converted into an -SQLAlchemy expression. - -Hints hold information about what kind of input the function might expect and what output -can be expected from it. This is used to provide interface information without constraining its -user. - -These classes might be used, for example, to define a filter for an SQL query, or to -access hints on what composition of functions and parameters should be valid. +This namespace defines the DBFunction abstract class and its subclasses. These +subclasses represent functions that have identifiers and display names, and +their instances hold parameters. Each DBFunction subclass defines how its +instance can be converted into an SQLAlchemy expression. """ from abc import ABC, abstractmethod @@ -21,7 +14,6 @@ from sqlalchemy.sql.functions import GenericFunction, concat, percentile_disc, mode, max, min from db.engine import get_dummy_engine -from db.functions import hints from db.functions.exceptions import BadDBFunctionFormat from db.types.base import PostgresType from db.types.custom.json_array import MathesarJsonArray @@ -70,7 +62,6 @@ def sa_call_sql_function(function_name, *parameters, return_type=None): class DBFunction(ABC): id = None name = None - hints = None # Optionally lists the SQL functions this DBFunction depends on. # Will be checked against SQL functions defined on a database to tell if it @@ -117,10 +108,6 @@ def to_sa_expression(): class Literal(DBFunction): id = 'literal' name = 'as literal' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(0, hints.literal), - ]) @staticmethod def to_sa_expression(primitive): @@ -133,10 +120,6 @@ class Noop(DBFunction): which doesn't play nicely with the type conversion between python classes and db types in psycopg2.""" id = 'noop' name = 'no wrapping' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(0, hints.literal), - ]) @staticmethod def to_sa_expression(primitive): @@ -153,10 +136,6 @@ class ColumnName(DBFunction): """ id = 'column_name' name = 'as column name' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(0, hints.column), - ]) @property def column(self): @@ -179,12 +158,6 @@ def to_sa_expression(*items): class Null(DBFunction): id = 'null' name = 'is null' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(1), - hints.parameter(0, hints.any), - hints.mathesar_filter, - ]) @staticmethod def to_sa_expression(value): @@ -194,9 +167,6 @@ def to_sa_expression(value): class Not(DBFunction): id = 'not' name = 'negate' - hints = tuple([ - hints.returns(hints.boolean), - ]) @staticmethod def to_sa_expression(*values): @@ -210,13 +180,6 @@ def to_sa_expression(*values): class Equal(DBFunction): id = 'equal' name = 'is equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.any), - hints.mathesar_filter, - hints.use_this_alias_when("is same as", hints.point_in_time), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -226,13 +189,6 @@ def to_sa_expression(value1, value2): class Greater(DBFunction): id = 'greater' name = 'is greater than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.comparable), - hints.mathesar_filter, - hints.use_this_alias_when("is after", hints.point_in_time), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -242,13 +198,6 @@ def to_sa_expression(value1, value2): class Lesser(DBFunction): id = 'lesser' name = 'is lesser 
than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.comparable), - hints.mathesar_filter, - hints.use_this_alias_when("is before", hints.point_in_time), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -258,12 +207,6 @@ def to_sa_expression(value1, value2): class In(DBFunction): id = 'in' name = 'is in' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.any), - hints.parameter(1, hints.array), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -273,9 +216,6 @@ def to_sa_expression(value1, value2): class And(DBFunction): id = 'and' name = 'and' - hints = tuple([ - hints.returns(hints.boolean), - ]) @staticmethod def to_sa_expression(*values): @@ -285,9 +225,6 @@ def to_sa_expression(*values): class Or(DBFunction): id = 'or' name = 'or' - hints = tuple([ - hints.returns(hints.boolean), - ]) @staticmethod def to_sa_expression(*values): @@ -297,11 +234,6 @@ def to_sa_expression(*values): class StartsWith(DBFunction): id = 'starts_with' name = 'starts with' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.string_like), - ]) @staticmethod def to_sa_expression(string, prefix): @@ -312,11 +244,6 @@ def to_sa_expression(string, prefix): class Contains(DBFunction): id = 'contains' name = 'contains' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.string_like), - ]) @staticmethod def to_sa_expression(string, sub_string): @@ -327,12 +254,6 @@ def to_sa_expression(string, sub_string): class StartsWithCaseInsensitive(DBFunction): id = 'starts_with_case_insensitive' name = 'starts with' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.string_like), - hints.mathesar_filter, - ]) @staticmethod def to_sa_expression(string, prefix): @@ -343,12 +264,6 @@ def to_sa_expression(string, prefix): class ContainsCaseInsensitive(DBFunction): id = 'contains_case_insensitive' name = 'contains' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.string_like), - hints.mathesar_filter, - ]) @staticmethod def to_sa_expression(string, sub_string): @@ -359,12 +274,6 @@ def to_sa_expression(string, sub_string): class ToLowercase(DBFunction): id = 'to_lowercase' name = 'to lowercase' - hints = tuple([ - hints.returns(hints.string_like), - hints.parameter_count(1), - hints.all_parameters(hints.string_like), - hints.mathesar_filter, - ]) @staticmethod def to_sa_expression(string): @@ -374,9 +283,6 @@ def to_sa_expression(string): class Count(DBFunction): id = 'count' name = 'count' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -386,9 +292,6 @@ def to_sa_expression(column_expr): class Max(DBFunction): id = 'max' name = 'max' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -398,9 +301,6 @@ def to_sa_expression(column_expr): class Mode(DBFunction): id = 'mode' name = 'mode' - hints = tuple([ - hints.aggregation - ]) @staticmethod def to_sa_expression(column_expr): @@ -410,9 +310,6 @@ def to_sa_expression(column_expr): class PeakTime(DBFunction): id = 'peak_time' name = 'peak_time' - hints = tuple([ - hints.aggregation - ]) @staticmethod def to_sa_expression(column_expr): @@ -423,9 +320,6 @@ def to_sa_expression(column_expr): class PeakMonth(DBFunction): id = 'peak_month' name = 
'peak_month' - hints = tuple([ - hints.aggregation - ]) @staticmethod def to_sa_expression(column_expr): @@ -436,9 +330,6 @@ def to_sa_expression(column_expr): class Min(DBFunction): id = 'min' name = 'min' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -448,9 +339,6 @@ def to_sa_expression(column_expr): class Mean(DBFunction): id = 'mean' name = 'mean' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -460,9 +348,6 @@ def to_sa_expression(column_expr): class ArrayAgg(DBFunction): id = 'aggregate_to_array' name = 'aggregate to array' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -473,9 +358,6 @@ def to_sa_expression(column_expr): class Sum(DBFunction): id = 'sum' name = 'sum' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -485,9 +367,6 @@ def to_sa_expression(column_expr): class Percentage_True(DBFunction): id = 'percentage_true' name = 'percentage_true' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -498,9 +377,6 @@ def to_sa_expression(column_expr): class Median(DBFunction): id = 'median' name = 'median' - hints = tuple([ - hints.aggregation, - ]) @staticmethod def to_sa_expression(column_expr): @@ -519,12 +395,6 @@ def to_sa_expression(column_expr): class ArrayContains(DBFunction): id = 'array_contains' name = 'contains' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.array), - hints.parameter(1, hints.array), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -539,13 +409,6 @@ def to_sa_expression(value1, value2): class ArrayLength(DBFunction): id = 'array_length' name = 'length' - hints = tuple([ - hints.returns(hints.comparable), - hints.parameter_count(2), - hints.parameter(0, hints.array), - hints.parameter(1, hints.any), - hints.mathesar_filter - ]) @staticmethod def to_sa_expression(value, dimension): @@ -570,10 +433,6 @@ def to_sa_expression(value, dimension): class Alias(DBFunction): id = 'alias' name = 'alias' - hints = tuple([ - hints.parameter_count(2), - hints.parameter(0, hints.column), - ]) @staticmethod def to_sa_expression(expr, alias): @@ -583,12 +442,6 @@ def to_sa_expression(expr, alias): class JsonArrayLength(DBFunction): id = 'json_array_length' name = 'length' - hints = tuple([ - hints.returns(hints.comparable), - hints.parameter_count(1), - hints.parameter(0, hints.json_array), - hints.mathesar_filter, - ]) @staticmethod def to_sa_expression(value): @@ -598,12 +451,6 @@ def to_sa_expression(value): class JsonArrayContains(DBFunction): id = 'json_array_contains' name = 'contains' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - hints.parameter(1, hints.array), - ]) @staticmethod def to_sa_expression(value1, value2): @@ -618,10 +465,6 @@ def to_sa_expression(value1, value2): class ExtractURIAuthority(DBFunction): id = 'extract_uri_authority' name = 'extract URI authority' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(1, hints.uri), - ]) depends_on = tuple([URIFunction.AUTHORITY]) @staticmethod @@ -632,10 +475,6 @@ def to_sa_expression(uri): class ExtractURIScheme(DBFunction): id = 'extract_uri_scheme' name = 'extract URI scheme' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(1, hints.uri), - ]) depends_on = tuple([URIFunction.SCHEME]) @staticmethod @@ -646,7 
+485,6 @@ def to_sa_expression(uri): class TruncateToYear(DBFunction): id = 'truncate_to_year' name = 'Truncate to Year' - hints = tuple([hints.parameter_count(1)]) # TODO extend hints @staticmethod def to_sa_expression(col): @@ -656,7 +494,6 @@ def to_sa_expression(col): class TruncateToMonth(DBFunction): id = 'truncate_to_month' name = 'Truncate to Month' - hints = tuple([hints.parameter_count(1)]) # TODO extend hints @staticmethod def to_sa_expression(col): @@ -666,7 +503,6 @@ def to_sa_expression(col): class TruncateToDay(DBFunction): id = 'truncate_to_day' name = 'Truncate to Day' - hints = tuple([hints.parameter_count(1)]) # TODO extend hints @staticmethod def to_sa_expression(col): @@ -676,7 +512,6 @@ def to_sa_expression(col): class CurrentDate(DBFunction): id = 'current_date' name = 'current date' - hints = tuple([hints.returns(hints.date), hints.parameter_count(0)]) @staticmethod def to_sa_expression(): @@ -686,7 +521,6 @@ def to_sa_expression(): class CurrentTime(DBFunction): id = 'current_time' name = 'current time' - hints = tuple([hints.returns(hints.time), hints.parameter_count(0)]) @staticmethod def to_sa_expression(): @@ -698,7 +532,6 @@ def to_sa_expression(): class CurrentDateTime(DBFunction): id = 'current_datetime' name = 'current datetime' - hints = tuple([hints.returns(hints.date, hints.time), hints.parameter_count(0)]) @staticmethod def to_sa_expression(): @@ -710,10 +543,6 @@ def to_sa_expression(): class ExtractEmailDomain(DBFunction): id = 'extract_email_domain' name = 'extract email domain' - hints = tuple([ - hints.parameter_count(1), - hints.parameter(1, hints.email), - ]) depends_on = tuple([EMAIL_DOMAIN_NAME]) @staticmethod diff --git a/db/functions/hints.py b/db/functions/hints.py deleted file mode 100644 index 0b998c1883..0000000000 --- a/db/functions/hints.py +++ /dev/null @@ -1,167 +0,0 @@ -from frozendict import frozendict - - -def get_hints_with_id(db_function_subclass, id): - return tuple( - hint - for hint in db_function_subclass.hints - if is_hint_id_equal_to(hint, id) - ) - - -def is_hint_id_equal_to(hint, id): - return hint.get("id") == id - - -def _make_hint(id, **rest): - return frozendict({"id": id, **rest}) - - -def get_parameter_hints(index, db_function_subclass): - """ - Returns the hints declared on the parameter at specified index. If explicit hints are not - declared for that parameter, returns the hints declared for all parameters. - """ - hints_for_all_parameters = None - for hint in db_function_subclass.hints: - if hint['id'] == "parameter" and hint['index'] == index: - hints_for_parameter_at_index = hint['hints'] - return hints_for_parameter_at_index - if hint['id'] == "all_parameters": - hints_for_all_parameters = hint['hints'] - return hints_for_all_parameters - - -def get_parameter_count(db_function_subclass): - for hint in db_function_subclass.hints: - if hint['id'] == "parameter_count": - return hint['count'] - return None - - -def parameter_count(count): - return _make_hint("parameter_count", count=count) - - -def parameter(index, *hints): - return _make_hint("parameter", index=index, hints=hints) - - -def all_parameters(*hints): - return _make_hint("all_parameters", hints=hints) - - -def returns(*hints): - return _make_hint("returns", hints=hints) - - -def get_parameter_type_hints(index, db_function_subclass): - """ - Returns the output of get_parameter_hints filtered to only include hints that are applicable to - types. Useful when comparing a parameter's hintset to a type's hintset. 
We do that when - matching filters to UI/Mathesar types, for example. - """ - parameter_hints = get_parameter_hints(index, db_function_subclass) - parameter_type_hints = tuple( - hint - for hint in parameter_hints - if _is_hint_applicable_to_types(hint) - ) - return parameter_type_hints - - -def _is_hint_applicable_to_types(hint): - """ - Checks that a hint doesn't have the `not_applicable_to_types` hintset. - """ - hints_about_hints = hint.get("hints", None) - if hints_about_hints: - return not_applicable_to_types not in hints_about_hints - else: - return True - - -# When applied to a hint, meant to suggest that it doesn't describe type attributes. -# Useful when you want to find only the hints that describe a type (or not a type). -# For example, when checking if hints applied to a Mathesar/UI type are a superset of hints applied -# to a parameter, you are only interested in hints that describe type-related information (that -# might be applied to a type). -not_applicable_to_types = _make_hint("not_applicable_to_types") - - -boolean = _make_hint("boolean") - - -comparable = _make_hint("comparable") - - -column = _make_hint("column") - - -array = _make_hint("array") - - -numeric = _make_hint("numeric") - - -string_like = _make_hint("string_like") - - -uri = _make_hint("uri") - - -email = _make_hint("email") - - -duration = _make_hint("duration") - - -time = _make_hint("time") - - -date = _make_hint("date") - - -literal = _make_hint("literal") - - -json = _make_hint("json") - - -json_array = _make_hint("jsonlist") - - -json_object = _make_hint("map") - - -# Meant to mark a DBFunction for the filtering API to use. -mathesar_filter = _make_hint("mathesar_filter") - - -# A hint that all types are meant to satisfy. -any = _make_hint("any") - - -# Meant to mark a DBFunction as an aggregation. -aggregation = _make_hint("aggregation") - - -# When applied to a parameter, meant to suggest values for that parameter. -def suggested_values(values): - return _make_hint("suggested_values", hints=(not_applicable_to_types,), values=values) - - -# This hints suggests that a type is a point in time -point_in_time = _make_hint("point_in_time") - - -# Specifies that under conditions suggested by the `when` hintset the passed `alias` should be -# used instead of the default name. Useful, for example, for filters that you want to have -# different display names depending on what it is operating on. 
-def use_this_alias_when(alias, *when): - return _make_hint( - "use_this_alias_when", - alias=alias, - when=when, - hints=(not_applicable_to_types,), - ) diff --git a/db/functions/operations/apply.py b/db/functions/operations/apply.py index da91c32c65..08c51a53ab 100644 --- a/db/functions/operations/apply.py +++ b/db/functions/operations/apply.py @@ -26,11 +26,6 @@ def apply_db_function_as_filter(relation, db_function): return relation -def get_sa_expression_from_db_function_spec(ma_function_spec): - db_function = get_db_function_from_ma_function_spec(ma_function_spec) - return _db_function_to_sa_expression(db_function) - - def _assert_that_all_referenced_columns_exist(relation, db_function): columns_that_exist = _get_columns_that_exist(relation) referenced_columns = db_function.referenced_columns diff --git a/db/functions/operations/check_support.py b/db/functions/operations/check_support.py deleted file mode 100644 index 2d003aa734..0000000000 --- a/db/functions/operations/check_support.py +++ /dev/null @@ -1,69 +0,0 @@ -from enum import Enum - -from sqlalchemy import select, MetaData, join, literal - -from db.functions.known_db_functions import known_db_functions -from db.utils import get_pg_catalog_table - - -def get_supported_db_functions(engine): - functions_on_database = _get_functions_defined_on_database(engine) - supported_db_functions = tuple( - db_function - for db_function in known_db_functions - if _are_db_function_dependencies_satisfied( - db_function, - functions_on_database - ) - ) - return supported_db_functions - - -# TODO consider caching -def _get_functions_defined_on_database(engine): - """ - Constructs and executes a query that returns the set of schema- - qualified function names on the database. E.g. - `{'mathesar_types.uri_scheme', ..., ...}`. - """ - metadata = MetaData() - pg_proc = get_pg_catalog_table('pg_proc', engine, metadata=metadata) - pg_namespace = get_pg_catalog_table('pg_namespace', engine, metadata=metadata) - join_statement = join(pg_proc, pg_namespace, pg_proc.c.pronamespace == pg_namespace.c.oid) - select_statement = ( - select(pg_namespace.c.nspname + literal('.') + pg_proc.c.proname) - .select_from(join_statement) - ) - with engine.connect() as connection: - return frozenset( - qualified_function_name - for qualified_function_name, - in connection.execute(select_statement) - ) - - -def _are_db_function_dependencies_satisfied(db_function, functions_on_database): - no_dependencies = not db_function.depends_on - return ( - no_dependencies - or all( - _is_dependency_function_in(dependency_function, functions_on_database) - for dependency_function in db_function.depends_on - ) - ) - - -def _is_dependency_function_in(dependency_function, functions_on_database): - """ - A dependency function may be specified as a string or as an enum instance, whose .value - attribute is the string name of the function. - - An enum instance is accepted since some SQL function names are stored in enums (e.g. URI - functions). 
- """ - def _get_function_name(dependency_function): - if isinstance(dependency_function, Enum): - return dependency_function.value - else: - return dependency_function - return _get_function_name(dependency_function) in functions_on_database diff --git a/db/functions/packed.py b/db/functions/packed.py index 8c8f46b2f4..b6170f9c8a 100644 --- a/db/functions/packed.py +++ b/db/functions/packed.py @@ -6,7 +6,7 @@ from abc import abstractmethod -from db.functions import hints, base +from db.functions import base from db.types.custom.uri import URIFunction from db.types.custom.email import EMAIL_DOMAIN_NAME @@ -38,9 +38,6 @@ class DistinctArrayAgg(DBFunctionPacked): """ id = 'distinct_aggregate_to_array' name = 'distinct aggregate to array' - hints = tuple([ - hints.aggregation, - ]) def unpack(self): param0 = self.parameters[0] @@ -53,12 +50,6 @@ def unpack(self): class NotNull(DBFunctionPacked): id = 'not_null' name = 'Is not null' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(1), - hints.parameter(0, hints.any), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -70,13 +61,6 @@ def unpack(self): class LesserOrEqual(DBFunctionPacked): id = 'lesser_or_equal' name = 'is lesser or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.comparable), - hints.mathesar_filter, - hints.use_this_alias_when("is before or same as", hints.point_in_time), - ]) def unpack(self): param0 = self.parameters[0] @@ -90,13 +74,6 @@ def unpack(self): class GreaterOrEqual(DBFunctionPacked): id = 'greater_or_equal' name = 'is greater or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.all_parameters(hints.comparable), - hints.mathesar_filter, - hints.use_this_alias_when("is before or same as", hints.point_in_time), - ]) def unpack(self): param0 = self.parameters[0] @@ -110,15 +87,6 @@ def unpack(self): class ArrayLengthEquals(DBFunctionPacked): id = 'array_length_equals' name = 'Number of elements is' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(3), - hints.parameter(0, hints.array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.parameter(2, hints.numeric), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -133,15 +101,6 @@ def unpack(self): class ArrayLengthGreaterThan(DBFunctionPacked): id = 'array_length_greater_than' name = 'Number of elements is greater than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(3), - hints.parameter(0, hints.array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.parameter(2, hints.numeric), - hints.mathesar_filter - ]) def unpack(self): param0 = self.parameters[0] @@ -156,15 +115,6 @@ def unpack(self): class ArrayLengthLessThan(DBFunctionPacked): id = 'array_length_lesser_than' name = 'Number of elements is lesser than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(3), - hints.parameter(0, hints.array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.parameter(2, hints.numeric), - hints.mathesar_filter - ]) def unpack(self): param0 = self.parameters[0] @@ -179,15 +129,6 @@ def unpack(self): class ArrayLengthGreaterOrEqual(DBFunctionPacked): id = 'array_length_greater_than_or_equal' name = 'Number of elements is greater than or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(3), - hints.parameter(0, hints.array), 
- # TODO any is too generic - hints.parameter(1, hints.any), - hints.parameter(2, hints.numeric), - hints.mathesar_filter - ]) def unpack(self): param0 = self.parameters[0] @@ -202,15 +143,6 @@ def unpack(self): class ArrayLengthLessOrEqual(DBFunctionPacked): id = 'array_length_lesser_than_or_equal' name = 'Number of elements is lesser than or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(3), - hints.parameter(0, hints.array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.parameter(2, hints.numeric), - hints.mathesar_filter - ]) def unpack(self): param0 = self.parameters[0] @@ -225,14 +157,6 @@ def unpack(self): class ArrayNotEmpty(DBFunctionPacked): id = 'array_not_empty' name = 'Is not empty' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -246,14 +170,6 @@ def unpack(self): class JsonLengthEquals(DBFunctionPacked): id = 'json_array_length_equals' name = 'Number of elements is' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - # TODO any is too generic - hints.parameter(1, hints.any), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -267,13 +183,6 @@ def unpack(self): class JsonLengthGreaterThan(DBFunctionPacked): id = 'json_array_length_greater_than' name = 'Number of elements is greater than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - hints.parameter(1, hints.numeric), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -287,13 +196,6 @@ def unpack(self): class JsonLengthGreaterorEqual(DBFunctionPacked): id = 'json_array_length_greater_or_equal' name = 'Number of elements is greater than or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - hints.parameter(1, hints.numeric), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -307,13 +209,6 @@ def unpack(self): class JsonLengthLessThan(DBFunctionPacked): id = 'json_array_length_less_than' name = 'Number of elements is less than' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - hints.parameter(1, hints.numeric), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -327,13 +222,6 @@ def unpack(self): class JsonLengthLessorEqual(DBFunctionPacked): id = 'json_array_length_less_or_equal' name = 'Number of elements is less than or equal to' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.json_array), - hints.parameter(1, hints.numeric), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -347,12 +235,6 @@ def unpack(self): class JsonNotEmpty(DBFunctionPacked): id = 'json_array_not_empty' name = 'Is not empty' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(1), - hints.parameter(0, hints.json_array), - hints.mathesar_filter, - ]) def unpack(self): param0 = self.parameters[0] @@ -365,13 +247,6 @@ def unpack(self): class URIAuthorityContains(DBFunctionPacked): id = 'uri_authority_contains' name = 'URI authority contains' - hints = tuple([ - hints.returns(hints.boolean), - 
hints.parameter_count(2), - hints.parameter(0, hints.uri), - hints.parameter(1, hints.string_like), - hints.mathesar_filter, - ]) depends_on = tuple([URIFunction.AUTHORITY]) def unpack(self): @@ -386,13 +261,6 @@ def unpack(self): class URISchemeEquals(DBFunctionPacked): id = 'uri_scheme_equals' name = 'URI scheme is' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.uri), - hints.parameter(1, hints.string_like), - hints.mathesar_filter, - ]) depends_on = tuple([URIFunction.SCHEME]) def unpack(self): @@ -407,13 +275,6 @@ def unpack(self): class EmailDomainContains(DBFunctionPacked): id = 'email_domain_contains' name = 'email domain contains' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.email), - hints.parameter(1, hints.string_like), - hints.mathesar_filter, - ]) depends_on = tuple([EMAIL_DOMAIN_NAME]) def unpack(self): @@ -428,13 +289,6 @@ def unpack(self): class EmailDomainEquals(DBFunctionPacked): id = 'email_domain_equals' name = 'email domain is' - hints = tuple([ - hints.returns(hints.boolean), - hints.parameter_count(2), - hints.parameter(0, hints.email), - hints.parameter(1, hints.string_like), - hints.mathesar_filter, - ]) depends_on = tuple([EMAIL_DOMAIN_NAME]) def unpack(self): From 79cee72911ee8f9b88ff08c3337d907389eea76c Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 10:36:38 +0800 Subject: [PATCH 55/70] remove unused attributes of ExplorationResult type --- mathesar/rpc/explorations.py | 12 ------------ mathesar_ui/src/api/rpc/explorations.ts | 6 ------ 2 files changed, 18 deletions(-) diff --git a/mathesar/rpc/explorations.py b/mathesar/rpc/explorations.py index c5a9786218..2b79a76516 100644 --- a/mathesar/rpc/explorations.py +++ b/mathesar/rpc/explorations.py @@ -100,10 +100,6 @@ class ExplorationResult(TypedDict): column_metadata: A dict describing the metadata applied to included columns. limit: Specifies the max number of rows returned.(default 100) offset: Specifies the number of rows skipped.(default 0) - filter: A dict describing filters applied to an exploration. - order_by: The ordering applied to the columns of an exploration. - search: Specifies a list of dicts containing column names and searched expression. - duplicate_only: A list of column names for which you want duplicate records. """ query: dict records: dict @@ -111,10 +107,6 @@ class ExplorationResult(TypedDict): column_metadata: dict limit: Optional[int] offset: Optional[int] - filter: Optional[dict] - order_by: Optional[list[dict]] - search: Optional[list[dict]] - duplicate_only: Optional[list] @classmethod def from_dict(cls, e): @@ -125,10 +117,6 @@ def from_dict(cls, e): column_metadata=e["column_metadata"], limit=e.get("limit", None), offset=e.get("offset", None), - filter=e.get("filter", None), - order_by=e.get("order_by", None), - search=e.get("search", None), - duplicate_only=e.get("duplicate_only", None), ) diff --git a/mathesar_ui/src/api/rpc/explorations.ts b/mathesar_ui/src/api/rpc/explorations.ts index dae99561bb..556f6cfe8b 100644 --- a/mathesar_ui/src/api/rpc/explorations.ts +++ b/mathesar_ui/src/api/rpc/explorations.ts @@ -185,12 +185,6 @@ export interface ExplorationResult { }; limit: number; offset: number; - filter: unknown; - order_by: unknown; - /** Specifies a list of dicts containing column names and searched expression. */ - search: unknown; - /** A list of column names for which you want duplicate records. 
*/ - duplicate_only: unknown; } export const explorations = { From 97e1d0881ae4fbba0d5ca46c905c778bddf37eca Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 10:53:41 +0800 Subject: [PATCH 56/70] remove unused record processing --- mathesar/api/utils.py | 88 ---------------------------------- mathesar/utils/explorations.py | 3 +- 2 files changed, 1 insertion(+), 90 deletions(-) delete mode 100644 mathesar/api/utils.py diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py deleted file mode 100644 index 74b27df815..0000000000 --- a/mathesar/api/utils.py +++ /dev/null @@ -1,88 +0,0 @@ -from db.records.operations import group -from mathesar.utils.preview import column_alias_from_preview_template - -DATA_KEY = 'data' -METADATA_KEY = 'metadata' - - -def process_annotated_records(record_list, column_name_id_map=None, preview_metadata=None): - - RESULT_IDX = 'result_indices' - - def _get_record_dict(record): - return record._asdict() if not isinstance(record, dict) else record - - split_records = ( - {DATA_KEY: record_dict} - for record_dict in (_get_record_dict(record) for record in record_list) - ) - - combined_records, groups = group.extract_group_metadata( - split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY - ) - - processed_records, record_metadata = zip( - *tuple(tuple(d.values()) for d in combined_records) - ) - - def _replace_column_names_with_ids(group_metadata_item): - try: - processed_group_metadata_item = { - column_name_id_map[k]: v for k, v in group_metadata_item.items() - } - except AttributeError: - # TODO why are we doing this catch? is this in case group_metadata_item is None? we - # should use an explicit None check in that case. - processed_group_metadata_item = group_metadata_item - return processed_group_metadata_item - - def _use_correct_column_identifier(group_metadata_item): - """ - If column_name_id_map is defined, the identifier to use is the column's Django ID. If - column_name_id_map is None, the identifier to use is the column's name/alias, in which - case, no processing is needed. 
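With the unused fields dropped, an ExplorationResult carries only the six keys that run_exploration actually fills in. A small sketch of building one from a result-shaped dict; the literal values below are made up for illustration:

from mathesar.rpc.explorations import ExplorationResult

raw_result = {
    "query": {},  # the exploration definition that was run (placeholder here)
    "records": {"count": 2, "results": [{"id": 1}, {"id": 2}]},
    "output_columns": ("id",),
    "column_metadata": {},
    "limit": 100,
    "offset": 0,
}
result = ExplorationResult.from_dict(raw_result)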
- """ - if column_name_id_map is not None: - return _replace_column_names_with_ids(group_metadata_item) - else: - return group_metadata_item - - if preview_metadata and column_name_id_map is not None: - column_id_name_map = {i: n for n, i in column_name_id_map.items()} - # Extract preview data from the records - # TODO Replace modifying the parameter directly - for preview_colum_id, preview_info in preview_metadata.items(): - preview_template = preview_info['template'] - # TODO Move the unwanted field preprocessing step to a suitable place - preview_metadata[preview_colum_id].pop('path') - # Move column id into the object so that dict can be flattened into a list - preview_metadata[preview_colum_id]['column'] = preview_colum_id - preview_data_column_aliases = column_alias_from_preview_template(preview_template) - preview_records = {} - for record in processed_records: - column_preview_data = {} - for preview_data_column_alias in preview_data_column_aliases: - preview_value = record.pop(preview_data_column_alias) - column_preview_data.update({preview_data_column_alias: preview_value}) - preview_records[str(record[column_id_name_map[preview_colum_id]])] = column_preview_data - preview_metadata[preview_colum_id]['data'] = preview_records - # Flatten the preview objects - preview_metadata = preview_metadata.values() - - if groups is not None: - groups_by_id = { - grp[group.GroupMetadataField.GROUP_ID.value]: { - k: _use_correct_column_identifier(v) for k, v in grp.items() - if k != group.GroupMetadataField.GROUP_ID.value - } | {RESULT_IDX: []} - for grp in groups - } - - for i, meta in enumerate(record_metadata): - groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i) - - output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0]) - else: - output_groups = None - - return processed_records, output_groups, preview_metadata diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index 1f91f5c413..5fc56c74a6 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -20,7 +20,6 @@ PeakMonth, ) from db.functions.packed import DistinctArrayAgg -from mathesar.api.utils import process_annotated_records from mathesar.models.base import Explorations, ColumnMetaData, Database from mathesar.rpc.columns.metadata import ColumnMetaDataRecord @@ -126,7 +125,7 @@ def run_exploration(exploration_def, conn, limit=100, offset=0): limit=limit, offset=offset ) - processed_records = process_annotated_records(records)[0] + processed_records = [r._asdict() for r in records] column_metadata = _get_exploration_column_metadata( exploration_def, processed_initial_columns, From 585bb109a315d9519e3d3a5d5fafc7026c0ed02b Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 10:55:53 +0800 Subject: [PATCH 57/70] remove unused Group and DuplicatesOnly transforms --- db/records/operations/select.py | 4 --- db/tests/records/operations/test_select.py | 17 --------- db/transforms/base.py | 41 +--------------------- db/transforms/operations/apply.py | 3 -- 4 files changed, 1 insertion(+), 64 deletions(-) diff --git a/db/records/operations/select.py b/db/records/operations/select.py index be407473b2..43289d2d17 100644 --- a/db/records/operations/select.py +++ b/db/records/operations/select.py @@ -120,7 +120,6 @@ def get_records( filter=None, group_by=None, search=None, - duplicate_only=None, fallback_to_default_ordering=False, ): """ @@ -138,8 +137,6 @@ def get_records( filter: a dictionary with one key-value pair, where the 
key is the filter id and the value is a list of parameters; supports composition/nesting. group_by: group.GroupBy object - duplicate_only: list of column names; only rows that have duplicates across those rows - will be returned """ if order_by is None: order_by = [] @@ -154,7 +151,6 @@ def get_records( filter=filter, group_by=group_by, search=search, - duplicate_only=duplicate_only, ) return execute_pg_query(engine, relation) diff --git a/db/tests/records/operations/test_select.py b/db/tests/records/operations/test_select.py index ac9bc71f02..c677336815 100644 --- a/db/tests/records/operations/test_select.py +++ b/db/tests/records/operations/test_select.py @@ -19,20 +19,3 @@ def test_get_records_gets_limited_offset_records(roster_table_obj): base_records = get_records(roster, engine, limit=10) offset_records = get_records(roster, engine, limit=10, offset=5) assert len(offset_records) == 10 and offset_records[0] == base_records[5] - - -def test_get_records_duplicate_only(roster_table_obj): - roster, engine = roster_table_obj - duplicate_only = ["Grade", "Subject"] - - full_record_list = get_records(roster, engine) - dupe_record_list = get_records(roster, engine, duplicate_only=duplicate_only) - - # Ensures that: - # - All duplicate values in the table appeared in our query - # - All values in our query are duplicate values - # - All duplicate values appear the correct number of times - all_counter = Counter(tuple(r[c] for c in duplicate_only) for r in full_record_list) - all_counter = {k: v for k, v in all_counter.items() if v > 1} - got_counter = Counter(tuple(r[c] for c in duplicate_only) for r in dupe_record_list) - assert all_counter == got_counter diff --git a/db/transforms/base.py b/db/transforms/base.py index 548fdf13fc..60ed086fde 100644 --- a/db/transforms/base.py +++ b/db/transforms/base.py @@ -7,7 +7,7 @@ from db.functions.operations.apply import apply_db_function_by_id, apply_db_function_spec_as_filter from db.functions.packed import DistinctArrayAgg -from db.records.operations import group, sort as rec_sort +from db.records.operations import sort as rec_sort class UniqueConstraintMapping: @@ -153,45 +153,6 @@ def apply_to_relation(self, relation): return _to_non_executable(executable) -class DuplicateOnly(Transform): - type = "duplicate_only" - - def apply_to_relation(self, relation): - duplicate_columns = self.spec - enforce_relation_type_expectations(relation) - DUPLICATE_LABEL = "_is_dupe" - duplicate_flag_col = ( - sqlalchemy.func - .count(1) - .over(partition_by=duplicate_columns) > 1 - ).label(DUPLICATE_LABEL) - duplicate_flag_cte = ( - select( - *relation.c, - duplicate_flag_col, - ).select_from(relation) - ).cte() - executable = ( - select(duplicate_flag_cte) - .where(duplicate_flag_cte.c[DUPLICATE_LABEL]) - ) - return _to_non_executable(executable) - - -class Group(Transform): - type = "group" - - def apply_to_relation(self, relation): - group_by = self.spec - # TODO maybe keep this as json, and convert to GroupBy at last moment? 
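For reference, the DuplicateOnly transform deleted just above found duplicates with a single window function: count the rows in each partition of the chosen columns, then keep only rows whose partition holds more than one. A standalone sketch of that pattern over an arbitrary SQLAlchemy relation, recorded here only to document what is being removed:

import sqlalchemy
from sqlalchemy import select


def duplicate_rows_only(relation, duplicate_columns):
    # Flag every row whose tuple of duplicate_columns values occurs more than
    # once, then keep only the flagged rows.
    is_dupe = (
        sqlalchemy.func.count(1).over(partition_by=duplicate_columns) > 1
    ).label('_is_dupe')
    flagged = select(*relation.c, is_dupe).select_from(relation).cte()
    return select(flagged).where(flagged.c['_is_dupe'])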
- # other transform specs are json at this point in the pipeline - if isinstance(group_by, group.GroupBy): - executable = group.get_group_augmented_records_pg_query(relation, group_by) - return _to_non_executable(executable) - else: - return relation - - class Summarize(Transform): """ "spec": { diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py index 74b9795b83..0e4250fcc2 100644 --- a/db/transforms/operations/apply.py +++ b/db/transforms/operations/apply.py @@ -25,7 +25,6 @@ def apply_transformations_deprecated( filter=None, columns_to_select=None, group_by=None, - duplicate_only=None, search=None, fallback_to_default_ordering=False, ): @@ -51,8 +50,6 @@ def apply_transformations_deprecated( if filter: transforms.append(base.Filter(filter)) - if duplicate_only: - transforms.append(base.DuplicateOnly(duplicate_only)) if group_by: transforms.append(base.Group(group_by)) if order_by or fallback_to_default_ordering: From 5dc247edf8f6b853fabe8b3a7d8b6d159b860a76 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 11:48:22 +0800 Subject: [PATCH 58/70] move into DBQuery object --- db/queries/base.py | 27 +- db/records/operations/select.py | 78 ------ db/tests/records/operations/test_select.py | 21 -- db/tests/records/operations/test_sort.py | 256 ------------------ db/tests/transforms/test_basic.py | 92 ------- db/tests/transforms/test_json_without_pkey.py | 7 - db/transforms/operations/apply.py | 30 +- mathesar/utils/explorations.py | 16 +- 8 files changed, 22 insertions(+), 505 deletions(-) delete mode 100644 db/tests/records/operations/test_select.py delete mode 100644 db/tests/records/operations/test_sort.py delete mode 100644 db/tests/transforms/test_basic.py delete mode 100644 db/tests/transforms/test_json_without_pkey.py diff --git a/db/queries/base.py b/db/queries/base.py index 0807b17e73..817b4a1ad9 100644 --- a/db/queries/base.py +++ b/db/queries/base.py @@ -1,12 +1,13 @@ from frozendict import frozendict from sqlalchemy import select +from sqlalchemy.sql.functions import count -from db.records.operations import select as records_select from db.columns.base import MathesarColumn from db.columns.operations.select import get_column_name_from_attnum from db.tables.operations.select import reflect_table_from_oid -from db.transforms.operations.apply import apply_transformations -from db.transforms.base import Order +from db.transforms.operations import apply +from db.transforms import base +from db.utils import execute_pg_query from db.metadata import get_empty_metadata @@ -113,12 +114,12 @@ def get_records(self, **kwargs): that would override the transformation). """ fallback_to_default_ordering = not self._is_sorting_transform_used - return records_select.get_records( + final_relation = apply.apply_transformations_deprecated( table=self.transformed_relation, - engine=self.engine, fallback_to_default_ordering=fallback_to_default_ordering, - **kwargs, + **kwargs ) + return execute_pg_query(self.engine, final_relation) @property def _is_sorting_transform_used(self): @@ -126,16 +127,20 @@ def _is_sorting_transform_used(self): Checks if any of the transforms define a sorting for the results. 
""" return any( - type(transform) is Order + type(transform) is base.Order for transform in self.transformations ) # mirrors a method in db.records.operations.select - def get_count(self, **kwargs): - return records_select.get_count( - table=self.transformed_relation, engine=self.engine, **kwargs, + @property + def count(self): + col_name = "_count" + relation = apply.apply_transformations_deprecated( + table=self.transformed_relation, + columns_to_select=[count(1).label(col_name)], ) + return execute_pg_query(self.engine, relation)[0][col_name] # NOTE if too expensive, can be rewritten to parse DBQuery spec, instead of leveraging sqlalchemy @property @@ -184,7 +189,7 @@ def transformed_relation(self): """ transformations = self.transformations if transformations: - transformed = apply_transformations( + transformed = apply.apply_transformations( self.initial_relation, transformations, ) diff --git a/db/records/operations/select.py b/db/records/operations/select.py index 43289d2d17..5d1f866d79 100644 --- a/db/records/operations/select.py +++ b/db/records/operations/select.py @@ -1,11 +1,6 @@ import json -from sqlalchemy import select -from sqlalchemy.sql.functions import count from db import connection as db_conn -from db.tables.utils import get_primary_key_column -from db.utils import execute_pg_query -from db.transforms.operations.apply import apply_transformations_deprecated def list_records_from_table( @@ -100,76 +95,3 @@ def search_records_from_table( table_oid, json.dumps(search), limit, return_record_summaries ).fetchone()[0] return result - - -def get_record(table, engine, id_value): - primary_key_column = get_primary_key_column(table) - pg_query = select(table).where(primary_key_column == id_value) - result = execute_pg_query(engine, pg_query) - assert len(result) <= 1 - return result[0] if result else None - - -# TODO consider using **kwargs instead of manually redefining defaults and piping all these arguments -def get_records( - table, - engine, - limit=None, - offset=None, - order_by=None, - filter=None, - group_by=None, - search=None, - fallback_to_default_ordering=False, -): - """ - Returns annotated records from a table. - - Args: - table: SQLAlchemy table object - engine: SQLAlchemy engine object - limit: int, gives number of rows to return - offset: int, gives number of rows to skip - order_by: list of dictionaries, where each dictionary has a 'field' and - 'direction' field. - search: list of dictionaries, where each dictionary has a 'column' and - 'literal' field. - filter: a dictionary with one key-value pair, where the key is the filter id and - the value is a list of parameters; supports composition/nesting. - group_by: group.GroupBy object - """ - if order_by is None: - order_by = [] - if search is None: - search = [] - relation = apply_transformations_deprecated( - table=table, - limit=limit, - offset=offset, - order_by=order_by, - fallback_to_default_ordering=fallback_to_default_ordering, - filter=filter, - group_by=group_by, - search=search, - ) - return execute_pg_query(engine, relation) - - -def get_count(table, engine, filter=None, search=None): - if search is None: - search = [] - col_name = "_count" - columns_to_select = [ - count(1).label(col_name) - ] - relation = apply_transformations_deprecated( - table=table, - limit=None, - offset=None, - # TODO does it make sense to order, when we're only interested in row count? 
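The row count that used to come from get_count is now a property on DBQuery, built from a count(1) aggregate over the already-transformed relation. A short usage sketch, assuming db_query is an existing DBQuery instance such as the one run_exploration builds:

total_rows = db_query.count                       # count(1) over the transformed relation
rows = db_query.get_records(limit=100, offset=0)  # paginated SQLAlchemy Row objects
as_dicts = [r._asdict() for r in rows]            # the same conversion run_exploration applies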
- order_by=None, - filter=filter, - columns_to_select=columns_to_select, - search=search, - ) - return execute_pg_query(engine, relation)[0][col_name] diff --git a/db/tests/records/operations/test_select.py b/db/tests/records/operations/test_select.py deleted file mode 100644 index c677336815..0000000000 --- a/db/tests/records/operations/test_select.py +++ /dev/null @@ -1,21 +0,0 @@ -from collections import Counter -from db.records.operations.select import get_records - - -def test_get_records_gets_all_records(roster_table_obj): - roster, engine = roster_table_obj - record_list = get_records(roster, engine) - assert len(record_list) == 1000 - - -def test_get_records_gets_limited_records(roster_table_obj): - roster, engine = roster_table_obj - record_list = get_records(roster, engine, limit=10) - assert len(record_list) == 10 - - -def test_get_records_gets_limited_offset_records(roster_table_obj): - roster, engine = roster_table_obj - base_records = get_records(roster, engine, limit=10) - offset_records = get_records(roster, engine, limit=10, offset=5) - assert len(offset_records) == 10 and offset_records[0] == base_records[5] diff --git a/db/tests/records/operations/test_sort.py b/db/tests/records/operations/test_sort.py deleted file mode 100644 index c4ce2a0c89..0000000000 --- a/db/tests/records/operations/test_sort.py +++ /dev/null @@ -1,256 +0,0 @@ -import pytest - -from sqlalchemy import MetaData, Table -from sqlalchemy.schema import DropConstraint - -from db.records.operations.select import get_records -from db.records.operations.sort import BadSortFormat, SortFieldNotFound - - -def test_get_records_gets_ordered_records_str_col_name(roster_table_obj): - roster, engine = roster_table_obj - order_list = [{"field": "Teacher", "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][4] == "Amber Hudson" - - -def test_get_records_gets_ordered_records_num_col(roster_table_obj): - roster, engine = roster_table_obj - order_list = [{"field": "Grade", "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][7] == 25 - - -def test_json_sort_array(json_table_obj): - roster, engine = json_table_obj - order_list = [{"field": "json_array", "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert [row["json_array"] for row in record_list] == [ - '[]', - '["BMW", "Ford", "Fiat"]', - '["BMW", "Ford", [1, 2]]', - '["BMW", "Ford", ["Akshay", "Prashant", "Varun"]]', - '["BMW", "Ford", [1, 2, 3]]', - '["Ford", "BMW", "Fiat"]', - '[1, 2, 3]', - '[1, 2, false]', - '[1, 2, true]', - '[2, 3, 4]', - '[false, false, false]', - '[true, true, false]', - '["BMW", "Ford", "Fiat", "Fiat"]', - '["Ram", "Shyam", "Radhika", "Akshay", "Prashant", "Varun"]' - ] - - -def test_json_sort_object(json_table_obj): - roster, engine = json_table_obj - order_list = [{"field": "json_object", "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert [row["json_object"] for row in record_list] == [ - '{}', - '{"name": "John"}', - '{"age": 30, "name": "John"}', - '{"30": "age", "car": null, "name": "John"}', - '{"age": 30, "car": null, "name": null}', - '{"age": 30, "car": null, "name": "Amy"}', - '{"age": 30, "car": null, "name": "John"}', - '{"age": 30, "car": null, "name": "John11"}', - '{"age": 30, "car": null, "name": 11}', - '{"age": 30, "car": null, "name": 12}', - '{"age": 30, "car": null, "name": false}', - '{"age": 30, "car": null, "name": true}', 
- '{"age": 30, "car": null, "name1": "John"}', - '{"car": null, "name": "John", "age11": 30}', - ] - - -def test_get_records_gets_ordered_records_str_col_obj(roster_table_obj): - roster, engine = roster_table_obj - order_list = [{"field": roster.columns["Teacher"], "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][4] == "Amber Hudson" - - -def test_get_records_gets_ordered_records_num_col_obj(roster_table_obj): - roster, engine = roster_table_obj - order_list = [{"field": roster.columns["Grade"], "direction": "asc"}] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][7] == 25 - - -def test_get_records_ordered_col_set(roster_table_obj): - roster, engine = roster_table_obj - order_list = [ - {"field": "Student Name", "direction": "asc"}, - {"field": "Grade", "direction": "asc"} - ] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][2] == "Alejandro Lam" and record_list[0][7] == 40 - - -def test_get_records_ordered_col_set_different_col_order(roster_table_obj): - roster, engine = roster_table_obj - order_list = [ - {"field": "Grade", "direction": "asc"}, - {"field": "Student Name", "direction": "asc"} - ] - record_list = get_records(roster, engine, order_by=order_list) - assert record_list[0][7] == 25 and record_list[0][2] == "Amy Gamble" - - -def test_get_records_orders_before_limiting(roster_table_obj): - roster, engine = roster_table_obj - order_list = [ - {"field": "Grade", "direction": "asc"}, - {"field": "Student Name", "direction": "asc"} - ] - record_list = get_records(roster, engine, limit=1, order_by=order_list) - assert record_list[0][7] == 25 and record_list[0][2] == "Amy Gamble" - - -def check_single_field_ordered(record_list, field, direction): - for i in range(1, len(record_list)): - prev = getattr(record_list[i - 1], field) - curr = getattr(record_list[i], field) - if prev is None or curr is None: - continue - comp_func = dir_to_python_func[direction] - assert comp_func(prev, curr) - - -def check_multi_field_ordered(record_list, field_dir_pairs): - for i in range(1, len(record_list)): - for field, direction in field_dir_pairs: - prev = getattr(record_list[i - 1], field) - curr = getattr(record_list[i], field) - if prev is None or curr is None: - continue - - comp_func = dir_to_python_func[direction] - # If fields are equal, check the next field - # If fields differ, ensure the comparison is correct - if prev != curr: - assert comp_func(prev, curr) - break - - -def test_get_records_default_order_single_primary_key(roster_table_obj): - roster, engine = roster_table_obj - primary_column = roster.primary_key.columns[0].name - record_list = get_records(roster, engine, fallback_to_default_ordering=True) - check_single_field_ordered(record_list, primary_column, 'asc') - - -def test_get_records_default_order_adds_primary_key(roster_table_obj): - roster, engine = roster_table_obj - primary_column = roster.primary_key.columns[0].name - passed_order_by = [{"field": "Subject", "direction": "asc"}] - record_list = get_records(roster, engine, order_by=passed_order_by) - field_dir_pairs = [("Subject", "asc"), (primary_column, "asc")] - check_multi_field_ordered(record_list, field_dir_pairs) - - -def test_get_records_default_order_composite_primary_key(filter_sort_table_obj): - filter_sort, engine = filter_sort_table_obj - primary_columns = [col.name for col in filter_sort.primary_key.columns] - record_list = get_records(filter_sort, engine) - field_dir_pairs 
= [(col, 'asc') for col in primary_columns] - check_multi_field_ordered(record_list, field_dir_pairs) - - -def test_get_records_default_order_no_primary_key(filter_sort_table_obj): - filter_sort, engine = filter_sort_table_obj - - constraint = filter_sort.primary_key - with engine.begin() as conn: - conn.execute(DropConstraint(constraint)) - metadata = MetaData(bind=engine) - filter_sort = Table( - filter_sort.name, metadata, schema=filter_sort.schema, autoload_with=engine - ) - assert len(filter_sort.primary_key.columns) == 0 - - record_list = get_records(filter_sort, engine) - - columns = [col.name for col in filter_sort.columns] - field_dir_pairs = [(col, 'asc') for col in columns] - check_multi_field_ordered(record_list, field_dir_pairs) - - -dir_to_python_func = { - "asc": lambda x, y: x <= y, - "desc": lambda x, y: x >= y, -} - - -single_field_test_list = [ - (field, direction, null) - for field in ["varchar", "numeric", "date"] - for direction in ["asc", "desc"] - for null in ["nullsfirst", "nullslast"] -] - - -@pytest.mark.parametrize("field,direction,null", single_field_test_list) -def test_get_records_orders_single_field( - filter_sort_table_obj, field, direction, null -): - filter_sort, engine = filter_sort_table_obj - order_list = [{"field": field, "direction": direction}] - order_list[0][null] = True - - record_list = get_records(filter_sort, engine, order_by=order_list) - - if null == "nullsfirst": - assert getattr(record_list[0], field) is None - elif null == "nullslast": - assert getattr(record_list[-1], field) is None - - check_single_field_ordered(record_list, field, direction) - - -multi_field_test_list = [ - list(zip(fields, directions)) - for fields in [ - ("Student Email", "Grade"), - ("Grade", "Student Email") - ] - for directions in [ - ["asc", "asc"], - ["asc", "desc"], - ["desc", "asc"], - ["desc", "desc"], - ] -] - - -@pytest.mark.parametrize("field_dir_pairs", multi_field_test_list) -def test_get_records_orders_multiple_fields( - roster_table_obj, field_dir_pairs -): - roster_sort, engine = roster_table_obj - order_list = [ - {"field": field, "direction": direction} - for field, direction in field_dir_pairs - ] - - record_list = get_records(roster_sort, engine, order_by=order_list) - - check_multi_field_ordered(record_list, field_dir_pairs) - - -exceptions_test_list = [ - (("field", "tuple", "direction", "asc"), BadSortFormat), - ([{"field": "varchar", "direction": "sideways"}], BadSortFormat), - ([{"direction": "asc"}], BadSortFormat), - ([{"field": "varchar"}], BadSortFormat), - ([{"field": "non_existent", "direction": "asc"}], SortFieldNotFound), -] - - -@pytest.mark.parametrize("order_list,exception", exceptions_test_list) -def test_get_records_orders_exceptions(filter_sort_table_obj, order_list, exception): - filter_sort, engine = filter_sort_table_obj - with pytest.raises(exception): - get_records(filter_sort, engine, order_by=order_list) diff --git a/db/tests/transforms/test_basic.py b/db/tests/transforms/test_basic.py deleted file mode 100644 index d6ecc6fb6e..0000000000 --- a/db/tests/transforms/test_basic.py +++ /dev/null @@ -1,92 +0,0 @@ -import pytest - -from db.transforms.operations.apply import apply_transformations -from db.transforms import base as transforms_base -from db.records.operations.select import get_records - - -@pytest.mark.parametrize( - "transformations,expected_records", - [ - [ - [ - transforms_base.Filter( - spec=dict( - contains=[ - dict(column_name=["Student Name"]), - dict(literal=["son"]), - ] - ), - ), - 
transforms_base.Order( - spec=[{"field": "Teacher Email", "direction": "asc"}], - ), - transforms_base.Limit( - spec=5, - ), - transforms_base.SelectSubsetOfColumns( - spec=["id"], - ), - ], - [ - (99,), - (194,), - (978,), - (155,), - (192,), - ] - ], - [ - [ - transforms_base.Limit( - spec=50, - ), - transforms_base.Filter( - spec=dict( - contains=[ - dict(column_name=["Student Name"]), - dict(literal=["son"]), - ] - ), - ), - transforms_base.Order( - spec=[{"field": "Teacher Email", "direction": "asc"}], - ), - transforms_base.Limit( - spec=5, - ), - transforms_base.SelectSubsetOfColumns( - spec=["id"], - ), - ], - [ - (16,), - (31,), - (18,), - (24,), - (33,), - ] - ], - [ - [ - transforms_base.Limit( - spec=1, - ), - transforms_base.SelectSubsetOfColumns( - spec=["id", "Grade"], - ), - transforms_base.HideColumns( - spec=["Grade"], - ), - ], - [ - (1,), - ] - ], - ] -) -def test_transformations(roster_table_obj, transformations, expected_records): - roster, engine = roster_table_obj - relation = apply_transformations(roster, transformations) - records = get_records(relation, engine) - assert records == expected_records diff --git a/db/tests/transforms/test_json_without_pkey.py b/db/tests/transforms/test_json_without_pkey.py deleted file mode 100644 index 2ce417e8e4..0000000000 --- a/db/tests/transforms/test_json_without_pkey.py +++ /dev/null @@ -1,7 +0,0 @@ -from db.records.operations.select import get_records - - -def test_default_ordering(json_without_pkey_table_obj): - table, engine = json_without_pkey_table_obj - records = get_records(table, engine, fallback_to_default_ordering=True) - assert len(records) == 2 diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py index 0e4250fcc2..bb495d771a 100644 --- a/db/transforms/operations/apply.py +++ b/db/transforms/operations/apply.py @@ -16,46 +16,20 @@ def _apply_transform(relation, transform): return relation -# NOTE deprecated; this will be replaced with apply_transformations def apply_transformations_deprecated( table, limit=None, offset=None, - order_by=None, - filter=None, columns_to_select=None, - group_by=None, - search=None, fallback_to_default_ordering=False, ): - """ - ## Regarding ordering - - The `fallback_to_default_ordering` flag, when true, will make sure that the ordering is total, - even if `order_by` is not provided. When `order_by` is provided, it will be converted into a - total ordering automatically. As a consequence, the ordering is always total. - - At the same time, when both `order_by` and `fallback_to_default_ordering` are falsy, an ordering - will not be applied. This is useful, when `table` has already been pre-sorted (e.g. because it's - actually the result of a DBQuery that defines an ordering that we don't want to override). 
- """ - # TODO rename the actual method parameter - if search is None: - search = [] relation = table - enforce_relation_type_expectations(relation) transforms = [] - if filter: - transforms.append(base.Filter(filter)) - if group_by: - transforms.append(base.Group(group_by)) - if order_by or fallback_to_default_ordering: - transforms.append(base.Order(order_by)) - if search: - transforms.append(base.Search([search, limit])) + if fallback_to_default_ordering: + transforms.append(base.Order([])) if columns_to_select: transforms.append(base.SelectSubsetOfColumns(columns_to_select)) if offset: diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index 5fc56c74a6..f1ddbbd316 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -1,6 +1,5 @@ from db.engine import create_future_engine_with_custom_types from db.metadata import get_empty_metadata -from db.records.operations.select import get_count from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.queries.operations.process import get_transforms_with_summarizes_speced from db.tables.operations.select import get_table @@ -121,11 +120,8 @@ def run_exploration(exploration_def, conn, limit=100, offset=0): transformations, exploration_def.get("display_names", {}) ) - records = db_query.get_records( - limit=limit, - offset=offset - ) - processed_records = [r._asdict() for r in records] + query_results = db_query.get_records(limit=limit, offset=offset) + column_metadata = _get_exploration_column_metadata( exploration_def, processed_initial_columns, @@ -137,12 +133,8 @@ def run_exploration(exploration_def, conn, limit=100, offset=0): return { "query": exploration_def, "records": { - "count": get_count( - table=db_query.transformed_relation, - engine=engine, - filter=exploration_def.get('filter', None) - ), - "results": processed_records + "count": db_query.count, + "results": [r._asdict() for r in query_results], }, "output_columns": tuple(sa_col.name for sa_col in db_query.sa_output_columns), "column_metadata": column_metadata, From 69b2c92c98ecbb1e9153696d8eabe22c06bf71a6 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 12:26:11 +0800 Subject: [PATCH 59/70] remove unused record grouping code --- db/records/operations/group.py | 596 ---------------- db/tests/records/operations/test_group.py | 809 ---------------------- 2 files changed, 1405 deletions(-) delete mode 100644 db/records/operations/group.py delete mode 100644 db/tests/records/operations/test_group.py diff --git a/db/records/operations/group.py b/db/records/operations/group.py deleted file mode 100644 index 050242833e..0000000000 --- a/db/records/operations/group.py +++ /dev/null @@ -1,596 +0,0 @@ -from enum import Enum -import json -import logging -from sqlalchemy import select, func, and_, case, literal, cast, TEXT, extract - -from db.functions.operations.deserialize import get_db_function_subclass_by_id -from db.records import exceptions as records_exceptions -from db.records.operations import calculation -from db.records.utils import create_col_objects - -logger = logging.getLogger(__name__) - -MATHESAR_GROUP_METADATA = '__mathesar_group_metadata' - - -class GroupMode(Enum): - DISTINCT = 'distinct' - ENDPOINTS = 'endpoints' # intended for internal use at the moment - EXTRACT = 'extract' - MAGNITUDE = 'magnitude' - COUNT_BY = 'count_by' - PERCENTILE = 'percentile' - PREFIX = 'prefix' - - -class GroupMetadataField(Enum): - COUNT = 'count' - GROUP_ID = 'group_id' - FIRST_VALUE = 'first_value' - 
LAST_VALUE = 'last_value' - LEQ_VALUE = 'less_than_eq_value' - GEQ_VALUE = 'greater_than_eq_value' - LT_VALUE = 'less_than_value' - GT_VALUE = 'greater_than_value' - EQ_VALUE = 'eq_value' - - -class GroupBy: - def __init__( - self, - columns, - mode=GroupMode.DISTINCT.value, - preproc=None, - num_groups=None, - bound_tuples=None, - count_by=None, - global_min=None, - global_max=None, - prefix_length=None, - extract_field=None, - ): - self._columns = tuple(columns) if type(columns) is not str else tuple([columns]) - self._mode = mode - if type(preproc) is str: - self._preproc = tuple([preproc]) - elif preproc is not None: - self._preproc = tuple(preproc) - else: - self._preproc = None - self._num_groups = num_groups - self._bound_tuples = bound_tuples - self._count_by = count_by - self._global_min = global_min - self._global_max = global_max - self._prefix_length = prefix_length - self._extract_field = extract_field - self._ranged = bool(mode != GroupMode.DISTINCT.value) - self.validate() - - @property - def columns(self): - return self._columns - - @property - def mode(self): - return self._mode - - @property - def preproc(self): - return self._preproc - - @property - def num_groups(self): - return self._num_groups - - @property - def bound_tuples(self): - if self._bound_tuples is not None: - return self._bound_tuples - elif self._mode == GroupMode.COUNT_BY.value: - return [bt for bt in self._bound_tuple_generator()] - - @property - def count_by(self): - return self._count_by - - @property - def global_min(self): - return self._count_by - - @property - def global_max(self): - return self._count_by - - @property - def prefix_length(self): - return self._prefix_length - - @property - def extract_field(self): - return self._extract_field - - @property - def ranged(self): - return self._ranged - - def _bound_tuple_generator(self): - val = self._global_min - while val <= self._global_max: - yield (val,) - val += self._count_by - - def validate(self): - group_modes = {group_mode.value for group_mode in GroupMode} - if self.mode not in group_modes: - raise records_exceptions.InvalidGroupType( - f'mode "{self.mode}" is invalid. valid modes are: ' - + ', '.join([f"'{gm}'" for gm in group_modes]) - ) - elif self.preproc is not None and len(self.preproc) != len(self.columns): - raise records_exceptions.BadGroupFormat( - 'preproc must be same length as columns if given' - ) - - elif ( - self.mode == GroupMode.PERCENTILE.value - and not type(self.num_groups) is int - ): - raise records_exceptions.BadGroupFormat( - f'{GroupMode.PERCENTILE.value} mode requires integer num_groups' - ) - elif self.mode == GroupMode.MAGNITUDE.value and not len(self.columns) == 1: - raise records_exceptions.BadGroupFormat( - f'{GroupMode.MAGNITUDE.value} mode only works on single columns' - ) - elif self.mode == GroupMode.ENDPOINTS.value and not self.bound_tuples: - raise records_exceptions.BadGroupFormat( - f'{GroupMode.ENDPOINTS.value} mode requires bound_tuples' - ) - elif ( - self.mode == GroupMode.PREFIX.value - and ( - not len(self.columns) == 1 - or self.prefix_length is None - ) - ): - raise records_exceptions.BadGroupFormat( - f'{GroupMode.PREFIX.value} mode requires prefix_length,' - ' and only works for single columns.' 
- ) - elif ( - self.mode == GroupMode.COUNT_BY.value - and ( - self._count_by is None - or not len(self.columns) == 1 - or self._global_min is None - or self._global_max is None - ) - ): - raise records_exceptions.BadGroupFormat( - f'{GroupMode.COUNT_BY.value} mode requires' - ' count_by, global_min, and global_max.' - ' further, it works only for single columns.' - ) - elif ( - self.mode == GroupMode.EXTRACT.value - and (not len(self.columns) == 1 or self._extract_field is None) - ): - raise records_exceptions.BadGroupFormat( - f'{GroupMode.EXTRACT.value} requires extract_field,' - ' and only works for single columns.' - ) - - for col in self.columns: - if type(col) is not str: - raise records_exceptions.BadGroupFormat( - f"Group column {col} must be a string." - ) - - def get_validated_group_by_columns(self, table): - for col in self.columns: - col_name = col if isinstance(col, str) else col.name - if col_name not in table.columns: - raise records_exceptions.GroupFieldNotFound( - f"Group col {col} not found in {table}." - ) - return create_col_objects(table, self.columns) - - -class GroupingWindowDefinition: - def __init__(self, partition_by, order_by): - self._partition_by = partition_by - self._order_by = tuple(order_by) - self._range = (None, None) - - @property - def partition_by(self): - return self._partition_by - - @property - def order_by(self): - return self._order_by - - @property - def range_(self): - return self._range - - -def get_group_augmented_records_pg_query(table, group_by): - """ - Returns counts by specified groupings - - Args: - table: SQLAlchemy table object - group_by: GroupBy object giving args for grouping - """ - grouping_columns = group_by.get_validated_group_by_columns(table) - - if group_by.mode == GroupMode.PERCENTILE.value: - pg_query = _get_percentile_range_group_select( - table, grouping_columns, group_by.num_groups - ) - elif ( - group_by.mode == GroupMode.ENDPOINTS.value - or group_by.mode == GroupMode.COUNT_BY.value - ): - pg_query = _get_custom_endpoints_range_group_select( - table, grouping_columns, group_by.bound_tuples - ) - elif group_by.mode == GroupMode.MAGNITUDE.value: - pg_query = _get_tens_powers_range_group_select(table, grouping_columns) - elif group_by.mode == GroupMode.DISTINCT.value: - pg_query = _get_distinct_group_select(table, grouping_columns, group_by.preproc) - elif group_by.mode == GroupMode.PREFIX.value: - pg_query = _get_prefix_group_select(table, grouping_columns, group_by.prefix_length) - elif group_by.mode == GroupMode.EXTRACT.value: - pg_query = _get_extract_group_select(table, grouping_columns, group_by.extract_field) - else: - raise records_exceptions.BadGroupFormat("Unknown error") - return pg_query - - -def _get_distinct_group_select(table, grouping_columns, preproc): - def _get_processed_column(proc, col): - if proc is not None: - pcol = get_db_function_subclass_by_id(proc).to_sa_expression(col) - else: - pcol = col - return pcol - - if preproc is not None: - processed_columns = [ - _get_processed_column(proc, col) - for proc, col in zip(preproc, grouping_columns) - ] - else: - processed_columns = grouping_columns - - eq_expr = func.json_build_object( - *[ - i for t in [ - (literal(col.name), val) - for col, val in zip(grouping_columns, processed_columns) - ] - for i in t - ] - ) - - window_def = GroupingWindowDefinition( - order_by=grouping_columns, partition_by=processed_columns - ) - - group_id_expr = func.dense_rank().over( - order_by=processed_columns, range_=window_def.range_ - ) - return select( - table, - 
_get_group_metadata_definition( - window_def, grouping_columns, group_id_expr, eq_expr=eq_expr - ) - ) - - -def _get_extract_group_select(table, grouping_columns, extract_field): - processed_columns = [extract(extract_field, grouping_columns[0])] - - eq_expr = func.json_build_object( - *[ - i for t in [ - (literal(col.name), val) - for col, val in zip(grouping_columns, processed_columns) - ] - for i in t - ] - ) - - window_def = GroupingWindowDefinition( - order_by=grouping_columns, partition_by=processed_columns - ) - group_id_expr = func.dense_rank().over( - order_by=processed_columns, range_=window_def.range_ - ) - - return select( - table, - _get_group_metadata_definition( - window_def, grouping_columns, group_id_expr, eq_expr=eq_expr - ) - ) - - -def _get_prefix_group_select(table, grouping_columns, prefix_length): - grouping_column = grouping_columns[0] - prefix_expr = func.left(cast(grouping_column, TEXT), prefix_length) - window_def = GroupingWindowDefinition( - order_by=grouping_columns, partition_by=prefix_expr - ) - group_id_expr = func.dense_rank().over( - order_by=window_def.partition_by, range_=window_def.range_ - ) - return select( - table, - _get_group_metadata_definition(window_def, grouping_columns, group_id_expr) - ) - - -def _get_tens_powers_range_group_select(table, grouping_columns): - EXTREMA_DIFF = 'extrema_difference' - POWER = 'power' - RAW_ID = 'raw_id' - - assert len(grouping_columns) == 1 - grouping_column = grouping_columns[0] - diff_cte = calculation.get_extrema_diff_select( - table, grouping_column, EXTREMA_DIFF - ).cte('diff_cte') - power_cte = calculation.get_offset_order_of_magnitude_select( - diff_cte, diff_cte.columns[EXTREMA_DIFF], POWER - ).cte('power_cte') - raw_id_cte = calculation.divide_by_power_of_ten_select( - power_cte, - power_cte.columns[grouping_column.name], - power_cte.columns[POWER], - RAW_ID - ).cte('raw_id_cte') - cte_main_col_list = [ - col for col in raw_id_cte.columns if col.name == grouping_column.name - ] - window_def = GroupingWindowDefinition( - order_by=cte_main_col_list, partition_by=raw_id_cte.columns[RAW_ID] - ) - - group_id_expr = func.dense_rank().over( - order_by=window_def.partition_by, range_=window_def.range_ - ) - - def _get_pretty_bound_expr(id_offset): - raw_id_col = raw_id_cte.columns[RAW_ID] - power_col = raw_id_cte.columns[POWER] - power_expr = func.pow(literal(10.0), power_col) - return case( - (power_col >= 0, func.trunc((raw_id_col + id_offset) * power_expr)), - else_=func.trunc( - (raw_id_col + id_offset) * power_expr, - ((-1) * power_col) - ) - ) - - geq_expr = func.json_build_object( - grouping_column.name, _get_pretty_bound_expr(0) - ) - lt_expr = func.json_build_object( - grouping_column.name, _get_pretty_bound_expr(1) - ) - return select( - *[col for col in raw_id_cte.columns if col.name in table.columns], - _get_group_metadata_definition( - window_def, - cte_main_col_list, - group_id_expr, - geq_expr=geq_expr, - lt_expr=lt_expr, - ) - ) - - -def _get_custom_endpoints_range_group_select(table, columns, bound_tuples_list): - column_names = [col.name for col in columns] - RANGE_ID = 'range_id' - GEQ_BOUND = 'geq_bound' - LT_BOUND = 'lt_bound' - - def _get_inner_json_object(bound_tuple): - key_value_tuples = ( - (literal(str(col)), literal(val)) - for col, val in zip(column_names, bound_tuple) - ) - key_value_list = [ - part for tup in key_value_tuples for part in tup - ] - return func.json_build_object(*key_value_list) - - def _build_range_cases(result_expr): - return [ - ( - and_( - func.ROW(*columns) >= 
func.ROW(*bound_tuples_list[i]), - func.ROW(*columns) < func.ROW(*bound_tuples_list[i + 1]) - ), - result_expr(i) - ) - for i in range(len(bound_tuples_list) - 1) - ] - ranges_cte = select( - *columns, - case(*_build_range_cases(lambda x: x + 1), else_=None).label(RANGE_ID), - case( - *_build_range_cases( - lambda x: _get_inner_json_object(bound_tuples_list[x]) - ), - else_=None - ).label(GEQ_BOUND), - case( - *_build_range_cases( - lambda x: _get_inner_json_object(bound_tuples_list[x + 1]) - ), - else_=None - ).label(LT_BOUND), - ).cte() - - ranges_aggregation_cols = [ - col for col in ranges_cte.columns if col.name in column_names - ] - window_def = GroupingWindowDefinition( - order_by=ranges_aggregation_cols, - partition_by=ranges_cte.columns[RANGE_ID] - ) - group_id_expr = window_def.partition_by - geq_expr = ranges_cte.columns[GEQ_BOUND] - lt_expr = ranges_cte.columns[LT_BOUND] - return select( - *[col for col in ranges_cte.columns if col.name in table.columns], - _get_group_metadata_definition( - window_def, - ranges_aggregation_cols, - group_id_expr, - geq_expr=geq_expr, - lt_expr=lt_expr, - ) - ).where(ranges_cte.columns[RANGE_ID] != None) # noqa - - -def _get_percentile_range_group_select(table, columns, num_groups): - column_names = [col.name for col in columns] - # cume_dist is a PostgreSQL function that calculates the cumulative - # distribution. - # See https://www.postgresql.org/docs/13/functions-window.html - CUME_DIST = 'cume_dist' - RANGE_ID = 'range_id' - cume_dist_cte = select( - table, - func.cume_dist().over(order_by=columns).label(CUME_DIST) - ).cte() - ranges = [ - ( - and_( - cume_dist_cte.columns[CUME_DIST] > i / num_groups, - cume_dist_cte.columns[CUME_DIST] <= (i + 1) / num_groups - ), - i + 1 - ) - for i in range(num_groups) - ] - - ranges_cte = select( - *[col for col in cume_dist_cte.columns if col.name != CUME_DIST], - case(*ranges).label(RANGE_ID) - ).cte() - ranges_aggregation_cols = [ - col for col in ranges_cte.columns if col.name in column_names - ] - window_def = GroupingWindowDefinition( - order_by=ranges_aggregation_cols, - partition_by=ranges_cte.columns[RANGE_ID] - ) - group_id_expr = window_def.partition_by - - return select( - *[col for col in ranges_cte.columns if col.name in table.columns], - _get_group_metadata_definition( - window_def, ranges_aggregation_cols, group_id_expr - ) - ) - - -def _get_group_metadata_definition( - window_def, - grouping_columns, - group_id_expr, - leq_expr=None, - geq_expr=None, - lt_expr=None, - gt_expr=None, - eq_expr=None, -): - col_key_value_tuples = ( - (literal(str(col.name)), col) for col in grouping_columns - ) - col_key_value_list = [ - col_part for col_tuple in col_key_value_tuples for col_part in col_tuple - ] - inner_grouping_object = func.json_build_object(*col_key_value_list) - - return func.json_build_object( - literal(GroupMetadataField.GROUP_ID.value), - group_id_expr, - literal(GroupMetadataField.COUNT.value), - func.count(1).over(partition_by=window_def.partition_by), - literal(GroupMetadataField.FIRST_VALUE.value), - func.first_value(inner_grouping_object).over( - partition_by=window_def.partition_by, - order_by=window_def.order_by, - range_=window_def.range_, - ), - literal(GroupMetadataField.LAST_VALUE.value), - func.last_value(inner_grouping_object).over( - partition_by=window_def.partition_by, - order_by=window_def.order_by, - range_=window_def.range_, - ), - # These values are 'pretty' bounds. 
What 'pretty' means is based - # on the caller, and so these expressions need to be defined by - # that caller. - literal(GroupMetadataField.LEQ_VALUE.value), - leq_expr, - literal(GroupMetadataField.GEQ_VALUE.value), - geq_expr, - literal(GroupMetadataField.LT_VALUE.value), - lt_expr, - literal(GroupMetadataField.GT_VALUE.value), - gt_expr, - literal(GroupMetadataField.EQ_VALUE.value), - eq_expr, - ).label(MATHESAR_GROUP_METADATA) - - -def extract_group_metadata( - record_dictionaries, data_key='data', metadata_key='metadata', -): - """ - This function takes an iterable of record dictionaries with record data and - record metadata, and moves the group metadata from the data section to the - metadata section. - """ - def _get_record_pieces(record): - data = { - k: v for k, v in record[data_key].items() - if k != MATHESAR_GROUP_METADATA - } - group_metadata = record[data_key].get(MATHESAR_GROUP_METADATA, {}) - if group_metadata: - metadata = ( - record.get(metadata_key, {}) - | { - GroupMetadataField.GROUP_ID.value: group_metadata.get( - GroupMetadataField.GROUP_ID.value - ) - } - ) - else: - metadata = record.get(metadata_key) - return ( - {data_key: data, metadata_key: metadata}, - group_metadata if group_metadata else None - ) - - record_tup, group_tup = zip( - *(_get_record_pieces(record) for record in record_dictionaries) - ) - - reduced_groups = sorted( - [json.loads(blob) for blob in set([json.dumps(group) for group in group_tup])], - key=lambda x: x[GroupMetadataField.GROUP_ID.value] if x else None - ) - - return list(record_tup), reduced_groups if reduced_groups != [None] else None diff --git a/db/tests/records/operations/test_group.py b/db/tests/records/operations/test_group.py deleted file mode 100644 index 0f0fcd7e2e..0000000000 --- a/db/tests/records/operations/test_group.py +++ /dev/null @@ -1,809 +0,0 @@ -import pytest -from sqlalchemy import Column - -from db.records.operations import group -from db.records import exceptions as records_exceptions - - -@pytest.fixture -def roster_distinct_setup(roster_table_obj): - roster, engine = roster_table_obj - input_cols = ['Student Number', 'Student Email'] - gb = group.GroupBy(columns=input_cols) - grouping_columns = gb.get_validated_group_by_columns(roster) - sel = group._get_distinct_group_select(roster, grouping_columns, None) - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - return res - - -@pytest.fixture -def roster_percentile_subj_grade_setup(roster_table_obj): - roster, engine = roster_table_obj - input_cols = ['Subject', 'Grade'] - group_by = group.GroupBy( - columns=input_cols, - mode=group.GroupMode.PERCENTILE.value, - num_groups=12 - ) - grouping_columns = group_by.get_validated_group_by_columns(roster) - num_groups = group_by.num_groups - sel = group._get_percentile_range_group_select(roster, grouping_columns, num_groups) - with engine.begin() as conn: - res = conn.execute(sel).fetchall() - return res - - -@pytest.fixture -def record_dictionary_list(): - return [ - { - 'data': { - 'id': 1, 'Center': 'NASA KSC', 'Status': 'Application', 'Case Number': 'KSC-12871', - '__mathesar_group_metadata': { - 'group_id': 15, 'count': 29, - 'first_value': {'Center': 'NASA KSC', 'Status': 'Application'}, - 'last_value': {'Center': 'NASA KSC', 'Status': 'Application'}, - 'less_than_eq_value': None, 'greater_than_eq_value': None, - 'less_than_value': None, 'greater_than_value': None, - } - }, - 'metadata': {} - }, - { - 'data': { - 'id': 2, 'Center': 'NASA ARC', 'Status': 'Issued', 'Case Number': 'ARC-14048-1', - 
'__mathesar_group_metadata': { - 'group_id': 2, 'count': 100, - 'first_value': {'Center': 'NASA ARC', 'Status': 'Issued'}, - 'last_value': {'Center': 'NASA ARC', 'Status': 'Issued'}, - 'less_than_eq_value': None, 'greater_than_eq_value': None, - 'less_than_value': None, 'greater_than_value': None, - } - }, - 'metadata': {} - }, - { - 'data': { - 'id': 3, 'Center': 'NASA ARC', 'Status': 'Issued', 'Case Number': 'ARC-14231-1', - '__mathesar_group_metadata': { - 'group_id': 2, 'count': 100, - 'first_value': {'Center': 'NASA ARC', 'Status': 'Issued'}, - 'last_value': {'Center': 'NASA ARC', 'Status': 'Issued'}, - 'less_than_eq_value': None, 'greater_than_eq_value': None, - 'less_than_value': None, 'greater_than_value': None, - } - }, - 'metadata': {} - } - ] - - -def test_GB_validate_passes_defaults(): - gb = group.GroupBy( - columns=['col1', 'col2'], - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs(): - gb = group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.DISTINCT.value - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_perc(): - gb = group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=1234, - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_mag(): - gb = group.GroupBy( - columns=['col1'], - mode=group.GroupMode.MAGNITUDE.value, - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_endpoints(): - gb = group.GroupBy( - columns=['col1'], - mode=group.GroupMode.ENDPOINTS.value, - bound_tuples=[('a', 5), ('b', 0)], - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_count_by(): - gb = group.GroupBy( - columns=['col1'], - mode=group.GroupMode.COUNT_BY.value, - count_by=3, - global_min=234.5, - global_max=987.6 - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_prefix(): - gb = group.GroupBy( - columns=['col1'], - mode=group.GroupMode.PREFIX.value, - prefix_length=3 - ) - gb.validate() - - -def test_GB_validate_passes_valid_kwargs_extract_field(): - gb = group.GroupBy( - columns=['col1'], - mode=group.GroupMode.EXTRACT.value, - extract_field='year' - ) - gb.validate() - - -def test_GB_validate_fails_invalid_mode(): - with pytest.raises(records_exceptions.InvalidGroupType): - group.GroupBy( - columns=['col1', 'col2'], - mode='potato', - num_groups=1234, - ) - - -def test_GB_validate_fails_invalid_num_group(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=None, - ) - - -def test_GB_validate_fails_invalid_columns_len(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.MAGNITUDE.value, - ) - - -def test_GB_validate_fails_missing_bound_tuples(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.ENDPOINTS.value, - ) - - -def test_GB_validate_fails_missing_prefix_length(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1'], - mode=group.GroupMode.PREFIX.value, - ) - - -def test_GB_validate_fails_multi_cols_prefix(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.PREFIX.value, - prefix_length=3 - ) - - -def test_GB_validate_fails_missing_extract_field(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1'], - mode=group.GroupMode.EXTRACT.value, - 
) - - -def test_GB_validate_fails_multi_cols_extract(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.EXTRACT.value, - extract_field='year' - ) - - -def test_GB_validate_fails_missing_count_by(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1'], - mode=group.GroupMode.COUNT_BY.value, - count_by=None, - global_min=234.5, - global_max=987.6 - ) - - -def test_GB_validate_fails_missing_global_min(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1'], - mode=group.GroupMode.COUNT_BY.value, - count_by=3, - global_min=None, - global_max=987.6 - ) - - -def test_GB_validate_fails_missing_global_max(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1'], - mode=group.GroupMode.COUNT_BY.value, - count_by=3, - global_min=234.5, - global_max=None - ) - - -def test_GB_validate_fails_multiple_cols_with_count_by(): - with pytest.raises(records_exceptions.BadGroupFormat): - group.GroupBy( - columns=['col1', 'col2'], - mode=group.GroupMode.COUNT_BY.value, - count_by=3, - global_min=234.5, - global_max=987.6 - ) - - -def test_GB_get_valid_group_by_columns_str_cols(roster_table_obj): - roster, _ = roster_table_obj - column_names = ['Student Number', 'Student Email'] - gb = group.GroupBy(columns=column_names) - cols = gb.get_validated_group_by_columns(roster) - assert all( - [ - isinstance(col, Column) and col.name == name - for col, name in zip(cols, column_names) - ] - ) - - -def test_GB_get_valid_group_by_columns_invalid_col(roster_table_obj): - roster, _ = roster_table_obj - input_cols = ['notintable'] - gb = group.GroupBy(columns=input_cols) - with pytest.raises(records_exceptions.GroupFieldNotFound): - gb.get_validated_group_by_columns(roster) - - -def _group_first_val(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.FIRST_VALUE.value] - - -def _group_last_val(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.LAST_VALUE.value] - - -def _group_geq_value(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.GEQ_VALUE.value] - - -def _group_lt_value(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.LT_VALUE.value] - - -def _group_eq_value(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.EQ_VALUE.value] - - -def _group_count(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.COUNT.value] - - -def _group_id(row): - return row[group.MATHESAR_GROUP_METADATA][group.GroupMetadataField.GROUP_ID.value] - - -basic_group_modes = [ - group.GroupMode.DISTINCT.value, - group.GroupMode.PERCENTILE.value, - group.GroupMode.ENDPOINTS.value, -] - - -@pytest.mark.parametrize('group_mode', basic_group_modes) -def test_get_group_augmented_records_pg_query_metadata_fields(roster_table_obj, group_mode): - roster, engine = roster_table_obj - group_by = group.GroupBy( - ['Student Number', 'Student Name'], - mode=group_mode, - num_groups=12, - bound_tuples=[ - ('00000000-0000-0000-0000-000000000000', 'Alice'), - ('77777777-7777-7777-7777-777777777777', 'Margot'), - ('ffffffff-ffff-ffff-ffff-ffffffffffff', 'Zachary'), - ] - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - 
for metadata_field in group.GroupMetadataField - ] - ) - - -def test_smoke_get_group_augmented_records_pg_query_prefix(roster_table_obj): - roster, engine = roster_table_obj - group_by = group.GroupBy( - ['Student Number'], - mode=group.GroupMode.PREFIX.value, - prefix_length=1, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - for metadata_field in group.GroupMetadataField - ] - ) - - -def test_smoke_get_group_augmented_records_pg_query_email_preproc(roster_table_obj): - roster, engine = roster_table_obj - group_by = group.GroupBy( - ['Student Email'], - mode=group.GroupMode.DISTINCT.value, - preproc=['extract_email_domain'] - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - for metadata_field in group.GroupMetadataField - ] - ) - - -@pytest.mark.parametrize('preproc', ['extract_uri_authority', 'extract_uri_scheme']) -def test_smoke_get_group_augmented_records_pg_query_uris_preproc(uris_table_obj, preproc): - roster, engine = uris_table_obj - group_by = group.GroupBy( - ['uri'], - mode=group.GroupMode.DISTINCT.value, - preproc=[preproc] - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - for metadata_field in group.GroupMetadataField - ] - ) - - -def test_smoke_get_group_augmented_records_pg_query_extract(times_table_obj): - roster, engine = times_table_obj - group_by = group.GroupBy( - ['date'], - mode=group.GroupMode.EXTRACT.value, - extract_field='month', - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - for metadata_field in group.GroupMetadataField - ] - ) - - -datetime_trunc_tests_def = [ - ('date', 'truncate_to_year', 3, 4, {'1999': 1, '2010': 1, '2013': 4}), - ('timestamp', 'truncate_to_year', 3, 4, {'1999': 1, '1980': 1, '1981': 4}), - ( - 'date', 'truncate_to_month', 4, 7, - {'1999-01': 1, '2010-01': 1, '2013-01': 3, '2013-02': 1}, - ), ( - 'timestamp', 'truncate_to_month', 4, 7, - {'1999-01': 1, '1980-01': 1, '1981-01': 3, '1981-02': 1}, - ), ( - 'date', 'truncate_to_day', 6, 10, - { - '1999-01-08': 1, '2010-01-08': 1, '2013-01-08': 1, '2013-01-09': 1, - '2013-01-10': 1, '2013-02-08': 1, - } - ), ( - 'timestamp', 'truncate_to_day', 6, 10, - { - '1999-01-08': 1, '1980-01-08': 1, '1981-01-08': 1, '1981-01-09': 1, - '1981-01-10': 1, '1981-02-08': 1, - } - ), -] - - -@pytest.mark.parametrize('col,preproc,num,length,count_map', datetime_trunc_tests_def) -def test_get_group_augmented_records_pg_query_datetimes_preproc( - times_table_obj, col, preproc, num, length, count_map -): - roster, engine = times_table_obj - group_by = group.GroupBy( - [col], - mode=group.GroupMode.DISTINCT.value, - preproc=[preproc] - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: 
- res = conn.execute(augmented_pg_query).fetchall() - - assert max([_group_id(row) for row in res]) == num - for row in res: - assert row[col][:length] == _group_eq_value(row)[col] - assert count_map[_group_eq_value(row)[col]] == _group_count(row) - - -datetime_extract_tests_def = [ - ('date', 'year', 3), - ('timestamp', 'year', 3), - ('date', 'month', 2), - ('timestamp', 'month', 2), - ('date', 'day', 3), - ('timestamp', 'day', 3), -] - - -@pytest.mark.parametrize('col,field,num', datetime_extract_tests_def) -def test_get_group_augmented_records_pg_query_datetimes_extract( - times_table_obj, col, field, num -): - roster, engine = times_table_obj - group_by = group.GroupBy( - [col], mode=group.GroupMode.EXTRACT.value, extract_field=field - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - assert max([_group_id(row) for row in res]) == num - - -single_col_number_modes = [ - group.GroupMode.MAGNITUDE.value, - group.GroupMode.COUNT_BY.value, -] - - -@pytest.mark.parametrize('mode', single_col_number_modes) -def test_smoke_get_group_augmented_records_pg_query_magnitude(magnitude_table_obj, mode): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - ['big_num'], - mode=mode, - count_by=50, - global_min=0, - global_max=1000 - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - for row in res: - assert all( - [ - metadata_field.value in row[group.MATHESAR_GROUP_METADATA] - for metadata_field in group.GroupMetadataField - ] - ) - - -group_by_num_list = [ - ( - group.GroupBy( - ['Student Number', 'Student Email'], - mode=group.GroupMode.DISTINCT.value - ), - 259 - ), - ( - group.GroupBy( - ['Student Number', 'Student Email'], - mode=group.GroupMode.DISTINCT.value, - preproc=[None, 'extract_email_domain'] - ), - 259 - ), - ( - group.GroupBy( - ['Student Email'], - mode=group.GroupMode.DISTINCT.value, - preproc=['extract_email_domain'] - ), - 3 - ), - ( - group.GroupBy( - ['Student Number', 'Student Email'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=12, - ), - 12 - ), - ( - group.GroupBy( - ['Subject', 'Grade'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=12, - ), - 12 - ), - ( - group.GroupBy( - ['Subject', 'Grade'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=100, - ), - 100 - ), - ( - group.GroupBy( - ['Subject', 'Grade'], - mode=group.GroupMode.PERCENTILE.value, - num_groups=1500, - ), - 1500 - ), - ( - group.GroupBy( - ['Subject', 'Grade'], - mode=group.GroupMode.ENDPOINTS.value, - bound_tuples=[ - ('a', 50), ('f', 75), ('k', 25), ('p', 90), ('r', 100) - ] - ), - 4 - ), - ( - group.GroupBy( - ['Student Number'], - mode=group.GroupMode.PREFIX.value, - prefix_length=1 - ), - 16 - ), -] - - -@pytest.mark.parametrize('group_by,num', group_by_num_list) -def test_get_distinct_group_select_correct_num_group_id( - roster_table_obj, group_by, num -): - roster, engine = roster_table_obj - augmented_pg_query = group.get_group_augmented_records_pg_query(roster, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - assert max([_group_id(row) for row in res]) == num - - -magnitude_lt_zero = ['sm_num', 'sm_dbl'] -magnitude_gt_zero = ['id', 'big_num', 'big_int', 'pm_seq', 'tens_seq'] - - -magnitude_columns = magnitude_lt_zero + magnitude_gt_zero - -magnitude_max_group_ids = [30, 
87, 21, 85, 90, 21, 21] - - -@pytest.mark.parametrize('col_name,num', zip(magnitude_columns, magnitude_max_group_ids)) -def test_group_select_correct_num_group_id_magnitude( - magnitude_table_obj, col_name, num -): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - [col_name], - mode=group.GroupMode.MAGNITUDE.value, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - assert max([_group_id(row) for row in res]) == num - - -count_by_count_by = [0.000005, 0.00001, 7, 80.5, 750, 25, 100] -count_by_global_min = [0, 0, 0, -100, -4500, -100, 0] -count_by_global_max = [0.0003, 0.001, 250, 600, 5500, 100, 2000] -count_by_max_group_id = [59, 99, 29, 8, 13, 8, 20] - - -@pytest.mark.parametrize( - 'col_name,count_by,global_min,global_max,num', zip( - magnitude_columns, count_by_count_by, count_by_global_min, - count_by_global_max, count_by_max_group_id - ) -) -def test_group_select_correct_num_group_id_count_by( - magnitude_table_obj, col_name, count_by, global_min, global_max, num -): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - [col_name], - mode=group.GroupMode.COUNT_BY.value, - count_by=count_by, - global_min=global_min, - global_max=global_max, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - assert max([_group_id(row) for row in res]) == num - - -@pytest.mark.parametrize('col_name', magnitude_columns) -def test_magnitude_group_select_bounds_chain(magnitude_table_obj, col_name): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - [col_name], - mode=group.GroupMode.MAGNITUDE.value, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - for i in range(len(res) - 1): - assert ( - _group_lt_value(res[i])[col_name] <= _group_geq_value(res[i + 1])[col_name] - or ( - _group_lt_value(res[i]) == _group_lt_value(res[i + 1]) - and _group_geq_value(res[i]) == _group_geq_value(res[i + 1]) - ) - ) - - -@pytest.mark.parametrize('col_name', magnitude_columns) -def test_magnitude_group_select_bounds_pretty(magnitude_table_obj, col_name): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - [col_name], - mode=group.GroupMode.MAGNITUDE.value, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - for row in res: - assert ( - len(str(_group_lt_value(row)[col_name])) <= 7 - and len(str(_group_geq_value(row)[col_name])) <= 7 - ) - - -@pytest.mark.parametrize('col_name', magnitude_columns) -def test_magnitude_group_select_inside_bounds(magnitude_table_obj, col_name): - magnitude, engine = magnitude_table_obj - group_by = group.GroupBy( - [col_name], - mode=group.GroupMode.MAGNITUDE.value, - ) - augmented_pg_query = group.get_group_augmented_records_pg_query(magnitude, group_by) - with engine.begin() as conn: - res = conn.execute(augmented_pg_query).fetchall() - - for row in res: - assert ( - row[col_name] < _group_lt_value(row)[col_name] - and row[col_name] >= _group_geq_value(row)[col_name] - ) - - -def test_get_distinct_group_select_correct_first_last_row_match(roster_distinct_setup): - res = roster_distinct_setup - for row in res: - 
first_val = _group_first_val(row) - last_val = _group_last_val(row) - assert row['Student Number'] == first_val['Student Number'] - assert row['Student Email'] == first_val['Student Email'] - assert first_val == last_val - - -def test_get_distinct_group_select_groups_distinct(roster_distinct_setup): - res = roster_distinct_setup - group_member_tuples = { - (_group_id(row), row['Student Number'], row['Student Email']) for row in res - } - assert ( - len({tup[0] for tup in group_member_tuples}) - == len({(tup[1], tup[2]) for tup in group_member_tuples}) - == len(group_member_tuples) - ) - - -def test_get_percentile_range_group_first_last(roster_percentile_subj_grade_setup): - res = roster_percentile_subj_grade_setup - for row in res: - first_val = _group_first_val(row) - last_val = _group_last_val(row) - assert (first_val['Subject'], first_val['Grade']) <= (row['Subject'], row['Grade']) - assert (last_val['Subject'], last_val['Grade']) >= (row['Subject'], row['Grade']) - - -def test_get_percentile_range_group_groups_correct(roster_percentile_subj_grade_setup): - res = roster_percentile_subj_grade_setup - group_member_tuples = { - ( - _group_id(row), - _group_first_val(row)['Subject'], - _group_first_val(row)['Grade'], - _group_last_val(row)['Subject'], - _group_last_val(row)['Grade'], - ) - for row in res - } - assert ( - len({tup[0] for tup in group_member_tuples}) - == len({(tup[1], tup[2]) for tup in group_member_tuples}) - == len({(tup[3], tup[4]) for tup in group_member_tuples}) - == len({(tup[1], tup[2], tup[3], tup[4]) for tup in group_member_tuples}) - == len(group_member_tuples) - ) - - -def test_extract_group_metadata_correct_data(record_dictionary_list): - records, _ = group.extract_group_metadata( - record_dictionary_list, data_key='data', metadata_key='metadata' - ) - data_no_meta = [ - {k: v for k, v in rec['data'].items() if k != group.MATHESAR_GROUP_METADATA} - for rec in record_dictionary_list - ] - assert all( - [rec['data'] == expect for rec, expect in zip(records, data_no_meta)] - ) - - -def test_extract_group_metadata_correct_metadata(record_dictionary_list): - records, _ = group.extract_group_metadata( - record_dictionary_list, data_key='data', metadata_key='metadata' - ) - assert all( - [ - rec['metadata'][group.GroupMetadataField.GROUP_ID.value] == _group_id(orig['data']) - for rec, orig in zip(records, record_dictionary_list) - ] - ) - - -def test_extract_group_metadata_correct_groups(record_dictionary_list): - _, groups = group.extract_group_metadata( - record_dictionary_list, data_key='data', metadata_key='metadata' - ) - assert len(groups) == 2 - actual_ids = [ - gr_dict[group.GroupMetadataField.GROUP_ID.value] for gr_dict in groups - ] - expect_ids = [ - _group_id(rec['data']) for rec in record_dictionary_list - ] - assert set(actual_ids) == set(expect_ids) From c0f3a0a06da508fb440265d6b0a1ec39fed5f8b1 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 12:29:14 +0800 Subject: [PATCH 60/70] remove record calculation module, related tests --- db/records/operations/calculation.py | 48 ---------- db/tests/records/__init__.py | 0 db/tests/records/conftest.py | 14 --- db/tests/records/operations/__init__.py | 0 .../records/operations/test_calculation.py | 93 ------------------- 5 files changed, 155 deletions(-) delete mode 100644 db/records/operations/calculation.py delete mode 100644 db/tests/records/__init__.py delete mode 100644 db/tests/records/conftest.py delete mode 100644 db/tests/records/operations/__init__.py delete mode 100644 
db/tests/records/operations/test_calculation.py diff --git a/db/records/operations/calculation.py b/db/records/operations/calculation.py deleted file mode 100644 index acfb24be57..0000000000 --- a/db/records/operations/calculation.py +++ /dev/null @@ -1,48 +0,0 @@ -from sqlalchemy import func, literal, select, cast, INTEGER - - -def get_extrema_diff_select(selectable, column, output_label): - """ - This function creates a select statement composed of the given - selectable, with an additional column containing the difference - between the max and min of the given column on each row. The - given column needs to be in the given table. - """ - return select( - selectable, - (func.max(column).over() - func.min(column).over()).label(output_label) - ) - - -def get_offset_order_of_magnitude_select(selectable, column, output_label): - """ - This function returns a select statement composed of the given - selectable, with an additional column containing an integer p such - that p is maximal, subject to the constraint that 10**(p + 1) is - less than or equal to the value of the given column for that row. - - """ - return select( - selectable, - cast( - (func.floor(func.log(column)) - 1), INTEGER - ).label(output_label) - ) - - -def divide_by_power_of_ten_select(selectable, divisor_col, power_col, output_label): - """ - This function returns a select statement composed of the given - selectable, with an additional column containing an integer that is the next - after dividing the given divisor_col by 10**(the given power_col) for - each row. - """ - return select( - selectable, - cast( - func.floor( - divisor_col / func.pow(literal(10.0), power_col) - ), - INTEGER - ).label(output_label) - ) diff --git a/db/tests/records/__init__.py b/db/tests/records/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/records/conftest.py b/db/tests/records/conftest.py deleted file mode 100644 index 2cd185cc4d..0000000000 --- a/db/tests/records/conftest.py +++ /dev/null @@ -1,14 +0,0 @@ -import pytest - -from sqlalchemy import MetaData, Table - - -FILTER_SORT = "filter_sort" - - -@pytest.fixture -def filter_sort_table_obj(engine_with_filter_sort): - engine, schema = engine_with_filter_sort - metadata = MetaData(bind=engine) - roster = Table(FILTER_SORT, metadata, schema=schema, autoload_with=engine) - return roster, engine diff --git a/db/tests/records/operations/__init__.py b/db/tests/records/operations/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/records/operations/test_calculation.py b/db/tests/records/operations/test_calculation.py deleted file mode 100644 index 602fce8dac..0000000000 --- a/db/tests/records/operations/test_calculation.py +++ /dev/null @@ -1,93 +0,0 @@ -from decimal import Decimal -import pytest -from sqlalchemy import select - -from db.records.operations import calculation - - -CHECK_CTE_FLAG = False - - -magnitude_columns_test_list = [ - ('big_num', Decimal('997.195962329400'), 1, -3), - ('big_int', 9993, 2, 21), - ('sm_num', Decimal('0.0002982117006408827'), Decimal('-5'), 6), - ('sm_dbl', 0.0009893330872576484, -5.0, 12), - ('pm_seq', 199, 1, -10), - ('tens_seq', 1990, 2, 0), -] - - -@pytest.mark.parametrize( - 'colname,diff', - [(t[0], t[1]) for t in magnitude_columns_test_list] -) -def test_get_extrema_diff_select(magnitude_table_obj, colname, diff): - global CHECK_CTE_FLAG - magnitude, engine = magnitude_table_obj - sel = calculation.get_extrema_diff_select( - magnitude, magnitude.columns[colname], 'extrema_diff' 
- ) - if CHECK_CTE_FLAG: - sel = select(sel.cte()) - CHECK_CTE_FLAG = not CHECK_CTE_FLAG - with engine.begin() as conn: - res = conn.execute(sel).fetchone() - assert list(res.keys()) == [ - 'id', 'big_num', 'big_int', 'sm_num', 'sm_dbl', - 'pm_seq', 'tens_seq', 'extrema_diff' - ] - assert res['extrema_diff'] == diff - - -@pytest.mark.parametrize( - 'colname,power', - [(t[0], t[2]) for t in magnitude_columns_test_list] -) -def test_get_offset_order_of_magnitude_select(magnitude_table_obj, colname, power): - global CHECK_CTE_FLAG - magnitude, engine = magnitude_table_obj - extrema_cte = calculation.get_extrema_diff_select( - magnitude, magnitude.columns[colname], 'extrema_diff' - ).cte() - sel = calculation.get_offset_order_of_magnitude_select( - extrema_cte, extrema_cte.columns['extrema_diff'], 'power' - ) - if CHECK_CTE_FLAG: - sel = select(sel.cte()) - CHECK_CTE_FLAG = not CHECK_CTE_FLAG - with engine.begin() as conn: - res = conn.execute(sel).fetchone() - assert list(res.keys()) == [ - 'id', 'big_num', 'big_int', 'sm_num', 'sm_dbl', - 'pm_seq', 'tens_seq', 'extrema_diff', 'power' - ] - assert res['power'] == power - - -@pytest.mark.parametrize( - 'colname,raw_id', - [(t[0], t[3]) for t in magnitude_columns_test_list] -) -def test_divide_by_power_of_ten_select(magnitude_table_obj, colname, raw_id): - global CHECK_CTE_FLAG - magnitude, engine = magnitude_table_obj - extrema_cte = calculation.get_extrema_diff_select( - magnitude, magnitude.columns[colname], 'extrema_diff' - ).cte() - power_cte = calculation.get_offset_order_of_magnitude_select( - extrema_cte, extrema_cte.columns['extrema_diff'], 'power' - ).cte() - sel = calculation.divide_by_power_of_ten_select( - power_cte, power_cte.columns[colname], power_cte.columns['power'], 'raw_id' - ) - if CHECK_CTE_FLAG: - sel = select(sel.cte()) - CHECK_CTE_FLAG = not CHECK_CTE_FLAG - with engine.begin() as conn: - res = conn.execute(sel).fetchone() - assert list(res.keys()) == [ - 'id', 'big_num', 'big_int', 'sm_num', 'sm_dbl', - 'pm_seq', 'tens_seq', 'extrema_diff', 'power', 'raw_id', - ] - assert res['raw_id'] == raw_id From 47bab200d1eb71b09799c3a055bd46f11bb549e5 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 12:54:53 +0800 Subject: [PATCH 61/70] remove SQLAlchemy column alterer --- db/columns/operations/alter.py | 90 +--------------------------------- db/tests/conftest.py | 27 ---------- db/tests/types/conftest.py | 19 ------- db/tests/types/test_base.py | 82 ------------------------------- db/tests/types/test_email.py | 24 +-------- db/tests/types/test_uri.py | 24 +-------- 6 files changed, 3 insertions(+), 263 deletions(-) delete mode 100644 db/tests/types/test_base.py diff --git a/db/columns/operations/alter.py b/db/columns/operations/alter.py index de21b9961e..d3c9d0cfd7 100644 --- a/db/columns/operations/alter.py +++ b/db/columns/operations/alter.py @@ -1,95 +1,7 @@ import json -from psycopg.errors import ( - InvalidTextRepresentation, InvalidParameterValue, RaiseException, - SyntaxError -) + from db import connection as db_conn from db.columns.defaults import NAME, NULLABLE, DESCRIPTION -from db.columns.exceptions import InvalidDefaultError, InvalidTypeError, InvalidTypeOptionError - - -# TODO Remove; only used in testing -def alter_column(engine, table_oid, column_attnum, column_data, connection=None): - """ - Alter a column of the a table. - - Args: - engine: An SQLAlchemy engine defining the database connection string. - table_oid: integer giving the OID of the table with the column. 
- column_attnum: integer giving the attnum of the column to alter. - column_data: dictionary describing the alterations to make. - connection: A connection to use. Remove ASAP. - - column_data should have the form: - { - "type": - "type_options": , - "column_default_dict": {"is_dynamic": , "value": } - "nullable": , - "name": , - "description": - } - """ - column_alter_def = _process_column_alter_dict_dep(column_data, column_attnum) - requested_type = column_alter_def.get("type", {}).get("name") - if connection is None: - try: - db_conn.execute_msar_func_with_engine( - engine, 'alter_columns', - table_oid, - json.dumps([column_alter_def]) - ) - except InvalidParameterValue: - raise InvalidTypeOptionError - except InvalidTextRepresentation: - if column_alter_def.get('default') is None: - column_db_name = db_conn.execute_msar_func_with_engine( - engine, 'get_column_name', table_oid, column_attnum - ).fetchone()[0] - raise InvalidTypeError(column_db_name, requested_type) - else: - raise InvalidDefaultError - except RaiseException: - column_db_name = db_conn.execute_msar_func_with_engine( - engine, 'get_column_name', table_oid, column_attnum - ).fetchone()[0] - raise InvalidTypeError(column_db_name, requested_type) - except SyntaxError as e: - # TODO this except catch is too broad; syntax errors can be caused - # by many things, especially programmer errors during development. - # find a way to be more selective about what we call an invalid - # type option error. - raise InvalidTypeOptionError(e) - else: - db_conn.execute_msar_func_with_psycopg2_conn( - connection, 'alter_columns', - table_oid, - f"'{json.dumps([column_alter_def])}'" - ) - - -# TODO Remove; only used in testing -def alter_column_type( - table_oid, column_attnum, engine, connection, target_type, type_options=None -): - """ - Alter the type of a single column. - - Args: - table_oid: integer giving the OID of the table with the column. - column_attnum: integer giving the attnum of the column. - engine: SQLAlchemy engine defining the connection string for the DB. - connection: psycopg2 connection object. - target_type: PostgresType defining the type to alter to. - type_options: dict defining the options for the type to alter to. 
- """ - alter_column( - engine, - table_oid, - column_attnum, - {"type": target_type.id, "type_options": type_options}, - connection=connection - ) def alter_columns_in_table(table_oid, column_data_list, conn): diff --git a/db/tests/conftest.py b/db/tests/conftest.py index 8cfbf367bd..49768f65a1 100644 --- a/db/tests/conftest.py +++ b/db/tests/conftest.py @@ -4,10 +4,6 @@ from sqlalchemy import MetaData, text, Table from db import constants -from db.columns.operations.select import get_column_attnum_from_name -from db.tables.operations.select import get_oid_from_table -from db.types.base import MathesarCustomType -from db.columns.operations.alter import alter_column_type FILE_DIR = os.path.abspath(os.path.dirname(__file__)) RESOURCES = os.path.join(FILE_DIR, "resources") @@ -239,29 +235,6 @@ def json_table_obj(engine_with_json, json_table_name): return table, engine -@pytest.fixture -def uris_table_obj(engine_with_uris, uris_table_name): - engine, schema = engine_with_uris - metadata = MetaData(bind=engine) - table = Table(uris_table_name, metadata, schema=schema, autoload_with=engine) - # Cast "uri" column from string to URI - with engine.begin() as conn: - uri_column_name = "uri" - uri_type = MathesarCustomType.URI - table_oid = get_oid_from_table(table.name, schema, engine) - uri_column_attnum = get_column_attnum_from_name( - table_oid, uri_column_name, engine, metadata - ) - alter_column_type( - table_oid, - uri_column_attnum, - engine, - conn, - uri_type, - ) - yield table, engine - - @pytest.fixture def books_table_import_from_obj(engine_with_books_to_import_from, books_import_from_table_name): engine, schema = engine_with_books_to_import_from diff --git a/db/tests/types/conftest.py b/db/tests/types/conftest.py index bb11cc6a23..ebd4ae4534 100644 --- a/db/tests/types/conftest.py +++ b/db/tests/types/conftest.py @@ -1,9 +1,5 @@ import pytest from sqlalchemy import MetaData, Table -from db.tables.operations.select import get_oid_from_table -from db.types.base import MathesarCustomType -from db.columns.operations.alter import alter_column_type -from db.columns.operations.select import get_column_attnum_from_name @pytest.fixture @@ -11,19 +7,4 @@ def roster_table_obj(engine_with_roster, roster_table_name): engine, schema = engine_with_roster metadata = MetaData(bind=engine) table = Table(roster_table_name, metadata, schema=schema, autoload_with=engine) - # Cast "Teacher Email" column from string to Email - with engine.begin() as conn: - email_column_name = "Teacher Email" - email_type = MathesarCustomType.EMAIL - table_oid = get_oid_from_table(table.name, schema, engine) - email_column_attnum = get_column_attnum_from_name( - table_oid, email_column_name, engine, metadata - ) - alter_column_type( - table_oid, - email_column_attnum, - engine, - conn, - email_type, - ) yield table, engine diff --git a/db/tests/types/test_base.py b/db/tests/types/test_base.py deleted file mode 100644 index 60a9dc08ff..0000000000 --- a/db/tests/types/test_base.py +++ /dev/null @@ -1,82 +0,0 @@ -import pytest - -from sqlalchemy import select - -from db.functions.base import ArrayAgg -from db.tables.operations.select import reflect_table -from db.transforms.base import Summarize - - -@pytest.fixture -def up_to_date_uris_table_obj(uris_table_obj): - uris_table, engine = uris_table_obj - # Apparently we need to reflect to have up-to-date column type - uris_table = reflect_table( - name=uris_table.name, - schema=uris_table.schema, - engine=engine, - metadata=uris_table.metadata - ) - return uris_table, engine - - 
-@pytest.mark.skip(reason="produces weird breakage") -def test_custom_type_aggregation(up_to_date_uris_table_obj): - """ - Our custom types can break during array_agg (ArrayAgg) with output looking something like: - - `['{', 'h', 't', 't', 'p', ':', '/', '/', 's', 'o', ...]` - - This is meant to test that that doesn't happen. - """ - uris_table, engine = up_to_date_uris_table_obj - uri_col = uris_table.c.uri - uri_col_name = uri_col.name - id_col = uris_table.c.id - id_col_name = id_col.name - spec = dict( - base_grouping_column=id_col_name, - grouping_expressions=[ - dict( - input_alias=id_col_name, - output_alias=id_col_name + "grouped", - preproc=None, - ), - ], - aggregation_expressions=[ - dict( - input_alias=uri_col_name, - output_alias=uri_col_name + "agged", - function=ArrayAgg.id, - ) - ] - ) - summarize_transform = Summarize(spec=spec) - non_executable = summarize_transform.apply_to_relation(uris_table) - executable = select(non_executable) - records = list(engine.connect().execute(executable)) - result_uris = set(record[1][0] for record in records) - expected_records = [ - (11, ['http://tweetphoto.com/31332311']), - (8, ['http://soundcloud.com/dj-soro']), - (19, ['ftps://asldp.com/158915']), - (4, ['http://imgur.com/M2v2H.png']), - (14, ['http://yfrog.com/msradon2p']), - (3, ['http://banedon.posterous.com/bauforstschritt-2262010']), - (17, ['http://tumblr.com/x4acyiuxf']), - (20, ['ftp://abcdefg.com/x-y-z']), - (13, ['http://yfrog.com/j6cimg3038gj']), - (10, ['http://www.flickr.com/photos/jocke66/4657443374/']), - (9, ['http://i.imgur.com/H6yyu.jpg']), - (7, ['http://tweetphoto.com/31103212']), - (1, ['http://soundcloud.com/denzo-1/denzo-in-mix-0knackpunkt-nr-15-0-electro-swing']), - (5, ['http://tweetphoto.com/31300678']), - (18, ['ftp://foobar.com/179179']), - (2, ['http://picasaweb.google.com/lh/photo/94RGMDCSTmCW04l6SPnteTBPFtERcSvqpRI6vP3N6YI?feat=embedwebsite']), - (16, ['http://soundcloud.com/strawberryhaze/this-is-my-house-in-summer-2010']), - (15, ['http://soundcloud.com/hedo/hedo-der-groove-junger-knospen']), - (6, ['http://www.youtube.com/watch?v=zXLGHyGxY2E']), - (12, ['http://tweetphoto.com/31421017']) - ] - expected_uris = set(expected_record[1][0] for expected_record in expected_records) - assert result_uris == expected_uris diff --git a/db/tests/types/test_email.py b/db/tests/types/test_email.py index 62454cbef8..00850088ac 100644 --- a/db/tests/types/test_email.py +++ b/db/tests/types/test_email.py @@ -4,10 +4,7 @@ from sqlalchemy.exc import IntegrityError from db.types.custom import email from db.types.base import PostgresType -from db.utils import execute_pg_query -from db.functions.base import ColumnName, Literal, sa_call_sql_function -from db.functions.packed import EmailDomainContains, EmailDomainEquals -from db.functions.operations.apply import apply_db_function_as_filter +from db.functions.base import sa_call_sql_function def test_domain_func_wrapper(engine_with_schema): @@ -102,22 +99,3 @@ def test_create_email_type_domain_checks_broken_emails(engine_with_schema): ) ) assert type(e.orig) is CheckViolation - - -@pytest.mark.parametrize("main_db_function,literal_param,expected_count", [ - (EmailDomainContains, "mail", 588), - (EmailDomainEquals, "gmail.com", 303), - (EmailDomainContains, "krista", 0), - (EmailDomainEquals, "kristaramirez@yahoo.com", 0), -]) -def test_email_db_functions(roster_table_obj, main_db_function, literal_param, expected_count): - table, engine = roster_table_obj - selectable = table.select() - email_column_name = "Teacher Email" - 
db_function = main_db_function([ - ColumnName([email_column_name]), - Literal([literal_param]), - ]) - query = apply_db_function_as_filter(selectable, db_function) - record_list = execute_pg_query(engine, query) - assert len(record_list) == expected_count diff --git a/db/tests/types/test_uri.py b/db/tests/types/test_uri.py index d384914ff0..c6da702b5a 100644 --- a/db/tests/types/test_uri.py +++ b/db/tests/types/test_uri.py @@ -3,10 +3,7 @@ from sqlalchemy import text, select, Table, MetaData, Column from sqlalchemy.exc import IntegrityError from db.types.custom import uri -from db.utils import execute_pg_query -from db.functions.base import ColumnName, Contains, Literal, sa_call_sql_function -from db.functions.packed import URIAuthorityContains, URISchemeEquals -from db.functions.operations.apply import apply_db_function_as_filter +from db.functions.base import sa_call_sql_function from db.types.base import PostgresType @@ -225,22 +222,3 @@ def test_uri_type_domain_rejects_malformed_uris(engine_with_schema, test_str): with engine.begin() as conn: conn.execute(text(f"SELECT '{test_str}'::{uri.DB_TYPE}")) assert type(e.orig) is CheckViolation - - -@pytest.mark.parametrize("main_db_function,literal_param,expected_count", [ - (URIAuthorityContains, "soundcloud", 4), - (URIAuthorityContains, "http", 0), - (URISchemeEquals, "ftp", 2), - (Contains, ".com/31421017", 1), -]) -def test_uri_db_functions(uris_table_obj, main_db_function, literal_param, expected_count): - table, engine = uris_table_obj - selectable = table.select() - uris_column_name = "uri" - db_function = main_db_function([ - ColumnName([uris_column_name]), - Literal([literal_param]), - ]) - query = apply_db_function_as_filter(selectable, db_function) - record_list = execute_pg_query(engine, query) - assert len(record_list) == expected_count From ad6ca2e71ad565727ce848b4db5278d5de4ee66f Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 13:18:29 +0800 Subject: [PATCH 62/70] deal with and update TODOs in db module --- db/columns/base.py | 5 +- db/columns/operations/alter.py | 65 ----------------------- db/columns/operations/select.py | 1 + db/columns/utils.py | 1 - db/engine.py | 2 - db/functions/operations/apply.py | 3 -- db/queries/base.py | 1 - db/tests/queries/test_initial_relation.py | 2 - db/tests/tables/operations/test_select.py | 2 - db/transforms/base.py | 1 - db/types/custom/money.py | 1 - db/utils.py | 1 - 12 files changed, 2 insertions(+), 83 deletions(-) diff --git a/db/columns/base.py b/db/columns/base.py index fe72e78832..7291716fee 100644 --- a/db/columns/base.py +++ b/db/columns/base.py @@ -1,3 +1,4 @@ +# TODO Remove this file once explorations are in the database from sqlalchemy import Column, ForeignKey, inspect from db.columns.operations.select import get_column_attnum_from_name @@ -5,8 +6,6 @@ from db.types.operations.convert import get_db_type_enum_from_class -# TODO consider renaming to DbColumn or DatabaseColumn -# We are attempting to reserve the term Mathesar for types in the mathesar namespace. class MathesarColumn(Column): """ This class constrains the possible arguments, enabling us to include @@ -131,8 +130,6 @@ def column_attnum(self): """ engine_exists = self.engine is not None table_exists = self.table_ is not None - # TODO are we checking here that the table exists on the database? explain why we have to do - # that. 
engine_has_table = inspect(self.engine).has_table( self.table_.name, schema=self.table_.schema, diff --git a/db/columns/operations/alter.py b/db/columns/operations/alter.py index d3c9d0cfd7..184e29a78c 100644 --- a/db/columns/operations/alter.py +++ b/db/columns/operations/alter.py @@ -75,68 +75,3 @@ def _transform_column_alter_dict(data): alter_def.update(default=default_dict["value"]) return alter_def - - -# TODO This function is deprecated. Remove it when possible. -def _process_column_alter_dict_dep(column_data, column_attnum=None): - """ - Transform the column_data dict into the form needed for the DB functions. - - Input column_data form: - { - "type": - "type_options": , - "column_default_dict": {"is_dynamic": , "value": } - "nullable": , - "name": , - "delete": , - "description": - } - - Output form: - { - "type": {"name": , "options": }, - "name": , - "not_null": , - "default": , - "delete": , - "description": - } - - Note that keys with empty values will be dropped, unless the given "default" - key is explicitly set to None. - """ - DEFAULT_DICT = 'column_default_dict' - DEFAULT_KEY = 'value' - - column_type = { - "name": column_data.get('type'), - "options": column_data.get('type_options') - } - new_type = {k: v for k, v in column_type.items() if v} or None - column_nullable = column_data.get(NULLABLE) - column_delete = column_data.get("delete") - column_not_null = not column_nullable if column_nullable is not None else None - column_name = (column_data.get(NAME) or '').strip() or None - raw_col_alter_def = { - "attnum": column_attnum or column_data.get("attnum") or column_data.get("id"), - "type": new_type, - "not_null": column_not_null, - "name": column_name, - "delete": column_delete, - } - col_alter_def = {k: v for k, v in raw_col_alter_def.items() if v is not None} - # NOTE DESCRIPTION is set separately, because it shouldn't be removed if its - # value is None (that signals that the description should be removed in the - # db). - if DESCRIPTION in column_data: - column_description = column_data.get(DESCRIPTION) - col_alter_def[DESCRIPTION] = column_description - default_dict = column_data.get(DEFAULT_DICT, {}) - if default_dict is not None and DEFAULT_KEY in default_dict: - default_value = column_data.get(DEFAULT_DICT, {}).get(DEFAULT_KEY) - col_alter_def.update(default=default_value) - elif default_dict is None: - col_alter_def.update(default=None) - - return col_alter_def diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py index 18b1206041..427f005c07 100644 --- a/db/columns/operations/select.py +++ b/db/columns/operations/select.py @@ -1,3 +1,4 @@ +# TODO Move SQLAlchemy-base column attribute getters to separate module from sqlalchemy import and_, asc, select from db.connection import exec_msar_func diff --git a/db/columns/utils.py b/db/columns/utils.py index 210f981a64..f2b3be6b5a 100644 --- a/db/columns/utils.py +++ b/db/columns/utils.py @@ -12,7 +12,6 @@ def get_column_obj_from_relation(relation, column): return column -# TODO deal with quotes; still better than the default def find_column_by_name_in_relation(relation, col_name_string): """ Because we may have to look for the column by a name with an diff --git a/db/engine.py b/db/engine.py index f2bc51daa2..04b16b6427 100644 --- a/db/engine.py +++ b/db/engine.py @@ -18,7 +18,6 @@ def create_future_engine_with_custom_types( return engine -# TODO would an engine without ischema names updated ever be used? 
make it private if not def create_future_engine( username, password, hostname, database, port, *args, **kwargs ): @@ -46,7 +45,6 @@ def create_engine(conn_url, *args, **kwargs): return engine -# TODO should refactor for this to be private def add_custom_types_to_ischema_names(engine): """ Updating the ischema_names dict changes which Postgres types are reflected into which SA diff --git a/db/functions/operations/apply.py b/db/functions/operations/apply.py index 08c51a53ab..584bf2c421 100644 --- a/db/functions/operations/apply.py +++ b/db/functions/operations/apply.py @@ -61,9 +61,6 @@ def _db_function_to_sa_expression(db_function_or_literal): for raw_parameter in raw_parameters ] db_function_subclass = type(db_function) - # TODO do we need to keep to_sa_expression as a static method? - # TODO maybe make it an instance method and then rewrite DBFunctionPacked.to_sa_expression - # to call to_sa_expression on result of self.unpack(). sa_expression = db_function_subclass.to_sa_expression(*sa_expression_parameters) return sa_expression else: diff --git a/db/queries/base.py b/db/queries/base.py index 817b4a1ad9..7970dcd7ff 100644 --- a/db/queries/base.py +++ b/db/queries/base.py @@ -264,7 +264,6 @@ def _process_initial_column(initial_col): def get_input_alias_for_output_alias(self, output_alias): return self.map_of_output_alias_to_input_alias.get(output_alias) - # TODO consider caching; not urgent, since redundant calls don't trigger IO, it seems @property def map_of_output_alias_to_input_alias(self): m = dict() diff --git a/db/tests/queries/test_initial_relation.py b/db/tests/queries/test_initial_relation.py index 07fd0956bd..f80daae29a 100644 --- a/db/tests/queries/test_initial_relation.py +++ b/db/tests/queries/test_initial_relation.py @@ -42,8 +42,6 @@ def test_shallow_link(shallow_link_dbquery): assert records == [(1, 'uni1'), (2, 'uni1'), (3, 'uni2')] -# TODO determine why this is failing when all run, but not the individual file -@pytest.mark.skipif def test_deep_link(engine_with_academics): engine, schema = engine_with_academics art_oid = get_oid_from_table("articles", schema, engine) diff --git a/db/tests/tables/operations/test_select.py b/db/tests/tables/operations/test_select.py index 36c0cc8ec9..3ab1aa89b3 100644 --- a/db/tests/tables/operations/test_select.py +++ b/db/tests/tables/operations/test_select.py @@ -207,8 +207,6 @@ def _get_expect_joinable_tables(base, depth): ] -# TODO Figure out how to test fkey paths - @pytest.mark.parametrize('table,depth', JOINABLE_TABLES_PARAMS) def test_get_joinable_tables_query_paths(engine_with_academics, table, depth): engine, schema = engine_with_academics diff --git a/db/transforms/base.py b/db/transforms/base.py index 60ed086fde..d86591d225 100644 --- a/db/transforms/base.py +++ b/db/transforms/base.py @@ -66,7 +66,6 @@ def __eq__(self, other): and self.__dict__ == other.__dict__ ) - # TODO refactor to use `get_unique_constraint_mappings` @property def map_of_output_alias_to_input_alias(self): """ diff --git a/db/types/custom/money.py b/db/types/custom/money.py index 44ffe6da89..07e8300d42 100644 --- a/db/types/custom/money.py +++ b/db/types/custom/money.py @@ -32,7 +32,6 @@ def install(engine): def get_money_array_select_statement(table_oid, engine, column_attnum): - # TODO reuse metadata metadata = get_empty_metadata() table = reflect_table_from_oid(table_oid, engine, metadata=metadata) column_name = get_column_name_from_attnum(table_oid, column_attnum, engine, metadata=metadata) diff --git a/db/utils.py b/db/utils.py index 
943b16fe8e..f4d8ff52f4 100644 --- a/db/utils.py +++ b/db/utils.py @@ -32,7 +32,6 @@ def execute_pg_query(engine, query, connection_to_use=None): return execute_statement(engine, executable, connection_to_use=connection_to_use).fetchall() -# TODO refactor to use @functools.total_ordering class OrderByIds: """ A mixin for ordering based on ids; useful at least for type enums in testing. From 34e2f532e97d1f0b0a971ccea84244ae7d8b89b4 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 13:27:52 +0800 Subject: [PATCH 63/70] remove unneeded code from db.types.base --- db/columns/operations/alter.py | 2 +- db/tests/columns/operations/test_create.py | 15 --- db/tests/queries/test_initial_relation.py | 2 - db/types/base.py | 113 +-------------------- 4 files changed, 2 insertions(+), 130 deletions(-) diff --git a/db/columns/operations/alter.py b/db/columns/operations/alter.py index 184e29a78c..8addcaa0ce 100644 --- a/db/columns/operations/alter.py +++ b/db/columns/operations/alter.py @@ -1,7 +1,7 @@ import json from db import connection as db_conn -from db.columns.defaults import NAME, NULLABLE, DESCRIPTION +from db.columns.defaults import NAME, NULLABLE def alter_columns_in_table(table_oid, column_data_list, conn): diff --git a/db/tests/columns/operations/test_create.py b/db/tests/columns/operations/test_create.py index fa8d4849b1..d7eb8df58e 100644 --- a/db/tests/columns/operations/test_create.py +++ b/db/tests/columns/operations/test_create.py @@ -3,21 +3,6 @@ import pytest import db.columns.operations.create as col_create -from db.types.base import get_available_known_db_types, known_db_types - - -def test_type_list_completeness(engine): - """ - Ensure that unavailable types are unavailable for a good reason. - """ - actual_supported_db_types = get_available_known_db_types(engine) - unavailable_types = set.difference(set(known_db_types), set(actual_supported_db_types)) - for db_type in unavailable_types: - assert ( - db_type.is_inconsistent - or db_type.is_optional - or db_type.is_sa_only - ) @pytest.mark.parametrize( diff --git a/db/tests/queries/test_initial_relation.py b/db/tests/queries/test_initial_relation.py index f80daae29a..76ddd86ee8 100644 --- a/db/tests/queries/test_initial_relation.py +++ b/db/tests/queries/test_initial_relation.py @@ -3,8 +3,6 @@ # Initial columns is an ordered set of columns sourced either from the base table, or from linked # tables. -import pytest - from db.columns.operations.select import get_column_attnum_from_name as get_attnum from db.tables.operations.select import get_oid_from_table from db.queries.base import DBQuery, InitialColumn, JoinParameter diff --git a/db/types/base.py b/db/types/base.py index 06e4290d1a..dced38651c 100644 --- a/db/types/base.py +++ b/db/types/base.py @@ -1,6 +1,6 @@ from enum import Enum -from sqlalchemy import text, create_engine as sa_create_engine +from sqlalchemy import create_engine as sa_create_engine from db.constants import TYPES_SCHEMA from db.utils import OrderByIds @@ -24,46 +24,6 @@ def get_sa_class(self, engine): ischema_names = engine.dialect.ischema_names return ischema_names.get(self.id) - def is_available(self, engine, type_ids_on_database=None): - """ - Returns true if this type is available on provided engine's database. For the sake of - optimizing IO, the result of get_type_ids_on_database(engine) may be passed as the - type_ids_on_database parameter. 
- """ - if type_ids_on_database is None: - type_ids_on_database = get_type_ids_on_database(engine) - is_type_in_database = self.id in type_ids_on_database - return is_type_in_database - - def get_sa_instance_compiled(self, engine, type_options=None): - if type_options is None: - type_options = {} - sa_class = self.get_sa_class(engine) - if sa_class: - dialect = engine.dialect - instance = sa_class(**type_options) - return instance.compile(dialect=dialect) - - @property - def is_sa_only(self): - """ - A column can be reflected to have an SQLAlchemy type that does not represent an actual - Postgres type. - """ - return self in _sa_only_db_types - - @property - def is_optional(self): - """ - Some types are official, but optional in that they may or may not be installed on a given - Postgres database. - """ - return self in _optional_db_types - - @property - def is_inconsistent(self): - return self in _inconsistent_db_types - @property def is_ignored(self): """ @@ -74,14 +34,6 @@ def is_ignored(self): """ return self in _inconsistent_db_types - @property - def is_reflection_supported(self): - return not self.is_inconsistent - - @property - def is_application_supported(self): - return not self.is_inconsistent and not self.is_sa_only - def __str__(self): return self.id @@ -194,66 +146,3 @@ def __new__(cls, unqualified_id): PostgresType.BIT_VARYING, }), ) - - -_sa_only_db_types = frozenset({ - PostgresType._ARRAY, -}) - - -_optional_db_types = frozenset({ - PostgresType.HSTORE, -}) - - -_known_vanilla_db_types = frozenset(postgres_type for postgres_type in PostgresType) - - -_known_custom_db_types = frozenset(mathesar_custom_type for mathesar_custom_type in MathesarCustomType) - - -# Known database types are those that are defined on our PostgresType and MathesarCustomType Enums. -known_db_types = frozenset.union(_known_vanilla_db_types, _known_custom_db_types) - - -# TODO improve name; currently its weird names serves to distinguish it from similarly named -# methods throughout the codebase; should be renamed at earliest convenience. -def get_available_known_db_types(engine): - """ - Returns a tuple of DatabaseType instances that are not ignored and are available on provided - engine. - """ - type_ids_on_database = get_type_ids_on_database(engine) - return tuple( - db_type - for db_type in known_db_types - if ( - not db_type.is_ignored - and db_type.is_available( - engine, - type_ids_on_database=type_ids_on_database, - ) - ) - ) - - -def get_type_ids_on_database(engine): - """ - Returns db type ids available on the database. - """ - # Adapted from the SQL expression produced by typing `\dT *` in psql. 
- select_statement = text( - "SELECT\n" - " pg_catalog.format_type(t.oid, NULL) AS \"Name\"\n" - " FROM pg_catalog.pg_type t\n" - " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n" - " WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))\n" - " AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid);" - ) - with engine.connect() as connection: - db_type_ids = frozenset( - db_type_id - for db_type_id, - in connection.execute(select_statement) - ) - return db_type_ids From ad651b6cf65dcbcba9901700fb432bc22dcf35d5 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 14:10:55 +0800 Subject: [PATCH 64/70] remove SQLAlchemy get_joinable_tables function --- db/tables/operations/select.py | 186 +------------- db/tests/tables/operations/test_select.py | 230 ------------------ db/transforms/operations/finish_specifying.py | 10 +- 3 files changed, 8 insertions(+), 418 deletions(-) diff --git a/db/tables/operations/select.py b/db/tables/operations/select.py index fea5b168b4..588f242eb2 100644 --- a/db/tables/operations/select.py +++ b/db/tables/operations/select.py @@ -1,7 +1,6 @@ from sqlalchemy import ( - Table, select, join, inspect, and_, cast, func, Integer, literal, or_, + Table, select, join, inspect, ) -from sqlalchemy.dialects.postgresql import JSONB from db.connection import exec_msar_func from db.utils import execute_statement, get_pg_catalog_table @@ -121,189 +120,6 @@ def get_map_of_table_oid_to_schema_name_and_table_name( return table_oids_to_schema_names_and_table_names -def get_table_oids_from_schemas(schema_oids, engine, metadata): - pg_class = get_pg_catalog_table("pg_class", engine, metadata) - sel = ( - select(pg_class.c.oid, pg_class.c.relnamespace.label('schema_oid')) - .where( - and_(pg_class.c.relkind == 'r', pg_class.c.relnamespace.in_(schema_oids)) - ) - ) - with engine.begin() as conn: - table_oids = conn.execute(sel).fetchall() - return table_oids - - def get_oid_from_table(name, schema, engine): inspector = inspect(engine) return inspector.get_table_oid(name, schema=schema) - - -def get_table_description(oid, engine): - with engine.begin() as conn: - res = conn.execute(select(func.obj_description(oid, 'pg_class'))) - return res.fetchone()[0] - - -def get_joinable_tables( - engine, metadata, base_table_oid=None, max_depth=3, limit=None, offset=None -): - """ - Output is a collection of `sqlalchemy.engine.Row`[0]. `Row` "seeks to act as much like a - Python named tuple as possible". Look ups can be done via integer indexes, string indexes or - attributes (e.g. `joinable_table[TARGET]. Inspect code to know what the exact keys are, but - their order and semantics is: `(base table oid, target table oid, join parameter path, - foreign key path, depth, multiple results boolean flag). 
- - [0] https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.Row - """ - FK_OID = 'fk_oid' - LEFT_REL = 'left_rel' - RIGHT_REL = 'right_rel' - LEFT_COL = 'left_col' - RIGHT_COL = 'right_col' - - SYMMETRIC_FKEYS = 'symmetric_fkeys' - SEARCH_FKEY_GRAPH = 'search_fkey_graph' - OUTPUT_CTE = 'output_cte' - - jba = func.jsonb_build_array - - pg_constraint = get_pg_catalog_table("pg_constraint", engine, metadata=metadata) - - symmetric_fkeys = select( - cast(pg_constraint.c.oid, Integer).label(FK_OID), - cast(pg_constraint.c.conrelid, Integer).label(LEFT_REL), - cast(pg_constraint.c.confrelid, Integer).label(RIGHT_REL), - cast(pg_constraint.c.conkey[1], Integer).label(LEFT_COL), - cast(pg_constraint.c.confkey[1], Integer).label(RIGHT_COL), - literal(False).label(MULTIPLE_RESULTS), - literal(False).label(REVERSE), - ).where( - and_( - pg_constraint.c.contype == 'f', - func.array_length(pg_constraint.c.conkey, 1) == 1 - ) - ).union_all( - select( - cast(pg_constraint.c.oid, Integer).label(FK_OID), - cast(pg_constraint.c.confrelid, Integer).label(LEFT_REL), - cast(pg_constraint.c.conrelid, Integer).label(RIGHT_REL), - cast(pg_constraint.c.confkey[1], Integer).label(LEFT_COL), - cast(pg_constraint.c.conkey[1], Integer).label(RIGHT_COL), - literal(True).label(MULTIPLE_RESULTS), - literal(True).label(REVERSE), - ).where( - and_( - pg_constraint.c.contype == 'f', - func.array_length(pg_constraint.c.conkey, 1) == 1 - ) - ) - ).cte(name=SYMMETRIC_FKEYS) - - search_fkey_graph = select( - symmetric_fkeys.columns[LEFT_REL], - symmetric_fkeys.columns[RIGHT_REL], - symmetric_fkeys.columns[LEFT_COL], - symmetric_fkeys.columns[RIGHT_COL], - literal(1).label(DEPTH), - cast( - jba( - jba( - jba( - symmetric_fkeys.columns[LEFT_REL], - symmetric_fkeys.columns[LEFT_COL] - ), - jba( - symmetric_fkeys.columns[RIGHT_REL], - symmetric_fkeys.columns[RIGHT_COL] - ), - ) - ), - JSONB - ).label(JP_PATH), - cast( - jba( - jba( - symmetric_fkeys.columns[FK_OID], - symmetric_fkeys.columns[REVERSE], - ) - ), - JSONB - ).label(FK_PATH), - symmetric_fkeys.columns[MULTIPLE_RESULTS], - ).cte(name=SEARCH_FKEY_GRAPH, recursive=True) - - search_fkey_graph = search_fkey_graph.union_all( - select( - symmetric_fkeys.columns[LEFT_REL], - symmetric_fkeys.columns[RIGHT_REL], - symmetric_fkeys.columns[LEFT_COL], - symmetric_fkeys.columns[RIGHT_COL], - search_fkey_graph.columns[DEPTH] + 1, - search_fkey_graph.columns[JP_PATH] + cast( - jba( - jba( - jba( - symmetric_fkeys.columns[LEFT_REL], - symmetric_fkeys.columns[LEFT_COL] - ), - jba( - symmetric_fkeys.columns[RIGHT_REL], - symmetric_fkeys.columns[RIGHT_COL] - ), - ) - ), - JSONB - ), - search_fkey_graph.columns[FK_PATH] + cast( - jba( - jba( - symmetric_fkeys.columns[FK_OID], - symmetric_fkeys.columns[REVERSE], - ) - ), - JSONB - ), - or_( - search_fkey_graph.columns[MULTIPLE_RESULTS], - symmetric_fkeys.columns[MULTIPLE_RESULTS] - ) - ).where( - and_( - symmetric_fkeys.columns[LEFT_REL] == search_fkey_graph.columns[RIGHT_REL], - search_fkey_graph.columns[DEPTH] < max_depth, - search_fkey_graph.columns[JP_PATH][-1] != jba( - jba( - symmetric_fkeys.columns[RIGHT_REL], - symmetric_fkeys.columns[RIGHT_COL] - ), - jba( - symmetric_fkeys.columns[LEFT_REL], - symmetric_fkeys.columns[LEFT_COL] - ), - ) - ) - ) - ) - - output_cte = select( - cast(search_fkey_graph.columns[JP_PATH][0][0][0], Integer).label(BASE), - cast(search_fkey_graph.columns[JP_PATH][-1][-1][0], Integer).label(TARGET), - search_fkey_graph.columns[JP_PATH].label(JP_PATH), - 
search_fkey_graph.columns[FK_PATH].label(FK_PATH), - search_fkey_graph.columns[DEPTH].label(DEPTH), - search_fkey_graph.columns[MULTIPLE_RESULTS].label(MULTIPLE_RESULTS) - ).cte(name=OUTPUT_CTE) - - if base_table_oid is not None: - final_sel = select(output_cte).where( - output_cte.columns[BASE] == base_table_oid - ) - else: - final_sel = select(output_cte) - - with engine.begin() as conn: - results = conn.execute(final_sel.limit(limit).offset(offset)).fetchall() - - return results diff --git a/db/tests/tables/operations/test_select.py b/db/tests/tables/operations/test_select.py index 3ab1aa89b3..936def03b7 100644 --- a/db/tests/tables/operations/test_select.py +++ b/db/tests/tables/operations/test_select.py @@ -1,38 +1,5 @@ -import sys from unittest.mock import patch -from sqlalchemy import text -from db.columns.operations.select import get_column_name_from_attnum from db.tables.operations import select as ma_sel -import pytest -from db.metadata import get_empty_metadata - -sys.stdout = sys.stderr - -# Table names -ACADEMICS = 'academics' -ARTICLES = 'articles' -JOURNALS = 'journals' -PUBLISHERS = 'publishers' -UNIVERSITIES = 'universities' - -# Column names -ID = 'id' -NAME = 'name' -INSTITUTION = 'institution' -ADVISOR = 'advisor' -TITLE = 'title' -PUBLISHER = 'publisher' -PRIMARY_AUTHOR = 'primary_author' -SECONDARY_AUTHOR = 'secondary_author' -JOURNAL = 'journal' - -# joinable tables results columns -BASE = ma_sel.BASE -DEPTH = ma_sel.DEPTH -JP_PATH = ma_sel.JP_PATH -FK_PATH = ma_sel.FK_PATH -TARGET = ma_sel.TARGET -MULTIPLE_RESULTS = ma_sel.MULTIPLE_RESULTS def test_get_table_info(): @@ -41,200 +8,3 @@ def test_get_table_info(): result = ma_sel.get_table_info('schema', 'conn') mock_exec.assert_called_once_with('conn', 'get_table_info', 'schema') assert result == 'a' - - -def _transform_row_to_names(row, engine): - metadata = get_empty_metadata() - output_dict = { - BASE: ma_sel.reflect_table_from_oid(row[BASE], engine, metadata=metadata).name, - TARGET: ma_sel.reflect_table_from_oid(row[TARGET], engine, metadata=metadata).name, - JP_PATH: [ - [ - [ - ma_sel.reflect_table_from_oid(oid, engine, metadata=metadata).name, - get_column_name_from_attnum(oid, attnum, engine, metadata=metadata) - ] - for oid, attnum in edge - ] - for edge in row[JP_PATH] - ], - FK_PATH: None, - DEPTH: row[DEPTH], - MULTIPLE_RESULTS: row[MULTIPLE_RESULTS] - } - return output_dict - - -L1_JOINABLE_TABLES_DICT = { - ACADEMICS: [ - { - BASE: ACADEMICS, - TARGET: UNIVERSITIES, - JP_PATH: [[[ACADEMICS, INSTITUTION], [UNIVERSITIES, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: ACADEMICS, - TARGET: ACADEMICS, - JP_PATH: [[[ACADEMICS, ADVISOR], [ACADEMICS, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: ACADEMICS, - TARGET: ACADEMICS, - JP_PATH: [[[ACADEMICS, ID], [ACADEMICS, ADVISOR]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, { - BASE: ACADEMICS, - TARGET: ARTICLES, - JP_PATH: [[[ACADEMICS, ID], [ARTICLES, PRIMARY_AUTHOR]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, { - BASE: ACADEMICS, - TARGET: ARTICLES, - JP_PATH: [[[ACADEMICS, ID], [ARTICLES, SECONDARY_AUTHOR]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, - ], - ARTICLES: [ - { - BASE: ARTICLES, - TARGET: ACADEMICS, - JP_PATH: [[[ARTICLES, PRIMARY_AUTHOR], [ACADEMICS, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: ARTICLES, - TARGET: ACADEMICS, - JP_PATH: [[[ARTICLES, SECONDARY_AUTHOR], [ACADEMICS, ID]]], - FK_PATH: 
None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: ARTICLES, - TARGET: JOURNALS, - JP_PATH: [[[ARTICLES, JOURNAL], [JOURNALS, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, - ], - JOURNALS: [ - { - BASE: JOURNALS, - TARGET: UNIVERSITIES, - JP_PATH: [[[JOURNALS, INSTITUTION], [UNIVERSITIES, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: JOURNALS, - TARGET: PUBLISHERS, - JP_PATH: [[[JOURNALS, PUBLISHER], [PUBLISHERS, ID]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: False, - }, { - BASE: JOURNALS, - TARGET: ARTICLES, - JP_PATH: [[[JOURNALS, ID], [ARTICLES, JOURNAL]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, - ], - PUBLISHERS: [ - { - BASE: PUBLISHERS, - TARGET: JOURNALS, - JP_PATH: [[[PUBLISHERS, ID], [JOURNALS, PUBLISHER]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - } - ], - UNIVERSITIES: [ - { - BASE: UNIVERSITIES, - TARGET: ACADEMICS, - JP_PATH: [[[UNIVERSITIES, ID], [ACADEMICS, INSTITUTION]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, { - BASE: UNIVERSITIES, - TARGET: JOURNALS, - JP_PATH: [[[UNIVERSITIES, ID], [JOURNALS, INSTITUTION]]], - FK_PATH: None, - DEPTH: 1, - MULTIPLE_RESULTS: True, - }, - ], -} - - -def _get_expect_joinable_tables(base, depth): - l1_paths = L1_JOINABLE_TABLES_DICT[base] - if depth <= 1: - return l1_paths - else: - return [ - { - BASE: row[BASE], - TARGET: target_row[TARGET], - JP_PATH: row[JP_PATH] + target_row[JP_PATH], - FK_PATH: None, - DEPTH: row[DEPTH] + target_row[DEPTH], - MULTIPLE_RESULTS: row[MULTIPLE_RESULTS] or target_row[MULTIPLE_RESULTS] - } - for row in l1_paths - for target_row in _get_expect_joinable_tables(row[TARGET], depth - 1) - if row[JP_PATH][-1] != target_row[JP_PATH][0][::-1] - ] - - -JOINABLE_TABLES_PARAMS = [ - (base, depth) for base in L1_JOINABLE_TABLES_DICT for depth in [1, 2, 3] -] - - -@pytest.mark.parametrize('table,depth', JOINABLE_TABLES_PARAMS) -def test_get_joinable_tables_query_paths(engine_with_academics, table, depth): - engine, schema = engine_with_academics - academics_oid = ma_sel.get_oid_from_table(table, schema, engine) - joinable_tables = ma_sel.get_joinable_tables( - engine, base_table_oid=academics_oid, max_depth=depth, metadata=get_empty_metadata() - ) - all_row_lists = [ - _get_expect_joinable_tables(table, d) for d in range(1, depth + 1) - ] - expect_rows = sorted( - [row for sublist in all_row_lists for row in sublist], - key=lambda x: x[JP_PATH] - ) - actual_rows = sorted( - [_transform_row_to_names(row, engine) for row in joinable_tables], - key=lambda x: x[JP_PATH] - ) - assert expect_rows == actual_rows - - -def test_get_description_from_table(roster_table_name, engine_with_roster): - engine, schema = engine_with_roster - roster_table_oid = ma_sel.get_oid_from_table(roster_table_name, schema, engine) - expect_comment = 'my super comment' - with engine.begin() as conn: - conn.execute(text(f'''COMMENT ON TABLE "{schema}"."{roster_table_name}" IS '{expect_comment}';''')) - - actual_comment = ma_sel.get_table_description(roster_table_oid, engine) - - assert actual_comment == expect_comment diff --git a/db/transforms/operations/finish_specifying.py b/db/transforms/operations/finish_specifying.py index 4dc9895ff5..3d3406eb23 100644 --- a/db/transforms/operations/finish_specifying.py +++ b/db/transforms/operations/finish_specifying.py @@ -1,7 +1,7 @@ +import psycopg from sqlalchemy import inspect from db.tables.operations import select as tables_select -from db.tables.operations.select import 
get_joinable_tables from db.transforms.base import Summarize from db.columns.operations.select import get_column_from_oid_and_attnum @@ -155,8 +155,12 @@ def _should_group_by( def _get_oids_of_joinable_tables_with_single_results( db_query, engine, metadata, ): - joinable_tables = \ - get_joinable_tables(engine, metadata, db_query.base_table_oid) + with psycopg.connect(str(engine.url)) as conn: + joinable_tables = tables_select.list_joinable_tables( + db_query.base_table_oid, + conn, + 3 + )['joinable_tables'] return set( _get_oid_of_joinable_table(joinable_table) for joinable_table From 8b68d0e7db70e87f7417bf1ac6b12f43415df8d4 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 14:26:59 +0800 Subject: [PATCH 65/70] remove get_oid_from_table function --- db/columns/base.py | 5 ++--- db/tables/operations/select.py | 5 ----- db/tests/columns/operations/test_select.py | 10 +++++++--- db/tests/queries/conftest.py | 6 +++--- db/tests/queries/test_base.py | 15 ++++++++++----- db/tests/queries/test_initial_relation.py | 17 +++++++++++------ .../test_finish_specifying_summarization.py | 4 ++-- 7 files changed, 35 insertions(+), 27 deletions(-) diff --git a/db/columns/base.py b/db/columns/base.py index 7291716fee..8dc7c0957d 100644 --- a/db/columns/base.py +++ b/db/columns/base.py @@ -2,7 +2,6 @@ from sqlalchemy import Column, ForeignKey, inspect from db.columns.operations.select import get_column_attnum_from_name -from db.tables.operations.select import get_oid_from_table from db.types.operations.convert import get_db_type_enum_from_class @@ -112,8 +111,8 @@ def table_(self): @property def table_oid(self): if self.table_ is not None: - oid = get_oid_from_table( - self.table_.name, self.table_.schema, self.engine + oid = inspect(self.engine).get_table_oid( + self.table_.name, schema=self.table_.schema ) else: oid = None diff --git a/db/tables/operations/select.py b/db/tables/operations/select.py index 588f242eb2..14418f0ba9 100644 --- a/db/tables/operations/select.py +++ b/db/tables/operations/select.py @@ -118,8 +118,3 @@ def get_map_of_table_oid_to_schema_name_and_table_name( in result_rows } return table_oids_to_schema_names_and_table_names - - -def get_oid_from_table(name, schema, engine): - inspector = inspect(engine) - return inspector.get_table_oid(name, schema=schema) diff --git a/db/tests/columns/operations/test_select.py b/db/tests/columns/operations/test_select.py index d062e53a7f..57f2bc107a 100644 --- a/db/tests/columns/operations/test_select.py +++ b/db/tests/columns/operations/test_select.py @@ -1,14 +1,18 @@ from unittest.mock import patch -from sqlalchemy import String, Integer, Column, Table, MetaData +from sqlalchemy import String, Integer, Column, Table, MetaData, inspect from db.columns.operations import select as col_select from db.columns.operations.select import ( get_column_attnum_from_name, get_column_name_from_attnum ) -from db.tables.operations.select import get_oid_from_table from db.metadata import get_empty_metadata +def _get_oid_from_table(name, schema, engine): + inspector = inspect(engine) + return inspector.get_table_oid(name, schema=schema) + + def test_get_column_info_for_table(): with patch.object(col_select, 'exec_msar_func') as mock_exec: mock_exec.return_value.fetchone = lambda: ('a', 'b') @@ -29,7 +33,7 @@ def test_get_attnum_from_name(engine_with_schema): Column(one_name, String), ) table.create() - table_oid = get_oid_from_table(table_name, schema, engine) + table_oid = _get_oid_from_table(table_name, schema, engine) metadata = 
get_empty_metadata() column_zero_attnum = get_column_attnum_from_name(table_oid, zero_name, engine, metadata=metadata) column_one_attnum = get_column_attnum_from_name(table_oid, one_name, engine, metadata=metadata) diff --git a/db/tests/queries/conftest.py b/db/tests/queries/conftest.py index 51da6448c7..a5cc3536bb 100644 --- a/db/tests/queries/conftest.py +++ b/db/tests/queries/conftest.py @@ -1,6 +1,6 @@ import pytest +from sqlalchemy import inspect from db.columns.operations.select import get_column_attnum_from_name as get_attnum -from db.tables.operations.select import get_oid_from_table from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.metadata import get_empty_metadata @@ -9,10 +9,10 @@ def shallow_link_dbquery(engine_with_academics): engine, schema = engine_with_academics metadata = get_empty_metadata() - acad_oid = get_oid_from_table('academics', schema, engine) + acad_oid = inspect(engine).get_table_oid('academics', schema=schema) acad_id_attnum = get_attnum(acad_oid, 'id', engine, metadata=metadata) acad_institution_attnum = get_attnum(acad_oid, 'institution', engine, metadata=metadata) - uni_oid = get_oid_from_table('universities', schema, engine) + uni_oid = inspect(engine).get_table_oid('universities', schema=schema) uni_name_attnum = get_attnum(uni_oid, 'name', engine, metadata=metadata) uni_id_attnum = get_attnum(uni_oid, 'id', engine, metadata=metadata) initial_columns = [ diff --git a/db/tests/queries/test_base.py b/db/tests/queries/test_base.py index b3deeba06e..848c752ade 100644 --- a/db/tests/queries/test_base.py +++ b/db/tests/queries/test_base.py @@ -1,6 +1,6 @@ +from sqlalchemy import inspect from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.columns.operations.select import get_column_attnum_from_name as get_attnum -from db.tables.operations.select import get_oid_from_table from db.transforms import base as tbase from db.metadata import get_empty_metadata @@ -12,9 +12,14 @@ def _extract_col_properties_dict(col): } +def _get_oid_from_table(name, schema, engine): + inspector = inspect(engine) + return inspector.get_table_oid(name, schema=schema) + + def test_DBQuery_all_sa_columns_map_initial_columns(engine_with_academics): engine, schema = engine_with_academics - acad_oid = get_oid_from_table("academics", schema, engine) + acad_oid = _get_oid_from_table("academics", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( @@ -70,7 +75,7 @@ def test_DBQuery_all_sa_columns_map_initial_columns(engine_with_academics): def test_DBQuery_all_sa_columns_map_output_columns(engine_with_academics): engine, schema = engine_with_academics - acad_oid = get_oid_from_table("academics", schema, engine) + acad_oid = _get_oid_from_table("academics", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( @@ -136,7 +141,7 @@ def test_DBQuery_all_sa_columns_map_output_columns(engine_with_academics): def test_DBQuery_all_sa_columns_map_summarized_columns(engine_with_library): engine, schema = engine_with_library - checkouts_oid = get_oid_from_table("Checkouts", schema, engine) + checkouts_oid = _get_oid_from_table("Checkouts", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( @@ -221,7 +226,7 @@ def test_DBQuery_all_sa_columns_map_summarized_columns(engine_with_library): def test_DBQuery_all_sa_columns_map_overwriting(engine_with_library): engine, schema = engine_with_library - checkouts_oid = get_oid_from_table("Checkouts", schema, engine) + checkouts_oid = 
_get_oid_from_table("Checkouts", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( diff --git a/db/tests/queries/test_initial_relation.py b/db/tests/queries/test_initial_relation.py index 76ddd86ee8..4589bbdc48 100644 --- a/db/tests/queries/test_initial_relation.py +++ b/db/tests/queries/test_initial_relation.py @@ -3,15 +3,20 @@ # Initial columns is an ordered set of columns sourced either from the base table, or from linked # tables. +from sqlalchemy import inspect from db.columns.operations.select import get_column_attnum_from_name as get_attnum -from db.tables.operations.select import get_oid_from_table from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.metadata import get_empty_metadata +def _get_oid_from_table(name, schema, engine): + inspector = inspect(engine) + return inspector.get_table_oid(name, schema=schema) + + def test_local_columns(engine_with_academics): engine, schema = engine_with_academics - oid = get_oid_from_table("academics", schema, engine) + oid = _get_oid_from_table("academics", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( @@ -42,9 +47,9 @@ def test_shallow_link(shallow_link_dbquery): def test_deep_link(engine_with_academics): engine, schema = engine_with_academics - art_oid = get_oid_from_table("articles", schema, engine) - acad_oid = get_oid_from_table("academics", schema, engine) - uni_oid = get_oid_from_table("universities", schema, engine) + art_oid = _get_oid_from_table("articles", schema, engine) + acad_oid = _get_oid_from_table("academics", schema, engine) + uni_oid = _get_oid_from_table("universities", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( @@ -79,7 +84,7 @@ def test_deep_link(engine_with_academics): def test_self_referencing_table(engine_with_academics): engine, schema = engine_with_academics - acad_oid = get_oid_from_table("academics", schema, engine) + acad_oid = _get_oid_from_table("academics", schema, engine) metadata = get_empty_metadata() initial_columns = [ InitialColumn( diff --git a/db/tests/transforms/test_finish_specifying_summarization.py b/db/tests/transforms/test_finish_specifying_summarization.py index 35dd7c6d26..a5e85085a0 100644 --- a/db/tests/transforms/test_finish_specifying_summarization.py +++ b/db/tests/transforms/test_finish_specifying_summarization.py @@ -1,8 +1,8 @@ import pytest +from sqlalchemy import inspect import types import functools -from db.tables.operations.select import get_oid_from_table from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.metadata import get_empty_metadata from db.transforms.base import Summarize, SelectSubsetOfColumns, Limit @@ -92,7 +92,7 @@ def academics_ids(engine_with_academics, metadata): @functools.cache def get_oid(table_name): - return get_oid_from_table(table_name, schema, engine) + return inspect(engine).get_table_oid(table_name, schema=schema) @functools.cache def get_attnum(table_name, column_name): From 50bd82f75d4b9cef4a32b57bdcfe1c09ba913c0b Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 16:59:23 +0800 Subject: [PATCH 66/70] clean up and remove unneeded tests --- conftest.py | 76 ------ db/tables/operations/select.py | 4 +- db/tests/conftest.py | 129 ---------- db/tests/constraints/__init__.py | 0 db/tests/resources/books_import_from.sql | 6 - db/tests/resources/books_import_target.sql | 6 - db/tests/resources/fraudulent_payments.sql | 124 ---------- db/tests/resources/grouping.sql | 31 --- 
db/tests/resources/joinable_tables.sql | 83 ------- db/tests/resources/json_without_pkey.sql | 7 - .../resources/magnitude_testing_create.sql | 229 ------------------ db/tests/resources/marathon_athletes.sql | 50 ---- db/tests/resources/player_profiles.sql | 107 -------- db/tests/resources/range_grouping.sql | 22 -- db/tests/resources/uri_testing | 101 -------- db/tests/resources/uris_create.sql | 43 ---- db/tests/schemas/__init__.py | 0 db/tests/schemas/operations/__init__.py | 0 db/tests/tables/utils.py | 15 -- db/tests/test_fixtures.py | 37 --- db/tests/types/conftest.py | 10 - db/tests/types/operations/__init__.py | 0 22 files changed, 1 insertion(+), 1079 deletions(-) delete mode 100644 db/tests/constraints/__init__.py delete mode 100644 db/tests/resources/books_import_from.sql delete mode 100644 db/tests/resources/books_import_target.sql delete mode 100644 db/tests/resources/fraudulent_payments.sql delete mode 100644 db/tests/resources/grouping.sql delete mode 100644 db/tests/resources/joinable_tables.sql delete mode 100644 db/tests/resources/json_without_pkey.sql delete mode 100644 db/tests/resources/magnitude_testing_create.sql delete mode 100644 db/tests/resources/marathon_athletes.sql delete mode 100644 db/tests/resources/player_profiles.sql delete mode 100644 db/tests/resources/range_grouping.sql delete mode 100644 db/tests/resources/uri_testing delete mode 100644 db/tests/resources/uris_create.sql delete mode 100644 db/tests/schemas/__init__.py delete mode 100644 db/tests/schemas/operations/__init__.py delete mode 100644 db/tests/tables/utils.py delete mode 100644 db/tests/test_fixtures.py delete mode 100644 db/tests/types/conftest.py delete mode 100644 db/tests/types/operations/__init__.py diff --git a/conftest.py b/conftest.py index f815bc290f..cd2ad3a5cb 100644 --- a/conftest.py +++ b/conftest.py @@ -162,28 +162,6 @@ def _test_schema_name(): return "_test_schema_name" -# TODO does testing this make sense? -@pytest.fixture(scope="module") -def engine_without_ischema_names_updated(test_db_name, MOD_engine_cache): - """ - For testing environments where an engine might not be fully setup. - - We instantiate a new engine cache, without updating its ischema_names dict. 
- """ - return MOD_engine_cache(test_db_name) - - -# TODO seems unneeded: remove -@pytest.fixture -def engine_with_schema_without_ischema_names_updated( - engine_without_ischema_names_updated, _test_schema_name, create_db_schema -): - engine = engine_without_ischema_names_updated - schema_name = _test_schema_name - create_db_schema(schema_name, engine) - return engine, schema_name - - @pytest.fixture def engine_with_schema(engine, _test_schema_name, create_db_schema): schema_name = _test_schema_name @@ -300,9 +278,6 @@ def _get_connection_string(username, password, hostname, database): ACADEMICS_SQL = os.path.join(RESOURCES, "academics_create.sql") LIBRARY_SQL = os.path.join(RESOURCES, "library_without_checkouts.sql") LIBRARY_CHECKOUTS_SQL = os.path.join(RESOURCES, "library_add_checkouts.sql") -FRAUDULENT_PAYMENTS_SQL = os.path.join(RESOURCES, "fraudulent_payments.sql") -PLAYER_PROFILES_SQL = os.path.join(RESOURCES, "player_profiles.sql") -MARATHON_ATHLETES_SQL = os.path.join(RESOURCES, "marathon_athletes.sql") @pytest.fixture @@ -374,54 +349,3 @@ def make_table(table_name): in table_names } return tables - - -@pytest.fixture -def engine_with_fraudulent_payment(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(FRAUDULENT_PAYMENTS_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - -@pytest.fixture -def payments_db_table(engine_with_fraudulent_payment): - engine, schema = engine_with_fraudulent_payment - metadata = MetaData(bind=engine) - table = Table("Payments", metadata, schema=schema, autoload_with=engine) - return table - - -@pytest.fixture -def engine_with_player_profiles(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(PLAYER_PROFILES_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - -@pytest.fixture -def players_db_table(engine_with_player_profiles): - engine, schema = engine_with_player_profiles - metadata = MetaData(bind=engine) - table = Table("Players", metadata, schema=schema, autoload_with=engine) - return table - - -@pytest.fixture -def engine_with_marathon_athletes(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(MARATHON_ATHLETES_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - -@pytest.fixture -def athletes_db_table(engine_with_marathon_athletes): - engine, schema = engine_with_marathon_athletes - metadata = MetaData(bind=engine) - table = Table("Marathon", metadata, schema=schema, autoload_with=engine) - return table diff --git a/db/tables/operations/select.py b/db/tables/operations/select.py index 14418f0ba9..509a84b9df 100644 --- a/db/tables/operations/select.py +++ b/db/tables/operations/select.py @@ -1,6 +1,4 @@ -from sqlalchemy import ( - Table, select, join, inspect, -) +from sqlalchemy import Table, select, join from db.connection import exec_msar_func from db.utils import execute_statement, get_pg_catalog_table diff --git a/db/tests/conftest.py b/db/tests/conftest.py index 49768f65a1..fff38ab6f7 100644 --- a/db/tests/conftest.py +++ b/db/tests/conftest.py @@ -3,21 +3,14 @@ import pytest from sqlalchemy import MetaData, text, Table -from db import constants - FILE_DIR = os.path.abspath(os.path.dirname(__file__)) RESOURCES = os.path.join(FILE_DIR, "resources") ROSTER_SQL = os.path.join(RESOURCES, "roster_create.sql") -URIS_SQL = 
os.path.join(RESOURCES, "uris_create.sql") TIMES_SQL = os.path.join(RESOURCES, "times_create.sql") BOOLEANS_SQL = os.path.join(RESOURCES, "booleans_create.sql") FILTER_SORT_SQL = os.path.join(RESOURCES, "filter_sort_create.sql") -MAGNITUDE_SQL = os.path.join(RESOURCES, "magnitude_testing_create.sql") ARRAY_SQL = os.path.join(RESOURCES, "array_create.sql") JSON_SQL = os.path.join(RESOURCES, "json_sort.sql") -JSON_WITHOUT_PKEY_SQL = os.path.join(RESOURCES, "json_without_pkey.sql") -BOOKS_FROM_SQL = os.path.join(RESOURCES, "books_import_from.sql") -BOOKS_TARGET_SQL = os.path.join(RESOURCES, "books_import_target.sql") @pytest.fixture @@ -29,24 +22,6 @@ def engine_with_roster(engine_with_schema): yield engine, schema -@pytest.fixture -def engine_with_JSON_without_pkey(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(JSON_WITHOUT_PKEY_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - -@pytest.fixture -def engine_with_uris(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(URIS_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - @pytest.fixture def engine_with_times(engine_with_schema): engine, schema = engine_with_schema @@ -92,48 +67,11 @@ def engine_with_filter_sort(engine_with_schema): return engine, schema -@pytest.fixture -def engine_with_magnitude(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(MAGNITUDE_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - yield engine, schema - - -@pytest.fixture -def engine_with_books_to_import_from(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(BOOKS_FROM_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - return engine, schema - - -@pytest.fixture -def engine_with_books_import_target(engine_with_schema): - engine, schema = engine_with_schema - with engine.begin() as conn, open(BOOKS_TARGET_SQL) as f: - conn.execute(text(f"SET search_path={schema}")) - conn.execute(text(f.read())) - return engine, schema - - @pytest.fixture(scope='session') def roster_table_name(): return "Roster" -@pytest.fixture(scope='session') -def json_without_pkey_name(): - return "json_without_pkey" - - -@pytest.fixture(scope='session') -def uris_table_name(): - return "uris" - - @pytest.fixture(scope='session') def array_table_name(): return "array_test" @@ -144,41 +82,6 @@ def json_table_name(): return "json_sort" -@pytest.fixture(scope='session') -def magnitude_table_name(): - return "magnitude_testing" - - -@pytest.fixture(scope='session') -def teachers_table_name(): - return "Teachers" - - -@pytest.fixture(scope='session') -def roster_no_teachers_table_name(): - return "Roster without Teachers" - - -@pytest.fixture(scope='session') -def roster_extracted_cols(): - return ["Teacher", "Teacher Email"] - - -@pytest.fixture(scope='session') -def roster_fkey_col(teachers_table_name): - return f"{teachers_table_name}_{constants.ID}" - - -@pytest.fixture(scope='session') -def books_import_from_table_name(): - return "books_from" - - -@pytest.fixture(scope='session') -def books_import_target_table_name(): - return "books_target" - - @pytest.fixture def times_table_obj(engine_with_times): engine, schema = engine_with_times @@ -203,22 +106,6 @@ def roster_table_obj(engine_with_roster, 
roster_table_name): return table, engine -@pytest.fixture -def json_without_pkey_table_obj(engine_with_JSON_without_pkey, json_without_pkey_name): - engine, schema = engine_with_JSON_without_pkey - metadata = MetaData(bind=engine) - table = Table(json_without_pkey_name, metadata, schema=schema, autoload_with=engine) - return table, engine - - -@pytest.fixture -def magnitude_table_obj(engine_with_magnitude, magnitude_table_name): - engine, schema = engine_with_magnitude - metadata = MetaData(bind=engine) - table = Table(magnitude_table_name, metadata, schema=schema, autoload_with=engine) - return table, engine - - @pytest.fixture def array_table_obj(engine_with_array, array_table_name): engine, schema = engine_with_array @@ -233,19 +120,3 @@ def json_table_obj(engine_with_json, json_table_name): metadata = MetaData(bind=engine) table = Table(json_table_name, metadata, schema=schema, autoload_with=engine) return table, engine - - -@pytest.fixture -def books_table_import_from_obj(engine_with_books_to_import_from, books_import_from_table_name): - engine, schema = engine_with_books_to_import_from - metadata = MetaData(bind=engine) - table = Table(books_import_from_table_name, metadata, schema=schema, autoload_with=engine) - return table, engine - - -@pytest.fixture -def books_table_import_target_obj(engine_with_books_import_target, books_import_target_table_name): - engine, schema = engine_with_books_import_target - metadata = MetaData(bind=engine) - table = Table(books_import_target_table_name, metadata, schema=schema, autoload_with=engine) - return table, engine diff --git a/db/tests/constraints/__init__.py b/db/tests/constraints/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/resources/books_import_from.sql b/db/tests/resources/books_import_from.sql deleted file mode 100644 index 38562d7b7a..0000000000 --- a/db/tests/resources/books_import_from.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE "books_from"( -id SERIAL PRIMARY KEY, -"author_name" TEXT, -"book_title" TEXT); - -INSERT INTO "books_from"(author_name, book_title) VALUES ('Fyodor Dostoevsky', 'Crime and Punishment'), ('Cervantes', 'Don Quixote'); diff --git a/db/tests/resources/books_import_target.sql b/db/tests/resources/books_import_target.sql deleted file mode 100644 index 0a4e75c8fc..0000000000 --- a/db/tests/resources/books_import_target.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE "books_target"( -id SERIAL PRIMARY KEY, -"title" TEXT, -"author" TEXT); - -INSERT INTO "books_target"(title, author) VALUES ('Steve Jobs', 'Walter Issacson'), ('The Idiot', 'Fyodor Dostevsky'), ('David Copperfield', 'Charles Darwin'); diff --git a/db/tests/resources/fraudulent_payments.sql b/db/tests/resources/fraudulent_payments.sql deleted file mode 100644 index 00d08dc2c6..0000000000 --- a/db/tests/resources/fraudulent_payments.sql +++ /dev/null @@ -1,124 +0,0 @@ -CREATE TABLE "Payments" ( - id integer NOT NULL, - "Payment Mode" text, - "Is Fraudulent" boolean -); - -CREATE SEQUENCE "Payments_id_seq" - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - -ALTER SEQUENCE "Payments_id_seq" OWNED BY "Payments".id; - -ALTER TABLE ONLY "Payments" ALTER COLUMN id SET DEFAULT nextval('"Payments_id_seq"'::regclass); - -INSERT INTO "Payments" (id, "Payment Mode", "Is Fraudulent") VALUES -(1, 'credit card', false), -(2, 'debit card', false), -(3, 'wallet', true), -(4, 'UPI', false), -(5, 'credit card', false), -(6, 'wallet', true), -(7, 'debit card', false), -(8, 'pay later', false), 
-(9, 'debit card', false), -(10, 'debit card', false), -(11, 'pay later', false), -(12, 'debit card', false), -(13, 'wallet', true), -(14, 'wallet', false), -(15, 'UPI', true), -(16, 'credit card', false), -(17, 'wallet', false), -(18, 'debit card', false), -(19, 'debit card', false), -(20, 'credit card', false), -(21, 'debit card', true), -(22, 'credit card', false), -(23, 'debit card', false), -(24, 'debit card', false), -(25, 'wallet', false), -(26, 'wallet', false), -(27, 'wallet', false), -(28, 'wallet', false), -(29, 'credit card', false), -(30, 'wallet', false), -(31, 'wallet', false), -(32, 'debit card', false), -(33, 'debit card', false), -(34, 'debit card', false), -(35, 'pay later', false), -(36, 'debit card', false), -(37, 'wallet', false), -(38, 'debit card', true), -(39, 'credit card', false), -(40, 'UPI', false), -(41, 'debit card', false), -(42, 'credit card', false), -(43, 'credit card', false), -(44, 'wallet', false), -(45, 'pay later', false), -(46, 'credit card', true), -(47, 'debit card', false), -(48, 'credit card', false), -(49, 'wallet', false), -(50, 'UPI', false), -(51, 'debit card', false), -(52, 'debit card', false), -(53, 'wallet', false), -(54, 'debit card', false), -(55, 'credit card', false), -(56, 'pay later', false), -(57, 'debit card', true), -(58, 'wallet', false), -(59, 'credit card', false), -(60, 'debit card', false), -(61, 'wallet', true), -(62, 'wallet', false), -(63, 'debit card', false), -(64, 'credit card', false), -(65, 'wallet', false), -(66, 'debit card', false), -(67, 'debit card', false), -(68, 'pay later', true), -(69, 'debit card', false), -(70, 'debit card', false), -(71, 'wallet', false), -(72, 'wallet', false), -(73, 'credit card', false), -(74, 'debit card', false), -(75, 'UPI', false), -(76, 'debit card', false), -(77, 'wallet', false), -(78, 'credit card', true), -(79, 'wallet', true), -(80, 'debit card', false), -(81, 'debit card', false), -(82, 'credit card', false), -(83, 'debit card', false), -(84, 'wallet', false), -(85, 'wallet', true), -(86, 'UPI', false), -(87, 'credit card', false), -(88, 'wallet', false), -(89, 'debit card', false), -(90, 'debit card', false), -(91, 'credit card', false), -(92, 'pay later', false), -(93, 'debit card', false), -(94, 'wallet', false), -(95, 'debit card', true), -(96, 'debit card', false), -(97, 'wallet', false), -(98, 'wallet', false), -(99, 'credit card', false), -(100, 'wallet', true); - -SELECT pg_catalog.setval('"Payments_id_seq"', 100, true); - -ALTER TABLE ONLY "Payments" - ADD CONSTRAINT "Payments_pkey" PRIMARY KEY (id); \ No newline at end of file diff --git a/db/tests/resources/grouping.sql b/db/tests/resources/grouping.sql deleted file mode 100644 index b38d0f7213..0000000000 --- a/db/tests/resources/grouping.sql +++ /dev/null @@ -1,31 +0,0 @@ -SELECT - "Country", - "Item Type", - jsonb_build_object( - 'count', - COUNT(1) OVER ( - PARTITION BY "Country", "Item Type" - RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING - ), - 'first_value', - first_value( - jsonb_build_object('Country', "Country", 'Item Type', "Item Type") - ) OVER ( - PARTITION BY "Country", "Item Type" - ORDER BY "Country", "Item Type" - RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING - ), - 'last_value', - last_value( - jsonb_build_object('Country', "Country", 'Item Type', "Item Type") - ) OVER ( - PARTITION BY "Country", "Item Type" - ORDER BY "Country", "Item Type" - RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING - ), - 'group_id', - dense_rank() OVER ( - ORDER BY "Country", "Item 
Type" - RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) - ) AS mathesar_metadata_col -FROM "5m Sales Records" ORDER BY "Country" , "Item Type" LIMIT 500 OFFSET 2000; diff --git a/db/tests/resources/joinable_tables.sql b/db/tests/resources/joinable_tables.sql deleted file mode 100644 index 9b6e072b0d..0000000000 --- a/db/tests/resources/joinable_tables.sql +++ /dev/null @@ -1,83 +0,0 @@ -/* - -This SQL query returns a table of the form - - base | target | path --------+--------+--------------------------------- - | | json array of arrays of arrays - -The base and target are OIDs of a base table, and a target table that can be -joined by some combination of joins along single-column foreign key column -restrictions in either way. - -*/ - -WITH RECURSIVE symmetric_fkeys AS ( - SELECT - c.oid fkey_oid, - c.conrelid::INTEGER left_rel, - c.confrelid::INTEGER right_rel, - c.conkey[1]::INTEGER left_col, - c.confkey[1]::INTEGER right_col, - false reversed - FROM pg_constraint c - WHERE c.contype='f' and array_length(c.conkey, 1)=1 -UNION ALL - SELECT - c.oid fkey_oid, - c.confrelid::INTEGER left_rel, - c.conrelid::INTEGER right_rel, - c.confkey[1]::INTEGER left_col, - c.conkey[1]::INTEGER right_col, - true reversed - FROM pg_constraint c - WHERE c.contype='f' and array_length(c.conkey, 1)=1 -), - -search_fkey_graph(left_rel, right_rel, left_col, right_col, depth, join_path, fkey_path) AS ( - SELECT - sfk.left_rel, - sfk.right_rel, - sfk.left_col, - sfk.right_col, - 1, - jsonb_build_array( - jsonb_build_array( - jsonb_build_array(sfk.left_rel, sfk.left_col), - jsonb_build_array(sfk.right_rel, sfk.right_col) - ) - ), - jsonb_build_array(jsonb_build_array(sfk.fkey_oid, sfk.reversed)) - FROM symmetric_fkeys sfk -UNION ALL - SELECT - sfk.left_rel, - sfk.right_rel, - sfk.left_col, - sfk.right_col, - sg.depth + 1, - join_path || jsonb_build_array( - jsonb_build_array( - jsonb_build_array(sfk.left_rel, sfk.left_col), - jsonb_build_array(sfk.right_rel, sfk.right_col) - ) - ), - fkey_path || jsonb_build_array(jsonb_build_array(sfk.fkey_oid, sfk.reversed)) - FROM symmetric_fkeys sfk, search_fkey_graph sg - WHERE - sfk.left_rel=sg.right_rel - AND depth<3 - AND (join_path -> -1) != jsonb_build_array( - jsonb_build_array(sfk.right_rel, sfk.right_col), - jsonb_build_array(sfk.left_rel, sfk.left_col) - ) -), output_cte AS ( - SELECT - (join_path#>'{0, 0, 0}')::INTEGER base, - (join_path#>'{-1, -1, 0}')::INTEGER target, - join_path, - fkey_path, - depth - FROM search_fkey_graph -) -SELECT * FROM output_cte; diff --git a/db/tests/resources/json_without_pkey.sql b/db/tests/resources/json_without_pkey.sql deleted file mode 100644 index 6ed549ba91..0000000000 --- a/db/tests/resources/json_without_pkey.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE "json_without_pkey" ( - "json_object" json -); - -INSERT INTO "json_without_pkey" VALUES -('{"name": "John", "age": 30}'::json), -('{"name": "Earl James", "age": 30}'::json); diff --git a/db/tests/resources/magnitude_testing_create.sql b/db/tests/resources/magnitude_testing_create.sql deleted file mode 100644 index 3a3fdd8e41..0000000000 --- a/db/tests/resources/magnitude_testing_create.sql +++ /dev/null @@ -1,229 +0,0 @@ -CREATE TABLE magnitude_testing ( - id integer NOT NULL, - big_num numeric, - big_int integer, - sm_num numeric, - sm_dbl double precision, - pm_seq integer GENERATED ALWAYS AS ((id - 100)) STORED, - tens_seq integer GENERATED ALWAYS AS ((id * 10)) STORED -); - -CREATE SEQUENCE magnitude_testing_id_seq - AS integer - START WITH 1 - INCREMENT BY 1 
- NO MINVALUE - NO MAXVALUE - CACHE 1; - -ALTER SEQUENCE magnitude_testing_id_seq OWNED BY magnitude_testing.id; - -ALTER TABLE ONLY magnitude_testing - ALTER COLUMN id SET DEFAULT nextval('magnitude_testing_id_seq'::regclass); - -INSERT INTO magnitude_testing (id, big_num, big_int, sm_num, sm_dbl) VALUES -(1, -25.1131622435626, 2181, 0.0000616393361899956, 0.00012799759715100123), -(2, 524.36895559054, 5147, 0.0000528972059482886, 0.00021285742778541562), -(3, 384.020684380979, 485, 0.000105717449375235, 0.0001561696271714439), -(4, 636.670227129309, 1216, 0.000132713362551432, 0.000454518815901416), -(5, 177.815234751315, 4748, 0.00016787272789187, 0.0005939898648275417), -(6, 53.281245839906, -3272, 0.000108269467076751, 0.0006169270427432067), -(7, 323.910430502289, 709, 0.000207128401375107, 0.00018379355795560315), -(8, 169.104340129956, 5661, 0.0000712927552483467, 0.00021753598326690592), -(9, 320.152793915079, 5694, 0.0000133329887058441, 0.0004810149447503491), -(10, 276.11651266323, -580, 0.000296001593079011, 0.0008873165369829437), -(11, 745.101453469951, -966, 0.000267977727434007, 0.0008150222635782214), -(12, 95.7008176274821, 4969, 0.0000692916025063273, 0.00017332795445027926), -(13, 348.944211208086, 2468, 0.000062842429562966, 0.0008034281427345392), -(14, 20.1652456316817, 3071, 0.000116334136729698, 2.1610857191028287e-05), -(15, 390.786786668124, 1115, 0.000293996008812221, 0.0006054527627226278), -(16, 88.2599072482658, 2299, 0.00025781452224049, 0.0003374051248536247), -(17, 42.5361846630291, 5156, 0.00029779901822555, 0.0007805812461330462), -(18, 411.886118716034, 4124, 0.000195096078287479, 0.00034098488595264346), -(19, 200.106946662921, 4714, 0.000041551630741802, 0.0008306707927010351), -(20, 369.823805819316, 1949, 0.0000265295182697873, 0.0007899419110134077), -(21, -30.5631419505151, 5577, 0.0000243015890955238, 0.000246827139620315), -(22, 389.18645445903, 2239, 0.000251935931299498, 0.0005225377694495066), -(23, 427.806496007585, 1412, 0.000183072745164609, 0.00015678398969135545), -(24, 546.445648931262, -940, 0.000022975135926136, 0.0001917841740013486), -(25, 0.21481531971645, -1119, 0.0000363959026321542, 0.0004741831123822422), -(26, 537.250051428992, 4269, 0.000144669791765198, 0.0005061999990638739), -(27, -63.929218171089, 5686, 0.000130519770579716, 0.0007880668853802675), -(28, -212.103511217413, 4361, 0.000227789248522926, 0.0007707325217315528), -(29, -76.7811241642765, -1999, 0.000184050929379146, 9.905910671236739e-05), -(30, 611.193624954688, 1569, 0.00013310570565951, 3.011949688879412e-05), -(31, -165.675899805072, 2941, 0.000154017804000202, 0.0009081239700125324), -(32, 345.591510305121, 1161, 0.0000367188496829005, 0.0006570077401738886), -(33, -35.100059622457, 3803, 0.000170474733601117, 9.445715801431476e-05), -(34, 358.833111141444, -887, 0.000180703729815427, 0.0007984329551446123), -(35, -107.229463736181, 623, 0.000207758320019312, 0.00048097707184656006), -(36, 646.016735570776, 4327, 0.0000552702265254101, 0.0006968046034736162), -(37, 265.5161379328, -3700, 0.0000783793315294478, 0.0009790482762104843), -(38, 506.841084168507, -2614, 0.000287431061165943, 0.000535328679501724), -(39, 405.458730214101, -1011, 0.000250279531985993, 0.0007284833030162261), -(40, 447.937466479538, 3475, 0.000286858888713046, 0.00012284031297726727), -(41, 611.002523267509, 4764, 0.000199803899217506, 0.0007045330185569939), -(42, 23.5765368676591, 552, 0.0000221716093453711, 0.000429307186309579), -(43, 428.651818956049, 656, 
0.0000212347902898077, 0.0008504898217989379), -(44, -198.330284271299, 5094, 0.000270318628357645, 0.0007108038151647733), -(45, -28.2134247218754, -2901, 0.000119105705005245, 0.0009126920038648691), -(46, 21.8682745830786, -3155, 0.000298049387650041, 0.0007215458037319351), -(47, 570.283803624371, 514, 0.000211758392755221, 0.0008041316612899508), -(48, -201.304123936643, 832, 0.000197629085785669, 0.00030061224848784463), -(49, 649.33074612484, 1333, 0.0000737281415931822, 0.0005035326181274868), -(50, -84.9668276875438, -322, 0.00000902509735836254, 0.0004422735267883624), -(51, -243.716407147864, 117, 0.000140538182145261, 0.0002086788744829349), -(52, 349.905877808134, 1672, 0.0000177211961629627, 0.0008113807691145994), -(53, 81.183311146142, 4554, 0.000192355232725743, 0.0007059381615542648), -(54, 689.410545129356, 449, 0.0000211252804102731, 0.0005490623763019968), -(55, 649.547971825947, 3150, 0.0000227043057745593, 0.0006923392457567914), -(56, 480.745084680188, 225, 0.000207219891978885, 0.00012901200558044224), -(57, 605.318769554608, -2823, 0.00018506198948399, 9.222568701060397e-05), -(58, 406.693881886832, -3502, 0.0000866634154460531, 0.0009709415117535443), -(59, 647.344747122369, 1067, 0.0000936435122317594, 0.00024742281876871886), -(60, -118.449822116566, -4134, 0.000163983436389812, 0.0004953113923451938), -(61, 734.549045913959, 2861, 0.0000350699911877204, 0.0002280187798640796), -(62, 299.341192202285, 224, 0.000124812332204521, 0.0003122403480415343), -(63, 632.927317325604, 512, 0.0000248483602113058, 0.00041027449075352963), -(64, 701.354278484041, -65, 0.000154298762422021, 0.0002775346322564687), -(65, 741.78674239736, -3046, 0.000216717565906942, 0.0008988415292368828), -(66, 72.3452167235844, -2187, 0.0002868873192806, 0.0002200669261488244), -(67, -92.8580805735838, -3384, 0.000043910714454606, 0.0003323809232292163), -(68, -176.557441932835, 738, 0.000219592193033943, 0.0002472735379193054), -(69, 606.941691826969, -1200, 0.0000392539455827208, 0.00010483994066451174), -(70, 274.644750676424, -1440, 0.000299699300242263, 0.0006693277877153534), -(71, -24.1911989976903, -1222, 0.000145768903800567, 5.905801910876818e-05), -(72, 110.939587674943, 568, 0.0000235149170377969, 0.0007475865019720445), -(73, 626.232061494741, 5763, 0.000291066770757408, 0.0002650370401607134), -(74, -146.541669307061, -1728, 0.000155337453106852, 0.0006691956035171458), -(75, 118.658568178952, -1974, 0.000157317916345508, 0.0005821252162182375), -(76, 360.709675025725, -525, 0.0000952625958805942, 0.00037727984626845766), -(77, 234.691257764475, -673, 0.000233952319914238, 0.0003225279196886746), -(78, 254.821534921569, 2714, 0.0000907406481767669, 0.00014493080585156547), -(79, -84.3950406271493, -2433, 0.000192492006712448, 0.0004829728472210562), -(80, -46.1485599854318, 3010, 0.000199988476284519, 7.124629996612697e-05), -(81, -157.34811854488, -3883, 0.0000720378384263032, 0.0006920209740384635), -(82, 683.609146418172, -419, 0.000275417079234539, 0.0009989634748288196), -(83, 156.13529481528, 613, 0.000209929090429765, 0.00023242881594860164), -(84, 441.067878494971, 3673, 0.000242169383746684, 0.0007057668565210733), -(85, 736.987984005318, -808, 0.000294965342114904, 0.00024113157911072138), -(86, 386.323429818384, -20, 0.000100671463097273, 0.0006051417285697127), -(87, -192.400148142545, 2107, 0.000160326300419237, 0.0004638308401370139), -(88, 551.193164341862, -1712, 0.000182783971898262, 0.0006076822595886355), -(89, 347.896874419214, -2534, 0.000169946270417273, 
0.0007468344616066033), -(90, 69.9297712912364, 2879, 0.0000386991825304609, 0.00024005286730965027), -(91, -31.8185839430417, 3428, 0.000185596080593453, 0.0007857105105702615), -(92, 361.15743979584, -3966, 0.0000261626027229926, 0.0005963403678305994), -(93, 432.001726993499, 3607, 0.000132296364163676, 0.0005184810101239706), -(94, -179.979068225982, -1478, 0.000114906747521183, 0.00020532352140889288), -(95, -19.6737419545831, 1689, 0.000106932191562598, 0.0009504954166294581), -(96, 615.670844088663, 4355, 0.000107368232635061, 0.0004372256486285764), -(97, -27.4437601742851, 3258, 0.000299060142963885, 0.00012878777765983074), -(98, 131.591163488388, -725, 0.000211817949910669, 0.0006448050532690601), -(99, 130.417786177426, -2268, 0.000213895003450617, 0.00044563558824842886), -(100, 434.908030349981, 2481, 0.000185967679416355, 0.0008361770507036326), -(101, -152.467405896195, 947, 0.00000533435720302613, 0.0004442024255190304), -(102, -84.2985602622609, -1128, 0.000297925086386248, 0.0003105504631558844), -(103, 610.474124960725, -591, 0.0000416254508207519, 0.0008235605042755268), -(104, -101.525579204668, 4781, 0.000127052645013096, 0.0003531005056421677), -(105, 566.373964176912, 528, 0.0000378997896468096, 0.0003960533586291959), -(106, 595.31704798581, 3558, 0.00024984462483266, 0.0007814890489282149), -(107, 667.650869267626, -737, 0.000298297024710454, 0.0003368729172416991), -(108, 243.935450195374, -1320, 0.000225369869191845, 0.0008640823083280438), -(109, 626.192828407434, 495, 0.0000157707816561246, 0.00018388930226192457), -(110, 653.59879621655, 1087, 0.000211136689042756, 0.0008887050696732714), -(111, 117.330264327685, -1797, 0.000279915176775669, 0.000695185744534804), -(112, 668.190047626376, 4363, 0.000203419789054881, 0.00045080962628096447), -(113, 288.196330804319, -2956, 0.000242271859011015, 0.0008524150701602267), -(114, 247.929082575234, -3940, 0.000106147736787561, 0.00035122666324814846), -(115, 645.578061501632, -717, 0.000284573510544502, 1.0008275980176506e-05), -(116, -30.0303934980438, -4013, 0.000189383079414963, 0.0008462043879011034), -(117, -174.880056562379, 216, 0.0000282508323440314, 0.0004334297740722164), -(118, 752.954078290821, 2579, 0.00000223580624572044, 0.0008762312677275687), -(119, 576.749299836544, -1830, 0.0000132761347802532, 0.00024408794782744536), -(120, 521.74369288607, -3354, 0.000155734449029451, 0.0008151767996199836), -(121, 540.450679634298, 977, 0.000161037747182951, 9.417372469105346e-05), -(122, 345.923273879572, -2310, 0.0000887377057923164, 0.0002275737967578877), -(123, 32.0242634022715, -575, 0.00021943555395383, 0.0007316139331435992), -(124, 309.313742087249, 1793, 0.0000744030676938241, 9.940474195760629e-05), -(125, 110.221107955263, -412, 0.000168759058602882, 0.0009834902740011343), -(126, -2.39236137009058, 1600, 0.0000160375221470634, 1.654381881734679e-05), -(127, 654.655864409116, 180, 0.000244155470825533, 0.0005856887986123027), -(128, 27.3759935340872, -1640, 0.00011933899919455, 0.0008050452944858293), -(129, 48.1554169536845, -1882, 0.00000461640548773552, 0.0007172088490305341), -(130, 482.077643368535, 3129, 0.000113095821881875, 0.00021875799038257782), -(131, 327.709702106753, 5320, 0.0000494078761362207, 5.947447773946379e-05), -(132, 469.334913931982, -701, 0.0000223450151213473, 0.00036228463969799666), -(133, 344.94878071454, 2450, 0.000250739359401075, 0.0006941554270096973), -(134, 268.622396507788, 1397, 0.000176798921287808, 0.0006710939930013567), -(135, 560.078047464409, 776, 
0.000136213800407845, 0.00017741881046504915), -(136, 699.648358701119, -4230, 0.0000630331645388157, 0.0004356325957848171), -(137, 582.461275175413, 1629, 0.000116221523066344, 0.0006841967019363793), -(138, 409.091143374263, -2807, 0.0000014875996013803, 5.257222427160713e-05), -(139, 466.549003632289, 4018, 0.00000993710285336284, 0.00020296889918115113), -(140, 746.985721695716, 1699, 0.00024876547451744, 0.00041386802831649305), -(141, 73.6075952285456, 4908, 0.0000789493534564602, 0.00037972419254683846), -(142, -216.061621181331, 30, 0.000192024630590264, 0.00010309149272665508), -(143, 517.712092184248, 3364, 0.000129800672396025, 0.000759177531156304), -(144, -193.519676795495, -2058, 0.0000154381565239721, 0.0003585413908310997), -(145, -171.276177395859, -661, 0.000195833084492796, 0.0008519845416932768), -(146, 320.253823106946, -649, 0.0000787605074983549, 8.39249666267321e-05), -(147, 485.481515141722, 197, 0.0000547344157980351, 0.0006686089173660505), -(148, 189.357286232473, -4228, 0.000254968693671187, 0.0005399406443292136), -(149, 399.192494534909, 3268, 0.000181049124353524, 0.00010366225712753164), -(150, 45.0585110926279, 852, 0.000127756736350215, 0.0007338826266567296), -(151, 260.0183427789, 691, 0.000272664407898592, 0.0008289499977297581), -(152, 508.218151063852, 1920, 0.000250345183827013, 0.0009394268056747919), -(153, -100.892776798125, 3575, 0.000226903302391344, 0.00015912114369101714), -(154, 87.1482446704529, 1422, 0.00011462607261489, 0.00022504647253020237), -(155, 463.832877209151, -3841, 0.000275352578961295, 0.0006907335627536852), -(156, -163.736521893646, -105, 0.000174097313583038, 0.0003470453749023008), -(157, -244.241884038579, -2983, 0.00011692434066058, 0.00038371067691091556), -(158, -9.23434148876979, -1345, 0.00000534907622910516, 0.0008719847334996231), -(159, 38.8484417526992, 3610, 0.000215896314130939, 0.00015357944158029825), -(160, -171.265310983336, 4816, 0.00008104833808239, 0.000933319891430095), -(161, -10.2807536464046, -1469, 0.000255796925053659, 0.0009735435680026186), -(162, -45.6178509220483, 3421, 0.0000624263599798315, 0.0005983513327728857), -(163, 232.207846085552, 623, 0.0000609973399996679, 0.0009473600064438373), -(164, 44.8563961071723, 1468, 0.000201207441706994, 0.0004241338691800465), -(165, 75.3194258916575, 1833, 0.000215743751046955, 0.0008071507702784189), -(166, -142.129083076845, -3524, 0.0000697259062462297, 0.00041477685583605963), -(167, 656.603777169672, 1141, 0.000125109273282519, 0.0008146962435321328), -(168, -41.4395299660332, -2176, 0.000213661854624202, 0.0002576073606034228), -(169, 682.277526632881, 181, 0.0000739659190560193, 0.00010042017696245153), -(170, 191.352372267539, 2576, 0.0000443848271009688, 0.0004119572726823932), -(171, 350.914365964408, 1291, 0.0000485732066023306, 6.119064025467225e-05), -(172, 396.273095766071, -1248, 0.0000573281001927985, 0.00035187439961301695), -(173, -150.716395469394, -2358, 0.000290751054400176, 0.0005562252604118853), -(174, 248.993153859822, -1922, 0.0000791249427709193, 2.2846663947568404e-05), -(175, 16.9251799191637, -3188, 0.000256850706384032, 0.0002898031389169162), -(176, 390.04606683593, -2867, 0.000109421748228381, 0.00036266757342152543), -(177, 390.307073567535, 3724, 0.000152858526876497, 0.0009501499027443644), -(178, 201.040011117862, 511, 0.0000333415534572215, 0.0005578602009512039), -(179, -142.081108752669, -3778, 0.000112963597858303, 0.0003857764068008649), -(180, 684.313567871151, 2121, 0.000137505049128119, 
0.0005596159732861778), -(181, 584.41318067029, -3287, 0.000281791144943157, 0.00034371566787169883), -(182, -92.630200448986, 1079, 0.000166324779514729, 0.0002669936296446096), -(183, 694.91387098146, -3704, 0.000250249912174613, 6.851667265187445e-05), -(184, 384.555695407493, -3234, 0.00010597182385488, 0.0007001627741596153), -(185, 449.869031405208, 3577, 0.000081562699512148, 0.0003665177649398075), -(186, 585.836839463746, 3589, 0.000280972844990827, 0.0004327655084647901), -(187, 286.332958809688, -1018, 0.000147545370623633, 0.0005768363795717235), -(188, 486.1499669831, -269, 0.00029282898478074, 0.0005300086932470194), -(189, 153.320416455395, -3212, 0.000275570732351389, 0.00025867595317490813), -(190, 698.698887940503, 5761, 0.000121356679523625, 0.00024788845009469896), -(191, 26.8836586934152, 1334, 0.0000838997809690202, 0.00030396490431792956), -(192, 271.692068513993, -1671, 0.0000476340623898597, 0.0007889875774694915), -(193, 354.820688879024, 899, 0.0000571839040961951, 0.0006150066381359594), -(194, 670.402974111817, 1421, 0.000242495213599527, 0.0004815756171297423), -(195, 451.431010266227, 2587, 0.000184937720888584, 0.00044873359300931525), -(196, -184.93291004919, 2600, 0.000296448112840746, 0.0008512803698336811), -(197, 539.492942148891, 4255, 0.0000536533368701274, 2.2322409373352058e-05), -(198, 300.012347701675, 4529, 0.000245700742170174, 0.0001883887216505329), -(199, 219.644386014076, 4949, 0.000147972883524821, 0.000999341363237825), -(200, 80.5524992167871, 2671, 0.000274663381387878, 0.0003788737047076651); - -SELECT pg_catalog.setval('magnitude_testing_id_seq', 200, true); - -ALTER TABLE ONLY magnitude_testing - ADD CONSTRAINT magnitude_testing_pkey PRIMARY KEY (id); diff --git a/db/tests/resources/marathon_athletes.sql b/db/tests/resources/marathon_athletes.sql deleted file mode 100644 index bc81efbdbc..0000000000 --- a/db/tests/resources/marathon_athletes.sql +++ /dev/null @@ -1,50 +0,0 @@ -CREATE TABLE "Marathon" ( - id integer NOT NULL, - "athlete" VARCHAR(255), - "gender" VARCHAR(255), - "finish time" Interval, - "city" VARCHAR(255) -); - -CREATE SEQUENCE "Marathon_id_seq" - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - -ALTER SEQUENCE "Marathon_id_seq" OWNED BY "Marathon".id; - -ALTER TABLE ONLY "Marathon" - ALTER COLUMN id SET DEFAULT nextval('"Marathon_id_seq"'::regclass); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Eliud Kipchoge', 'Male', 'PT2H1M39S', 'Berlin'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Brigid Kosgei', 'Female', 'PT2H14M4S', 'Chicago'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Dennis Kimetto', 'Male', 'PT2H2M57S', 'Berlin'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Paula Radcliffe', 'Female', 'PT2H15M25S', 'London'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Haile Gebrselassie', 'Male', 'PT2H3M59S', 'Berlin'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Mary Keitany', 'Female', 'PT2H18M35S', 'London'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Geoffrey Kamworor', 'Male', 'PT2H4M0S', 'Berlin'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Tirunesh Dibaba', 'Female', 'PT2H17M56S', 'London'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Wilson Kipsang', 
'Male', 'PT2H3M23S', 'Berlin'); - -INSERT INTO "Marathon" ("athlete", "gender", "finish time", "city") VALUES -('Florence Kiplagat', 'Female', 'PT2H17M45S', 'Chicago'); diff --git a/db/tests/resources/player_profiles.sql b/db/tests/resources/player_profiles.sql deleted file mode 100644 index 26ad28d833..0000000000 --- a/db/tests/resources/player_profiles.sql +++ /dev/null @@ -1,107 +0,0 @@ -/* -This test schema is for mathesar_types.mathesar_json_array. -This solely tests the result of distinct_list_aggregation -on SQL and JSON arrays. -*/ - -CREATE TABLE "Players" ( - id integer NOT NULL, - "player" VARCHAR(255) NOT NULL, - "country" VARCHAR(100) NOT NULL, - "ballon_dor" mathesar_types.mathesar_json_array, - "titles" mathesar_types.mathesar_json_array -); - -CREATE SEQUENCE "Players_id_seq" - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - -ALTER SEQUENCE "Players_id_seq" OWNED BY "Players".id; - -ALTER TABLE ONLY "Players" - ALTER COLUMN id SET DEFAULT nextval('"Players_id_seq"'::regclass); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Lionel Messi', 'Argentina', '[2009, 2011, 2012, 2013, 2016, 2019, 2021]', '[{"world_cup": 1, "ucl": 4}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Cristiano Ronaldo', 'Portugal', '[2008, 2010, 2014, 2015, 2017]', '[{"world_cup": 0, "ucl": 5}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Michel Platini', 'France', '[1983, 1984, 1985]', '[{"world_cup": 0, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Johan Cruyff', 'Netherlands', '[1971, 1973, 1974]', '[{"world_cup": 0, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Marco van Basten', 'Netherlands', '[1988, 1992, 1993]', '[{"world_cup": 1, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Franz Beckenbauer', 'Germany', '[1972, 1976]', '[{"world_cup": 1, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Alfredo Di Stefano', 'Argentina', '[1957, 1959, 1960]', '[{"world_cup": 0, "ucl": 5}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Zinedine Zidane', 'France', '[1998]', '[{"world_cup": 1, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Ronaldo Nazario', 'Brazil', '[1997, 2002]', '[{"world_cup": 2, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Ronaldinho', 'Brazil', '[2005]', '[{"world_cup": 1, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Andrea Pirlo', 'Italy', '[2006]', '[{"world_cup": 1, "ucl": 2}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Gerd Muller', 'Germany', '[1970]', '[{"world_cup": 1, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Lev Yashin', 'Soviet Union', '[1963]', '[{"world_cup": 0, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Ferenc Puskas', 'Hungary', '[1959]', '[{"world_cup": 0, "ucl": 3}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('George Weah', 'Liberia', '[1995]', '[{"world_cup": 0, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Rivaldo', 'Brazil', 
'[1999]', '[{"world_cup": 1, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Fabio Cannavaro', 'Italy', '[2006]', '[{"world_cup": 1, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Lothar Matthaus', 'Germany', '[1990]', '[{"world_cup": 1, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Garrincha', 'Brazil', '[1962]', '[{"world_cup": 2, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Kaka', 'Brazil', '[2007]', '[{"world_cup": 1, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Luigi Riva', 'Italy', '[1969]', '[{"world_cup": 0, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Roberto Baggio', 'Italy', '[1993]', '[{"world_cup": 0, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Bobby Charlton', 'England', '[1966]', '[{"world_cup": 1, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Raymond Kopa', 'France', '[1958]', '[{"world_cup": 0, "ucl": 0}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Luka Modric', 'Croatia', '[2018]', '[{"world_cup": 0, "ucl": 4}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Andriy Shevchenko', 'Ukraine', '[2004]', '[{"world_cup": 0, "ucl": 1}]'); - -INSERT INTO "Players" ("player", "country", "ballon_dor", "titles") -VALUES ('Oleg Blokhin', 'Soviet Union', '[1975]', '[{"world_cup": 0, "ucl": 0}]'); diff --git a/db/tests/resources/range_grouping.sql b/db/tests/resources/range_grouping.sql deleted file mode 100644 index 77a2e64996..0000000000 --- a/db/tests/resources/range_grouping.sql +++ /dev/null @@ -1,22 +0,0 @@ -WITH cume_dist_cte AS ( - SELECT "Center", "Patent Expiration Date", cume_dist() OVER (ORDER BY "Center", "Patent Expiration Date") AS cume_dist - FROM patents -), -ranges AS ( - SELECT "Center", "Patent Expiration Date", CASE - WHEN cume_dist > 0.00 AND cume_dist <= 0.25 THEN 1 - WHEN cume_dist > 0.25 AND cume_dist <= 0.50 THEN 2 - WHEN cume_dist > 0.50 AND cume_dist <= 0.75 THEN 3 - WHEN cume_dist > 0.75 AND cume_dist <= 1.00 THEN 4 - END as range - FROM cume_dist_cte -) -SELECT DISTINCT - range, - first_value(ROW("Center", "Patent Expiration Date")) OVER w AS min_row, - last_value(ROW("Center", "Patent Expiration Date")) OVER w AS max_row, - COUNT(1) OVER w -FROM ranges -WINDOW w AS ( - PARTITION BY range ORDER BY "Center", "Patent Expiration Date" RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING -); diff --git a/db/tests/resources/uri_testing b/db/tests/resources/uri_testing deleted file mode 100644 index 9ad2fee367..0000000000 --- a/db/tests/resources/uri_testing +++ /dev/null @@ -1,101 +0,0 @@ --- --- PostgreSQL database dump --- - --- Dumped from database version 13.6 (Debian 13.6-1.pgdg110+1) --- Dumped by pg_dump version 14.2 - -SET statement_timeout = 0; -SET lock_timeout = 0; -SET idle_in_transaction_session_timeout = 0; -SET client_encoding = 'UTF8'; -SET standard_conforming_strings = on; -SELECT pg_catalog.set_config('search_path', '', false); -SET check_function_bodies = false; -SET xmloption = content; -SET client_min_messages = warning; -SET row_security = off; - -SET default_tablespace = ''; - -SET default_table_access_method = heap; - --- --- Name: uri_testing; Type: TABLE; Schema: public; 
Owner: mathesar --- - -CREATE TABLE public.uri_testing ( - id integer NOT NULL, - uris mathesar_types.uri -); - - -ALTER TABLE public.uri_testing OWNER TO mathesar; - --- --- Name: uri_testing_id_seq; Type: SEQUENCE; Schema: public; Owner: mathesar --- - -CREATE SEQUENCE public.uri_testing_id_seq - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - - -ALTER TABLE public.uri_testing_id_seq OWNER TO mathesar; - --- --- Name: uri_testing_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: mathesar --- - -ALTER SEQUENCE public.uri_testing_id_seq OWNED BY public.uri_testing.id; - - --- --- Name: uri_testing id; Type: DEFAULT; Schema: public; Owner: mathesar --- - -ALTER TABLE ONLY public.uri_testing ALTER COLUMN id SET DEFAULT nextval('public.uri_testing_id_seq'::regclass); - - --- --- Data for Name: uri_testing; Type: TABLE DATA; Schema: public; Owner: mathesar --- - -COPY public.uri_testing (id, uris) FROM stdin; -1 https://google.com -2 https://yahoo.com -3 https://github.com/centerofci/mathesar/issues?q=is%3Aissue+is%3Aopen+group -4 https://github.com/centerofci/mathesar/issues?q=is%3Aissue+group+is%3Aclosed -5 https://github.com/centerofci/mathesar/ -6 ftp://ftp.example.com/path/to/RFC/rfc959.txt -7 sftp://ftp.example.com/path/to/RFC/rfc959.txt -8 http://google.com -9 http://yahoo.com -10 http://github.com/centerofci/mathesar/issues?q=is%3Aissue+is%3Aopen+group -11 http://github.com/centerofci/mathesar/issues?q=is%3Aissue+group+is%3Aclosed -\. - - --- --- Name: uri_testing_id_seq; Type: SEQUENCE SET; Schema: public; Owner: mathesar --- - -SELECT pg_catalog.setval('public.uri_testing_id_seq', 11, true); - - --- --- Name: uri_testing uri_testing_pkey; Type: CONSTRAINT; Schema: public; Owner: mathesar --- - -ALTER TABLE ONLY public.uri_testing - ADD CONSTRAINT uri_testing_pkey PRIMARY KEY (id); - - --- --- PostgreSQL database dump complete --- - diff --git a/db/tests/resources/uris_create.sql b/db/tests/resources/uris_create.sql deleted file mode 100644 index a3380bdb3c..0000000000 --- a/db/tests/resources/uris_create.sql +++ /dev/null @@ -1,43 +0,0 @@ -CREATE TABLE "uris" ( - id integer NOT NULL, - "uri" character varying(250) -); - -CREATE SEQUENCE "uris_id_seq" - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - -ALTER SEQUENCE "uris_id_seq" OWNED BY "uris".id; - -ALTER TABLE ONLY "uris" ALTER COLUMN id SET DEFAULT nextval('"uris_id_seq"'::regclass); - -INSERT INTO "uris" VALUES -(1, 'http://soundcloud.com/denzo-1/denzo-in-mix-0knackpunkt-nr-15-0-electro-swing'), -(2, 'http://picasaweb.google.com/lh/photo/94RGMDCSTmCW04l6SPnteTBPFtERcSvqpRI6vP3N6YI?feat=embedwebsite'), -(3, 'http://banedon.posterous.com/bauforstschritt-2262010'), -(4, 'http://imgur.com/M2v2H.png'), -(5, 'http://tweetphoto.com/31300678'), -(6, 'http://www.youtube.com/watch?v=zXLGHyGxY2E'), -(7, 'http://tweetphoto.com/31103212'), -(8, 'http://soundcloud.com/dj-soro'), -(9, 'http://i.imgur.com/H6yyu.jpg'), -(10, 'http://www.flickr.com/photos/jocke66/4657443374/'), -(11, 'http://tweetphoto.com/31332311'), -(12, 'http://tweetphoto.com/31421017'), -(13, 'http://yfrog.com/j6cimg3038gj'), -(14, 'http://yfrog.com/msradon2p'), -(15, 'http://soundcloud.com/hedo/hedo-der-groove-junger-knospen'), -(16, 'http://soundcloud.com/strawberryhaze/this-is-my-house-in-summer-2010'), -(17, 'http://tumblr.com/x4acyiuxf'), -(18, 'ftp://foobar.com/179179'), -(19, 'ftps://asldp.com/158915'), -(20, 'ftp://abcdefg.com/x-y-z'); - -SELECT pg_catalog.setval('"uris_id_seq"', 1000, 
true); - -ALTER TABLE ONLY "uris" - ADD CONSTRAINT "uris_pkey" PRIMARY KEY (id); diff --git a/db/tests/schemas/__init__.py b/db/tests/schemas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/schemas/operations/__init__.py b/db/tests/schemas/operations/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/db/tests/tables/utils.py b/db/tests/tables/utils.py deleted file mode 100644 index e388d77d02..0000000000 --- a/db/tests/tables/utils.py +++ /dev/null @@ -1,15 +0,0 @@ -from sqlalchemy import MetaData, Column, Table, ForeignKey, Integer - -from db import constants - - -def create_related_table(name, table, schema, engine): - metadata = MetaData(schema=schema, bind=engine) - related_table = Table( - name, metadata, - Column('id', Integer, ForeignKey(table.c[constants.ID])) - ) - related_table.create() - fk = list(related_table.foreign_keys)[0] - assert fk.column.table.name == table.name - return related_table diff --git a/db/tests/test_fixtures.py b/db/tests/test_fixtures.py deleted file mode 100644 index 72f92b5b22..0000000000 --- a/db/tests/test_fixtures.py +++ /dev/null @@ -1,37 +0,0 @@ -from db.types.base import MathesarCustomType - - -def test_engines_having_separate_ischema_names(engine_without_ischema_names_updated, engine): - """ - We want to have fixtures for engines with different ischema_names. Here we test that that's - possible. A peculiarity in SA causes different instances of Engine to refer to the same - ischema_names dict. Here we test that that's successfully monkey-patched. - """ - ischema_names1 = engine_without_ischema_names_updated.dialect.ischema_names - ischema_names2 = engine.dialect.ischema_names - - x = "x" - assert x not in ischema_names1 - assert x not in ischema_names2 - ischema_names1[x] = 1 - assert x in ischema_names1 - assert x not in ischema_names2 - del ischema_names1[x] - - y = "y" - assert y not in ischema_names1 - assert y not in ischema_names2 - ischema_names2[y] = 2 - assert y not in ischema_names1 - assert y in ischema_names2 - del ischema_names2[y] - - -def test_engines_having_appropriate_ischema_names(engine_without_ischema_names_updated, engine): - """ - We want to have fixtures for engines with and without updated ischema_names. Here we test that - it is so. 
- """ - for ma_type in MathesarCustomType: - assert ma_type.id not in engine_without_ischema_names_updated.dialect.ischema_names - assert ma_type.id in engine.dialect.ischema_names diff --git a/db/tests/types/conftest.py b/db/tests/types/conftest.py deleted file mode 100644 index ebd4ae4534..0000000000 --- a/db/tests/types/conftest.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest -from sqlalchemy import MetaData, Table - - -@pytest.fixture -def roster_table_obj(engine_with_roster, roster_table_name): - engine, schema = engine_with_roster - metadata = MetaData(bind=engine) - table = Table(roster_table_name, metadata, schema=schema, autoload_with=engine) - yield table, engine diff --git a/db/tests/types/operations/__init__.py b/db/tests/types/operations/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From c10c8ce5cbccb26269bc881d02060794108f653f Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 17:15:04 +0800 Subject: [PATCH 67/70] remove unneeded connection functions --- conftest.py | 24 +++++++++++++++++++++--- db/connection.py | 38 -------------------------------------- 2 files changed, 21 insertions(+), 41 deletions(-) diff --git a/conftest.py b/conftest.py index cd2ad3a5cb..1d278bd563 100644 --- a/conftest.py +++ b/conftest.py @@ -11,7 +11,6 @@ from sqlalchemy.exc import OperationalError from sqlalchemy_utils import database_exists, create_database, drop_database -from db.connection import execute_msar_func_with_engine from db.engine import add_custom_types_to_ischema_names, create_engine as sa_create_engine from db.sql import install as sql_install from db.utils import get_pg_catalog_table @@ -211,11 +210,30 @@ def _create_schema(schema_name, engine, schema_mustnt_exist=True): def _create_schema_if_not_exists_via_sql_alchemy(schema_name, engine): - return execute_msar_func_with_engine( + return _execute_msar_func_with_engine( engine, 'create_schema_if_not_exists', schema_name ).fetchone()[0] +def _execute_msar_func_with_engine(engine, func_name, *args): + """ + Execute an msar function using an SQLAlchemy engine. + + This is temporary scaffolding. + + Args: + engine: an SQLAlchemy engine for connecting to a DB + func_name: The unqualified msar function name (danger; not sanitized) + *args: The list of parameters to pass + """ + conn_str = str(engine.url) + with psycopg.connect(conn_str) as conn: + return conn.execute( + f"SELECT msar.{func_name}({','.join(['%s'] * len(args))})", + args + ) + + def _get_schema_name_from_oid(oid, engine, metadata=None): schema_info = _reflect_schema(engine, oid=oid, metadata=metadata) if schema_info: @@ -247,7 +265,7 @@ def _reflect_schema(engine, name=None, oid=None, metadata=None): def _drop_schema_via_name(engine, name, cascade=False): - execute_msar_func_with_engine(engine, 'drop_schema', name, cascade).fetchone() + _execute_msar_func_with_engine(engine, 'drop_schema', name, cascade).fetchone() # Seems to be roughly equivalent to mathesar/database/base.py::create_mathesar_engine diff --git a/db/connection.py b/db/connection.py index 1430e5b902..c47309fcb4 100644 --- a/db/connection.py +++ b/db/connection.py @@ -3,44 +3,6 @@ from psycopg.rows import dict_row -def execute_msar_func_with_engine(engine, func_name, *args): - """ - Execute an msar function using an SQLAlchemy engine. - - This is temporary scaffolding. 
- - Args: - engine: an SQLAlchemy engine for connecting to a DB - func_name: The unqualified msar function name (danger; not sanitized) - *args: The list of parameters to pass - """ - conn_str = str(engine.url) - with psycopg.connect(conn_str) as conn: - # Returns a cursor - return conn.execute( - f"SELECT msar.{func_name}({','.join(['%s'] * len(args))})", - args - ) - - -def execute_msar_func_with_psycopg2_conn(conn, func_name, *args): - """ - Execute an msar function using an SQLAlchemy engine. - - This is *extremely* temporary scaffolding. - - Args: - conn: a psycopg2 connection (from an SQLAlchemy engine) - func_name: The unqualified msar function name (danger; not sanitized) - *args: The list of parameters to pass - """ - args_str = ", ".join([str(arg) for arg in args]) - args_str = f"{args_str}" - stmt = text(f"SELECT msar.{func_name}({args_str})") - # Returns a cursor - return conn.execute(stmt) - - def exec_msar_func(conn, func_name, *args): """ Execute an msar function using a psycopg (3) connection. From e482b6e63ac1e0ef6f363c752f43ab60ffb9062a Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 17:28:16 +0800 Subject: [PATCH 68/70] remove unneeded utilities and constants --- db/constants.py | 6 ------ db/types/base.py | 3 +-- db/utils.py | 32 +------------------------------- 3 files changed, 2 insertions(+), 39 deletions(-) diff --git a/db/constants.py b/db/constants.py index 76f7d52c6d..6bf432f817 100644 --- a/db/constants.py +++ b/db/constants.py @@ -1,18 +1,12 @@ -ID = "id" -ID_ORIGINAL = "id_original" COLUMN_NAME_TEMPLATE = 'Column ' # auto generated column name 'Column 1' (no undescore) MATHESAR_PREFIX = "mathesar_" MSAR_PUBLIC_SCHEMA = 'msar' MSAR_PRIVATE_SCHEMA = f"__{MSAR_PUBLIC_SCHEMA}" TYPES_SCHEMA = f"{MATHESAR_PREFIX}types" -INFERENCE_SCHEMA = f"{MATHESAR_PREFIX}inference_schema" -VIEWS_SCHEMA = f"{MSAR_PUBLIC_SCHEMA}_views" INTERNAL_SCHEMAS = { TYPES_SCHEMA, MSAR_PUBLIC_SCHEMA, MSAR_PRIVATE_SCHEMA, - VIEWS_SCHEMA, - INFERENCE_SCHEMA } diff --git a/db/types/base.py b/db/types/base.py index dced38651c..dec9a480fd 100644 --- a/db/types/base.py +++ b/db/types/base.py @@ -3,10 +3,9 @@ from sqlalchemy import create_engine as sa_create_engine from db.constants import TYPES_SCHEMA -from db.utils import OrderByIds -class DatabaseType(OrderByIds): +class DatabaseType: @property def id(self): diff --git a/db/utils.py b/db/utils.py index f4d8ff52f4..71ff596925 100644 --- a/db/utils.py +++ b/db/utils.py @@ -32,37 +32,6 @@ def execute_pg_query(engine, query, connection_to_use=None): return execute_statement(engine, executable, connection_to_use=connection_to_use).fetchall() -class OrderByIds: - """ - A mixin for ordering based on ids; useful at least for type enums in testing. - """ - - id = None - - def __ge__(self, other): - if self._ordering_supported(other): - return self.id >= other.id - return NotImplemented - - def __gt__(self, other): - if self._ordering_supported(other): - return self.id > other.id - return NotImplemented - - def __le__(self, other): - if self._ordering_supported(other): - return self.id <= other.id - return NotImplemented - - def __lt__(self, other): - if self._ordering_supported(other): - return self.id < other.id - return NotImplemented - - def _ordering_supported(self, other): - return hasattr(other, 'id') - - def get_module_members_that_satisfy(module, predicate): """ Looks at the members of the provided module and filters them using the provided predicate. 
@@ -102,6 +71,7 @@ def get_pg_catalog_table(table_name, engine, metadata): return table +# TODO REMOVE THIS!! def ignore_duplicate_wrapper(stmt): return f""" DO $$ BEGIN From e994aaeab81800494f5e4ae7aaa5976835420f32 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 17:53:09 +0800 Subject: [PATCH 69/70] remove unused utility function --- db/records/utils.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/db/records/utils.py b/db/records/utils.py index 6584f3ebf8..ce0289e613 100644 --- a/db/records/utils.py +++ b/db/records/utils.py @@ -1,9 +1,5 @@ from sqlalchemy import Column -def create_col_objects(table, column_list): - return [get_column_object(table, col) for col in column_list] - - def get_column_object(table, col): return table.columns[col.name] if isinstance(col, Column) else table.columns[col] From f3e7f417e9674be54c2a14cfa58ff1f5c4312366 Mon Sep 17 00:00:00 2001 From: Brent Moran Date: Fri, 18 Oct 2024 18:06:03 +0800 Subject: [PATCH 70/70] remove unused imports --- db/connection.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/db/connection.py b/db/connection.py index c47309fcb4..721bf1f745 100644 --- a/db/connection.py +++ b/db/connection.py @@ -1,5 +1,3 @@ -from sqlalchemy import text -import psycopg from psycopg.rows import dict_row
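Note on patches 67 and 70: after the SQLAlchemy-based wrappers are removed, `exec_msar_func(conn, func_name, *args)` in db/connection.py is the remaining entry point for calling `msar.*` database functions, and the test suite carries its own scaffold `_execute_msar_func_with_engine` in conftest.py. The sketch below illustrates the underlying calling pattern over a plain psycopg (3) connection; the DSN, the standalone helper name, and the schema name are placeholder assumptions and do not appear in the patches.

```python
# Minimal sketch (not part of the patch series) of the pattern the conftest.py
# scaffold now uses: call an msar.* function over a psycopg (3) connection
# instead of the removed SQLAlchemy-based wrappers in db/connection.py.
import psycopg


def call_msar_func(conn_str, func_name, *args):
    """Run SELECT msar.<func_name>(...) and return the first result value.

    Hypothetical helper for illustration; conn_str is a placeholder DSN.
    """
    with psycopg.connect(conn_str) as conn:
        cursor = conn.execute(
            f"SELECT msar.{func_name}({','.join(['%s'] * len(args))})",
            args,
        )
        return cursor.fetchone()[0]


# Mirrors _create_schema_if_not_exists_via_sql_alchemy in conftest.py;
# the DSN and schema name below are made-up example values.
schema_oid = call_msar_func(
    "postgresql://mathesar@localhost/mathesar_db",
    "create_schema_if_not_exists",
    "my_schema",
)
```

In the patched tree the equivalent logic lives in conftest.py's `_execute_msar_func_with_engine` (engine-backed, test-only scaffolding) and in `db.connection.exec_msar_func` (which takes an already-open psycopg connection); the standalone function above only exists to show the shape of the call.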