feat!: revert to_schema overloads & optimize upsert_many (#224)
- Reverts the previous `to_schema` implementation in the service layer. This will need to be rethought, as it requires too many overloads for any customized service function.
- Ensures that the filter values for `upsert_many` form a unique list. This is useful when merging a large number of objects on a foreign key: the lookup then contains a single entry for each key instead of one for each row.
- Adds a `schema_to_dict` method to convert incoming Pydantic or Msgspec models to dictionaries.
- Adds additional type guard helpers to check for a field in a dictionary, Struct, or BaseModel (both helpers are sketched after this list).
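
Neither helper's diff is shown on this page, so the following is only a rough sketch of what `schema_to_dict` and a field type guard might look like, assuming Pydantic v2 `BaseModel` and msgspec `Struct` as the supported model types; the names, signatures, and edge-case handling in the actual library may differ:

```python
# Rough sketch only -- the helpers added in this commit may differ in name,
# signature, and edge-case handling.
from typing import Any

import msgspec
from pydantic import BaseModel


def schema_to_dict(obj: Any) -> dict[str, Any]:
    """Convert an incoming Pydantic or Msgspec model (or plain dict) to a dictionary."""
    if isinstance(obj, BaseModel):
        return obj.model_dump(exclude_unset=True)
    if isinstance(obj, msgspec.Struct):
        return {field: getattr(obj, field) for field in obj.__struct_fields__}
    if isinstance(obj, dict):
        return obj
    msg = f"Unable to convert {type(obj).__name__} to a dictionary"
    raise TypeError(msg)


def has_field(obj: Any, field_name: str) -> bool:
    """Type-guard-style check for a field on a dict, msgspec Struct, or Pydantic BaseModel."""
    if isinstance(obj, dict):
        return field_name in obj
    if isinstance(obj, msgspec.Struct):
        return field_name in obj.__struct_fields__
    if isinstance(obj, BaseModel):
        return field_name in type(obj).model_fields
    return hasattr(obj, field_name)
```
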
cofin authored Jun 26, 2024
1 parent d828e65 commit cfd5e1b
Showing 9 changed files with 187 additions and 1,152 deletions.
14 changes: 5 additions & 9 deletions advanced_alchemy/repository/_async.py
@@ -1584,10 +1584,7 @@ async def upsert_many(
                 matched_values = [
                     field_data for datum in data if (field_data := getattr(datum, field_name)) is not None
                 ]
-                if self._prefer_any:
-                    match_filter.append(any_(matched_values) == field)  # type: ignore[arg-type]
-                else:
-                    match_filter.append(field.in_(matched_values))
+                match_filter.append(any_(matched_values) == field if self._prefer_any else field.in_(matched_values))  # type: ignore[arg-type]
 
         with wrap_sqlalchemy_exception():
             existing_objs = await self.list(
@@ -1598,11 +1595,10 @@ async def upsert_many(
             )
             for field_name in match_fields:
                 field = get_instrumented_attr(self.model_type, field_name)
-                matched_values = [getattr(datum, field_name) for datum in existing_objs if datum]
-                if self._prefer_any:
-                    match_filter.append(any_(matched_values) == field)  # type: ignore[arg-type]
-                else:
-                    match_filter.append(field.in_(matched_values))
+                matched_values = list(
+                    {getattr(datum, field_name) for datum in existing_objs if datum},  # ensure the list is unique
+                )
+                match_filter.append(any_(matched_values) == field if self._prefer_any else field.in_(matched_values))  # type: ignore[arg-type]
             existing_ids = self._get_object_ids(existing_objs=existing_objs)
             data = self._merge_on_match_fields(data, existing_objs, match_fields)
             for datum in data:
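
The set-based rewrite in the second hunk is the optimization the commit title refers to: when many incoming rows share the same match-field value (for example a common foreign key), the lookup filter now carries one value per distinct key rather than one per row. A standalone sketch of the difference, using an invented `Row` class rather than library code:

```python
# Standalone illustration of the de-duplicated lookup values; not library code.
from dataclasses import dataclass


@dataclass
class Row:
    vendor_id: int  # stands in for the shared foreign key


existing_objs = [Row(vendor_id=1)] * 5_000 + [Row(vendor_id=2)] * 5_000

# Previous behaviour: one filter value per row (10,000 entries in the IN/ANY clause).
per_row = [getattr(row, "vendor_id") for row in existing_objs if row]

# New behaviour: one filter value per distinct key (2 entries).
per_key = list({getattr(row, "vendor_id") for row in existing_objs if row})

assert len(per_row) == 10_000
assert sorted(per_key) == [1, 2]
```
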
14 changes: 5 additions & 9 deletions advanced_alchemy/repository/_sync.py
@@ -1585,10 +1585,7 @@ def upsert_many(
                 matched_values = [
                     field_data for datum in data if (field_data := getattr(datum, field_name)) is not None
                 ]
-                if self._prefer_any:
-                    match_filter.append(any_(matched_values) == field)  # type: ignore[arg-type]
-                else:
-                    match_filter.append(field.in_(matched_values))
+                match_filter.append(any_(matched_values) == field if self._prefer_any else field.in_(matched_values))  # type: ignore[arg-type]
 
         with wrap_sqlalchemy_exception():
             existing_objs = self.list(
@@ -1599,11 +1596,10 @@ def upsert_many(
             )
             for field_name in match_fields:
                 field = get_instrumented_attr(self.model_type, field_name)
-                matched_values = [getattr(datum, field_name) for datum in existing_objs if datum]
-                if self._prefer_any:
-                    match_filter.append(any_(matched_values) == field)  # type: ignore[arg-type]
-                else:
-                    match_filter.append(field.in_(matched_values))
+                matched_values = list(
+                    {getattr(datum, field_name) for datum in existing_objs if datum},  # ensure the list is unique
+                )
+                match_filter.append(any_(matched_values) == field if self._prefer_any else field.in_(matched_values))  # type: ignore[arg-type]
             existing_ids = self._get_object_ids(existing_objs=existing_objs)
             data = self._merge_on_match_fields(data, existing_objs, match_fields)
             for datum in data:
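
Both repositories receive the same change, so async and sync callers behave identically. For context, a hypothetical call that exercises this path is sketched below; the `Part` model and `vendor_id` field are invented, and `match_fields` may also be configured on the repository class rather than passed per call:

```python
# Hypothetical usage -- the Part model and vendor_id field are invented here.
from __future__ import annotations

from advanced_alchemy.base import UUIDBase
from advanced_alchemy.repository import SQLAlchemyAsyncRepository
from sqlalchemy.orm import Mapped, mapped_column


class Part(UUIDBase):
    vendor_id: Mapped[int] = mapped_column(index=True)
    name: Mapped[str]


class PartRepository(SQLAlchemyAsyncRepository[Part]):
    model_type = Part


async def merge_vendor_parts(repo: PartRepository, incoming: list[Part]) -> list[Part]:
    # Upsert many rows keyed on a shared foreign key; with the change above, the
    # existing-row lookup filter carries one vendor_id per vendor, not one per row.
    return await repo.upsert_many(incoming, match_fields=["vendor_id"])
```
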