Tests & errors for preview list limitations

Andrew Brookins 2021-10-18 21:16:48 -07:00
parent 389a6ea878
commit c51a071982
3 changed files with 283 additions and 116 deletions

View file

@@ -33,6 +33,7 @@ from pydantic.fields import ModelField, Undefined, UndefinedType
 from pydantic.main import ModelMetaclass
 from pydantic.typing import NoArgAnyCallable
 from pydantic.utils import Representation
+from redis.client import Pipeline
 from ulid import ULID

 from .encoders import jsonable_encoder
@@ -102,6 +103,19 @@ def embedded(cls):
     setattr(cls.Meta, 'embedded', True)


+def is_supported_container_type(typ: type) -> bool:
+    if typ == list or typ == tuple:
+        return True
+    unwrapped = get_origin(typ)
+    return unwrapped == list or unwrapped == tuple
+
+
+def validate_model_fields(model: Type['RedisModel'], field_values: Dict[str, Any]):
+    for field_name in field_values.keys():
+        if field_name not in model.__fields__:
+            raise QuerySyntaxError(f"The field {field_name} does not exist on the model {model}")
+
+
 class ExpressionProtocol(Protocol):
     op: Operators
     left: ExpressionOrModelField
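
A quick illustration of what the new is_supported_container_type() check accepts (a standalone sketch; get_origin comes from typing, as in this module):

    from typing import Dict, List, Tuple, get_origin

    # Parameterized list and tuple annotations unwrap to their origin types,
    # so both bare and parameterized containers pass the check.
    assert get_origin(List[int]) is list
    assert get_origin(Tuple[str, ...]) is tuple

    # Anything else (e.g. Dict) is not a supported multi-value container and
    # is rejected by schema generation and query parsing.
    assert get_origin(Dict[str, str]) is dict
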
@@ -227,7 +241,7 @@ class ExpressionProxy:
         return Expression(left=self.field, op=Operators.IN, right=other, parents=self.parents)

     def __getattr__(self, item):
-        if get_origin(self.field.outer_type_) == list:
+        if is_supported_container_type(self.field.outer_type_):
             embedded_cls = get_args(self.field.outer_type_)
             if not embedded_cls:
                 raise QuerySyntaxError("In order to query on a list field, you must define "
@@ -332,7 +346,7 @@ class FindQuery:
         self._query = self.resolve_redisearch_query(self.expression)
         return self._query

-    def validate_sort_fields(self, sort_fields):
+    def validate_sort_fields(self, sort_fields: List[str]):
         for sort_field in sort_fields:
             field_name = sort_field.lstrip("-")
             if field_name not in self.model.__fields__:
@@ -358,26 +372,56 @@ class FindQuery:
         field_type = field.outer_type_

-        # TODO: GEO
-        if any(issubclass(field_type, t) for t in NUMERIC_TYPES):
+        # TODO: GEO fields
+        container_type = get_origin(field_type)
+
+        if is_supported_container_type(container_type):
+            # NOTE: A list of integers, like:
+            #
+            #    luck_numbers: List[int] = field(index=True)
+            #
+            # becomes a TAG field, which means that users cannot perform range
+            # queries on the values within the multi-value field, only equality
+            # and membership queries.
+            #
+            # Meanwhile, a list of RedisModels, like:
+            #
+            #    friends: List[Friend] = field(index=True)
+            #
+            # is not itself directly indexed, but instead, we index any fields
+            # within the model marked as `index=True`.
+            return RediSearchFieldTypes.TAG
+        elif container_type is not None:
+            raise QuerySyntaxError("Only lists and tuples are supported for multi-value fields. "
+                                   "See docs: TODO")
+        elif any(issubclass(field_type, t) for t in NUMERIC_TYPES):
+            # Index numeric Python types as NUMERIC fields, so we can support
+            # range queries.
             return RediSearchFieldTypes.NUMERIC
         else:
-            # TAG fields are the default field type.
-            # TODO: A ListField or ArrayField that supports multiple values
-            # and contains logic should allow IN and NOT_IN queries.
+            # TAG fields are the default field type and support equality and membership queries,
+            # though membership (and the multi-value nature of the field) are hidden from
+            # users unless they explicitly index multiple values, with either a list or tuple,
+            # e.g.,
+            #    favorite_foods: List[str] = field(index=True)
             return RediSearchFieldTypes.TAG

     @staticmethod
     def expand_tag_value(value):
         if isinstance(value, str):
+            return escaper.escape(value)
+        if isinstance(value, bytes):
+            # TODO: We don't decode and then escape bytes objects passed as input.
+            # Should we?
+            # TODO: TAG indexes fail on JSON arrays of numbers -- only strings
+            # are allowed -- what happens if we save an array of bytes?
             return value
         try:
-            expanded_value = "|".join([escaper.escape(v) for v in value])
+            return "|".join([escaper.escape(str(v)) for v in value])
         except TypeError:
-            raise QuerySyntaxError("Values passed to an IN query must be iterables,"
-                                   "like a list of strings. For more information, see:"
-                                   "TODO: doc.")
-        return expanded_value
+            log.debug("Escaping single non-iterable value used for an IN or "
+                      "NOT_IN query: %s", value)
+            return escaper.escape(str(value))

     @classmethod
     def resolve_value(cls, field_name: str, field_type: RediSearchFieldTypes,
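
For context on the membership-only queries the NOTE above describes, here is a hypothetical sketch (model and field names are illustrative): the `<<` operator builds an IN expression, and expand_tag_value() joins the escaped values with "|", which RediSearch treats as OR inside a TAG filter:

    # Find members whose last_name is either value; roughly equivalent to the
    # RediSearch query @last_name:{Brookins|Smith}.
    Member.find(Member.last_name << ["Brookins", "Smith"]).all()

    # A single string is simply escaped, e.g. producing @last_name:{Brookins}.
    Member.find(Member.last_name << "Brookins").all()
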
@@ -614,19 +658,30 @@ class FindQuery:
             return self
         return self.copy(sort_fields=list(fields))

-    def update(self, **kwargs):
-        """Update all matching records in this query."""
-        # TODO
-
-    def delete(cls, **field_values):
+    def update(self, use_transaction=True, **field_values) -> Optional[List[str]]:
+        """
+        Update models that match this query to the given field-value pairs.
+
+        Keys and values given as keyword arguments are interpreted as fields
+        on the target model and the values as the values to which to set the
+        given fields.
+        """
+        validate_model_fields(self.model, field_values)
+        pipeline = self.model.db().pipeline() if use_transaction else None
+
+        for model in self.all():
+            for field, value in field_values.items():
+                setattr(model, field, value)
+            model.save(pipeline=pipeline)
+
+        if pipeline:
+            # TODO: Better response type, error detection
+            return pipeline.execute()
+
+    def delete(self):
         """Delete all matching records in this query."""
-        for field_name, value in field_values:
-            valid_attr = hasattr(cls.model, field_name)
-            if not valid_attr:
-                raise RedisModelError(f"Can't update field {field_name} because "
-                                      f"the field does not exist on the model {cls}")
-        return cls
+        # TODO: Better response type, error detection
+        return self.model.db().delete(*[m.key() for m in self.all()])

     def __iter__(self):
         if self._model_cache:
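
A brief usage sketch for the new query-level update() and delete() (the Member model and its fields are assumed from the test suite; the return value depends on use_transaction):

    # By default the writes are queued on a pipeline and executed together;
    # pipeline.execute() results are returned.
    Member.find(Member.last_name == "Brookins").update(age=39)

    # Opt out of the transaction to save each matching model individually.
    Member.find(Member.last_name == "Brookins").update(use_transaction=False, age=39)

    # Delete all matching records with a single DEL on their keys.
    Member.find(Member.age < 18).delete()
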
@@ -822,15 +877,14 @@ class ModelMeta(ModelMetaclass):
             new_class.Meta = meta
             new_class._meta = meta
         elif base_meta:
-            new_class._meta = deepcopy(base_meta)
+            new_class._meta = type(f'{new_class.__name__}Meta', (base_meta,), dict(base_meta.__dict__))
             new_class.Meta = new_class._meta
             # Unset inherited values we don't want to reuse (typically based on
             # the model name).
-            new_class._meta.embedded = False
             new_class._meta.model_key_prefix = None
             new_class._meta.index_name = None
         else:
-            new_class._meta = deepcopy(DefaultMeta)
+            new_class._meta = type(f'{new_class.__name__}Meta', (DefaultMeta,), dict(DefaultMeta.__dict__))
             new_class.Meta = new_class._meta

         # Create proxies for each model field so that we can use the field
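
The switch from deepcopy() to a dynamically created Meta subclass matters because deepcopy() treats class objects as atomic and returns them unchanged, so sibling models previously ended up sharing (and overwriting) a single Meta. A standalone sketch of the new approach, with illustrative names:

    class DefaultMeta:
        global_key_prefix = "redis-developer"
        model_key_prefix = None

    # Equivalent of: type(f"{name}Meta", (DefaultMeta,), dict(DefaultMeta.__dict__))
    UserMeta = type("UserMeta", (DefaultMeta,), dict(DefaultMeta.__dict__))
    UserMeta.model_key_prefix = "user"

    # The base Meta is untouched; each model can customize its own copy.
    assert DefaultMeta.model_key_prefix is None
    assert UserMeta.model_key_prefix == "user"
    assert UserMeta.global_key_prefix == "redis-developer"
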
@@ -887,6 +941,21 @@ class RedisModel(BaseModel, abc.ABC, metaclass=ModelMeta):
         """Default sort: compare primary key of models."""
         return self.pk < other.pk

+    def key(self):
+        """Return the Redis key for this model."""
+        pk = getattr(self, self._meta.primary_key.field.name)
+        return self.make_primary_key(pk)
+
+    def delete(self):
+        return self.db().delete(self.key())
+
+    def update(self, **field_values):
+        """Update this model instance with the specified key-value pairs."""
+        raise NotImplementedError
+
+    def save(self, *args, **kwargs) -> 'RedisModel':
+        raise NotImplementedError
+
     @validator("pk", always=True)
     def validate_pk(cls, v):
         if not v:
@@ -916,11 +985,6 @@ class RedisModel(BaseModel, abc.ABC, metaclass=ModelMeta):
         """Return the Redis key for this model."""
         return cls.make_key(cls._meta.primary_key_pattern.format(pk=pk))

-    def key(self):
-        """Return the Redis key for this model."""
-        pk = getattr(self, self._meta.primary_key.field.name)
-        return self.make_primary_key(pk)
-
     @classmethod
     def db(cls):
         return cls._meta.database
@@ -931,7 +995,7 @@ class RedisModel(BaseModel, abc.ABC, metaclass=ModelMeta):
     @classmethod
     def from_redis(cls, res: Any):
-        # TODO: Parsing logic borrowed from redisearch-py. Evaluate.
+        # TODO: Parsing logic copied from redisearch-py. Evaluate.
         import six
         from six.moves import xrange, zip as izip
@@ -974,25 +1038,14 @@ class RedisModel(BaseModel, abc.ABC, metaclass=ModelMeta):
             docs.append(doc)
         return docs

     @classmethod
     def add(cls, models: Sequence['RedisModel']) -> Sequence['RedisModel']:
+        # TODO: Add transaction support
         return [model.save() for model in models]

-    @classmethod
-    def update(cls, **field_values):
-        """Update this model instance."""
-        return cls
-
     @classmethod
     def values(cls):
         """Return raw values from Redis instead of model instances."""
-        return cls
-
-    def delete(self):
-        return self.db().delete(self.key())
-
-    def save(self, *args, **kwargs) -> 'RedisModel':
         raise NotImplementedError

     @classmethod
@@ -1014,11 +1067,14 @@ class HashModel(RedisModel, abc.ABC):
                 raise RedisModelError(f"HashModels cannot have set, list,"
                                       f" or mapping fields. Field: {name}")

-    def save(self, *args, **kwargs) -> 'HashModel':
+    def save(self, pipeline: Optional[Pipeline] = None) -> 'HashModel':
+        if pipeline is None:
+            db = self.db()
+        else:
+            db = pipeline
         document = jsonable_encoder(self.dict())
-        success = self.db().hset(self.key(), mapping=document)
-        return success
+        db.hset(self.key(), mapping=document)
+        return self

     @classmethod
     def get(cls, pk: Any) -> 'HashModel':
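
A sketch of how the new pipeline parameter is intended to be used (the model instances are assumed; without a pipeline, save() still writes immediately):

    db = Member.db()
    pipeline = db.pipeline()

    member1.save(pipeline=pipeline)   # queues the write, nothing sent yet
    member2.save(pipeline=pipeline)

    pipeline.execute()                # both writes are sent to Redis together
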
@@ -1063,12 +1119,7 @@ class HashModel(RedisModel, abc.ABC):
                 schema_parts.append(redisearch_field)
             elif getattr(field.field_info, 'index', None) is True:
                 schema_parts.append(cls.schema_for_type(name, _type, field.field_info))
-                # TODO: Raise error if user embeds a model field or list and makes it
-                # sortable. Instead, the embedded model should mark individual fields
-                # as sortable.
-                if getattr(field.field_info, 'sortable', False) is True:
-                    schema_parts.append("SORTABLE")
-            elif get_origin(_type) == list:
+            elif is_supported_container_type(_type):
                 embedded_cls = get_args(_type)
                 if not embedded_cls:
                     # TODO: Test if this can really happen.
@@ -1083,36 +1134,62 @@ class HashModel(RedisModel, abc.ABC):
     @classmethod
     def schema_for_type(cls, name, typ: Any, field_info: PydanticFieldInfo):
-        if get_origin(typ) == list:
+        # TODO: Import parent logic from JsonModel to deal with lists, so that
+        # a List[int] gets indexed as TAG instead of NUMERICAL.
+        # TODO: Raise error if user embeds a model field or list and makes it
+        # sortable. Instead, the embedded model should mark individual fields
+        # as sortable.
+        # TODO: Abstract string-building logic for each type (TAG, etc.) into
+        # classes that take a field name.
+        sortable = getattr(field_info, 'sortable', False)
+
+        if is_supported_container_type(typ):
             embedded_cls = get_args(typ)
             if not embedded_cls:
                 # TODO: Test if this can really happen.
-                log.warning("Model %s defined an empty list field: %s", cls, name)
+                log.warning("Model %s defined an empty list or tuple field: %s", cls, name)
                 return ""
             embedded_cls = embedded_cls[0]
-            return cls.schema_for_type(name, embedded_cls, field_info)
+            schema = cls.schema_for_type(name, embedded_cls, field_info)
         elif any(issubclass(typ, t) for t in NUMERIC_TYPES):
-            return f"{name} NUMERIC"
+            schema = f"{name} NUMERIC"
         elif issubclass(typ, str):
             if getattr(field_info, 'full_text_search', False) is True:
-                return f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} " \
+                schema = f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} " \
                          f"{name}_fts TEXT"
             else:
-                return f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+                schema = f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
         elif issubclass(typ, RedisModel):
             sub_fields = []
             for embedded_name, field in typ.__fields__.items():
                 sub_fields.append(cls.schema_for_type(f"{name}_{embedded_name}", field.outer_type_,
                                                       field.field_info))
-            return " ".join(sub_fields)
+            schema = " ".join(sub_fields)
         else:
-            return f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+            schema = f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+        if schema and sortable is True:
+            schema += " SORTABLE"
+        return schema


 class JsonModel(RedisModel, abc.ABC):
-    def save(self, *args, **kwargs) -> 'JsonModel':
-        success = self.db().execute_command('JSON.SET', self.key(), ".", self.json())
-        return success
+    def __init_subclass__(cls, **kwargs):
+        # Generate the RediSearch schema once to validate fields.
+        cls.redisearch_schema()
+
+    def save(self, pipeline: Optional[Pipeline] = None) -> 'JsonModel':
+        if pipeline is None:
+            db = self.db()
+        else:
+            db = pipeline
+        db.execute_command('JSON.SET', self.key(), ".", self.json())
+        return self
+
+    def update(self, **field_values):
+        validate_model_fields(self.__class__, field_values)
+        for field, value in field_values.items():
+            setattr(self, field, value)
+        self.save()

     @classmethod
     def get(cls, pk: Any) -> 'JsonModel':
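
Instance-level update() on JsonModel validates the given fields, sets them on the instance, and saves it; a short sketch using the Member model from the tests (the pk value is assumed to exist, and QuerySyntaxError comes from this model module):

    member = Member.get(pk)
    member.update(bio="Hates sunsets, likes beaches")

    # Unknown fields are rejected by validate_model_fields() before any write.
    try:
        member.update(nickname="Drew")   # not a field on Member
    except QuerySyntaxError:
        ...
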
@@ -1144,7 +1221,25 @@ class JsonModel(RedisModel, abc.ABC):
                         field_info: PydanticFieldInfo,
                         parent_type: Optional[Any] = None) -> str:
         should_index = getattr(field_info, 'index', False)
-        field_type = get_origin(typ)
+        is_container_type = is_supported_container_type(typ)
+        parent_is_container_type = is_supported_container_type(parent_type)
+        try:
+            parent_is_model = issubclass(parent_type, RedisModel)
+        except TypeError:
+            parent_is_model = False
+
+        # TODO: We need a better way to know that we're indexing a value
+        # discovered in a model within an array.
+        #
+        # E.g., say we have a field like `orders: List[Order]`, and we're
+        # indexing the "name" field from the Order model (because it's marked
+        # index=True in the Order model). The JSONPath for this field is
+        # $.orders[*].name, but the "parent" type at this point is Order, not
+        # List. For now, we'll discover that Orders are stored in a list by
+        # checking if the JSONPath contains the expression for all items in
+        # an array.
+        parent_is_model_in_container = parent_is_model and json_path.endswith("[*]")
+
         try:
             field_is_model = issubclass(typ, RedisModel)
         except TypeError:
@@ -1154,10 +1249,11 @@ class JsonModel(RedisModel, abc.ABC):
         # When we encounter a list or model field, we need to descend
         # into the values of the list or the fields of the model to
         # find any values marked as indexed.
-        if field_type == list:
+        if is_container_type:
+            field_type = get_origin(typ)
             embedded_cls = get_args(typ)
             if not embedded_cls:
-                log.warning("Model %s defined an empty list field: %s", cls, name)
+                log.warning("Model %s defined an empty list or tuple field: %s", cls, name)
                 return ""
             embedded_cls = embedded_cls[0]
             return cls.schema_for_type(f"{json_path}.{name}[*]", name, name_prefix,
@@ -1166,10 +1262,11 @@ class JsonModel(RedisModel, abc.ABC):
             name_prefix = f"{name_prefix}_{name}" if name_prefix else name
             sub_fields = []
             for embedded_name, field in typ.__fields__.items():
-                if parent_type == list or isinstance(parent_type, RedisModel):
-                    # This is a list, so the correct JSONPath expression is to
-                    # refer directly to attribute names after the list notation,
-                    # e.g. orders[*].created_date.
+                if parent_is_container_type:
+                    # We'll store this value either as a JavaScript array, so
+                    # the correct JSONPath expression is to refer directly to
+                    # attribute names after the container notation, e.g.
+                    # orders[*].created_date.
                     path = json_path
                 else:
                     # All other fields should use dot notation with both the
@@ -1181,23 +1278,56 @@ class JsonModel(RedisModel, abc.ABC):
                                                       name_prefix,
                                                       field.outer_type_,
                                                       field.field_info,
-                                                      parent_type=field_type))
+                                                      parent_type=typ))
             return " ".join(filter(None, sub_fields))
+        # NOTE: This is the termination point for recursion. We've descended
+        # into models and lists until we found an actual value to index.
         elif should_index:
             index_field_name = f"{name_prefix}_{name}" if name_prefix else name
-            path = f"{json_path}.{name}"
-            if any(issubclass(typ, t) for t in NUMERIC_TYPES):
-                schema_part = f"{path} AS {index_field_name} NUMERIC"
-            elif issubclass(typ, str):
-                if getattr(field_info, 'full_text_search', False) is True:
-                    schema_part = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} " \
-                                  f"{path} AS {index_field_name}_fts TEXT"
-                else:
-                    schema_part = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
-            else:
-                schema_part = f"{path} AS {index_field_name} TAG"
-            # TODO: GEO field
-            schema_part += " SORTABLE"
-            return schema_part
+            if parent_is_container_type:
+                # If we're indexing the this field as a JavaScript array, then
+                # the currently built-up JSONPath expression will be
+                # "field_name[*]", which is what we want to use.
+                path = json_path
+            else:
+                path = f"{json_path}.{name}"
+            sortable = getattr(field_info, 'sortable', False)
+            full_text_search = getattr(field_info, 'full_text_search', False)
+            sortable_tag_error = RedisModelError("In this Preview release, TAG fields cannot "
+                                                 f"be marked as sortable. Problem field: {name}. "
+                                                 "See docs: TODO")
+
+            # TODO: GEO field
+            if parent_is_container_type or parent_is_model_in_container:
+                if typ is not str:
+                    raise RedisModelError("In this Preview release, list and tuple fields can only "
+                                          f"contain strings. Problem field: {name}. See docs: TODO")
+                if full_text_search is True:
+                    raise RedisModelError("List and tuple fields cannot be indexed for full-text "
+                                          f"search. Problem field: {name}. See docs: TODO")
+                schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+                if sortable is True:
+                    raise sortable_tag_error
+            elif any(issubclass(typ, t) for t in NUMERIC_TYPES):
+                schema = f"{path} AS {index_field_name} NUMERIC"
+            elif issubclass(typ, str):
+                if full_text_search is True:
+                    schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} " \
+                             f"{path} AS {index_field_name}_fts TEXT"
+                    if sortable is True:
+                        # NOTE: With the current preview release, making a field
+                        # full-text searchable and sortable only makes the TEXT
+                        # field sortable. This means that results for full-text
+                        # search queries can be sorted, but not exact match
+                        # queries.
+                        schema += " SORTABLE"
+                else:
+                    schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+                    if sortable is True:
+                        raise sortable_tag_error
+            else:
+                schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
+                if sortable is True:
+                    raise sortable_tag_error
+            return schema
         return ""

View file

@@ -16,9 +16,9 @@ class TokenEscaper:
         else:
             self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)

-    def escape(self, string: str) -> str:
+    def escape(self, value: str) -> str:
         def escape_symbol(match):
             value = match.group(0)
             return f"\\{value}"

-        return self.escaped_chars_re.sub(escape_symbol, string)
+        return self.escaped_chars_re.sub(escape_symbol, value)
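
The parameter rename from string to value is purely cosmetic; behavior is unchanged. For reference, a small usage sketch (the exact set of escaped characters comes from DEFAULT_ESCAPED_CHARS):

    escaper = TokenEscaper()

    # Characters that would break RediSearch query syntax are backslash-escaped.
    escaper.escape("beach-house #3")   # -> beach\-house\ \#3
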

View file

@@ -12,7 +12,8 @@ from redis_developer.orm import (
     JsonModel,
     Field,
 )
-from redis_developer.orm.model import QueryNotSupportedError, NotFoundError
+from redis_developer.orm.migrations.migrator import Migrator
+from redis_developer.orm.model import QueryNotSupportedError, NotFoundError, RedisModelError

 r = redis.Redis()
 today = datetime.date.today()
@@ -29,7 +30,10 @@ class EmbeddedJsonModel(BaseJsonModel, abc.ABC):

 class Note(EmbeddedJsonModel):
-    description: str = Field(index=True, full_text_search=True)
+    # TODO: This was going to be a full-text search example, but
+    # we can't index embedded documents for full-text search in
+    # the preview release.
+    description: str = Field(index=True)
     created_on: datetime.datetime
@@ -45,12 +49,11 @@ class Address(EmbeddedJsonModel):

 class Item(EmbeddedJsonModel):
     price: decimal.Decimal
-    name: str = Field(index=True, full_text_search=True)
+    name: str = Field(index=True)


 class Order(EmbeddedJsonModel):
     items: List[Item]
-    total: decimal.Decimal
     created_on: datetime.datetime
@@ -60,6 +63,7 @@ class Member(BaseJsonModel):
     email: str = Field(index=True)
     join_date: datetime.date
     age: int = Field(index=True)
+    bio: Optional[str] = Field(index=True, full_text_search=True, default="")

     # Creates an embedded model.
     address: Address
@@ -317,32 +321,15 @@ def test_recursive_query_field_resolution(members):

 def test_full_text_search(members):
     member1, member2, _ = members
-    member1.address.note = Note(description="white house",
-                                created_on=datetime.datetime.now())
-    member2.address.note = Note(description="blue house",
-                                created_on=datetime.datetime.now())
-    member1.save()
-    member2.save()
+    member1.update(bio="Hates sunsets, likes beaches")
+    member2.update(bio="Hates beaches, likes forests")

-    actual = Member.find(Member.address.note.description % "white").all()
-    assert actual == [member1]
-
-    member1.orders = [
-        Order(items=[Item(price=10.99, name="balls")],
-              total=10.99,
-              created_on=datetime.datetime.now())
-    ]
-    member2.orders = [
-        Order(items=[Item(price=10.99, name="white ball")],
-              total=10.99,
-              created_on=datetime.datetime.now())
-    ]
-    member1.save()
-    member2.save()
-
-    actual = Member.find(Member.orders.items.name % "ball").all()
+    actual = Member.find(Member.bio % "beaches").all()
     assert actual == [member1, member2]

+    actual = Member.find(Member.bio % "forests").all()
+    assert actual == [member2]
+

 def test_tag_queries_boolean_logic(members):
     member1, member2, member3 = members
@@ -507,5 +494,55 @@ def test_not_found():
         Member.get(1000)


+def test_list_field_limitations():
+    with pytest.raises(RedisModelError):
+        class SortableTarotWitch(BaseJsonModel):
+            # We support indexing lists of strings for quality and membership
+            # queries. Sorting is not supported, but is planned.
+            tarot_cards: List[str] = Field(index=True, sortable=True)
+
+    with pytest.raises(RedisModelError):
+        class SortableFullTextSearchAlchemicalWitch(BaseJsonModel):
+            # We don't support indexing a list of strings for full-text search
+            # queries. Support for this feature is not planned.
+            potions: List[str] = Field(index=True, full_text_search=True)
+
+    with pytest.raises(RedisModelError):
+        class NumerologyWitch(BaseJsonModel):
+            # We don't support indexing a list of numbers. Support for this
+            # feature is To Be Determined.
+            lucky_numbers: List[int] = Field(index=True)
+
+    with pytest.raises(RedisModelError):
+        class ReadingWithPrice(EmbeddedJsonModel):
+            gold_coins_charged: int = Field(index=True)
+
+        class TarotWitchWhoCharges(BaseJsonModel):
+            tarot_cards: List[str] = Field(index=True)
+            # The preview release does not support indexing numeric fields on models
+            # found within a list or tuple. This is the same limitation that stops
+            # us from indexing plain lists (or tuples) containing numeric values.
+            # The fate of this feature is To Be Determined.
+            readings: List[ReadingWithPrice]
+
+    class TarotWitch(BaseJsonModel):
+        # We support indexing lists of strings for quality and membership
+        # queries. Sorting is not supported, but is planned.
+        tarot_cards: List[str] = Field(index=True)
+
+    # We need to import and run this manually because we defined
+    # our model classes within a function that runs after the test
+    # suite's migrator has already looked for migrations to run.
+    Migrator().run()
+
+    witch = TarotWitch(
+        tarot_cards=['death']
+    )
+    witch.save()
+
+    actual = TarotWitch.find(TarotWitch.tarot_cards << 'death').all()
+    assert actual == [witch]
+
+
 def test_schema():
-    assert Member.redisearch_schema() == "ON JSON PREFIX 1 redis-developer:tests.test_json_model.Member: SCHEMA $.pk AS pk TAG SEPARATOR | SORTABLE $.first_name AS first_name TAG SEPARATOR | SORTABLE $.last_name AS last_name TAG SEPARATOR | SORTABLE $.email AS email TAG SEPARATOR | SORTABLE $.age AS age NUMERIC SORTABLE $.address.pk AS address_pk TAG SEPARATOR | SORTABLE $.address.city AS address_city TAG SEPARATOR | SORTABLE $.address.postal_code AS address_postal_code TAG SEPARATOR | SORTABLE $.address.note.pk AS address_note_pk TAG SEPARATOR | SORTABLE $.address.note.description AS address_note_description TAG SEPARATOR | $.address.note.description AS address_note_description_fts TEXT SORTABLE $.orders[*].pk AS orders_pk TAG SEPARATOR | SORTABLE $.orders[*].items[*].pk AS orders_items_pk TAG SEPARATOR | SORTABLE $.orders[*].items[*].name AS orders_items_name TAG SEPARATOR | $.orders[*].items[*].name AS orders_items_name_fts TEXT SORTABLE"
+    assert Member.redisearch_schema() == "ON JSON PREFIX 1 redis-developer:tests.test_json_model.Member: SCHEMA $.pk AS pk TAG SEPARATOR | $.first_name AS first_name TAG SEPARATOR | $.last_name AS last_name TAG SEPARATOR | $.email AS email TAG SEPARATOR | $.age AS age NUMERIC $.bio AS bio TAG SEPARATOR | $.bio AS bio_fts TEXT $.address.pk AS address_pk TAG SEPARATOR | $.address.city AS address_city TAG SEPARATOR | $.address.postal_code AS address_postal_code TAG SEPARATOR | $.address.note.pk AS address_note_pk TAG SEPARATOR | $.address.note.description AS address_note_description TAG SEPARATOR | $.orders[*].pk AS orders_pk TAG SEPARATOR | $.orders[*].items[*].pk AS orders_items_pk TAG SEPARATOR | $.orders[*].items[*].name AS orders_items_name TAG SEPARATOR |"