import abc
import dataclasses
import decimal
import json
import logging
import operator
from copy import copy
from enum import Enum
from functools import reduce
from typing import (
    AbstractSet,
    Any,
    Callable,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    TypeVar,
    Union,
    no_type_check,
)

import aioredis
from aioredis.client import Pipeline
from pydantic import BaseModel, validator
from pydantic.fields import FieldInfo as PydanticFieldInfo
from pydantic.fields import ModelField, Undefined, UndefinedType
from pydantic.main import ModelMetaclass, validate_model
from pydantic.typing import NoArgAnyCallable
from pydantic.utils import Representation
from typing_extensions import Protocol, get_args, get_origin
from ulid import ULID

from ..checks import has_redis_json, has_redisearch
from ..connections import get_redis_connection
from ..unasync_util import ASYNC_MODE
from .encoders import jsonable_encoder
from .render_tree import render_tree
from .token_escaper import TokenEscaper


model_registry = {}
_T = TypeVar("_T")
log = logging.getLogger(__name__)
escaper = TokenEscaper()

# For basic exact-match field types, like an indexed string, we create a TAG
# field in the RediSearch index. TAG is designed for multi-value fields
# separated by a "separator" character. We're using the field for single values
# (multi-value TAGs will be exposed as a separate field type), and we use the
# pipe character (|) as the separator. There is no way to escape this character
# in hash fields or JSON objects, so if someone indexes a value that includes
# the pipe, we'll warn but allow, and then warn again if they try to query for
# values that contain this separator.
SINGLE_VALUE_TAG_FIELD_SEPARATOR = "|"

# This is the default field separator in RediSearch. We need it to determine if
# someone has accidentally passed in the field separator with the string value
# of a multi-value field lookup, like an IN or NOT_IN.
DEFAULT_REDISEARCH_FIELD_SEPARATOR = ","

ERRORS_URL = "https://github.com/redis/redis-om-python/blob/main/docs/errors.md"


class RedisModelError(Exception):
    """Raised when a problem exists in the definition of a RedisModel."""


class QuerySyntaxError(Exception):
    """Raised when a query is constructed improperly."""


class NotFoundError(Exception):
    """Raised when a query found no results."""


class Operators(Enum):
    EQ = 1
    NE = 2
    LT = 3
    LE = 4
    GT = 5
    GE = 6
    OR = 7
    AND = 8
    NOT = 9
    IN = 10
    NOT_IN = 11
    LIKE = 12
    ALL = 13

    def __str__(self):
        return str(self.name)


ExpressionOrModelField = Union["Expression", "NegatedExpression", ModelField]


def embedded(cls):
    """
    Mark a model as embedded to avoid creating multiple indexes if the model is
    only ever used embedded within other models.
    """
    setattr(cls.Meta, "embedded", True)
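

# A minimal usage sketch of the decorator above. ``Address``, ``Customer``,
# and ``JsonModel`` are illustrative names, not defined in this module:
#
#     @embedded
#     class Address(JsonModel):
#         city: str = Field(index=True)
#
#     class Customer(JsonModel):
#         address: Address
#
# Marking ``Address`` as embedded tells the Migrator not to build a separate
# RediSearch index for it; its indexed fields are only searchable through the
# models that embed it, such as ``Customer``.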


def is_supported_container_type(typ: Optional[type]) -> bool:
    # TODO: Wait, why don't we support indexing sets?
    if typ == list or typ == tuple:
        return True
    unwrapped = get_origin(typ)
    return unwrapped == list or unwrapped == tuple
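

# For example, ``is_supported_container_type(List[str])`` is True because
# ``get_origin(List[str])`` is ``list``, while ``is_supported_container_type(Set[str])``
# is False -- sets are not yet supported (see the TODO above).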


def validate_model_fields(model: Type["RedisModel"], field_values: Dict[str, Any]):
    for field_name in field_values.keys():
        if "__" in field_name:
            obj = model
            for sub_field in field_name.split("__"):
                if not hasattr(obj, sub_field):
                    raise QuerySyntaxError(
                        f"The update path {field_name} contains a field that does not "
                        f"exist on {model.__name__}. The field is: {sub_field}"
                    )
                obj = getattr(obj, sub_field)
            # This nested path validated; move on to the next field.
            continue

        if field_name not in model.__fields__:
            raise QuerySyntaxError(
                f"The field {field_name} does not exist on the model {model.__name__}"
            )


def decode_redis_value(
    obj: Union[List[bytes], Dict[bytes, bytes], bytes], encoding: str
) -> Union[List[str], Dict[str, str], str]:
    """Decode a binary-encoded Redis response (list, hash, or bytes) into the specified encoding."""
    if isinstance(obj, list):
        return [v.decode(encoding) for v in obj]
    if isinstance(obj, dict):
        return {
            key.decode(encoding): value.decode(encoding) for key, value in obj.items()
        }
    elif isinstance(obj, bytes):
        return obj.decode(encoding)
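

# For example, decoding a raw HGETALL reply:
#
#     raw = {b"name": b"Andrew", b"age": b"38"}
#     decode_redis_value(raw, "utf-8")  # -> {"name": "Andrew", "age": "38"}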


class PipelineError(Exception):
    """A Redis pipeline error."""


def verify_pipeline_response(
    response: List[Union[bytes, str]], expected_responses: int = 0
):
    # TODO: More generic pipeline verification here (what else is possible?),
    # plus hash and JSON-specific verifications in separate functions.
    actual_responses = len(response)
    if actual_responses != expected_responses:
        raise PipelineError(
            f"We expected {expected_responses} responses, but the Redis "
            f"pipeline returned {actual_responses}."
        )


@dataclasses.dataclass
class NegatedExpression:
    """A negated Expression object.

    For now, this is a separate dataclass from Expression that acts as a facade
    around an Expression, indicating to model code (specifically, code
    responsible for querying) to negate the logic in the wrapped Expression. A
    better design is probably possible, maybe at least an ExpressionProtocol?
    """

    expression: "Expression"

    def __invert__(self):
        return self.expression

    def __and__(self, other):
        return Expression(
            left=self, op=Operators.AND, right=other, parents=self.expression.parents
        )

    def __or__(self, other):
        return Expression(
            left=self, op=Operators.OR, right=other, parents=self.expression.parents
        )

    @property
    def left(self):
        return self.expression.left

    @property
    def right(self):
        return self.expression.right

    @property
    def op(self):
        return self.expression.op

    @property
    def name(self):
        if self.expression.op is Operators.EQ:
            return f"NOT {self.expression.name}"
        else:
            return f"{self.expression.name} NOT"

    @property
    def tree(self):
        return render_tree(self)


@dataclasses.dataclass
class Expression:
    op: Operators
    left: Optional[ExpressionOrModelField]
    right: Optional[ExpressionOrModelField]
    parents: List[Tuple[str, "RedisModel"]]

    def __invert__(self):
        return NegatedExpression(self)

    def __and__(self, other: ExpressionOrModelField):
        return Expression(
            left=self, op=Operators.AND, right=other, parents=self.parents
        )

    def __or__(self, other: ExpressionOrModelField):
        return Expression(left=self, op=Operators.OR, right=other, parents=self.parents)

    @property
    def name(self):
        return str(self.op)

    @property
    def tree(self):
        return render_tree(self)


ExpressionOrNegated = Union[Expression, NegatedExpression]
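

# A sketch of how expression trees compose through the overloads above
# (``Member`` is an illustrative model, not defined in this module):
#
#     expr = (Member.age >= 18) & ~(Member.last_name == "Brookins")
#
# Each comparison produces an Expression, ``~`` wraps one side in a
# NegatedExpression, and ``&`` joins both under Operators.AND. FindQuery
# later walks this tree to build a single RediSearch query string.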


class ExpressionProxy:
    def __init__(self, field: ModelField, parents: List[Tuple[str, "RedisModel"]]):
        self.field = field
        self.parents = parents

    def __eq__(self, other: Any) -> Expression:  # type: ignore[override]
        return Expression(
            left=self.field, op=Operators.EQ, right=other, parents=self.parents
        )

    def __ne__(self, other: Any) -> Expression:  # type: ignore[override]
        return Expression(
            left=self.field, op=Operators.NE, right=other, parents=self.parents
        )

    def __lt__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.LT, right=other, parents=self.parents
        )

    def __le__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.LE, right=other, parents=self.parents
        )

    def __gt__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.GT, right=other, parents=self.parents
        )

    def __ge__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.GE, right=other, parents=self.parents
        )

    def __mod__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.LIKE, right=other, parents=self.parents
        )

    def __lshift__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.IN, right=other, parents=self.parents
        )

    def __rshift__(self, other: Any) -> Expression:
        return Expression(
            left=self.field, op=Operators.NOT_IN, right=other, parents=self.parents
        )

    def __getattr__(self, item):
        if is_supported_container_type(self.field.outer_type_):
            embedded_cls = get_args(self.field.outer_type_)
            if not embedded_cls:
                raise QuerySyntaxError(
                    "In order to query on a list field, you must define "
                    "the contents of the list with a type annotation, like: "
                    f"orders: List[Order]. Docs: {ERRORS_URL}#E1"
                )
            embedded_cls = embedded_cls[0]
            attr = getattr(embedded_cls, item)
        else:
            attr = getattr(self.field.outer_type_, item)
        if isinstance(attr, self.__class__):
            new_parent = (self.field.name, self.field.outer_type_)
            if new_parent not in attr.parents:
                attr.parents.append(new_parent)
            new_parents = list(set(self.parents) - set(attr.parents))
            if new_parents:
                attr.parents = new_parents + attr.parents
        return attr
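

# The less obvious overloads above map as follows (``Member`` is illustrative):
#
#     Member.bio % "enjoys*"                  -> Operators.LIKE (full-text search)
#     Member.last_name << ["Smith", "Jones"]  -> Operators.IN
#     Member.last_name >> ["Smith", "Jones"]  -> Operators.NOT_IN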


class QueryNotSupportedError(Exception):
    """The attempted query is not supported."""


class RediSearchFieldTypes(Enum):
    TEXT = "TEXT"
    TAG = "TAG"
    NUMERIC = "NUMERIC"
    GEO = "GEO"


# TODO: How to handle Geo fields?
NUMERIC_TYPES = (float, int, decimal.Decimal)
DEFAULT_PAGE_SIZE = 10


class FindQuery:
    def __init__(
        self,
        expressions: Sequence[ExpressionOrNegated],
        model: Type["RedisModel"],
        offset: int = 0,
        limit: int = DEFAULT_PAGE_SIZE,
        page_size: int = DEFAULT_PAGE_SIZE,
        sort_fields: Optional[List[str]] = None,
    ):
        if not has_redisearch(model.db()):
            raise RedisModelError(
                "Your Redis instance does not have either the RediSearch module "
                "or RedisJSON module installed. Querying requires that your Redis "
                "instance has one of these modules installed."
            )

        self.expressions = expressions
        self.model = model
        self.offset = offset
        self.limit = limit
        self.page_size = page_size

        if sort_fields:
            self.sort_fields = self.validate_sort_fields(sort_fields)
        else:
            self.sort_fields = []

        self._expression = None
        self._query: Optional[str] = None
        self._pagination: List[str] = []
        self._model_cache: List[RedisModel] = []

    def dict(self) -> Dict[str, Any]:
        return dict(
            model=self.model,
            offset=self.offset,
            page_size=self.page_size,
            limit=self.limit,
            expressions=copy(self.expressions),
            sort_fields=copy(self.sort_fields),
        )

    def copy(self, **kwargs):
        original = self.dict()
        original.update(**kwargs)
        return FindQuery(**original)

    @property
    def pagination(self):
        if self._pagination:
            return self._pagination
        self._pagination = self.resolve_redisearch_pagination()
        return self._pagination

    @property
    def expression(self):
        if self._expression:
            return self._expression
        if self.expressions:
            self._expression = reduce(operator.and_, self.expressions)
        else:
            self._expression = Expression(
                left=None, right=None, op=Operators.ALL, parents=[]
            )
        return self._expression

    @property
    def query(self):
        """
        Resolve and return the RediSearch query for this FindQuery.

        NOTE: We cache the resolved query string after generating it. This should be OK
        because all mutations of FindQuery through public APIs return a new FindQuery instance.
        """
        if self._query:
            return self._query
        self._query = self.resolve_redisearch_query(self.expression)
        return self._query

    def validate_sort_fields(self, sort_fields: List[str]):
        for sort_field in sort_fields:
            field_name = sort_field.lstrip("-")
            if field_name not in self.model.__fields__:
                raise QueryNotSupportedError(
                    f"You tried to sort by {field_name}, but that field "
                    f"does not exist on the model {self.model}"
                )
            field_proxy = getattr(self.model, field_name)
            if not getattr(field_proxy.field.field_info, "sortable", False):
                raise QueryNotSupportedError(
                    f"You tried to sort by {field_name}, but {self.model} does "
                    f"not define that field as sortable. Docs: {ERRORS_URL}#E2"
                )
        return sort_fields

    @staticmethod
    def resolve_field_type(field: ModelField, op: Operators) -> RediSearchFieldTypes:
        if getattr(field.field_info, "primary_key", None) is True:
            return RediSearchFieldTypes.TAG
        elif op is Operators.LIKE:
            fts = getattr(field.field_info, "full_text_search", None)
            if fts is not True:  # Could be PydanticUndefined
                raise QuerySyntaxError(
                    f"You tried to do a full-text search on the field '{field.name}', "
                    f"but the field is not indexed for full-text search. Use the "
                    f"full_text_search=True option. Docs: {ERRORS_URL}#E3"
                )
            return RediSearchFieldTypes.TEXT

        field_type = field.outer_type_

        # TODO: GEO fields
        container_type = get_origin(field_type)

        if is_supported_container_type(container_type):
            # NOTE: A list of strings, like:
            #
            #     tarot_cards: List[str] = field(index=True)
            #
            # becomes a TAG field, which means that users can run equality and
            # membership queries on values.
            #
            # Meanwhile, a list of RedisModels, like:
            #
            #     friends: List[Friend] = field(index=True)
            #
            # is not itself directly indexed, but instead, we index any fields
            # within the model inside the list marked as `index=True`.
            return RediSearchFieldTypes.TAG
        elif container_type is not None:
            raise QuerySyntaxError(
                "Only lists and tuples are supported for multi-value fields. "
                f"Docs: {ERRORS_URL}#E4"
            )
        elif any(issubclass(field_type, t) for t in NUMERIC_TYPES):
            # Index numeric Python types as NUMERIC fields, so we can support
            # range queries.
            return RediSearchFieldTypes.NUMERIC
        else:
            # TAG fields are the default field type and support equality and
            # membership queries, though membership (and the multi-value nature
            # of the field) are hidden from users unless they explicitly index
            # multiple values, with either a list or tuple, e.g.,
            #
            #     favorite_foods: List[str] = field(index=True)
            return RediSearchFieldTypes.TAG

    @staticmethod
    def expand_tag_value(value):
        if isinstance(value, str):
            return escaper.escape(value)
        if isinstance(value, bytes):
            # TODO: We don't decode bytes objects passed as input. Should we?
            # TODO: TAG indexes fail on JSON arrays of numbers -- only strings
            #   are allowed -- what happens if we save an array of bytes?
            return value
        try:
            return "|".join([escaper.escape(str(v)) for v in value])
        except TypeError:
            log.debug(
                "Escaping single non-iterable value used for an IN or "
                "NOT_IN query: %s",
                value,
            )
            return escaper.escape(str(value))
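
    # For example, ``expand_tag_value(["high", "low"])`` yields ``"high|low"``,
    # which RediSearch interprets as a union inside a TAG query such as
    # ``@level:{high|low}``; a single string is simply escaped and returned.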

    @classmethod
    def resolve_value(
        cls,
        field_name: str,
        field_type: RediSearchFieldTypes,
        field_info: PydanticFieldInfo,
        op: Operators,
        value: Any,
        parents: List[Tuple[str, "RedisModel"]],
    ) -> str:
        if parents:
            prefix = "_".join([p[0] for p in parents])
            field_name = f"{prefix}_{field_name}"
        result = ""
        if field_type is RediSearchFieldTypes.TEXT:
            result = f"@{field_name}_fts:"
            if op is Operators.EQ:
                result += f'"{value}"'
            elif op is Operators.NE:
                result = f'-({result}"{value}")'
            elif op is Operators.LIKE:
                result += value
            else:
                raise QueryNotSupportedError(
                    "Only equals (=), not-equals (!=), and like() "
                    "comparisons are supported for TEXT fields. "
                    f"Docs: {ERRORS_URL}#E5"
                )
        elif field_type is RediSearchFieldTypes.NUMERIC:
            if op is Operators.EQ:
                result += f"@{field_name}:[{value} {value}]"
            elif op is Operators.NE:
                result += f"-(@{field_name}:[{value} {value}])"
            elif op is Operators.GT:
                result += f"@{field_name}:[({value} +inf]"
            elif op is Operators.LT:
                result += f"@{field_name}:[-inf ({value}]"
            elif op is Operators.GE:
                result += f"@{field_name}:[{value} +inf]"
            elif op is Operators.LE:
                result += f"@{field_name}:[-inf {value}]"
        # TODO: How will we know the difference between a multi-value use of a TAG
        #   field and our hidden use of TAG for exact-match queries?
        elif field_type is RediSearchFieldTypes.TAG:
            if op is Operators.EQ:
                separator_char = getattr(
                    field_info, "separator", SINGLE_VALUE_TAG_FIELD_SEPARATOR
                )
                if value == separator_char:
                    # The value is ONLY the TAG field separator character --
                    # this is not going to work.
                    log.warning(
                        "Your query against the field %s is for a single character, %s, "
                        "that is used internally by redis-om-python. We must ignore "
                        "this portion of the query. Please review your query to find "
                        "an alternative query that uses a string containing more than "
                        "just the character %s.",
                        field_name,
                        separator_char,
                        separator_char,
                    )
                    return ""
                if separator_char in value:
                    # The value contains the TAG field separator. We can work
                    # around this by breaking apart the values and unioning them
                    # with multiple field:{} queries.
                    values = filter(None, value.split(separator_char))
                    for value in values:
                        value = escaper.escape(value)
                        result += f"@{field_name}:{{{value}}}"
                else:
                    value = escaper.escape(value)
                    result += f"@{field_name}:{{{value}}}"
            elif op is Operators.NE:
                value = escaper.escape(value)
                result += f"-(@{field_name}:{{{value}}})"
            elif op is Operators.IN:
                expanded_value = cls.expand_tag_value(value)
                result += f"(@{field_name}:{{{expanded_value}}})"
            elif op is Operators.NOT_IN:
                # TODO: Implement NOT_IN, test this...
                expanded_value = cls.expand_tag_value(value)
                result += f"-(@{field_name}:{{{expanded_value}}})"

        return result
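
    # A few concrete fragments this method produces, following the branches
    # above (illustrative field names; actual output depends on escaping):
    #
    #     NUMERIC, EQ, 30       -> "@age:[30 30]"
    #     NUMERIC, GT, 30       -> "@age:[(30 +inf]"
    #     TAG,     EQ, "Smith"  -> "@last_name:{Smith}"
    #     TEXT,    LIKE, "de*"  -> "@bio_fts:de*"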

    def resolve_redisearch_pagination(self):
        """Resolve pagination options for a query."""
        return ["LIMIT", self.offset, self.limit]

    def resolve_redisearch_sort_fields(self):
        """Resolve sort options for a query."""
        if not self.sort_fields:
            return
        fields = []
        for f in self.sort_fields:
            direction = "desc" if f.startswith("-") else "asc"
            fields.extend([f.lstrip("-"), direction])
        if self.sort_fields:
            return ["SORTBY", *fields]

    @classmethod
    def resolve_redisearch_query(cls, expression: ExpressionOrNegated) -> str:
        """
        Resolve an arbitrarily deep expression into a single RediSearch query string.

        This method is complex. Note the following:

        1. This method makes a recursive call to itself when it finds that
           either the left or right operand contains another expression.

        2. An expression might be in a "negated" form, which means that the user
           gave us an expression like ~(Member.age == 30), or in other words,
           "Members whose age is NOT 30." Thus, a negated expression is one in
           which the meaning of an expression is inverted. If we find a negated
           expression, we need to add the appropriate "NOT" syntax but can
           otherwise use the resolved RediSearch query for the expression as-is.

        3. The final resolution of an expression should be a left operand that's
           a ModelField, an operator, and a right operand that's NOT a ModelField.
           With an IN or NOT_IN operator, the right operand can be a sequence
           type, but otherwise, sequence types are converted to strings.

        TODO: When the operator is not IN or NOT_IN, detect a sequence type (other
         than strings, which are allowed) and raise an exception.
        """
        field_type = None
        field_name = None
        field_info = None
        encompassing_expression_is_negated = False
        result = ""

        if isinstance(expression, NegatedExpression):
            encompassing_expression_is_negated = True
            expression = expression.expression

        if expression.op is Operators.ALL:
            if encompassing_expression_is_negated:
                # TODO: Is there a use case for this, perhaps for dynamic
                #  scoring purposes with full-text search?
                raise QueryNotSupportedError(
                    "You cannot negate a query for all results."
                )
            return "*"

        if isinstance(expression.left, (Expression, NegatedExpression)):
            result += f"({cls.resolve_redisearch_query(expression.left)})"
        elif isinstance(expression.left, ModelField):
            field_type = cls.resolve_field_type(expression.left, expression.op)
            field_name = expression.left.name
            field_info = expression.left.field_info
            if not field_info or not getattr(field_info, "index", None):
                raise QueryNotSupportedError(
                    f"You tried to query by a field ({field_name}) "
                    f"that isn't indexed. Docs: {ERRORS_URL}#E6"
                )
        else:
            raise QueryNotSupportedError(
                "A query expression should start with either a field "
                f"or an expression enclosed in parentheses. Docs: {ERRORS_URL}#E7"
            )

        right = expression.right

        if isinstance(right, (Expression, NegatedExpression)):
            if expression.op == Operators.AND:
                result += " "
            elif expression.op == Operators.OR:
                result += "| "
            else:
                raise QueryNotSupportedError(
                    "You can only combine two query expressions with "
                    f"AND (&) or OR (|). Docs: {ERRORS_URL}#E8"
                )

            if isinstance(right, NegatedExpression):
                result += "-"
                # We're handling the RediSearch operator in this call ("-"), so resolve the
                # inner expression instead of the NegatedExpression.
                right = right.expression

            result += f"({cls.resolve_redisearch_query(right)})"
        else:
            if not field_name:
                raise QuerySyntaxError("Could not resolve field name. See docs: TODO")
            elif not field_type:
                raise QuerySyntaxError("Could not resolve field type. See docs: TODO")
            elif not field_info:
                raise QuerySyntaxError("Could not resolve field info. See docs: TODO")
            elif isinstance(right, ModelField):
                raise QueryNotSupportedError(
                    "Comparing fields is not supported. See docs: TODO"
                )
            else:
                result += cls.resolve_value(
                    field_name,
                    field_type,
                    field_info,
                    expression.op,
                    right,
                    expression.parents,
                )

        if encompassing_expression_is_negated:
            result = f"-({result})"

        return result
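
    # End-to-end, an expression tree such as
    #
    #     (Member.age >= 18) & (Member.last_name == "Brookins")
    #
    # resolves to a query string along the lines of
    #
    #     "(@age:[18 +inf]) (@last_name:{Brookins})"
    #
    # (illustrative output; exact escaping may differ).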

    async def execute(self, exhaust_results=True):
        args = ["ft.search", self.model.Meta.index_name, self.query, *self.pagination]
        if self.sort_fields:
            args += self.resolve_redisearch_sort_fields()

        # Reset the cache if we're executing from offset 0.
        if self.offset == 0:
            self._model_cache.clear()

        # If the offset is greater than 0, we're paginating through a result set,
        # so append the new results to results already in the cache.
        raw_result = await self.model.db().execute_command(*args)
        count = raw_result[0]
        results = self.model.from_redis(raw_result)
        self._model_cache += results

        if not exhaust_results:
            return self._model_cache

        # The query returned all results, so we have no more work to do.
        if count <= len(results):
            return self._model_cache

        # Transparently (to the user) make subsequent requests to paginate
        # through the results and finally return them all.
        query = self
        while True:
            # Make a query for each pass of the loop, with a new offset equal to the
            # current offset plus `page_size`, until we stop getting results back.
            query = query.copy(offset=query.offset + query.page_size)
            _results = await query.execute(exhaust_results=False)
            if not _results:
                break
            self._model_cache += _results
        return self._model_cache

    async def first(self):
        query = self.copy(offset=0, limit=1, sort_fields=self.sort_fields)
        results = await query.execute()
        if not results:
            raise NotFoundError()
        return results[0]

    async def all(self, batch_size=10):
        if batch_size != self.page_size:
            query = self.copy(page_size=batch_size, limit=batch_size)
            return await query.execute()
        return await self.execute()

    def sort_by(self, *fields: str):
        if not fields:
            return self
        return self.copy(sort_fields=list(fields))
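
    # Typical chained usage of the methods above (illustrative model and
    # field names):
    #
    #     members = await Member.find(Member.age > 30).sort_by("-age").all()
    #     oldest = await Member.find().sort_by("-age").first()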

    async def update(self, use_transaction=True, **field_values):
        """
        Update models that match this query to the given field-value pairs.

        Keyword argument names are interpreted as fields on the target model,
        and the keyword argument values as the values to set those fields to.
        """
        validate_model_fields(self.model, field_values)
        pipeline = await self.model.db().pipeline() if use_transaction else None

        # TODO: async for here?
        for model in await self.all():
            for field, value in field_values.items():
                setattr(model, field, value)
            # TODO: In the non-transaction case, can we do more to detect
            #  failure responses from Redis?
            await model.save(pipeline=pipeline)

        if pipeline:
            # TODO: Response type?
            # TODO: Better error detection for transactions.
            await pipeline.execute()

    async def delete(self):
        """Delete all matching records in this query."""
        # TODO: Better response type, error detection
        return await self.model.db().delete(*[m.key() for m in await self.all()])

    async def __aiter__(self):
        if self._model_cache:
            for m in self._model_cache:
                yield m
        else:
            for m in await self.execute():
                yield m

    def __getitem__(self, item: int):
        """
        Given this code:

            Model.find()[1000]

        We should return only the 1000th result.

        1. If the result is loaded in the query cache for this query,
           we can return it directly from the cache.

        2. If the query cache does not have enough elements to return
           that result, then we should clone the current query and
           give it a new offset and limit: offset=n, limit=1.
        """
        if ASYNC_MODE:
            raise QuerySyntaxError(
                "Cannot use [] notation with async code. "
                "Use FindQuery.get_item() instead."
            )
        # The cache must contain more elements than the requested index for
        # the index to be valid.
        if self._model_cache and len(self._model_cache) > item:
            return self._model_cache[item]

        query = self.copy(offset=item, limit=1)

        return query.execute()[0]  # noqa

    async def get_item(self, item: int):
        """
        Given this code:

            await Model.find().get_item(1000)

        We should return only the 1000th result.

        1. If the result is loaded in the query cache for this query,
           we can return it directly from the cache.

        2. If the query cache does not have enough elements to return
           that result, then we should clone the current query and
           give it a new offset and limit: offset=n, limit=1.

        NOTE: This method is included specifically for async users, who
        cannot use the notation Model.find()[1000].
        """
        if self._model_cache and len(self._model_cache) > item:
            return self._model_cache[item]

        query = self.copy(offset=item, limit=1)
        result = await query.execute()
        return result[0]


class PrimaryKeyCreator(Protocol):
    def create_pk(self, *args, **kwargs) -> str:
        """Create a new primary key"""


class UlidPrimaryKey:
    """
    A client-side generated primary key that follows the ULID spec.
    https://github.com/ulid/javascript#specification
    """

    @staticmethod
    def create_pk(*args, **kwargs) -> str:
        return str(ULID())
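

# A sketch of swapping in a different key strategy. ``Uuid4PrimaryKey`` and
# ``Member`` are hypothetical; any class satisfying PrimaryKeyCreator works:
#
#     import uuid
#
#     class Uuid4PrimaryKey:
#         @staticmethod
#         def create_pk(*args, **kwargs) -> str:
#             return str(uuid.uuid4())
#
#     class Member(HashModel):
#         class Meta:
#             primary_key_creator_cls = Uuid4PrimaryKey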


def __dataclass_transform__(
    *,
    eq_default: bool = True,
    order_default: bool = False,
    kw_only_default: bool = False,
    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
) -> Callable[[_T], _T]:
    return lambda a: a


class FieldInfo(PydanticFieldInfo):
    def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
        primary_key = kwargs.pop("primary_key", False)
        sortable = kwargs.pop("sortable", Undefined)
        index = kwargs.pop("index", Undefined)
        full_text_search = kwargs.pop("full_text_search", Undefined)
        super().__init__(default=default, **kwargs)
        self.primary_key = primary_key
        self.sortable = sortable
        self.index = index
        self.full_text_search = full_text_search


class RelationshipInfo(Representation):
    def __init__(
        self,
        *,
        back_populates: Optional[str] = None,
        link_model: Optional[Any] = None,
    ) -> None:
        self.back_populates = back_populates
        self.link_model = link_model


def Field(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: str = None,
    title: str = None,
    description: str = None,
    exclude: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    include: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    const: bool = None,
    gt: float = None,
    ge: float = None,
    lt: float = None,
    le: float = None,
    multiple_of: float = None,
    min_items: int = None,
    max_items: int = None,
    min_length: int = None,
    max_length: int = None,
    allow_mutation: bool = True,
    regex: str = None,
    primary_key: bool = False,
    sortable: Union[bool, UndefinedType] = Undefined,
    index: Union[bool, UndefinedType] = Undefined,
    full_text_search: Union[bool, UndefinedType] = Undefined,
    schema_extra: Optional[Dict[str, Any]] = None,
) -> Any:
    current_schema_extra = schema_extra or {}
    field_info = FieldInfo(
        default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        exclude=exclude,
        include=include,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        min_items=min_items,
        max_items=max_items,
        min_length=min_length,
        max_length=max_length,
        allow_mutation=allow_mutation,
        regex=regex,
        primary_key=primary_key,
        sortable=sortable,
        index=index,
        full_text_search=full_text_search,
        **current_schema_extra,
    )
    field_info._validate()
    return field_info
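

# A sketch of how the redis-om-specific Field options are used on a model
# (``Member`` is illustrative):
#
#     class Member(HashModel):
#         first_name: str = Field(index=True)
#         last_name: str = Field(index=True, sortable=True)
#         bio: str = Field(index=True, full_text_search=True)
#
# ``index=True`` makes a field queryable, ``sortable=True`` allows sort_by(),
# and ``full_text_search=True`` enables the ``%`` (LIKE) operator on it.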


@dataclasses.dataclass
class PrimaryKey:
    name: str
    field: ModelField


class BaseMeta(Protocol):
    global_key_prefix: str
    model_key_prefix: str
    primary_key_pattern: str
    database: aioredis.Redis
    primary_key: PrimaryKey
    primary_key_creator_cls: Type[PrimaryKeyCreator]
    index_name: str
    embedded: bool
    encoding: str


@dataclasses.dataclass
class DefaultMeta:
    """A default placeholder Meta object.

    TODO: Revisit whether this is really necessary, and whether making
    these all optional here is the right choice.
    """

    global_key_prefix: Optional[str] = None
    model_key_prefix: Optional[str] = None
    primary_key_pattern: Optional[str] = None
    database: Optional[aioredis.Redis] = None
    primary_key: Optional[PrimaryKey] = None
    primary_key_creator_cls: Optional[Type[PrimaryKeyCreator]] = None
    index_name: Optional[str] = None
    embedded: Optional[bool] = False
    encoding: str = "utf-8"


class ModelMeta(ModelMetaclass):
    _meta: BaseMeta

    def __new__(cls, name, bases, attrs, **kwargs):  # noqa C901
        meta = attrs.pop("Meta", None)
        new_class = super().__new__(cls, name, bases, attrs, **kwargs)

        # The fact that there is a Meta field and _meta field is important: a
        # user may have given us a Meta object with their configuration, while
        # we might have inherited _meta from a parent class, and should
        # therefore use some of the inherited fields.
        meta = meta or getattr(new_class, "Meta", None)
        base_meta = getattr(new_class, "_meta", None)

        if meta and meta != DefaultMeta and meta != base_meta:
            new_class.Meta = meta
            new_class._meta = meta
        elif base_meta:
            new_class._meta = type(
                f"{new_class.__name__}Meta", (base_meta,), dict(base_meta.__dict__)
            )
            new_class.Meta = new_class._meta
            # Unset inherited values we don't want to reuse (typically based on
            # the model name).
            new_class._meta.model_key_prefix = None
            new_class._meta.index_name = None
        else:
            new_class._meta = type(
                f"{new_class.__name__}Meta", (DefaultMeta,), dict(DefaultMeta.__dict__)
            )
            new_class.Meta = new_class._meta

        # Create proxies for each model field so that we can use the field
        # in queries, like Model.get(Model.field_name == 1)
        for field_name, field in new_class.__fields__.items():
            setattr(new_class, field_name, ExpressionProxy(field, []))
            annotation = new_class.get_annotations().get(field_name)
            if annotation:
                new_class.__annotations__[field_name] = Union[
                    annotation, ExpressionProxy
                ]
            else:
                new_class.__annotations__[field_name] = ExpressionProxy
            # Check if this is our FieldInfo version with extended ORM metadata.
            if isinstance(field.field_info, FieldInfo):
                if field.field_info.primary_key:
                    new_class._meta.primary_key = PrimaryKey(
                        name=field_name, field=field
                    )

        if not getattr(new_class._meta, "global_key_prefix", None):
            new_class._meta.global_key_prefix = getattr(
                base_meta, "global_key_prefix", ""
            )
        if not getattr(new_class._meta, "model_key_prefix", None):
            # Don't look at the base class for this.
            new_class._meta.model_key_prefix = (
                f"{new_class.__module__}.{new_class.__name__}"
            )
        if not getattr(new_class._meta, "primary_key_pattern", None):
            new_class._meta.primary_key_pattern = getattr(
                base_meta, "primary_key_pattern", "{pk}"
            )
        if not getattr(new_class._meta, "database", None):
            new_class._meta.database = getattr(
                base_meta, "database", get_redis_connection()
            )
        if not getattr(new_class._meta, "encoding", None):
            new_class._meta.encoding = getattr(base_meta, "encoding")
        if not getattr(new_class._meta, "primary_key_creator_cls", None):
            new_class._meta.primary_key_creator_cls = getattr(
                base_meta, "primary_key_creator_cls", UlidPrimaryKey
            )
        # TODO: Configurable key separator; defaults to ":"
        if not getattr(new_class._meta, "index_name", None):
            new_class._meta.index_name = (
                f"{new_class._meta.global_key_prefix}:"
                f"{new_class._meta.model_key_prefix}:index"
            )

        # Not an abstract model class or embedded model, so we should let the
        # Migrator create indexes for it.
        if abc.ABC not in bases and not getattr(new_class._meta, "embedded", False):
            key = f"{new_class.__module__}.{new_class.__qualname__}"
            model_registry[key] = new_class

        return new_class


class RedisModel(BaseModel, abc.ABC, metaclass=ModelMeta):
    pk: Optional[str] = Field(default=None, primary_key=True)

    Meta = DefaultMeta

    class Config:
        orm_mode = True
        arbitrary_types_allowed = True
        extra = "allow"

    def __init__(__pydantic_self__, **data: Any) -> None:
        super().__init__(**data)
        __pydantic_self__.validate_primary_key()

    def __lt__(self, other):
        """Default sort: compare primary key of models."""
        return self.pk < other.pk

    def key(self):
        """Return the Redis key for this model."""
        pk = getattr(self, self._meta.primary_key.field.name)
        return self.make_primary_key(pk)

    @classmethod
    async def delete(cls, pk: Any) -> int:
        """Delete data at this key."""
        return await cls.db().delete(cls.make_primary_key(pk))

    @classmethod
    async def get(cls, pk: Any) -> "RedisModel":
        raise NotImplementedError

    async def update(self, **field_values):
        """Update this model instance with the specified key-value pairs."""
        raise NotImplementedError

    async def save(self, pipeline: Optional[Pipeline] = None) -> "RedisModel":
        raise NotImplementedError

    @validator("pk", always=True, allow_reuse=True)
    def validate_pk(cls, v):
        if not v:
            v = cls._meta.primary_key_creator_cls().create_pk()
        return v

    @classmethod
    def validate_primary_key(cls):
        """Check for a primary key. We need one (and only one)."""
        primary_keys = 0
        for name, field in cls.__fields__.items():
            if getattr(field.field_info, "primary_key", None):
                primary_keys += 1
        if primary_keys == 0:
            raise RedisModelError("You must define a primary key for the model")
        elif primary_keys > 1:
            raise RedisModelError("You must define only one primary key for a model")

    @classmethod
    def make_key(cls, part: str):
        global_prefix = getattr(cls._meta, "global_key_prefix", "").strip(":")
        model_prefix = getattr(cls._meta, "model_key_prefix", "").strip(":")
        return f"{global_prefix}:{model_prefix}:{part}"

    @classmethod
    def make_primary_key(cls, pk: Any):
        """Return the Redis key for the given primary key value."""
        return cls.make_key(cls._meta.primary_key_pattern.format(pk=pk))

    @classmethod
    def db(cls):
        return cls._meta.database
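
    # For example, with the default Meta settings, a model defined in module
    # ``myapp.models`` as ``class Member(HashModel)`` with pk "01FX..." stores
    # its data at a key shaped like:
    #
    #     :myapp.models.Member:01FX...
    #
    # (global_key_prefix defaults to "", model_key_prefix to the dotted class
    # path, and primary_key_pattern to "{pk}").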
|
2021-09-16 21:03:03 +02:00
|
|
|
|
2021-09-26 05:38:02 +02:00
|
|
|
@classmethod
|
2021-10-14 02:16:20 +02:00
|
|
|
def find(cls, *expressions: Union[Any, Expression]) -> FindQuery:
|
2021-09-26 05:38:02 +02:00
|
|
|
return FindQuery(expressions=expressions, model=cls)

    @classmethod
    def from_redis(cls, res: Any):
        # TODO: Parsing logic copied from redisearch-py. Evaluate.
        import six
        from six.moves import xrange
        from six.moves import zip as izip

        def to_string(s):
            if isinstance(s, six.string_types):
                return s
            elif isinstance(s, six.binary_type):
                return s.decode("utf-8", "ignore")
            else:
                return s  # Not a string we care about

        docs = []
        step = 2  # Each match occupies two slots: the key, then its fields.
        offset = 1  # The first item is the count of total matches.

        for i in xrange(1, len(res), step):
            fields_offset = offset

            fields = dict(
                izip(
                    map(to_string, res[i + fields_offset][::2]),
                    map(to_string, res[i + fields_offset][1::2]),
                )
            )

            try:
                del fields["id"]
            except KeyError:
                pass

            try:
                fields["json"] = fields["$"]
                del fields["$"]
            except KeyError:
                pass

            if "json" in fields:
                json_fields = json.loads(fields["json"])
                doc = cls(**json_fields)
            else:
                doc = cls(**fields)
            docs.append(doc)
        return docs
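
    # For illustration: a raw FT.SEARCH reply for a hash index has this shape,
    # which from_redis() walks in (key, fields) pairs after the match count:
    #
    #   [2, "customer:1", ["first_name", "Andrew"], "customer:2", ["first_name", "Kim"]]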

    @classmethod
    def get_annotations(cls):
        d = {}
        for c in cls.mro():
            try:
                d.update(**c.__annotations__)
            except AttributeError:
                # object, at least, has no __annotations__ attribute.
                pass
        return d

    @classmethod
    async def add(
        cls,
        models: Sequence["RedisModel"],
        pipeline: Optional[Pipeline] = None,
        pipeline_verifier: Callable[..., Any] = verify_pipeline_response,
    ) -> Sequence["RedisModel"]:
        if pipeline is None:
            # By default, send commands in a pipeline. Saving each model will
            # be atomic, but Redis may process other commands in between
            # these saves.
            db = cls.db().pipeline(transaction=False)
        else:
            # If the user gave us a pipeline, add our commands to that. The user
            # will be responsible for executing the pipeline after they've
            # accumulated the commands they want to send.
            db = pipeline

        for model in models:
            # save() just returns the model, so we don't need the result here.
            await model.save(pipeline=db)

        # If the user didn't give us a pipeline, then we need to execute
        # the one we just created.
        if pipeline is None:
            result = await db.execute()
            pipeline_verifier(result, expected_responses=len(models))

        return models
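
    # Usage sketch (hypothetical models): callers can batch several add()
    # calls into one round trip by passing their own pipeline and executing
    # it themselves.
    #
    #   pipeline = Customer.db().pipeline(transaction=False)
    #   await Customer.add([andrew, kim], pipeline=pipeline)
    #   await Order.add([order], pipeline=pipeline)
    #   await pipeline.execute()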

    @classmethod
    def redisearch_schema(cls):
        raise NotImplementedError

    def check(self):
        """Run all validations."""
        *_, validation_error = validate_model(self.__class__, self.__dict__)
        if validation_error:
            raise validation_error


class HashModel(RedisModel, abc.ABC):
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

        for name, field in cls.__fields__.items():
            origin = get_origin(field.outer_type_)
            if origin:
                for typ in (Set, Mapping, List):
                    if issubclass(origin, typ):
                        raise RedisModelError(
                            f"HashModels cannot index set, list,"
                            f" or mapping fields. Field: {name}"
                        )

            if issubclass(field.outer_type_, RedisModel):
                raise RedisModelError(
                    f"HashModels cannot index embedded model fields. Field: {name}"
                )
            elif dataclasses.is_dataclass(field.outer_type_):
                raise RedisModelError(
                    f"HashModels cannot index dataclass fields. Field: {name}"
                )
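
    # For illustration (hypothetical model): declaring a container or embedded
    # model field on a HashModel raises RedisModelError at class-definition
    # time, because Redis hashes store only flat field-value pairs.
    #
    #   class Customer(HashModel):
    #       nicknames: List[str]  # RedisModelError: cannot index list fields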

    async def save(self, pipeline: Optional[Pipeline] = None) -> "HashModel":
        self.check()
        if pipeline is None:
            db = self.db()
        else:
            db = pipeline
        document = jsonable_encoder(self.dict())
        # TODO: Wrap any Redis response errors in a custom exception?
        await db.hset(self.key(), mapping=document)
        return self

    @classmethod
    async def all_pks(cls):  # type: ignore
        key_prefix = cls.make_key(cls._meta.primary_key_pattern.format(pk=""))
        # TODO: We assume the key ends with the default separator, ":" -- when
        # we make the separator configurable, we need to update this as well.
        # ... And probably lots of other places ...
        #
        # TODO: Also, we need to decide how we want to handle the lack of
        # decode_responses=True...
        return (
            key.split(":")[-1]
            if isinstance(key, str)
            else key.decode(cls.Meta.encoding).split(":")[-1]
            async for key in cls.db().scan_iter(f"{key_prefix}*", _type="HASH")
        )
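
    # Usage sketch: all_pks() resolves to an async generator of primary keys,
    # found by scanning HASH keys under this model's key prefix.
    #
    #   async for pk in await Customer.all_pks():
    #       print(pk)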

    @classmethod
    async def get(cls, pk: Any) -> "HashModel":
        document = await cls.db().hgetall(cls.make_primary_key(pk))
        if not document:
            raise NotFoundError
        try:
            result = cls.parse_obj(document)
        except TypeError as e:
            log.warning(
                f'Could not parse Redis response. Error was: "{e}". Probably, the '
                "connection is not set to decode responses from bytes. "
                "Attempting to decode the response using the encoding set on "
                f"the model class ({cls}). Encoding: {cls.Meta.encoding}."
            )
            document = decode_redis_value(document, cls.Meta.encoding)
            result = cls.parse_obj(document)
        return result

    @classmethod
    @no_type_check
    def _get_value(cls, *args, **kwargs) -> Any:
        """
        Always send None as an empty string.

        TODO: We do this because redis-py's hset() method requires non-null
        values. Is there a better way?
        """
        val = super()._get_value(*args, **kwargs)
        if val is None:
            return ""
        return val

    @classmethod
    def redisearch_schema(cls):
        hash_prefix = cls.make_key(cls._meta.primary_key_pattern.format(pk=""))
        schema_prefix = f"ON HASH PREFIX 1 {hash_prefix} SCHEMA"
        schema_parts = [schema_prefix] + cls.schema_for_fields()
        return " ".join(schema_parts)

    async def update(self, **field_values):
        validate_model_fields(self.__class__, field_values)
        for field, value in field_values.items():
            setattr(self, field, value)
        await self.save()

    @classmethod
    def schema_for_fields(cls):
        schema_parts = []

        for name, field in cls.__fields__.items():
            # TODO: Merge this code with schema_for_type()?
            _type = field.outer_type_
            is_subscripted_type = get_origin(_type)

            if getattr(field.field_info, "primary_key", None):
                if issubclass(_type, str):
                    redisearch_field = (
                        f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
                    )
                else:
                    redisearch_field = cls.schema_for_type(
                        name, _type, field.field_info
                    )
                schema_parts.append(redisearch_field)
            elif getattr(field.field_info, "index", None) is True:
                schema_parts.append(cls.schema_for_type(name, _type, field.field_info))
            elif is_subscripted_type:
                # Ignore subscripted types (usually containers!) that we don't
                # support, for the purposes of indexing.
                if not is_supported_container_type(_type):
                    continue

                embedded_cls = get_args(_type)
                if not embedded_cls:
                    # TODO: Test if this can really happen.
                    log.warning("Model %s defined an empty list field: %s", cls, name)
                    continue
                embedded_cls = embedded_cls[0]
                schema_parts.append(
                    cls.schema_for_type(name, embedded_cls, field.field_info)
                )
            elif issubclass(_type, RedisModel):
                schema_parts.append(cls.schema_for_type(name, _type, field.field_info))
        return schema_parts

    @classmethod
    def schema_for_type(cls, name, typ: Any, field_info: PydanticFieldInfo):
        # TODO: Import parent logic from JsonModel to deal with lists, so that
        # a List[int] gets indexed as TAG instead of NUMERIC.
        # TODO: Raise an error if a user embeds a model field or list and
        # marks it sortable. Instead, the embedded model should mark
        # individual fields as sortable.
        # TODO: Abstract string-building logic for each type (TAG, etc.) into
        # classes that take a field name.
        sortable = getattr(field_info, "sortable", False)

        if is_supported_container_type(typ):
            embedded_cls = get_args(typ)
            if not embedded_cls:
                # TODO: Test if this can really happen.
                log.warning(
                    "Model %s defined an empty list or tuple field: %s", cls, name
                )
                return ""
            embedded_cls = embedded_cls[0]
            schema = cls.schema_for_type(name, embedded_cls, field_info)
        elif any(issubclass(typ, t) for t in NUMERIC_TYPES):
            schema = f"{name} NUMERIC"
        elif issubclass(typ, str):
            if getattr(field_info, "full_text_search", False) is True:
                schema = (
                    f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} "
                    f"{name} AS {name}_fts TEXT"
                )
            else:
                schema = f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
        elif issubclass(typ, RedisModel):
            sub_fields = []
            for embedded_name, field in typ.__fields__.items():
                sub_fields.append(
                    cls.schema_for_type(
                        f"{name}_{embedded_name}", field.outer_type_, field.field_info
                    )
                )
            schema = " ".join(sub_fields)
        else:
            schema = f"{name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
        if schema and sortable is True:
            schema += " SORTABLE"
        return schema


class JsonModel(RedisModel, abc.ABC):
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Generate the RediSearch schema once to validate fields.
        cls.redisearch_schema()

    def __init__(self, *args, **kwargs):
        if not has_redis_json(self.db()):
            log.error(
                "Your Redis instance does not have the RedisJson module "
                "loaded. JsonModel depends on RedisJson."
            )
        super().__init__(*args, **kwargs)

    async def save(self, pipeline: Optional[Pipeline] = None) -> "JsonModel":
        self.check()
        if pipeline is None:
            db = self.db()
        else:
            db = pipeline
        # TODO: Wrap response errors in a custom exception?
        await db.execute_command("JSON.SET", self.key(), ".", self.json())
        return self

    async def update(self, **field_values):
        validate_model_fields(self.__class__, field_values)
        for field, value in field_values.items():
            # Handle the simple update case first, e.g. city="Happy Valley"
            if "__" not in field:
                setattr(self, field, value)
                continue

            # Handle the nested update field name case, e.g. address__city="Happy Valley"
            obj = self
            parts = field.split("__")
            path_to_field = parts[:-1]
            target_field = parts[-1]

            # Get the final object in a nested update field name, e.g. for
            # the string address__city, we want to get self.address.city
            for sub_field in path_to_field:
                obj = getattr(obj, sub_field)

            # Set the target field (the last "part" of the nested update
            # field name) to the target value.
            setattr(obj, target_field, value)
        await self.save()
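
    # Usage sketch (hypothetical model): double underscores address nested
    # fields, so this updates customer.address.city in place, then saves.
    #
    #   await customer.update(first_name="Andrew", address__city="Happy Valley")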

    @classmethod
    async def get(cls, pk: Any) -> "JsonModel":
        document = await cls.db().execute_command("JSON.GET", cls.make_primary_key(pk))
        if not document:
            raise NotFoundError
        return cls.parse_raw(document)

    @classmethod
    def redisearch_schema(cls):
        key_prefix = cls.make_key(cls._meta.primary_key_pattern.format(pk=""))
        schema_prefix = f"ON JSON PREFIX 1 {key_prefix} SCHEMA"
        schema_parts = [schema_prefix] + cls.schema_for_fields()
        return " ".join(schema_parts)

    @classmethod
    def schema_for_fields(cls):
        schema_parts = []
        json_path = "$"

        for name, field in cls.__fields__.items():
            _type = field.outer_type_
            schema_parts.append(
                cls.schema_for_type(json_path, name, "", _type, field.field_info)
            )
        return schema_parts

    @classmethod
    def schema_for_type(
        cls,
        json_path: str,
        name: str,
        name_prefix: str,
        typ: Any,
        field_info: PydanticFieldInfo,
        parent_type: Optional[Any] = None,
    ) -> str:
        should_index = getattr(field_info, "index", False)
        is_container_type = is_supported_container_type(typ)
        parent_is_container_type = is_supported_container_type(parent_type)
        parent_is_model = False

        if parent_type:
            try:
                parent_is_model = issubclass(parent_type, RedisModel)
            except TypeError:
                pass

        # TODO: We need a better way to know that we're indexing a value
        # discovered in a model within an array.
        #
        # E.g., say we have a field like `orders: List[Order]`, and we're
        # indexing the "name" field from the Order model (because it's marked
        # index=True in the Order model). The JSONPath for this field is
        # $.orders[*].name, but the "parent" type at this point is Order, not
        # List. For now, we'll discover that Orders are stored in a list by
        # checking if the JSONPath ends with the expression for all items in
        # an array.
        parent_is_model_in_container = parent_is_model and json_path.endswith("[*]")

        try:
            field_is_model = issubclass(typ, RedisModel)
        except TypeError:
            # Not a class, probably a type annotation
            field_is_model = False

        # When we encounter a list or model field, we need to descend
        # into the values of the list or the fields of the model to
        # find any values marked as indexed.
        if is_container_type:
            field_type = get_origin(typ)
            embedded_cls = get_args(typ)
            if not embedded_cls:
                log.warning(
                    "Model %s defined an empty list or tuple field: %s", cls, name
                )
                return ""
            embedded_cls = embedded_cls[0]
            return cls.schema_for_type(
                f"{json_path}.{name}[*]",
                name,
                name_prefix,
                embedded_cls,
                field_info,
                parent_type=field_type,
            )
        elif field_is_model:
            name_prefix = f"{name_prefix}_{name}" if name_prefix else name
            sub_fields = []
            for embedded_name, field in typ.__fields__.items():
                if parent_is_container_type:
                    # We'll store this value as a JavaScript array, so the
                    # correct JSONPath expression refers directly to attribute
                    # names after the container notation, e.g.
                    # orders[*].created_date.
                    path = json_path
                else:
                    # All other fields should use dot notation with both the
                    # current field name and "embedded" field name, e.g.,
                    # order.address.street_line_1.
                    path = f"{json_path}.{name}"
                sub_fields.append(
                    cls.schema_for_type(
                        path,
                        embedded_name,
                        name_prefix,
                        field.outer_type_,
                        field.field_info,
                        parent_type=typ,
                    )
                )
            return " ".join(filter(None, sub_fields))
        # NOTE: This is the termination point for recursion. We've descended
        # into models and lists until we found an actual value to index.
        elif should_index:
            index_field_name = f"{name_prefix}_{name}" if name_prefix else name
            if parent_is_container_type:
                # If we're indexing this field as a JavaScript array, then
                # the currently built-up JSONPath expression will already be
                # "field_name[*]", which is what we want to use.
                path = json_path
            else:
                path = f"{json_path}.{name}"
            sortable = getattr(field_info, "sortable", False)
            full_text_search = getattr(field_info, "full_text_search", False)
            sortable_tag_error = RedisModelError(
                "In this Preview release, TAG fields cannot "
                f"be marked as sortable. Problem field: {name}. "
                "See docs: TODO"
            )

            # TODO: GEO field
            if parent_is_container_type or parent_is_model_in_container:
                if typ is not str:
                    raise RedisModelError(
                        "In this Preview release, list and tuple fields can only "
                        f"contain strings. Problem field: {name}. See docs: TODO"
                    )
                if full_text_search is True:
                    raise RedisModelError(
                        "List and tuple fields cannot be indexed for full-text "
                        f"search. Problem field: {name}. See docs: TODO"
                    )
                schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
                if sortable is True:
                    raise sortable_tag_error
            elif any(issubclass(typ, t) for t in NUMERIC_TYPES):
                schema = f"{path} AS {index_field_name} NUMERIC"
            elif issubclass(typ, str):
                if full_text_search is True:
                    schema = (
                        f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR} "
                        f"{path} AS {index_field_name}_fts TEXT"
                    )
                    if sortable is True:
                        # NOTE: With the current preview release, making a field
                        # full-text searchable and sortable only makes the TEXT
                        # field sortable. This means that results for full-text
                        # search queries can be sorted, but not exact match
                        # queries.
                        schema += " SORTABLE"
                else:
                    schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
                    if sortable is True:
                        raise sortable_tag_error
            else:
                schema = f"{path} AS {index_field_name} TAG SEPARATOR {SINGLE_VALUE_TAG_FIELD_SEPARATOR}"
                if sortable is True:
                    raise sortable_tag_error
            return schema
        return ""


class EmbeddedJsonModel(JsonModel, abc.ABC):
    class Meta:
        embedded = True
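
# Usage sketch (hypothetical models): an EmbeddedJsonModel is stored inline
# within a parent JsonModel's JSON document rather than under its own Redis
# key; its indexed fields are flattened into the parent's RediSearch schema.
#
#   class Address(EmbeddedJsonModel):
#       city: str = Field(index=True)
#
#   class Customer(JsonModel):
#       address: Address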