Final docs push

Andrew Brookins 2021-11-24 18:12:27 -08:00
parent 321b356140
commit 269d44c26e
13 changed files with 442 additions and 48 deletions

@@ -7,6 +7,7 @@ from aredis_om.model.migrations.migrator import Migrator
 @click.option("--module", default="aredis_om")
 def migrate(module):
     migrator = Migrator(module)
+    migrator.detect_migrations()
     if migrator.migrations:
         print("Pending migrations:")

@@ -68,6 +68,15 @@ def jsonable_encoder(
         include = set(include)
     if exclude is not None and not isinstance(exclude, (set, dict)):
         exclude = set(exclude)
+    if custom_encoder:
+        if type(obj) in custom_encoder:
+            return custom_encoder[type(obj)](obj)
+        else:
+            for encoder_type, encoder in custom_encoder.items():
+                if isinstance(obj, encoder_type):
+                    return encoder(obj)
     if isinstance(obj, BaseModel):
         encoder = getattr(obj.__config__, "json_encoders", {})
         if custom_encoder:
@@ -145,13 +154,9 @@ def jsonable_encoder(
         )
         return encoded_list
-    if custom_encoder:
-        if type(obj) in custom_encoder:
-            return custom_encoder[type(obj)](obj)
-        else:
-            for encoder_type, encoder in custom_encoder.items():
-                if isinstance(obj, encoder_type):
-                    return encoder(obj)
+    # This function originally called custom encoders here,
+    # which meant we couldn't override the encoder for many
+    # types hard-coded into this function (lists, etc.).
     if type(obj) in ENCODERS_BY_TYPE:
         return ENCODERS_BY_TYPE[type(obj)](obj)
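Moving the custom-encoder check to the top of jsonable_encoder is what the new comment describes: callers can now override encoding even for types the function handles with hard-coded branches. A small sketch of the difference (the pipe-joining encoder is invented, and the module path is assumed):

from aredis_om.model.encoders import jsonable_encoder  # assumed import path

# A custom encoder for a hard-coded type such as list now takes
# precedence, because the check runs before the built-in list branch:
encoded = jsonable_encoder([1, 2, 3], custom_encoder={list: lambda v: "|".join(map(str, v))})
print(encoded)  # "1|2|3"; before this change, the built-in list branch ran first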

@@ -84,12 +84,11 @@ class IndexMigration:
 class Migrator:
-    def __init__(self, redis: Redis, module=None):
+    def __init__(self, module=None):
         self.module = module
         self.migrations: List[IndexMigration] = []
-        self.redis = redis
 
-    async def run(self):
+    async def detect_migrations(self):
         # Try to load any modules found under the given path or module name.
         if self.module:
             import_submodules(self.module)
@@ -100,6 +99,7 @@ class Migrator:
         for name, cls in model_registry.items():
             hash_key = schema_hash_key(cls.Meta.index_name)
+            redis = cls.db()
             try:
                 schema = cls.redisearch_schema()
             except NotImplementedError:
@@ -108,7 +108,7 @@ class Migrator:
             current_hash = hashlib.sha1(schema.encode("utf-8")).hexdigest()  # nosec
             try:
-                await self.redis.execute_command("ft.info", cls.Meta.index_name)
+                await redis.execute_command("ft.info", cls.Meta.index_name)
             except ResponseError:
                 self.migrations.append(
                     IndexMigration(
@@ -117,12 +117,12 @@ class Migrator:
                         schema,
                         current_hash,
                         MigrationAction.CREATE,
-                        self.redis,
+                        redis,
                     )
                 )
                 continue
-            stored_hash = await self.redis.get(hash_key)
+            stored_hash = await redis.get(hash_key)
             schema_out_of_date = current_hash != stored_hash
 
             if schema_out_of_date:
@@ -134,7 +134,7 @@ class Migrator:
                         schema,
                         current_hash,
                         MigrationAction.DROP,
-                        self.redis,
+                        redis,
                         stored_hash,
                     )
                 )
@@ -145,12 +145,14 @@ class Migrator:
                         schema,
                         current_hash,
                         MigrationAction.CREATE,
-                        self.redis,
+                        redis,
                         stored_hash,
                     )
                 )
 
+    async def run(self):
         # TODO: Migration history
         # TODO: Dry run with output
+        await self.detect_migrations()
         for migration in self.migrations:
             await migration.run()
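The net effect of the changes in this file: the Migrator no longer owns a single Redis connection, and each model supplies its own via cls.db(), so models bound to different Redis instances can be migrated in one pass. A minimal sketch of the new call pattern (function name invented):

import asyncio

from aredis_om.model.migrations.migrator import Migrator

async def migrate_all():
    migrator = Migrator()   # no redis argument anymore
    await migrator.run()    # run() now calls detect_migrations() itself

asyncio.run(migrate_all())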

@@ -107,6 +107,7 @@ def embedded(cls):
 def is_supported_container_type(typ: Optional[type]) -> bool:
+    # TODO: Wait, why don't we support indexing sets?
     if typ == list or typ == tuple:
         return True
     unwrapped = get_origin(typ)
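For reference, what this predicate accepts and rejects, assuming the rest of the function returns True only when get_origin() yields list or tuple:

from typing import List, Set, Tuple

# Assumed internal import path, for illustration only:
from aredis_om.model.model import is_supported_container_type

assert is_supported_container_type(list) is True         # bare list
assert is_supported_container_type(List[str]) is True    # get_origin() -> list
assert is_supported_container_type(Tuple[str]) is True   # get_origin() -> tuple
assert is_supported_container_type(Set[str]) is False    # sets aren't indexed (hence the TODO)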
@@ -479,8 +480,7 @@ class FindQuery:
         if isinstance(value, str):
             return escaper.escape(value)
         if isinstance(value, bytes):
-            # TODO: We don't decode and then escape bytes objects passed as input.
-            # Should we?
+            # TODO: We don't decode bytes objects passed as input. Should we?
             # TODO: TAG indexes fail on JSON arrays of numbers -- only strings
             # are allowed -- what happens if we save an array of bytes?
             return value
@@ -966,7 +966,7 @@ class PrimaryKey:
     field: ModelField
 
 
-class BaseMeta(abc.ABC):
+class BaseMeta(Protocol):
     global_key_prefix: str
     model_key_prefix: str
     primary_key_pattern: str
@@ -974,7 +974,6 @@
     primary_key: PrimaryKey
     primary_key_creator_cls: Type[PrimaryKeyCreator]
     index_name: str
-    abstract: bool
     embedded: bool
     encoding: str
@@ -994,7 +993,6 @@ class DefaultMeta:
     primary_key: Optional[PrimaryKey] = None
     primary_key_creator_cls: Optional[Type[PrimaryKeyCreator]] = None
     index_name: Optional[str] = None
-    abstract: Optional[bool] = False
     embedded: Optional[bool] = False
     encoding: str = "utf-8"
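Switching BaseMeta from abc.ABC to typing.Protocol (and dropping the unused abstract flag) means Meta classes are checked structurally rather than by inheritance: a model's inner Meta satisfies BaseMeta simply by defining the expected attributes. A hedged sketch (model and prefix invented; the top-level HashModel import is assumed):

from aredis_om import HashModel

class Customer(HashModel):
    first_name: str

    class Meta:
        # No base class required: this satisfies the BaseMeta
        # Protocol structurally.
        global_key_prefix = "myapp"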
@@ -1269,17 +1267,23 @@ class HashModel(RedisModel, abc.ABC):
         super().__init_subclass__(**kwargs)
 
         for name, field in cls.__fields__.items():
+            origin = get_origin(field.outer_type_)
+            if origin:
+                for typ in (Set, Mapping, List):
+                    if issubclass(origin, typ):
+                        raise RedisModelError(
+                            f"HashModels cannot index set, list,"
+                            f" or mapping fields. Field: {name}"
+                        )
             if issubclass(field.outer_type_, RedisModel):
                 raise RedisModelError(
-                    f"HashModels cannot have embedded model " f"fields. Field: {name}"
+                    f"HashModels cannot index embedded model fields. Field: {name}"
                 )
+            elif dataclasses.is_dataclass(field.outer_type_):
+                raise RedisModelError(
+                    f"HashModels cannot index dataclass fields. Field: {name}"
+                )
-            for typ in (Set, Mapping, List):
-                if issubclass(field.outer_type_, typ):
-                    raise RedisModelError(
-                        f"HashModels cannot have set, list,"
-                        f" or mapping fields. Field: {name}"
-                    )
 
     async def save(self, pipeline: Optional[Pipeline] = None) -> "HashModel":
         self.check()
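Each of the branches above raises at class-definition time. A sketch of field shapes the expanded validation now rejects (class and field names invented):

import dataclasses
from typing import List

from aredis_om import HashModel

@dataclasses.dataclass
class Address:
    street: str

# Either field would raise RedisModelError as soon as the class body runs:
class Order(HashModel):
    items: List[str]   # "HashModels cannot index set, list, or mapping fields"

class Customer(HashModel):
    address: Address   # "HashModels cannot index dataclass fields"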
@@ -1360,6 +1364,8 @@ class HashModel(RedisModel, abc.ABC):
         for name, field in cls.__fields__.items():
             # TODO: Merge this code with schema_for_type()?
             _type = field.outer_type_
+            is_subscripted_type = get_origin(_type)
             if getattr(field.field_info, "primary_key", None):
                 if issubclass(_type, str):
                     redisearch_field = (
@@ -1372,7 +1378,12 @@ class HashModel(RedisModel, abc.ABC):
                 schema_parts.append(redisearch_field)
             elif getattr(field.field_info, "index", None) is True:
                 schema_parts.append(cls.schema_for_type(name, _type, field.field_info))
-            elif is_supported_container_type(_type):
+            elif is_subscripted_type:
+                # Ignore subscripted types (usually containers!) that we don't
+                # support, for the purposes of indexing.
+                if not is_supported_container_type(_type):
+                    continue
                 embedded_cls = get_args(_type)
                 if not embedded_cls:
                     # TODO: Test if this can really happen.
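The new is_subscripted_type guard makes schema generation skip subscripted types it can't index, as the added comment says, rather than only handling the supported containers. A hedged sketch of a field shape this branch now quietly ignores when building the index schema (names invented):

from typing import Optional

from aredis_om import HashModel

class Customer(HashModel):
    # get_origin(Optional[str]) is typing.Union: subscripted, but not a
    # supported container, so the new branch skips it via `continue`.
    bio: Optional[str] = None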