Skip to content
Commits on Source (30)
[bumpversion]
current_version = 0.2.1
current_version = 0.3.0
commit = True
tag = True
......@@ -13,6 +13,16 @@ tag = True
[bumpversion:file:anji_orm/utils.py]
[bumpversion:file:anji_orm/syntax/__init__.py]
[bumpversion:file:anji_orm/syntax/indexes.py]
[bumpversion:file:anji_orm/syntax/parse.py]
[bumpversion:file:anji_orm/syntax/query.py]
[bumpversion:file:source/conf.py]
[bumpversion:file:setup.py]
search = version='{current_version}'
replace = version='{new_version}'
......
.venv
.vscode
__pycache__
test.py
check.py
dist/
*.egg-info
.idea
build
.mypy_cache
.ipynb_checkpoints
Test.ipynb
.hypothesis
.cache
\ No newline at end of file
image: "python:3.6"
before_script:
- pip install pipenv
- pipenv install --system --dev
- pip install .
- pip install -r requirements-dev.txt
stages:
- test
......@@ -12,9 +12,9 @@ check-code:
stage: test
script: make check
# run-test:
# stage: test
# script: pytest
run-test:
stage: test
script: pytest --hypothesis-show-statistics
upload-to-pypi:
stage: upload
......
......@@ -4,6 +4,18 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [0.3.0] - 2017-12-25
### Added
- More sugar to sugar god
- Wide query syntax support
### Changed
- Remove Pipfile
- Rename RethinkModelMetaclass to ModelMetaclass
## [0.2.1] - 2017-12-20
### Changed
......@@ -54,7 +66,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Ability to skip database setup when load register
[Unreleased]: https://gitlab.com/AnjiProject/anji-core/compare/v0.2.1...HEAD
[Unreleased]: https://gitlab.com/AnjiProject/anji-core/compare/v0.3.0...HEAD
[0.3.0]: https://gitlab.com/AnjiProject/anji-core/compare/v0.2.1...v0.3.0
[0.2.1]: https://gitlab.com/AnjiProject/anji-core/compare/v0.2.0...v0.2.1
[0.2.0]: https://gitlab.com/AnjiProject/anji-core/compare/v0.1.7...v0.2.0
[0.1.7]: https://gitlab.com/AnjiProject/anji-core/compare/v0.1.6...v0.1.7
......
check:
pylint anji_orm
pycodestyle anji_orm
mypy anji_orm --ignore-missing-imports
# mypy anji_orm --ignore-missing-imports
docs:
sphinx-build -b html ./source ./build
test:
pytest --hypothesis-show-statistics
\ No newline at end of file
[[source]]
url = 'https://pypi.python.org/simple'
verify_ssl = true
name = 'pypi'
[requires]
python_version = '3.6'
[packages]
"anji-orm" = {path = ".", editable = true}
[dev-packages]
pylint = ">=1.7.4"
pylint-common = ">=0.2.5"
mypy = ">=0.550"
pycodestyle = ">=2.3.1"
twine = ">=1.9.1"
\ No newline at end of file
{
"_meta": {
"hash": {
"sha256": "9f2c6dc4c7b446f1024266eb7583c88f536e25c45aafdec23681c0bff62ecf34"
},
"host-environment-markers": {
"implementation_name": "cpython",
"implementation_version": "3.6.3",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_python_implementation": "CPython",
"platform_release": "4.13.16-2-MANJARO",
"platform_system": "Linux",
"platform_version": "#1 SMP PREEMPT Fri Nov 24 12:49:10 UTC 2017",
"python_full_version": "3.6.3",
"python_version": "3.6",
"sys_platform": "linux"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.6"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.python.org/simple",
"verify_ssl": true
}
]
},
"default": {
"anji-orm": {
"editable": true,
"path": "."
},
"async-repool": {
"hashes": [
"sha256:bd9e8b4161066cb9b6bd5610ac44d27ec3d51ef6ac3027a7954605e3f19755b0"
],
"version": "==0.2.1"
},
"humanize": {
"hashes": [
"sha256:a43f57115831ac7c70de098e6ac46ac13be00d69abbf60bdcac251344785bb19"
],
"version": "==0.5.1"
},
"repool-forked": {
"hashes": [
"sha256:e805add9320911f6b591f418f476462b2c8deb9d963456338848175e298fe33a"
],
"version": "==0.3"
},
"rethinkdb": {
"hashes": [
"sha256:b5354ecd896b59065693e4139c067f401c9f57970268e9b93f83d869709d1c17"
],
"version": "==2.3.0.post6"
}
},
"develop": {
"astroid": {
"hashes": [
"sha256:badf6917ef7eb0ade0ea6eae347aed1e3f8f4c9375a02916f5cc450b3c8a64c0",
"sha256:71dadba2110008e2c03f9fde662ddd2053db3c0489d0e03c94e828a0399edd4f"
],
"version": "==1.6.0"
},
"certifi": {
"hashes": [
"sha256:244be0d93b71e93fc0a0a479862051414d0e00e16435707e5bf5000f92e04694",
"sha256:5ec74291ca1136b40f0379e1128ff80e866597e4e2c1e755739a913bbc3613c0"
],
"version": "==2017.11.5"
},
"chardet": {
"hashes": [
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691",
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"
],
"version": "==3.0.4"
},
"idna": {
"hashes": [
"sha256:8c7309c718f94b3a625cb648ace320157ad16ff131ae0af362c9f21b80ef6ec4",
"sha256:2c6a5de3089009e3da7c5dde64a141dbc8551d5b7f6cf4ed7c2568d0cc520a8f"
],
"version": "==2.6"
},
"isort": {
"hashes": [
"sha256:cd5d3fc2c16006b567a17193edf4ed9830d9454cbeb5a42ac80b36ea00c23db4",
"sha256:79f46172d3a4e2e53e7016e663cc7a8b538bec525c36675fcfd2767df30b3983"
],
"version": "==4.2.15"
},
"lazy-object-proxy": {
"hashes": [
"sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019",
"sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39",
"sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c",
"sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e",
"sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b",
"sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6",
"sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b",
"sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d",
"sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff",
"sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd",
"sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f",
"sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514",
"sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92",
"sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35",
"sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff",
"sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252",
"sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7",
"sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b",
"sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f",
"sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4",
"sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577",
"sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d",
"sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109",
"sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2",
"sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33",
"sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d",
"sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5",
"sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088",
"sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a"
],
"version": "==1.3.1"
},
"mccabe": {
"hashes": [
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
],
"version": "==0.6.1"
},
"mypy": {
"hashes": [
"sha256:aa668809ae0dbec5e9feb8929f4b5e1f9318a0a397447fa2f38c382a2ed6a036",
"sha256:bd0c9a2fcf0c4f7a54a2b625f466fcc000d415f371298d96fa5d2acc69074aca"
],
"version": "==0.560"
},
"pkginfo": {
"hashes": [
"sha256:31a49103180ae1518b65d3f4ce09c784e2bc54e338197668b4fb7dc539521024",
"sha256:bb1a6aeabfc898f5df124e7e00303a5b3ec9a489535f346bfbddb081af93f89e"
],
"version": "==1.4.1"
},
"psutil": {
"hashes": [
"sha256:2fbbc7dce43c5240b9dc6d56302d57412f1c5a0d665d1f04eb05a6b7279f4e9b",
"sha256:259ec8578d19643179eb2377348c63b650b51ba40f58f2620a3d9732b8a0b557",
"sha256:d3808be8241433db17fa955566c3b8be61dac8ba8f221dcbb202a9daba918db5",
"sha256:449747f638c221f8ce6ca3548aefef13339aa05b453cc1f233f4d6c31c206198",
"sha256:f6c2d54abd59ed8691882de7fd6b248f5808a567885f20f50b3b4b9eedaebb1f",
"sha256:e3d00d8fc3d4217f05d07af45390f072c04cb7c7dddd70b86b728e5fbe485c81",
"sha256:3473d6abad9d6ec7b8a97f4dc55f0b3483ecf470d85f08f5e23c1c07592b914f",
"sha256:7dc6c3bbb5d28487f791f195d6abfdef295d34c44ce6cb5f2d178613fb3338ab",
"sha256:00a1f9ff8d1e035fba7bfdd6977fa8ea7937afdb4477339e5df3dba78194fe11"
],
"version": "==5.4.2"
},
"pycodestyle": {
"hashes": [
"sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9",
"sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766"
],
"version": "==2.3.1"
},
"pylint": {
"hashes": [
"sha256:c8e59da0f2f9990eb00aad1c1de16cd7809315842ebccc3f65ca9df46213df3b",
"sha256:3035e44e37cd09919e9edad5573af01d7c6b9c52a0ebb4781185ae7ab690458b"
],
"version": "==1.8.1"
},
"pylint-common": {
"hashes": [
"sha256:3276b9e4db16f41cee656c78c74cfef3da383e8301e5b3b91146586ae5b53659"
],
"version": "==0.2.5"
},
"pylint-plugin-utils": {
"hashes": [
"sha256:053ade7c76f83242225b49d47624d9ecb803c60347e2c5127e97a19bf0c9f95e"
],
"version": "==0.2.6"
},
"requests": {
"hashes": [
"sha256:6a1b267aa90cac58ac3a765d067950e7dbbf75b1da07e895d1f594193a40a38b",
"sha256:9c443e7324ba5b85070c4a818ade28bfabedf16ea10206da1132edaa6dda237e"
],
"version": "==2.18.4"
},
"requests-toolbelt": {
"hashes": [
"sha256:42c9c170abc2cacb78b8ab23ac957945c7716249206f90874651971a4acff237",
"sha256:f6a531936c6fa4c6cfce1b9c10d5c4f498d16528d2a54a22ca00011205a187b5"
],
"version": "==0.8.0"
},
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
},
"tqdm": {
"hashes": [
"sha256:4c041f8019f7be65b8028ddde9a836f7ccc51c4637f1ff2ba9b5813d38d19d5a",
"sha256:df32e6f127dc0ccbc675eadb33f749abbcb8f174c5cb9ec49c0cdb73aa737377"
],
"version": "==4.19.5"
},
"twine": {
"hashes": [
"sha256:d3ce5c480c22ccfb761cd358526e862b32546d2fe4bc93d46b5cf04ea3cc46ca",
"sha256:caa45b7987fc96321258cd7668e3be2ff34064f5c66d2d975b641adca659c1ab"
],
"version": "==1.9.1"
},
"typed-ast": {
"hashes": [
"sha256:0948004fa228ae071054f5208840a1e88747a357ec1101c17217bfe99b299d58",
"sha256:25d8feefe27eb0303b73545416b13d108c6067b846b543738a25ff304824ed9a",
"sha256:c05b41bc1deade9f90ddc5d988fe506208019ebba9f2578c622516fd201f5863",
"sha256:519425deca5c2b2bdac49f77b2c5625781abbaf9a809d727d3a5596b30bb4ded",
"sha256:6de012d2b166fe7a4cdf505eee3aaa12192f7ba365beeefaca4ec10e31241a85",
"sha256:79b91ebe5a28d349b6d0d323023350133e927b4de5b651a8aa2db69c761420c6",
"sha256:a8034021801bc0440f2e027c354b4eafd95891b573e12ff0418dec385c76785c",
"sha256:f19f2a4f547505fe9072e15f6f4ae714af51b5a681a97f187971f50c283193b6",
"sha256:c9b060bd1e5a26ab6e8267fd46fc9e02b54eb15fffb16d112d4c7b1c12987559",
"sha256:2e214b72168ea0275efd6c884b114ab42e316de3ffa125b267e732ed2abda892",
"sha256:bc978ac17468fe868ee589c795d06777f75496b1ed576d308002c8a5756fb9ea",
"sha256:edb04bdd45bfd76c8292c4d9654568efaedf76fe78eb246dde69bdb13b2dad87",
"sha256:668d0cec391d9aed1c6a388b0d5b97cd22e6073eaa5fbaa6d2946603b4871efe",
"sha256:29464a177d56e4e055b5f7b629935af7f49c196be47528cc94e0a7bf83fbc2b9",
"sha256:8550177fa5d4c1f09b5e5f524411c44633c80ec69b24e0e98906dd761941ca46",
"sha256:3e0d5e48e3a23e9a4d1a9f698e32a542a4a288c871d33ed8df1b092a40f3a0f9",
"sha256:68ba70684990f59497680ff90d18e756a47bf4863c604098f10de9716b2c0bdd",
"sha256:57fe287f0cdd9ceaf69e7b71a2e94a24b5d268b35df251a88fef5cc241bf73aa"
],
"version": "==1.1.0"
},
"urllib3": {
"hashes": [
"sha256:06330f386d6e4b195fbfc736b297f58c5a892e4440e54d294d7004e3a9bbea1b",
"sha256:cc44da8e1145637334317feebd728bd869a35285b93cbb4cca2577da7e62db4f"
],
"version": "==1.22"
},
"wrapt": {
"hashes": [
"sha256:d4d560d479f2c21e1b5443bbd15fe7ec4b37fe7e53d335d3b9b0a7b1226fe3c6"
],
"version": "==1.10.11"
}
}
}
from .core import *
from .utils import *
from .fields import *
from .syntax import *
from .model import *
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.2.1"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
......@@ -12,7 +12,7 @@ __author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.2.1"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
......@@ -184,6 +184,11 @@ class AsyncRethinkDBRegisterStrategy(AbstractRethinkDBRegisterStrategy):
class RethinkDBRegister(object):
"""
Register object that store any information about models, tables.
Store and control pool and wrap logic.
"""
def __init__(self, ) -> None:
super().__init__()
self.tables: List[str] = []
......@@ -210,9 +215,25 @@ class RethinkDBRegister(object):
self.pool = self.strategy.pool
def set_wrap_decorator(self, wrap_decorator: Callable) -> None:
"""
Just set the wrapper for wrapping logic. The wrapper should be argparse-compatible.
:param wrap_decorator: argparse-compatible function wrapper
"""
self.wrap_decorator = wrap_decorator
def wrap(self, function: Callable, parameter_name: str, **kwargs: Any) -> Callable:
"""
Control point to wrap a function with an argparse-compatible dict.
Before usage :any:`set_wrap_decorator` should be called.
See also :any:`wrap_function_with_parameter`.
:param function: Function to wrap
:param parameter_name: argparse parameter name
:param kwargs: argparse function keyword args
:return: wrapped function
"""
if self.wrap_decorator is None:
raise Exception("Wrap decorator not configurated")
return self.wrap_decorator(parameter_name, **kwargs)(function)
......
......@@ -6,15 +6,16 @@ import logging
import rethinkdb as R
import humanize
from lazy import lazy
from .utils import query
from .core import register
from .syntax.query import QueryRow
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.2.1"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
......@@ -31,21 +32,38 @@ LIST_FIELD_SEPARATOR = '|'
class AbstractField(object): # pylint:disable=too-many-instance-attributes
_anji_orm_field = True
"""
Abstract ORM field class. Used to describe base logic and provide unified type check
"""
def __init__(self, param_type, default=None, **kwargs) -> None:
_anji_orm_field: bool = True
def __init__(self, param_type: Type, default: Any = None, **kwargs) -> None:
"""
:param description: Field description, mostly used for automatically generated commands. Default value is empty string.
Init function for ORM fields. Provide parameter checks with assertion
:param param_type: Type class, that will be used for type checking
:param default: Field default value, should be strict value or callable function. Default value is None.
:param description: Field description, mostly used for automatically generated commands. Default value is empty string.
:type description: str
:param optional: If true, this field is optional. Default value is False.
:type optional: bool
:param reconfigurable: If true, this field can be changed via configure commands. Default value is False.
:type reconfigurable: bool
:param definer: If true, this field should be unique per record. Default value is False.
:type definer: bool
:param service: If true, this field used only in internal bot logic. Default value is False.
:type service: bool
:param field_marks: Additional field marks, to use in internal logic. Default value is None.
:type field_marks: List[str]
:param secondary_index: If true, ORM will build secondary_index on this field. Default value is False.
:type secondary_index: bool
:param displayed: If true, this field will be displayed on chat report. Default value is True.
:type displayed: bool
:param compute_function: Make field computable and use this function to calculate field value. Default value is False
:type compute_function: Callable
:param cacheable: If false, field value will be recomputed every time on access. Default value is True.
:type cacheable: bool
"""
# Setup fields
self.param_type: Type = param_type
......@@ -75,6 +93,15 @@ class AbstractField(object): # pylint:disable=too-many-instance-attributes
def wrap_function_with_parameter(
self, func: Callable,
required: bool = True, use_default: bool = True) -> Callable:
"""
Advanced function wrapper that transform field to argparse dict and call :py:func:`~anji_orm.core.RethinkDBRegister.wrap`
to wrap given function
:param func: function to wrap
:param required: make parameter required for argparse logic
:param use_default: add default value to argparse dict
:return: wrapped function
"""
kwargs = dict(
type=self.param_type,
help=self.description
......@@ -92,6 +119,10 @@ class AbstractField(object): # pylint:disable=too-many-instance-attributes
**kwargs
)
@lazy
def _query_row(self) -> QueryRow:
return QueryRow(self.name)
def update_keys(self) -> Tuple:
return (self.name,)
......@@ -99,6 +130,10 @@ class AbstractField(object): # pylint:disable=too-many-instance-attributes
setattr(instance, self.name, value)
def format(self, value) -> str: # pylint: disable=no-self-use
"""
Prettify formatting function, that is used to disable this variable in :py:func:`~anji_orm.model.Model.to_describe_dict` function.
Also can be used for formatting itself
"""
return str(value)
def _compute_value(self, instance):
......@@ -117,6 +152,11 @@ class AbstractField(object): # pylint:disable=too-many-instance-attributes
def __get__(self, instance, instance_type):
if instance is None:
# when name is not None
# that means that the field was already processed
# and will be used only in comparison
if self.name is not None:
return self._query_row
return self
if self.compute_function:
return self._compute_get_logic(instance)
......@@ -179,8 +219,8 @@ class SelectionField(AbstractField):
class EnumField(AbstractField):
def __init__(self, enum_class: Union[Type[Enum], Iterable], default=None, **kwargs) -> None:
self.variants: List[Enum] = list(enum_class)
def __init__(self, enum_class: Type[Union[Enum, Iterable]], default=None, **kwargs) -> None:
self.variants: List[Enum] = list(enum_class) # type: ignore
assert self.variants, f"You must define some child in Enum class {enum_class}"
if default is None:
default = self.variants[0]
......@@ -209,7 +249,7 @@ class FloatField(AbstractField):
)
self.decimal_format = decimal_format
def format(self, value):
def format(self, value) -> str:
return self.decimal_format.format(value)
def __set__(self, instance, value):
......@@ -339,14 +379,17 @@ class LinkField(AbstractField):
def __get__(self, instance, instance_type):
if instance is None:
# when name is not None
# that means that the field was already processed
# and will be used only in comparison
if self.name is not None:
return self._query_row
return self
result = instance._values.get(self.name, None)
if result is None:
result_key = instance._values.get(self.key_for_uuid_storing)
if result_key is not None:
result = query(
self._table.get(result_key)
)
result = self._model_class.get(result_key)
instance._values[self.name] = result
return result
......
from typing import Dict, Any, Type, List, Callable
from typing import Dict, Any, Type, List, overload, Optional, AsyncIterable
from datetime import datetime
from abc import ABCMeta
import logging
from itertools import product
from enum import Enum
from importlib import import_module
import rethinkdb as R
from .core import register
from .fields import DatetimeField, AbstractField
from .utils import query, async_query
from .utils import prettify_value
from .syntax import AbstractIndexPolicy, GreedyIndexPolicy, RethinkDBQueryParser, QueryStatement, QueryStatementsCollection
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.2.1"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = ['SharedEnv', 'Model', 'RethinkModelMetaclass']
__all__ = ['SharedEnv', 'Model', 'ModelMetaclass', 'ModifyDisallowException', 'fetch', 'fetch_cursor']
MODEL_FIELDS_CONTROL = {
'_aggregate_dict': ['_fields', '_field_marks'],
......@@ -34,11 +34,33 @@ _log = logging.getLogger(__name__)
class ModifyDisallowException(Exception):
    """
    Raised when attempting to change a ``Model`` class field that is blocked for changes.
    """
class RethinkModelMetaclass(ABCMeta):
def fetch(rethink_dict: Dict[str, Any]) -> Optional['Model']:
    """
    Rebuild a model object from a raw RethinkDB record dict.

    Uses the ``__python_info`` metadata stored in the record to import the
    model's module and look up its class, then instantiates it with the
    record id and loads the remaining fields via ``load``.

    :param rethink_dict: raw record dict; must contain ``id`` and
        ``__python_info`` with ``module_name`` and ``class_name`` keys
    :return: the loaded model instance, or ``None`` when the class cannot
        be found in the imported module
    """
    class_module = import_module(rethink_dict['__python_info']['module_name'])
    class_object = getattr(class_module, rethink_dict['__python_info']['class_name'], None)
    if class_object is None:
        # Record points at a class that no longer exists; skip it instead of crashing.
        _log.warning('Task %s cannot be parsed, because class wasnt found!', rethink_dict['id'])
        return None
    obj = class_object(id_=rethink_dict['id'])
    obj.load(rethink_dict)
    return obj
async def fetch_cursor(cursor) -> AsyncIterable[Dict[str, Any]]:
    """
    Wrap an asyncio RethinkDB cursor into an ``AsyncIterable``.

    Utility generator that allows ``async for`` usage over a cursor that only
    exposes ``fetch_next``/``next`` coroutines.

    :param cursor: asyncio RethinkDB cursor object
    :return: async iterable yielding the cursor's record dicts
    """
    while await cursor.fetch_next():
        yield await cursor.next()
class ModelMetaclass(ABCMeta):
@classmethod
def _aggregate_sets(mcs, bases, namespace, field):
......@@ -125,74 +147,6 @@ class RethinkModelMetaclass(ABCMeta):
return result
class BaseQueryStrategy:
    """
    Default strategy that converts a plain ``group_data`` dict into a RethinkDB
    query, using secondary indexes where the model declares them and plain
    ``filter`` terms otherwise.
    """

    @staticmethod
    def generate_list_filter(values, field_name) -> Callable:
        """
        Build a row predicate checking that the document's ``field_name`` value
        is contained in ``values``.
        """
        rethinkdb_expr = R.expr(values)
        return lambda doc: rethinkdb_expr.contains(doc[field_name])

    @staticmethod
    def single_secondary_index_query(search_query: R.RqlQuery, model_class: Type['Model'], index_name: str, search_data: Any) -> R.RqlQuery:
        """
        Append a ``get_all`` lookup on a single secondary index.

        Collection-valued ``search_data`` is unpacked into multiple ``get_all``
        arguments unless the indexed field itself stores lists.
        """
        use_unpack = False
        if isinstance(search_data, BASE_COLLECTION_TYPE):
            if model_class._fields.get(index_name):
                # Unpack unless the field itself is list-typed.
                use_unpack = model_class._fields.get(index_name).param_type != list
            else:
                # Compound index: unpack when given a collection of key tuples.
                use_unpack = isinstance(search_data[0], BASE_COLLECTION_TYPE)
        if use_unpack:
            search_query = search_query.get_all(*search_data, index=index_name)
        else:
            search_query = search_query.get_all(search_data, index=index_name)
        return search_query

    @staticmethod
    def secondary_indexes_query(search_query: R.RqlQuery, model_class: Type['Model'], group_data: Dict, secondary_indexes: List[str]) -> R.RqlQuery:
        """
        Append a ``get_all`` for one or several secondary-indexed fields,
        combining multiple fields into a compound ``a:b`` index lookup.
        """
        if len(secondary_indexes) == 1:
            return BaseQueryStrategy.single_secondary_index_query(
                search_query, model_class, secondary_indexes[0], group_data.get(secondary_indexes[0])
            )
        secondary_indexes = sorted(secondary_indexes)
        index_data = [group_data.get(x) for x in secondary_indexes]
        if any(filter(lambda x: isinstance(x, list), index_data)):
            # Expand list values into the cartesian product of index key tuples.
            index_data = list(product(*(x if isinstance(x, list) else [x] for x in index_data)))
        return BaseQueryStrategy.single_secondary_index_query(
            search_query, model_class, ":".join(secondary_indexes), index_data
        )

    @staticmethod
    def build_query(
            model_class: Type['Model'], group_data: Dict,
            limit: int = None, order_by: str = None, descending: bool = False) -> R.RqlQuery:
        """
        Build the complete RethinkDB query for ``group_data``: secondary-index
        lookups first, then ``filter`` terms, then optional ordering and limit.

        :param model_class: model class that owns the target table
        :param group_data: mapping of field name to searched value(s)
        :param limit: optional maximum number of results
        :param order_by: optional field name to order by
        :param descending: when True, order descending
        :return: composed RethinkDB query
        """
        group_data = Model.prettify_value(group_data)
        search_query = R.table(model_class._table)
        secondary_indexes = []
        simple_fields = []
        for group_name in group_data.keys():
            if group_name in model_class._fields and model_class._fields.get(group_name).secondary_index:
                secondary_indexes.append(group_name)
            else:
                simple_fields.append(group_name)
        if secondary_indexes:
            search_query = BaseQueryStrategy.secondary_indexes_query(
                search_query, model_class,
                group_data, secondary_indexes
            )
        for simple_field in simple_fields:
            group_value = group_data.get(simple_field)
            if isinstance(group_value, list) and model_class._fields.get(simple_field).param_type != list:
                search_query = search_query.filter(BaseQueryStrategy.generate_list_filter(group_value, simple_field))
            else:
                search_query = search_query.filter(R.row[simple_field] == group_value)
        if order_by:
            if descending:
                order_by = R.desc(order_by)
            search_query = search_query.order_by(order_by)
        if limit:
            search_query = search_query.limit(limit)
        return search_query
class SharedEnv:
def __init__(self):
......@@ -207,7 +161,7 @@ class SharedEnv:
raise AttributeError
class Model(object, metaclass=RethinkModelMetaclass):
class Model(object, metaclass=ModelMetaclass):
"""
Base class with core logic for rethinkdb usage.
For usage you must define _table and _fields section.
......@@ -218,7 +172,7 @@ class Model(object, metaclass=RethinkModelMetaclass):
_fields: Dict[str, AbstractField] = {}
_field_marks: Dict[str, AbstractField] = {}
_primary_keys: List[str] = []
_query_strategy = BaseQueryStrategy
_index_policy: Type[AbstractIndexPolicy] = GreedyIndexPolicy
shared: SharedEnv = SharedEnv()
......@@ -252,7 +206,7 @@ class Model(object, metaclass=RethinkModelMetaclass):
result = getattr(self, field, None)
if isinstance(result, Model):
result = result.id
return Model.prettify_value(result)
return prettify_value(result)
def _build_python_info(self) -> Dict[str, str]:
return {
......@@ -260,6 +214,14 @@ class Model(object, metaclass=RethinkModelMetaclass):
'class_name': self.__class__.__name__
}
@classmethod
def get_index_policy(cls) -> Type[AbstractIndexPolicy]:
"""
Return index policy, that will be used to interact with indexes.
See :py:mod:`~anji_orm.syntax.indexes` for more details
"""
return cls._index_policy
def to_dict(self) -> Dict[str, Any]:
"""
Utility method to generate dict from object.
......@@ -327,16 +289,6 @@ class Model(object, metaclass=RethinkModelMetaclass):
if key in self._fields:
setattr(self, key, value)
@staticmethod
def prettify_value(value) -> Any:
if isinstance(value, Enum):
return value.name
if isinstance(value, list):
return [Model.prettify_value(x) for x in value]
if isinstance(value, dict):
return {k: Model.prettify_value(v) for k, v in value.items()}
return value
def delete(self) -> None:
"""
Method, that delete record from base table.
......@@ -351,35 +303,77 @@ class Model(object, metaclass=RethinkModelMetaclass):
await self.table.get(self.id).delete().run(conn)
@classmethod
def get_query(cls, id_) -> R.RqlQuery:
return R.table(cls._table).get(id_)
def get(cls, id_) -> R.RqlQuery:
return cls.execute(R.table(cls._table).get(id_))
@classmethod
async def async_get(cls, id_) -> R.RqlQuery:
return await cls.async_execute(R.table(cls._table).get(id_))
def build_similarity_dict(self) -> Dict[str, Any]:
group_data = dict(__python_info=self._build_python_info())
for primary_key_part in self._primary_keys:
group_data[primary_key_part] = getattr(self, primary_key_part)
return group_data
def build_similarity_query(self) -> QueryStatementsCollection:
raise NotImplementedError("Hehehe, sad")
def find_similar(self) -> List['Model']:
return query(self.build_query(self.build_similarity_dict()))
return self.execute(self.build_similarity_query())
async def async_find_similary(self) -> List['Model']:
return await async_query(self.build_query(self.build_similarity_dict()))
return await self.async_execute(self.build_similarity_query())
@classmethod
def build_query(
cls, group_data: Dict,
limit: int = None, order_by: str = None, descending: bool = False) -> R.RqlQuery:
return cls._query_strategy.build_query(
cls, group_data, limit=limit,
order_by=order_by, descending=descending
)
def execute(cls, db_query: R.RqlQuery):
with register.pool.connect() as conn:
result = db_query.run(conn)
if isinstance(result, R.net.DefaultCursor):
return list(filter(lambda x: x is not None, (fetch(obj_data) for obj_data in result)))
elif isinstance(result, dict):
return fetch(result)
elif isinstance(result, list):
return list(filter(lambda x: x is not None, (fetch(obj_data) for obj_data in result)))
elif not result:
return result
raise Exception("Unkown query result type!")
@classmethod
async def async_execute(cls, db_query: R.RqlQuery):
async with register.pool.connect() as conn: # pylint: disable=not-async-context-manager
result = await db_query.run(conn)
if result.__class__.__name__ == 'AsyncioCursor':
synced_list = [fetch(obj_data) async for obj_data in fetch_cursor(result)]
return list(filter(lambda x: x is not None, synced_list))
elif isinstance(result, dict):
return fetch(result)
elif isinstance(result, list):
return list(filter(lambda x: x is not None, (fetch(obj_data) for obj_data in result)))
elif not result:
return result
raise Exception("Unkown query result type!")
@overload
@classmethod
def query(cls, query: QueryStatement):
pass
@overload
@classmethod
def query(cls, query: QueryStatementsCollection): # pylint: disable=function-redefined
pass
@classmethod
def query(cls, query): # pylint: disable=function-redefined
builded_query = RethinkDBQueryParser.build_query(cls, query)
return cls.execute(builded_query)
@classmethod
def unique_groups_query(cls) -> R.RqlQuery:
return R.table(cls._table).pluck('__python_info', *cls._primary_keys).distinct()
def to_describe_dict(self, definer_skip=False) -> Dict[str, str]:
def to_describe_dict(self, definer_skip: bool = False) -> Dict[str, str]:
"""
Convert record to dict with pair "Pretty field name" "Pretty field value".
By default only field with `displayed` option will be in dict.
:param definer_skip: Additional to not displayed skip definer fields
"""
fields = {}
for field_name, field_item in self._fields.items():
if field_item.displayed and not (definer_skip and field_item.definer) and getattr(self, field_name) is not None:
......
from .query import *
from .indexes import *
from .parse import *
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = [
'QueryRow', 'QueryStatement', 'StatementType', 'QueryStatementsCollection',
'EmptyQueryStatement',
'AbstractIndexPolicy', 'GreedyIndexPolicy', 'SingleIndexPolicy',
'RethinkDBQueryParser', 'QueryBuildException'
]
import abc
from itertools import combinations
from typing import List, Tuple, Sequence
import rethinkdb as R
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = ['AbstractIndexPolicy', 'GreedyIndexPolicy', 'SingleIndexPolicy']
class AbstractIndexPolicy(abc.ABC):
    """
    Abstract base class for policies that control how secondary indexes are
    built and selected. A policy works like a strategy pattern.
    """

    @classmethod
    @abc.abstractmethod
    def build_secondary_index_list(cls, secondary_indexes_fields: List[str]) -> List[str]:
        """
        Define how to build secondary indexes based on the fields that were
        marked as secondary indexes.

        :param secondary_indexes_fields: list of fields marked as secondary indexes
        :return: secondary index list
        """

    @classmethod
    def index_creation_query(cls, secondary_index_name: str, table_name: str) -> R.RqlQuery:
        """
        Build the RethinkDB query that creates a secondary index from its name.

        Basically, two simple rules:

        1. When the name contains no ':', create an index on the field named
           the same as :any:`secondary_index_name`.
        2. When the name contains ':', split on it and create a compound index.

        So for index 'catman' a RethinkDB index on field 'catman' is created,
        and for 'catman:mercuria' a compound index on fields 'catman' and 'mercuria'.

        .. seealso:: RethinkDB `documentation <https://www.rethinkdb.com/docs/secondary-indexes/python/>` about indexes

        :param secondary_index_name: secondary index name
        :param table_name: name of the target table
        :return: query that builds this index in RethinkDB
        """
        if ':' not in secondary_index_name:
            return R.table(table_name).index_create(secondary_index_name)
        return R.table(table_name).index_create(
            secondary_index_name,
            [R.row[x] for x in secondary_index_name.split(':')]
        )

    @classmethod
    @abc.abstractmethod
    def select_secondary_index(cls, secondary_indexes_fields: List[str]) -> Tuple[str, Sequence[str]]:
        """
        Define how to select the secondary index for a query.

        :param secondary_indexes_fields: fields used in the query
        :return: (selected index, unused elements)
        """
class GreedyIndexPolicy(AbstractIndexPolicy):
    """
    Simple index policy based on greedy logic:
    create an index for every field and every field combination.

    For example, a model with two indexed fields 'cat' and 'dog' will
    produce three indexes:

    - cat
    - dog
    - cat:dog
    """

    @classmethod
    def build_secondary_index_list(cls, secondary_indexes_fields: List[str]) -> List[str]:
        """
        Produce an index for every field and every field combination.

        Fixes the old behavior that appended the full combination
        unconditionally: a single-field input yielded the same index name
        twice and an empty input yielded an empty-name index.

        :param secondary_indexes_fields: List of fields that was marked as secondary indexes
        :return: secondary index list
        """
        secondary_indexes = list(secondary_indexes_fields)
        sorted_fields = sorted(secondary_indexes_fields)
        # Sizes 2..len cover every compound index, the full combination last.
        for combination_size in range(2, len(sorted_fields) + 1):
            secondary_indexes.extend(
                ':'.join(combination)
                for combination in combinations(sorted_fields, combination_size)
            )
        return secondary_indexes

    @classmethod
    def select_secondary_index(cls, secondary_indexes_fields: List[str]) -> Tuple[str, Sequence[str]]:
        """
        Select one compound index covering all queried fields.

        :param secondary_indexes_fields: Fields that used in query
        :return: (selected index, empty tuple)
        """
        return ':'.join(sorted(secondary_indexes_fields)), ()
class SingleIndexPolicy(AbstractIndexPolicy):
    """
    Plain index policy: one index per field, no field combinations.

    For example, a model with two indexed fields 'cat' and 'dog' will
    produce exactly two indexes:

    - cat
    - dog
    """

    @classmethod
    def build_secondary_index_list(cls, secondary_indexes_fields: List[str]) -> List[str]:
        """
        Produce an index for every field, without any combinations.

        :param secondary_indexes_fields: List of fields that was marked as secondary indexes
        :return: same as :any:`secondary_indexes_fields` variable
        """
        return secondary_indexes_fields

    @classmethod
    def select_secondary_index(cls, secondary_indexes_fields: List[str]) -> Tuple[str, Sequence[str]]:
        """
        Select the first queried field as the index; the remaining
        fields are filtered without an index.

        :param secondary_indexes_fields: Fields that used in query
        :return: (first field, rest fields)
        """
        return secondary_indexes_fields[0], secondary_indexes_fields[1:]
from itertools import product, starmap
from typing import overload, List, Dict, Optional, Tuple
import rethinkdb as R
from .query import QueryStatementsCollection, QueryStatement, EmptyQueryStatement, StatementType, Interval
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = ['RethinkDBQueryParser', 'QueryBuildException']
class QueryBuildException(Exception):
    """
    Exception on query building, e.g. inconsistent index bounds or an
    unsupported statement combination.
    """
class RethinkDBQueryParser:
    """
    Converts parsed query objects (:any:`QueryStatement`,
    :any:`QueryStatementsCollection`, :any:`EmptyQueryStatement`) into
    executable RethinkDB queries, using the model's secondary indexes
    where possible.
    """

    @classmethod
    @overload
    def index_bounds(cls, statements: List[QueryStatement]) -> Optional[Tuple[bool, bool]]:
        pass

    @classmethod
    @overload
    def index_bounds(cls, statements: List[QueryStatement]) -> Optional[bool]:  # pylint: disable=function-redefined
        pass

    @classmethod
    def index_bounds(cls, statements: List[QueryStatement]):  # pylint: disable=function-redefined,too-many-branches
        """
        Work out whether the query needs a ``between`` step and, if so,
        whether each side of the range is open or closed.

        :param statements: statements that take part in the query
        :return: ``False`` when no range statements are present (a plain
            ``get_all`` is enough); ``(left_close, right_close)`` flags for
            ``between``; ``None`` when statements disagree about the bounds.
        """
        right_close = None
        left_close = None
        bound_statements = [
            StatementType.bound, StatementType.ge, StatementType.gt,
            StatementType.le, StatementType.lt
        ]
        for statement in filter(lambda x: x.statement_type in bound_statements, statements):
            # Parentheses matter here: the old expression parsed as
            # `(type == le and flag is None) or flag`, so an already-set
            # flag matched statements of unrelated types and skipped them.
            if statement.statement_type == StatementType.le and (right_close is None or right_close):
                right_close = True
                continue
            if statement.statement_type == StatementType.lt and (right_close is None or not right_close):
                right_close = False
                continue
            if statement.statement_type == StatementType.ge and (left_close is None or left_close):
                left_close = True
                continue
            if statement.statement_type == StatementType.gt and (left_close is None or not left_close):
                left_close = False
                continue
            if statement.statement_type == StatementType.bound:
                if left_close is None or left_close == statement.right.left_close:
                    left_close = statement.right.left_close
                else:
                    return None
                if right_close is None or right_close == statement.right.right_close:
                    right_close = statement.right.right_close
                else:
                    return None
                continue
            # A range statement conflicted with the already-fixed bounds
            return None
        if right_close is None and left_close is None:
            # No range statements at all
            return False
        if right_close is None:
            right_close = False
        if left_close is None:
            left_close = False
        return left_close, right_close

    @classmethod
    def secondary_indexes_query(  # pylint: disable=too-many-branches
            cls, search_query: R.RqlQuery, selected_index: str, statement_dict: Dict[str, QueryStatement]) -> R.RqlQuery:
        """
        Express the statements over indexed fields as a ``get_all`` or
        ``between`` step on ``selected_index``.

        :param search_query: base table query to extend
        :param selected_index: index name; compound parts joined with ':'
        :param statement_dict: field name -> statement mapping
        """
        # NOTE(review): bounds are computed over ALL statements, including
        # fields not covered by the index -- confirm that a range statement
        # on a non-indexed field cannot wrongly force the between path.
        index_bounds = cls.index_bounds(list(statement_dict.values()))
        splited_index = selected_index.split(':')
        if index_bounds is None:
            raise QueryBuildException("Cannot build query: inconsistency bounds for indexes")
        # Lists of per-field key lists; an isin statement multiplies the combinations
        left_filter, right_filter = [[]], [[]]
        for statement_field in sorted(statement_dict.keys()):
            if statement_field not in splited_index:
                continue
            statement: QueryStatement = statement_dict.get(statement_field)
            if statement.statement_type == StatementType.isin:
                if len(left_filter) == 1 and not left_filter[0]:
                    # First indexed field: one combination per variant.
                    # Keeping the list-of-lists shape fixes a crash in the old
                    # code, which stored the raw variants tuple here and let
                    # the next field call .append() on it.
                    left_filter = [[variant] for variant in statement.right]
                    right_filter = [[variant] for variant in statement.right]
                else:
                    left_filter = list(starmap(lambda base, new_value: base + [new_value], product(left_filter, statement.right)))
                    right_filter = list(starmap(lambda base, new_value: base + [new_value], product(right_filter, statement.right)))
            else:
                new_left_filter = None
                new_right_filter = None
                if statement.statement_type in [StatementType.ge, StatementType.gt]:
                    new_left_filter = statement.right
                    new_right_filter = R.maxval
                elif statement.statement_type in [StatementType.le, StatementType.lt]:
                    new_right_filter = statement.right
                    new_left_filter = R.minval
                elif statement.statement_type == StatementType.eq:
                    new_right_filter = statement.right
                    new_left_filter = statement.right
                elif statement.statement_type == StatementType.bound:
                    new_left_filter = statement.right.left_bound
                    new_right_filter = statement.right.right_bound
                for filter_list in left_filter:
                    filter_list.append(new_left_filter)
                for filter_list in right_filter:
                    filter_list.append(new_right_filter)
        if isinstance(index_bounds, bool):
            # Equality-only query: one get_all key per combination.
            # Compound keys are passed as arrays, as RethinkDB expects.
            keys = [combo[0] if len(combo) == 1 else combo for combo in left_filter]
            return search_query.get_all(*keys, index=selected_index)
        if len(left_filter) > 1:
            raise QueryBuildException("Cannot use multiply index with between statement")
        left_key = left_filter[0][0] if len(left_filter[0]) == 1 else left_filter[0]
        right_key = right_filter[0][0] if len(right_filter[0]) == 1 else right_filter[0]
        # RethinkDB's between takes 'closed'/'open' strings for the bound
        # options; the old code passed the raw booleans from index_bounds.
        return search_query.between(
            left_key, right_key, index=selected_index,
            left_bound='closed' if index_bounds[0] else 'open',
            right_bound='closed' if index_bounds[1] else 'open'
        )

    @classmethod
    def process_simple_statement(cls, search_query: R.RqlQuery, statement: QueryStatement) -> R.RqlQuery:
        """
        Append a ``filter`` step for a statement whose right leaf is a
        plain value, a variants tuple or an interval.
        """
        row = R.row[statement.left.row_name]
        if statement.statement_type == StatementType.isin:
            rethinkdb_expr = R.expr(statement.right)
            return search_query.filter(lambda doc: rethinkdb_expr.contains(doc[statement.left.row_name]))
        if statement.statement_type == StatementType.bound:
            interval: Interval = statement.right
            left_condition = row >= interval.left_bound if interval.left_close else row > interval.left_bound
            right_condition = row <= interval.right_bound if interval.right_close else row < interval.right_bound
            return search_query.filter(left_condition & right_condition)
        # Remaining statement types map directly onto ReQL comparison operators
        return search_query.filter(getattr(row, f'__{statement.statement_type.name}__')(statement.right))

    @classmethod
    def process_compicated_statement(cls, search_query: R.RqlQuery, statement: QueryStatement) -> R.RqlQuery:
        """
        Append a ``filter`` step for a statement that compares two rows
        of the same document.
        """
        if statement.statement_type == StatementType.isin:
            return search_query.filter(lambda doc: doc[statement.right.row_name].contains(doc[statement.left.row_name]))
        if statement.statement_type == StatementType.bound:
            raise QueryBuildException("How did you even get here?")
        # Both leafs are QueryRow here, so compare the two document fields.
        # (The old code indexed R.row with the QueryRow object itself and
        # compared against the raw QueryRow instead of the field values.)
        return search_query.filter(
            getattr(R.row[statement.left.row_name], f'__{statement.statement_type.name}__')(R.row[statement.right.row_name])
        )

    @classmethod
    def process_not_indexes_statements(
            cls, search_query: R.RqlQuery, simple_fields: List[str],
            statement_dict: Dict[str, QueryStatement]) -> R.RqlQuery:
        """
        Append plain ``filter`` steps for the fields not covered by the
        selected secondary index.
        """
        for simple_field in simple_fields:
            statement = statement_dict.get(simple_field, None)
            if statement:
                if statement.compicated:
                    search_query = cls.process_compicated_statement(search_query, statement)
                else:
                    search_query = cls.process_simple_statement(search_query, statement)
        return search_query

    @classmethod
    @overload
    def build_query(cls, model_class, query: QueryStatement) -> R.RqlQuery:
        pass

    @classmethod
    @overload
    def build_query(cls, model_class, query: EmptyQueryStatement) -> R.RqlQuery:  # pylint: disable=function-redefined
        pass

    @classmethod
    @overload
    def build_query(cls, model_class, query: QueryStatementsCollection) -> R.RqlQuery:  # pylint: disable=function-redefined
        pass

    @classmethod
    def build_query(cls, model_class, query) -> R.RqlQuery:  # pylint: disable=function-redefined
        """
        Build a ReQL query for ``model_class`` from a parsed query object.

        :param model_class: model class providing ``_table``, ``_fields``
            and ``get_index_policy()``
        :param query: statement, statements collection or empty statement
        """
        if isinstance(query, EmptyQueryStatement):
            # Unsatisfiable query: an always-false filter returns no documents
            return R.table(model_class._table).filter(lambda doc: False)
        if isinstance(query, QueryStatement):
            # Wrap a lone statement so the code below handles a single shape
            new_query = QueryStatementsCollection()
            new_query.add_statement(query)
            query = new_query
        search_query = R.table(model_class._table)
        statement_dict = query.keyword_statements
        secondary_indexes = []
        simple_fields = []
        for field_name in statement_dict.keys():
            if field_name in model_class._fields and model_class._fields.get(field_name).secondary_index:
                secondary_indexes.append(field_name)
            else:
                simple_fields.append(field_name)
        if secondary_indexes:
            selected_index, unused_fields = model_class.get_index_policy().select_secondary_index(secondary_indexes)
            if unused_fields:
                simple_fields.extend(unused_fields)
            search_query = cls.secondary_indexes_query(search_query, selected_index, statement_dict)
        search_query = cls.process_not_indexes_statements(search_query, simple_fields, statement_dict)
        return search_query
# for simple_field in simple_fields:
# group_value = group_data.get(simple_field)
# if isinstance(group_value, list) and model_class._fields.get(simple_field).param_type != list:
# search_query = search_query.filter(BaseQueryStrategy.generate_list_filter(group_value, simple_field))
# else:
# search_query = search_query.filter(R.row[simple_field] == group_value)
# return search_query
from abc import ABC, abstractmethod
from enum import Enum
from typing import overload, List, Dict
from ..utils import prettify_value
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = [
'QueryRow', 'QueryStatement', 'StatementType', 'QueryStatementsCollection', 'Interval',
'EmptyQueryStatement'
]
class EmptyQueryStatement:  # pylint: disable=too-few-public-methods
    """
    Query statement that matches nothing.

    Returned when statements are merged into an unsatisfiable condition;
    combining it with anything keeps it empty.
    """

    def __and__(self, other) -> 'EmptyQueryStatement':
        # Empty absorbs everything
        return self

    def __eq__(self, other) -> bool:
        # All empty statements are interchangeable
        return isinstance(other, EmptyQueryStatement)
class Interval:
    """
    Value interval with independently open or closed endpoints.
    """

    __slots__ = ['left_bound', 'right_bound', 'left_close', 'right_close']

    def __init__(
            self, left_bound, right_bound,
            left_close: bool = False, right_close: bool = False) -> None:
        self.left_bound = left_bound
        self.right_bound = right_bound
        self.left_close = left_close
        self.right_close = right_close

    def contains_interval(self, other: 'Interval') -> bool:
        """Check that ``other`` lies fully inside this interval."""
        left_ok = (
            self.left_bound < other.left_bound
            or (self.left_bound == other.left_bound and (self.left_close or not other.left_close))
        )
        right_ok = (
            self.right_bound > other.right_bound
            or (self.right_bound == other.right_bound and (self.right_close or not other.right_close))
        )
        return left_ok and right_ok

    def clone(self) -> 'Interval':
        """Independent copy of this interval."""
        return Interval(
            self.left_bound, self.right_bound,
            left_close=self.left_close, right_close=self.right_close
        )

    def __eq__(self, other) -> bool:
        if not isinstance(other, Interval):
            return False
        return all(
            getattr(self, attribute) == getattr(other, attribute)
            for attribute in self.__slots__
        )

    def __contains__(self, item) -> bool:
        above_left = self.left_bound < item or (self.left_close and self.left_bound == item)
        below_right = self.right_bound > item or (self.right_close and self.right_bound == item)
        return above_left and below_right

    def __str__(self) -> str:
        left_mark = '[' if self.left_close else '('
        right_mark = ']' if self.right_close else ')'
        return f"{left_mark}{self.left_bound}, {self.right_bound}{right_mark}"
class StatementType(Enum):
    # Operator tokens used when rendering statements via __str__.
    # Member names also double as Python dunder names (__eq__, __lt__, ...)
    # when comparisons are delegated via f"__{statement_type.name}__",
    # except for `isin` and `bound`, which are handled explicitly.
    eq = '==' # pylint: disable=invalid-name
    lt = '<' # pylint: disable=invalid-name
    gt = '>' # pylint: disable=invalid-name
    ne = '!=' # pylint: disable=invalid-name
    le = '<=' # pylint: disable=invalid-name
    ge = '>=' # pylint: disable=invalid-name
    isin = 'in' # pylint: disable=invalid-name
    bound = 'bound' # pylint: disable=invalid-name
class QueryStatementsCollection:
    """
    Conjunction of several statements.

    Per-row statements are kept merged in ``keyword_statements`` (one per
    row name); row-vs-row ("compicated") statements are accumulated in
    ``complicated_statements``.
    """

    __slots__ = ['keyword_statements', 'complicated_statements']

    def __init__(self) -> None:
        self.keyword_statements: Dict[str, 'QueryStatement'] = {}
        self.complicated_statements: List['QueryStatement'] = []

    @property
    def compicated(self) -> bool:
        # The collection counts as compicated when any member is
        return bool(self.complicated_statements)

    @property
    def joined_statements(self) -> List['QueryStatement']:
        """
        All statements of the collection as one list.

        (This property was referenced by ``__and__`` but never defined,
        so merging two collections raised AttributeError.)
        """
        return list(self.keyword_statements.values()) + self.complicated_statements

    def add_statement(self, *statements: 'QueryStatement') -> bool:
        """
        Add statements, merging each with the same-row statement already
        present, if any.

        :return: False when a merge produced an empty (unsatisfiable) statement
        """
        for statement in statements:
            if statement.compicated:
                self.complicated_statements.append(statement)
            else:
                current_statement = self.keyword_statements.get(statement.left.row_name, None)
                if current_statement is not None:
                    merged_statements = current_statement & statement
                    self.keyword_statements[statement.left.row_name] = merged_statements
                    if isinstance(merged_statements, EmptyQueryStatement):
                        return False
                else:
                    self.keyword_statements[statement.left.row_name] = statement
        return True

    @overload
    def __and__(self, other: 'QueryStatement') -> 'QueryStatementsCollection':  # pylint: disable=function-redefined
        pass

    @overload
    def __and__(self, other: 'QueryStatementsCollection') -> 'QueryStatementsCollection':  # pylint: disable=function-redefined
        pass

    def __and__(self, other):  # pylint: disable=function-redefined
        if isinstance(other, QueryStatementsCollection):
            if not self.add_statement(*other.joined_statements):
                return EmptyQueryStatement()
        else:
            if not self.add_statement(other):
                return EmptyQueryStatement()
        return self

    def __eq__(self, other) -> bool:
        if not isinstance(other, QueryStatementsCollection):
            return False
        return self.complicated_statements == other.complicated_statements and self.keyword_statements == other.keyword_statements

    def __str__(self) -> str:
        # Join all parts with ' & '; the old version glued the keyword part
        # and the complicated part together without a separator.
        parts = [str(statement) for statement in self.keyword_statements.values()]
        parts.extend(str(statement) for statement in self.complicated_statements)
        return " & ".join(parts)

    def __repr__(self) -> str:
        return str(self)
class QueryStatement(ABC):
    """
    Base class for a single query condition, e.g. ``row == value``.

    ``left`` is normally a :any:`QueryRow`; ``right`` is a plain value,
    a tuple of variants (``isin``), an :any:`Interval` (``bound``) or
    another :any:`QueryRow` (a "compicated" statement).
    """

    __slots__ = ['left', 'right']

    # Each subclass overrides this with its StatementType member
    _statement_type = None

    def __init__(self, left, right) -> None:
        # Both leafs are normalized: enums become their names,
        # lists/dicts are walked recursively (see prettify_value)
        self.left = prettify_value(left)
        self.right = prettify_value(right)

    @property
    def statement_type(self) -> StatementType:
        return self._statement_type

    @staticmethod
    def _compare_leaf(leaf, other_leaf) -> bool:
        # QueryRow-like leafs provide is_same(); plain values fall back to ==
        # (QueryRow.__eq__ builds a statement, so == cannot be used on rows)
        if hasattr(leaf, 'is_same'):
            if not leaf.is_same(other_leaf):
                return False
        else:
            if leaf != other_leaf:
                return False
        return True

    def __and__(self, other: 'QueryStatement') -> QueryStatementsCollection:
        # Two simple statements over the same row merge into one statement;
        # otherwise both are gathered into a collection.  Note: the declared
        # return type is the collection, but a merge may also yield a single
        # statement or an EmptyQueryStatement.
        if not self.compicated and not other.compicated and self.left.row_name == other.left.row_name:
            return self._similar_merge(other)
        collection = QueryStatementsCollection()
        collection.add_statement(self)
        collection.add_statement(other)
        return collection

    @abstractmethod
    def _similar_merge(self, other: 'QueryStatement') -> 'QueryStatement':
        """
        Merge two statements over the same row as if joined by ``and``.
        Use this only for simple (non-compicated) query statements.
        To avoid code duplication, subclasses implement merges per this matrix:
        1. eq and ne with everything
        2. bound with everything except eq and ne
        3. isin with le, ge, gt, lt, isin
        4. le with ge, gt, lt, le
        5. lt with ge, gt, lt
        6. ge with gt, ge
        7. gt with gt
        """

    def __eq__(self, other) -> bool:
        if not isinstance(other, QueryStatement):
            return False
        if self.statement_type != other.statement_type:
            return False
        if not self._compare_leaf(self.left, other.left):
            return False
        if not self._compare_leaf(self.right, other.right):
            return False
        return True

    @property
    def compicated(self) -> bool:
        """
        Check if query statement has QueryRow on both leafs
        """
        return isinstance(self.right, type(self.left))

    def __str__(self) -> str:
        return f"{self.left} {self.statement_type.value} {self.right}"

    def __repr__(self) -> str:
        return str(self)
class QueryEqualStatement(QueryStatement):
    """Statement for ``row == value``."""

    _statement_type = StatementType.eq

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        # The equality survives the merge only when its value also
        # satisfies the other statement; otherwise the conjunction
        # is unsatisfiable.
        other_type = other.statement_type
        if other_type == StatementType.eq:
            compatible = other.right == self.right
        elif other_type in (StatementType.isin, StatementType.bound):
            compatible = self.right in other.right
        elif other_type == StatementType.lt:
            compatible = self.right < other.right
        elif other_type == StatementType.le:
            compatible = self.right <= other.right
        elif other_type == StatementType.ge:
            compatible = self.right >= other.right
        elif other_type == StatementType.gt:
            compatible = self.right > other.right
        else:  # StatementType.ne
            compatible = self.right != other.right
        return self if compatible else EmptyQueryStatement()
class QueryGreaterOrEqualStatement(QueryStatement):
    """Statement for ``row >= value``."""

    _statement_type = StatementType.ge

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        # Every type except ge/gt implements this merge itself -- delegate.
        if other.statement_type not in (StatementType.ge, StatementType.gt):
            return other._similar_merge(self)
        # Both are lower bounds: the stricter (larger) one wins.
        return self if self.right > other.right else other
class QueryGreaterStatement(QueryStatement):
    """Statement for ``row > value``."""

    _statement_type = StatementType.gt

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        # Every type except gt implements this merge itself -- delegate.
        if other.statement_type != StatementType.gt:
            return other._similar_merge(self)
        # Both are strict lower bounds: the larger one wins.
        return self if self.right > other.right else other
class QueryLowerOrEqualStatement(QueryStatement):
    """Statement for ``row <= value``."""

    _statement_type = StatementType.le

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        delegated_types = (
            StatementType.eq, StatementType.ne,
            StatementType.bound, StatementType.isin
        )
        if other.statement_type in delegated_types:
            return other._similar_merge(self)
        if other.statement_type in (StatementType.le, StatementType.lt):
            # Both are upper bounds: the stricter (smaller) one wins.
            return self if self.right < other.right else other
        # `other` is a lower bound (ge/gt): disjoint ranges give nothing;
        # overlapping ranges collapse into a bound statement.
        if self.right < other.right:
            return EmptyQueryStatement()
        return QueryBoundStatement(
            self.left,
            Interval(
                other.right, self.right,
                left_close=other.statement_type == StatementType.ge, right_close=True
            )
        )
class QueryLowerStatement(QueryStatement):
    """Statement for ``row < value``."""

    _statement_type = StatementType.lt

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        delegated_types = (
            StatementType.eq, StatementType.ne,
            StatementType.bound, StatementType.isin,
            StatementType.le
        )
        if other.statement_type in delegated_types:
            return other._similar_merge(self)
        if other.statement_type == StatementType.lt:
            # Both are strict upper bounds: the smaller one wins.
            return self if self.right < other.right else other
        # `other` is a lower bound (ge/gt): disjoint ranges give nothing;
        # overlapping ranges collapse into a right-open bound statement.
        if self.right < other.right:
            return EmptyQueryStatement()
        return QueryBoundStatement(
            self.left,
            Interval(
                other.right, self.right,
                left_close=other.statement_type == StatementType.ge, right_close=False
            )
        )
class QueryNotEqualStatement(QueryStatement):
    # Statement for ``row != value``.
    _statement_type = StatementType.ne

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        # True when the excluded value self.right also satisfies `other`,
        # i.e. the exclusion actually cuts into the other statement's range.
        compitability_check = (
            (other.statement_type == StatementType.eq and other.right == self.right)
            or
            (other.statement_type in [StatementType.isin, StatementType.bound] and self.right in other.right)
            or
            (other.statement_type == StatementType.lt and self.right < other.right)
            or
            (other.statement_type == StatementType.le and self.right <= other.right)
            or
            (other.statement_type == StatementType.ge and self.right >= other.right)
            or
            (other.statement_type == StatementType.gt and self.right > other.right)
            or
            (other.statement_type == StatementType.ne and self.right != other.right)
        )
        if not compitability_check:
            # The excluded value could never match `other` anyway,
            # so the ne statement adds nothing -- `other` subsumes it.
            return other
        # NOTE(review): this collapses e.g. `ne(5) & lt(10)` or `ne(5) & ne(6)`
        # to an empty statement although both conjunctions are satisfiable.
        # The statement model cannot represent "range minus a point", but
        # returning Empty silently drops rows that should match -- verify
        # whether this behavior is intended.
        return EmptyQueryStatement()
class QueryContainsStatement(QueryStatement):
    """Statement for ``row in variants``; ``right`` is a tuple of variants."""

    _statement_type = StatementType.isin

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        if other.statement_type in [StatementType.eq, StatementType.ne, StatementType.bound]:
            return other._similar_merge(self)
        if other.statement_type == StatementType.isin:
            # Conjunction of two variant sets is their intersection
            intersection = tuple(x for x in self.right if x in other.right)
            if intersection:
                return QueryContainsStatement(self.left, intersection)
            return EmptyQueryStatement()
        # le/lt/ge/gt: keep only the variants that satisfy the comparison.
        # (The old code dropped the whole statement when any single variant
        # failed, losing valid matches, e.g. isin(1, 9) & lt(5) -> isin(1).)
        method_name = f"__{other.statement_type.name}__"
        suitable_variants = tuple(
            variant for variant in self.right
            if getattr(variant, method_name)(other.right)
        )
        if not suitable_variants:
            return EmptyQueryStatement()
        if len(suitable_variants) == len(self.right):
            return self
        return QueryContainsStatement(self.left, suitable_variants)
class QueryBoundStatement(QueryStatement):
    """Statement for ``row in interval``; ``right`` is an :any:`Interval`."""

    _statement_type = StatementType.bound

    def _similar_merge(self, other: QueryStatement) -> QueryStatement:
        if other.statement_type in [StatementType.eq, StatementType.ne]:
            return other._similar_merge(self)
        if other.statement_type == StatementType.isin:
            # Keep only the variants that fall into the interval.
            # (The old code rejected the whole merge when any variant missed.)
            suitable_variants = tuple(x for x in other.right if x in self.right)
            if not suitable_variants:
                return EmptyQueryStatement()
            if len(suitable_variants) == len(other.right):
                return other
            return QueryContainsStatement(other.left, suitable_variants)
        # Normalize `other` to an interval (one-sided bounds reuse our
        # untouched side) and intersect.
        if other.statement_type == StatementType.bound:
            other_interval = other.right
        else:
            other_interval = self.right.clone()
            if other.statement_type in [StatementType.le, StatementType.lt]:
                other_interval.right_close = other.statement_type == StatementType.le
                other_interval.right_bound = other.right
            else:  # ge/gt
                other_interval.left_close = other.statement_type == StatementType.ge
                other_interval.left_bound = other.right
        return self._intersect(other_interval)

    def _intersect(self, other_interval: Interval) -> QueryStatement:
        """
        Intersect our interval with ``other_interval``.

        Replaces the old all-or-nothing containment check, which returned
        an empty statement for partially overlapping intervals (e.g.
        [1, 5] & [3, 8] should be [3, 5], not empty).
        """
        first, second = self.right, other_interval
        if first.left_bound > second.left_bound:
            left_bound, left_close = first.left_bound, first.left_close
        elif second.left_bound > first.left_bound:
            left_bound, left_close = second.left_bound, second.left_close
        else:
            left_bound, left_close = first.left_bound, first.left_close and second.left_close
        if first.right_bound < second.right_bound:
            right_bound, right_close = first.right_bound, first.right_close
        elif second.right_bound < first.right_bound:
            right_bound, right_close = second.right_bound, second.right_close
        else:
            right_bound, right_close = first.right_bound, first.right_close and second.right_close
        # Empty when the range collapses or degenerates to an excluded point
        if left_bound > right_bound:
            return EmptyQueryStatement()
        if left_bound == right_bound and not (left_close and right_close):
            return EmptyQueryStatement()
        return QueryBoundStatement(
            self.left,
            Interval(left_bound, right_bound, left_close=left_close, right_close=right_close)
        )
class QueryRow:
    # Represents a document field in query expressions; comparison
    # operators produce QueryStatement objects instead of booleans,
    # which is why instances of this class are not usable as dict keys
    # or in sets (no usable __eq__/__hash__ pair).
    __slots__ = ['row_name']

    def __init__(self, row_name: str) -> None:
        self.row_name = row_name

    def one_of(self, *variants) -> QueryStatement:
        # row.one_of(a, b) <=> "row value is one of the given variants"
        return QueryContainsStatement(self, variants)

    def contains(self, another_row: 'QueryRow') -> QueryStatement:
        # row.contains(other) <=> "this row's value contains the other
        # row's value"; note the operand order is swapped when building
        # the statement (left = contained element, right = container)
        return QueryContainsStatement(another_row, self)

    def is_same(self, other) -> bool:
        # Identity check by field name; __eq__ is occupied building statements
        if not isinstance(other, QueryRow):
            return False
        return self.row_name == other.row_name

    def __eq__(self, other) -> QueryStatement:
        return QueryEqualStatement(self, other)

    def __ge__(self, other) -> QueryStatement:
        return QueryGreaterOrEqualStatement(self, other)

    def __gt__(self, other) -> QueryStatement:
        return QueryGreaterStatement(self, other)

    def __ne__(self, other) -> QueryStatement:
        return QueryNotEqualStatement(self, other)

    def __lt__(self, other) -> QueryStatement:
        return QueryLowerStatement(self, other)

    def __le__(self, other) -> QueryStatement:
        return QueryLowerOrEqualStatement(self, other)

    def __str__(self) -> str:
        return f"row[{self.row_name}]"
from importlib import import_module
import logging
from typing import AsyncIterable, Any, Dict, Callable, Tuple
import rethinkdb as R
from .core import register
from enum import Enum
from typing import Any, Dict, Callable, Tuple
__author__ = "Bogdan Gladyshev"
__copyright__ = "Copyright 2017, Bogdan Gladyshev"
__credits__ = ["Bogdan Gladyshev"]
__license__ = "MIT"
__version__ = "0.2.1"
__version__ = "0.3.0"
__maintainer__ = "Bogdan Gladyshev"
__email__ = "siredvin.dark@gmail.com"
__status__ = "Production"
__all__ = ['fetch', 'query', 'fetch_cursor', 'async_query', 'process_functions']
__all__ = ['process_functions', 'prettify_value']
_log = logging.getLogger(__name__)
def fetch(rethink_dict):
    """
    Restore a model object from a raw RethinkDB document.

    The document must carry a '__python_info' section with the module and
    class names of the model; returns None when the class cannot be found.
    """
    python_info = rethink_dict['__python_info']
    module = import_module(python_info['module_name'])
    model_class = getattr(module, python_info['class_name'], None)
    if model_class is None:
        _log.warning('Task %s cannot be parsed, because class wasnt found!', rethink_dict['id'])
        return None
    record = model_class(id_=rethink_dict['id'])
    record.load(rethink_dict)
    return record
def query(db_query: R.RqlQuery):
    """
    Run ``db_query`` on the synchronous connection pool and convert the
    result into model objects via :any:`fetch`.
    """
    with register.pool.connect() as conn:
        result = db_query.run(conn)
        # Cursors and lists are both iterables of raw documents
        if isinstance(result, (R.net.DefaultCursor, list)):
            return [record for record in map(fetch, result) if record is not None]
        if isinstance(result, dict):
            return fetch(result)
        if not result:
            return result
        raise Exception("Unkown query result type!")
async def fetch_cursor(cursor) -> AsyncIterable[Dict[str, Any]]:
    """
    Wrap an asyncio RethinkDB cursor into an async iterable,
    so it can be consumed with ``async for``.
    """
    while await cursor.fetch_next():
        document = await cursor.next()
        yield document
async def async_query(db_query: R.RqlQuery):
    """
    Run ``db_query`` on the asyncio connection pool and convert the
    result into model objects via :any:`fetch`.
    """
    async with register.pool.connect() as conn:  # pylint: disable=not-async-context-manager
        result = await db_query.run(conn)
        # Duck-typed check: the asyncio driver class is not importable directly
        if result.__class__.__name__ == 'AsyncioCursor':
            records = [fetch(obj_data) async for obj_data in fetch_cursor(result)]
            return [record for record in records if record is not None]
        if isinstance(result, dict):
            return fetch(result)
        if isinstance(result, list):
            return [record for record in map(fetch, result) if record is not None]
        if not result:
            return result
        raise Exception("Unkown query result type!")
def process_functions(fields_dict: Dict, init_function: Callable, configure_function: Callable, definer_ignore: bool = False) -> Tuple[Callable, Callable]:
for key, value in sorted(fields_dict.items(), key=lambda x: x[0]):
# Skip service values
......@@ -91,3 +38,13 @@ def process_functions(fields_dict: Dict, init_function: Callable, configure_func
use_default=False
)
return init_function, configure_function
def prettify_value(value) -> Any:
    """
    Recursively convert a value to a storable representation.

    Enum members are replaced by their names; lists, tuples and dicts are
    prettified element-wise.  Tuple support generalizes the previous
    implementation, which returned tuples untouched (so enum members
    inside tuples were never converted); plain-value tuples behave as
    before.
    """
    if isinstance(value, Enum):
        return value.name
    if isinstance(value, list):
        return [prettify_value(x) for x in value]
    if isinstance(value, tuple):
        return tuple(prettify_value(x) for x in value)
    if isinstance(value, dict):
        return {prettify_value(k): prettify_value(v) for k, v in value.items()}
    return value
pylint>=1.7.4
pylint-common>=0.2.5
mypy>=0.550
pycodestyle>=2.3.1
twine>=1.9.1
sphinx_autodoc_typehints
ipdb
pytest
hypothesis
\ No newline at end of file
......@@ -11,7 +11,7 @@ with open('README.rst') as readme_file:
setup(
name='anji-orm',
version='0.2.1',
version='0.3.0',
description="RethinkDB based ORM",
long_description=readme,
author="Bogdan Gladyshev",
......@@ -22,7 +22,8 @@ setup(
install_requires=[
"async-repool>=0.1.0",
"humanize>=0.5.1",
"repool-forked>=0.3"
"repool-forked>=0.3",
"lazy>=1.3"
],
license="MIT license",
zip_safe=False,
......
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# AnjiORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 22 17:43:05 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
#sys.path.insert(0, os.path.dirname(os.path.abspath('.')))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx_autodoc_typehints'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'AnjiORM'
copyright = '2017, Bogdan Gladyshev'
author = 'Bogdan Gladyshev'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: these values are kept in sync by bumpversion (see .bumpversion.cfg,
# which lists source/conf.py); do not edit them by hand.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html', # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'AnjiORMdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AnjiORM.tex', 'AnjiORM Documentation',
     'Bogdan Gladyshev', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'anjiorm', 'AnjiORM Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' below is the
# sphinx-quickstart placeholder -- replace with a real description.
texinfo_documents = [
    (master_doc, 'AnjiORM', 'AnjiORM Documentation',
     author, 'AnjiORM', 'One line description of project.',
     'Miscellaneous'),
]
.. AnjiORM documentation master file, created by
sphinx-quickstart on Fri Dec 22 17:43:05 2017.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to AnjiORM's documentation!
===================================
.. toctree::
:maxdepth: 2
:caption: Contents:
AnjiORM is a very simple ORM for RethinkDB usage.
Getting started
---------------
Basically, to create simple model, you just need to inherit :py:class:`~anji_orm.model.Model` and some :py:mod:`~anji_orm.fields`.
Also, you should define :code:`_table` variable for class with table name in RethinkDB.
.. code-block:: python
from anji_orm import Model, StringField
class BaseModel(Model):
_table = 'base_table'
t1 = StringField()
t2 = StringField(secondary_index=True)
Advanced usage
--------------
Async mode
----------
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`