Commit ffab0752 authored by Thomas Phil

RM-53 made mockcloud work using local docker containers

parent 9f5700da
FROM bigr/fastr_worker
MAINTAINER Thomas Phil <thomas@tphil.nl>
FROM bigr/fastr_strong-subcortical_worker
MAINTAINER T. Phil <thomas@tphil.nl>
RUN mkdir /opt/fastr-conf
ENV FASTRHOME /opt/fastr-conf
ADD config.py /opt/fastr-conf/config.py
FROM ubuntu:16.04
MAINTAINER Thomas Phil <thomas@tphil.nl>
ARG FASTRBRANCH=develop
ENV FASTRBRANCH ${FASTRBRANCH}
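# ARG is build-time only; mirroring it into ENV keeps the branch name available inside the running container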
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y python python-pip mercurial
RUN hg clone https://sikerdebaard@bitbucket.org/bigr_erasmusmc/fastr && cd fastr && hg checkout $FASTRBRANCH && hg pull && hg up && pip install -e .
ENTRYPOINT ["/bin/bash"]
FROM alpine:3.6
FROM ubuntu:16.04
MAINTAINER Thomas Phil <thomas@tphil.nl>
RUN apk add --no-cache python2 python2-dev build-base
ARG FASTRVERSION=develop
ENV FASTRVERSION ${FASTRVERSION}
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y python python-pip && pip install fastr==$FASTRVERSION
RUN python2 -m ensurepip && pip install fastr
RUN apk del unzip build-base python2-dev
ENTRYPOINT ["/bin/bash"]
@@ -11,3 +11,4 @@ cmndr==1.0.5
gunicorn==19.7.1
SQLAlchemy==1.1.14
PyYAML==3.12
psutil==5.4.3
@@ -11,19 +11,23 @@ class AddJobCommand(Command):
Add a task to the resource manager
job:add
{shell : Shellcode to execute}
{cores : Amount of cores required for the task}
{mem : Amount of ram in GiB required for the task}
{image : Docker image to be used}
{script : Shellcode to be executed in docker image, base64 encoded}
{scratch : Does the docker image use a scratch mount?}
{cores : The amount of cores needed for execution}
{memory : The amount of memory in GiB needed for execution}
"""
def handle(self):
schedulerService = SchedulerDomain.schedulerService()
commandFactory = SchedulerDomain.commandFactory()
cmd = str(base64.b64decode(self.argument('shell')))
image = str(self.argument('image'))
script = str(base64.b64decode(self.argument('script'))).splitlines()
job_id = str(int(time.time())) + '-' + str(uuid.uuid4())
scratch = self._castToBool(self.argument('scratch'))
cores = int(self.argument('cores'))
ram = int(self.argument('mem'))
memory = int(self.argument('memory'))
job_id = str(int(time.time())) + '-' + str(uuid.uuid4())
command = commandFactory.newScheduleJobCommand(job_id, cmd, cores, ram)
command = commandFactory.newScheduleJobCommand(image=image, script=script, job_id=job_id, scratch=scratch, cores=cores, memory=memory)
schedulerService.getCommandBus().handle(command)
print(json.dumps({'job_id': job_id}))
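For reference, job:add expects {script} to be base64-encoded shell code, which handle() above decodes and splits into lines. A minimal sketch of preparing that argument (the script contents are illustrative):

import base64

script = 'echo "hello from the container"\nuname -a'
encoded = base64.b64encode(script.encode('utf-8'))
# pass encoded as the {script} argument; {image}, {scratch}, {cores} and {memory} follow the signature above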
@@ -31,7 +31,7 @@ class DeployManyCommand(Command):
cloudProviderName = Core.config().clouddomain.driver
profile = getattr(Core.config().clouddomain, cloudProviderName).default_profile
deployVms = commandFactory.newDeployVmsCommand(names=deployVmNameList, profile=profile, cores=cores, ram=ram)
deployVms = commandFactory.newDeployVms(names=deployVmNameList, profile=profile, cores=cores, ram=ram)
commandBus = cloudService.getCommandBus()
self.info('Deploying {0} VMs with cores={1} ram={2}GiB'.format(len(deployVmNameList), cores, ram))
@@ -26,7 +26,7 @@ class DeploySingleCommand(Command):
cloudProviderName = Core.config().clouddomain.driver
profile = getattr(Core.config().clouddomain, cloudProviderName).default_profile
deployVmsCommand = commandFactory.newDeployVmsCommand(names=[name], profile=profile, cores=cores, ram = ram)
deployVmsCommand = commandFactory.newDeployVms(names=[name], profile=profile, cores=cores, ram = ram)
commandBus = cloudService.getCommandBus()
@@ -15,7 +15,7 @@ class DestroyManyCommand(Command):
machines = self.argument('machines')
destroyVmsCommand = commandFactory.newDestroyVmsCommand(names=machines)
destroyVmsCommand = commandFactory.newDestroyVms(names=machines)
commandBus = cloudService.getCommandBus()
@@ -14,7 +14,7 @@ class DestroySingleCommand(Command):
commandFactory = CloudDomain.commandFactory()
machine = self.argument('machine')
destroyVmsCommand = commandFactory.newDestroyVmsCommand(names=[machine])
destroyVmsCommand = commandFactory.newDestroyVms(names=[machine])
commandBus = cloudService.getCommandBus()
@@ -17,7 +17,7 @@ class RunResourceManager(Command):
schedulerService = SchedulerDomain.schedulerService()
commandFactory = SchedulerDomain.commandFactory()
CloudDomain.cloudService().start_reactor() # enable salt reactor
#CloudDomain.cloudService().start_reactor() # enable salt reactor
command_bus = schedulerService.getCommandBus()
run_enqueued_jobs_command = commandFactory.newRunEnqueuedJobs()
@@ -26,14 +26,20 @@ class RunResourceManager(Command):
cleanup_jobs = commandFactory.newCleanupOldJobs()
log_stats = commandFactory.newLogStats()
CloudDomain.cloudService() # init clouddomain
self.info('Running.')
celery = Celery('strongr', broker=strongr.core.Core.config().celery.broker, backend=strongr.core.Core.config().celery.backend)
remotable_commands = strongr.core.Core.config().celery.remotable_commands.as_dict()
for domain in remotable_commands:
for command in remotable_commands[domain]:
strongr.core.Core.command_router().enable_route_for_command(celery, '{}.{}'.format(domain, command))
if hasattr(strongr.core.Core.config(), 'celery'): # don't load celery if it's not configured
celery = Celery('strongr', broker=strongr.core.Core.config().celery.broker, backend=strongr.core.Core.config().celery.backend)
remotable_commands = strongr.core.Core.config().celery.remotable_commands.as_dict()
for domain in remotable_commands:
for command in remotable_commands[domain]:
strongr.core.Core.command_router().enable_route_for_command(celery, '{}.{}'.format(domain, command))
else:
self.info('Celery not configured, skipping')
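# polling loop: dispatch enqueued jobs every second, re-evaluate scaling every 5 seconds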
schedule.every(1).seconds.do(command_bus.handle, run_enqueued_jobs_command)
schedule.every(5).seconds.do(command_bus.handle, check_scaling_command)
from .runshellcode import RunShellCode
from .runjob import RunJob
from .deployvms import DeployVms
from .destroyvms import DestroyVms
from .jobfinished import JobFinished
class RunCommandInContainer:
def __init__(self, container, command):
pass
class RunJob:
def __init__(self, host, image, script, job_id, scratch, cores, memory):
self.host = host
self.image = image
self.script = script
self.job_id = job_id
self.scratch = scratch
self.cores = cores
self.memory = memory
class RunShellCode:
def __init__(self, job_id, sh, host):
self.job_id = job_id
self.sh = sh
self.host = host
from strongr.clouddomain.command import DeployVms, RunShellCode, DestroyVms, JobFinished
from strongr.clouddomain.command import DeployVms, RunJob, DestroyVms, JobFinished
from strongr.core.exception import InvalidParameterException
@@ -29,7 +29,7 @@ class CommandFactory:
return JobFinished(job_id, ret, retcode)
def newDestroyVmsCommand(self, names):
def newDestroyVms(self, names):
""" Generates a new DestroyVm command
:param name: The name of the VM to be destroyed
@@ -43,7 +43,7 @@ class CommandFactory:
return DestroyVms(names=names)
def newDeployVmsCommand(self, names, profile, cores, ram):
def newDeployVms(self, names, profile, cores, ram):
""" Generates a new DeployVms command
:param names: A list of names
@@ -76,22 +76,39 @@ class CommandFactory:
return DeployVms(names, profile, cores, ram)
def newRunShellCodeCommand(self, job_id, sh, host):
def newRunJob(self, host, image, script, job_id, scratch, cores, memory):
""" Generates a new RunShellCode command
:param sh: runShellCode
:type sh: string
:param host: The hostname where the shellcode should be executed or '*' to execute on all hosts
:param host: the host where the command should be run
:type host: string
:param image: the docker image the script should run under
:type image: string
:param script: the shell commands to be run in the docker container, one per list entry
:type script: list
:param job_id: the name of the job to be used
:type job_id: string
:param scratch: should a scratch be mounted?
:type scratch: bool
:param cores: how many cores for this job?
:type cores: int
:param memory: how much memory for this job?
:type memory: int
:returns: A new RunShellCode command object
:rtype: RunShellCodeCommand
:returns: A new RunJob command object
:rtype: RunJob
"""
if not len(host) > 0:
raise InvalidParameterException('Host {0} is invalid'.format(host))
raise InvalidParameterException('host is invalid')
elif not len(image) > 0:
raise InvalidParameterException('image is invalid')
elif not len(script) > 0:
raise InvalidParameterException('script is invalid')
elif not len(job_id) > 0:
raise InvalidParameterException('job_id is invalid')
elif not len(sh) > 0:
raise InvalidParameterException('Shellcode {0} is invalid'.format(sh))
elif cores <= 0:
raise InvalidParameterException('cores is invalid')
elif memory <= 0:
raise InvalidParameterException('memory is invalid')
return RunShellCode(job_id=job_id, sh=sh, host=host)
return RunJob(host=host, image=image, script=script, job_id=job_id, scratch=scratch, cores=cores, memory=memory)
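A minimal usage sketch of the new factory method (the argument values are illustrative):

factory = CommandFactory()
command = factory.newRunJob(host='localhost', image='ubuntu:16.04', script=['echo hello'], job_id='1510000000-abcd', scratch=False, cores=1, memory=2)
# the command is then dispatched through the cloud domain's command bus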
@@ -2,7 +2,7 @@ from .callablecommandhandler import CallableCommandHandler
from .abstractdeployvmhandler import AbstractDeployVmHandler
from .abstractdeployvmshandler import AbstractDeployVmsHandler
from .abstractlistdeployedvmshandler import AbstractListDeployedVmsHandler
from .abstractrunshellcodehandler import AbstractRunShellCodeHandler
from .abstractrunjobhandler import AbstractRunJobHandler
from .abstractrequestjidstatushandler import AbstractRequestJidStatusHandler
from .abstractdestroyvmshandler import AbstractDestroyVmsHandler
from .abstractjobfinishedhandler import AbstractJobFinishedHandler
from . import CallableCommandHandler
class AbstractRunShellCodeHandler(CallableCommandHandler):
class AbstractRunJobHandler(CallableCommandHandler):
pass
from .deployvmhandler import DeployVmHandler
from .runshellcodehandler import RunShellCodeHandler
from .runjobhandler import RunJobHandler
from .listdeployedvmshandler import ListDeployedVmsHandler
from .deployvmshandler import DeployVmsHandler
from .requestjidstatushandler import RequestJidStatusHandler
from .destroyvmshandler import DestroyVmsHandler
from .jobfinishedhandler import JobFinishedHandler
from strongr.clouddomain.handler.abstract.cloud import AbstractDeployVmsHandler
class DeployVmsHandler(AbstractDeployVmsHandler):
def __call__(self, commands):
for command in commands:
pass
def __call__(self, command):
pass # we can't deploy VMs in the mockcloud
from strongr.clouddomain.handler.abstract.cloud import AbstractDeployVmHandler
from strongr.clouddomain.handler.abstract.cloud import AbstractDestroyVmsHandler
class DeployVmHandler(AbstractDeployVmHandler):
class DestroyVmsHandler(AbstractDestroyVmsHandler):
def __call__(self, command):
pass
pass # we can't destroy VMs in the mockcloud
from strongr.clouddomain.handler.abstract.cloud import AbstractJobFinishedHandler
class JobFinishedHandler(AbstractJobFinishedHandler):
def __call__(self, command):
pass
# convert command to inter-domain event
@@ -2,4 +2,4 @@ from strongr.clouddomain.handler.abstract.cloud import AbstractListDeployedVmsHandler
class ListDeployedVmsHandler(AbstractListDeployedVmsHandler):
def __call__(self, command):
pass
return {'up': ['localhost'], 'down': []}
from strongr.clouddomain.handler.abstract.cloud import AbstractRequestJidStatusHandler
import salt.config
import salt.runner
import strongr.core
import strongr.core.gateways
class RequestJidStatusHandler(AbstractRequestJidStatusHandler):
def __call__(self, query):
opts = salt.config.master_config(strongr.core.Core.config().clouddomain.OpenNebula.salt_config + '/master')
opts['quiet'] = True
runner = salt.runner.RunnerClient(opts)
cache = strongr.core.gateways.Gateways.cache()
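# cache the jobs.active lookup briefly (the third argument to cache.set is assumed to be a TTL in seconds) so bursts of status queries don't hammer the salt runner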
if not cache.exists('clouddomain.jobs.running'):
cache.set('clouddomain.jobs.running', runner.cmd('jobs.active'), 1)
jobs = cache.get('clouddomain.jobs.running')
if jobs is None:
jobs = {}
if query.jid not in jobs: # we only want to give status when the job is finished running
result = runner.cmd('jobs.lookup_jid', [query.jid])
return result
return None
from strongr.clouddomain.handler.abstract.cloud import AbstractRunJobHandler
import sys
from strongr.clouddomain.model.gateways import Gateways
if sys.version_info >= (3, 3):
# python 3.3+ provides shlex.quote
from shlex import quote
else:
from pipes import quote
import os
import subprocess
import tempfile
import threading
import strongr.core
class RunJobHandler(AbstractRunJobHandler):
def __call__(self, command):
thread = threading.Thread(target=self._run, args=(command,)) # run in separate thread so it doesn't block strongr
thread.start()
#self._run(command)
def _run(self, command):
inter_domain_event_factory = Gateways.inter_domain_event_factory()
inter_domain_events_publisher = strongr.core.Core.inter_domain_events_publisher()
volumes = ''
env = ''
if command.scratch:
if not os.path.isdir('/tmp/strongr_scratch'):
os.mkdir('/tmp/strongr_scratch', 0o700)
volumes = '--volume=/tmp/strongr_scratch:/scratch'
env = "-e SCRATCH_DIR='/scratch'"
cmd = 'docker run --rm {} {} -di --name {} -m {}g --cpus={} --entrypoint /bin/sh {}'.format(volumes, env, command.job_id, command.memory, command.cores, quote(command.image))
ret_code = subprocess.call(cmd, shell=True) # start docker container
if ret_code != 0:
raise Exception('Something went wrong while initializing docker image: {}'.format(cmd))
fd, tmpfile = tempfile.mkstemp()
with os.fdopen(fd, 'w') as fh:
fh.write("\n".join(command.script))
cmd = 'docker exec -i {} /bin/sh < {}'.format(command.job_id, tmpfile)
try:
stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
exit_code = 0
except subprocess.CalledProcessError as err:
# keep the failure output and exit code; the job-finished event is published once, below
stdout = err.output
exit_code = err.returncode
os.remove(tmpfile)
cmd = 'docker stop {}'.format(command.job_id)
ret_code = subprocess.call(cmd, shell=True)
if ret_code != 0:
raise Exception('Something went wrong while stopping docker image: {}'.format(cmd))
job_finished_event = inter_domain_event_factory.newJobFinishedEvent(command.job_id, stdout, exit_code)
inter_domain_events_publisher.publish(job_finished_event)
from strongr.clouddomain.handler.abstract.cloud import AbstractRunShellCodeHandler
class RunShellCodeHandler(AbstractRunShellCodeHandler):
def __call__(self, command):
pass
from .runshellcodehandler import RunShellCodeHandler
from .runjobhandler import RunJobHandler
from .listdeployedvmshandler import ListDeployedVmsHandler
from .deployvmshandler import DeployVmsHandler
from .requestjidstatushandler import RequestJidStatusHandler
from salt.exceptions import SaltSystemExit
from strongr.clouddomain.handler.abstract.cloud import AbstractDestroyVmsHandler
import salt.cloud
import strongr.core
import strongr.clouddomain.model.gateways
import logging
class DestroyVmsHandler(AbstractDestroyVmsHandler):
def __call__(self, command):
client = salt.cloud.CloudClient(strongr.core.Core.config().clouddomain.OpenNebula.salt_config + '/cloud')
logger = logging.getLogger(self.__class__.__name__)
ret = []
for chunked_names in self._chunk_list(command.names, 4):
ret.append(client.destroy(names=chunked_names))
for chunked_names in self._chunk_list(command.names, 1):
try:
ret.append(client.destroy(names=chunked_names))
except SaltSystemExit as e:
# an exception occurred within salt; normally the event below would be published through the salt event system
# assume the VM is no longer there and broadcast the vm-destroyed event from here
# if the VM is in fact still there and the error was caused by API rate limiting or a flaky connection,
# the cleanup script will remove the VM at a later time, but that cleanup will not trigger the event below
inter_domain_event_factory = strongr.clouddomain.model.gateways.Gateways.inter_domain_event_factory()
vmdestroyed_event = inter_domain_event_factory.newVmDestroyedEvent(chunked_names[0])
strongr.core.Core.inter_domain_events_publisher().publish(vmdestroyed_event)
logger.warning(e)
return ret
def _chunk_list(self, items, chunksize):
for i in range(0, len(items), chunksize):
yield items[i:i + chunksize]
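# e.g. list(self._chunk_list(['vm1', 'vm2', 'vm3'], 2)) yields [['vm1', 'vm2'], ['vm3']]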
from strongr.clouddomain.handler.abstract.cloud import AbstractRunShellCodeHandler
from strongr.clouddomain.handler.abstract.cloud import AbstractRunJobHandler
import strongr.core
import salt.client
class RunShellCodeHandler(AbstractRunShellCodeHandler):
class RunJobHandler(AbstractRunJobHandler):
def __call__(self, command):
local = salt.client.LocalClient()
local.cmd_async(command.host, 'cmd.run', [command.sh, "runas={}".format(strongr.core.Core.config().clouddomain.OpenNebula.runas)], jid=command.job_id)
import salt.cloud
class DeployVmHandler():
def __call__(self, command):
pass
@@ -7,10 +7,10 @@ from cmndr.handlers.locators import LazyLoadingInMemoryLocator
from cmndr.handlers.nameextractors import ClassNameExtractor
from strongr.clouddomain.handler.abstract.cloud import AbstractDestroyVmsHandler, AbstractDeployVmsHandler, \
AbstractListDeployedVmsHandler, AbstractRunShellCodeHandler,\
AbstractListDeployedVmsHandler, AbstractRunJobHandler,\
AbstractRequestJidStatusHandler, AbstractJobFinishedHandler
from strongr.clouddomain.command import DestroyVms, DeployVms, RunShellCode, JobFinished
from strongr.clouddomain.command import DestroyVms, DeployVms, RunJob, JobFinished
from strongr.clouddomain.query import ListDeployedVms, RequestJidStatus
import strongr.clouddomain.model.gateways as gateways
@@ -24,7 +24,7 @@ class AbstractCloudService():
_mappings = {
AbstractListDeployedVmsHandler: ListDeployedVms.__name__,
AbstractRunShellCodeHandler: RunShellCode.__name__,
AbstractRunJobHandler: RunJob.__name__,
AbstractDeployVmsHandler: DeployVms.__name__,
AbstractRequestJidStatusHandler: RequestJidStatus.__name__,
AbstractDestroyVmsHandler: DestroyVms.__name__,
@@ -33,10 +33,10 @@ class AbstractCloudService():
def __init__(self):
# map commands and handlers
for handler in self.getCommandHandlers():
for handler in self.get_command_handlers():
command = self._getCommandForHandler(handler)
self._commands[handler] = command
for handler in self.getQueryHandlers():
for handler in self.get_query_handlers():
command = self._getCommandForHandler(handler)
self._queries[handler] = command
@@ -56,11 +56,11 @@ class AbstractCloudService():
@abstractmethod
def getCommandHandlers(self):
def get_command_handlers(self):
return
@abstractmethod
def getQueryHandlers(self):
def get_query_handlers(self):
pass
def _getCommandForHandler(self, handler):
from strongr.clouddomain.model.gateways import Gateways
from .abstractcloudservice import AbstractCloudService
from strongr.clouddomain.handler.impl.cloud.mockcloud import ListDeployedVmsHandler, RunShellCodeHandler, DeployVmsHandler
from strongr.clouddomain.handler.impl.cloud.mockcloud import ListDeployedVmsHandler, RunJobHandler, DeployVmsHandler
import subprocess
import threading
import strongr.core
class MockCloud(AbstractCloudService):
def __init__(self, *args, **kwargs):
super(MockCloud, self).__init__(*args, **kwargs)
self._check_docker()
thread = threading.Thread(target=self._publish_localhost) # run in separate thread so it doesn't block strongr
thread.start()
def _publish_localhost(self):
import time
import psutil
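# wait for the rest of the stack to come up, then walk localhost through the lifecycle a real cloud VM would report: new -> created -> ready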
time.sleep(5)
inter_domain_event_factory = Gateways.inter_domain_event_factory()
inter_domain_events_publisher = strongr.core.Core.inter_domain_events_publisher()
vmnew_event = inter_domain_event_factory.newVmNewEvent('localhost', psutil.cpu_count(logical=True), psutil.virtual_memory().total / 1024 / 1024 / 1024)
inter_domain_events_publisher.publish(vmnew_event)
time.sleep(1)
vmcreated_event = inter_domain_event_factory.newVmCreatedEvent('localhost')
inter_domain_events_publisher.publish(vmcreated_event)
time.sleep(1)
vmready_event = inter_domain_event_factory.newVmReadyEvent('localhost')
inter_domain_events_publisher.publish(vmready_event)
def _check_docker(self):
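# probe the docker daemon; a nonzero exit code means the socket is unreachable or we lack permissions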
ret_code = subprocess.call('docker ps', shell=True)
if ret_code != 0:
raise Exception("Can't access docker sock. Is docker installed? Do you have sufficient privileges?")
def getCommandHandlers(self):
return [RunShellCodeHandler, DeployVmsHandler]
def get_command_handlers(self):
return [RunJobHandler, DeployVmsHandler]
def getQueryHandlers(self):
def get_query_handlers(self):
return [ListDeployedVmsHandler]
from strongr.clouddomain.handler.impl.cloud.opennebula import ListDeployedVmsHandler, \
RunShellCodeHandler, DeployVmsHandler, \
RunJobHandler, DeployVmsHandler, \
RequestJidStatusHandler, DestroyVmsHandler, \
JobFinishedHandler
@@ -20,8 +20,8 @@ class OpenNebula(AbstractCloudService):
salt_event_translator.setDaemon(True)
salt_event_translator.start() # start event translator thread if it wasn't running
def getCommandHandlers(self):
return [RunShellCodeHandler, DeployVmsHandler, DestroyVmsHandler, JobFinishedHandler]
def get_command_handlers(self):
return [RunJobHandler, DeployVmsHandler, DestroyVmsHandler, JobFinishedHandler]
def getQueryHandlers(self):
def get_query_handlers(self):
return [ListDeployedVmsHandler, RequestJidStatusHandler]
@@ -24,10 +24,10 @@ class Gateways(containers.DeclarativeContainer):
cache = providers.Singleton(get_cache)
lock = providers.Factory(get_lock)
redis = providers.Singleton(Redis.from_url, url=strongr.core.Core.config().redis.url)
redis = providers.Singleton(Redis.from_url, url=(strongr.core.Core.config().redis.url if hasattr(strongr.core.Core.config(),'redis') else ''))
sqlalchemy_engine = providers.ThreadLocalSingleton(engine_from_config, configuration=strongr.core.Core.config().db.engine.as_dict(), prefix='') # construct engine from config
sqlalchemy_session = providers.ThreadLocalSingleton(sessionmaker(bind=sqlalchemy_engine()))
sqlalchemy_base = providers.Singleton(declarative_base)
stats = providers.Singleton(_stats_drivers()[strongr.core.Core.config().stats.driver], config=strongr.core.Core.config().stats.config.as_dict())
stats = providers.Singleton(_stats_drivers()[strongr.core.Core.config().stats.driver], config=(strongr.core.Core.config().stats.config.as_dict() if hasattr(strongr.core.Core.config().stats, 'config') else {}))
from .user import User
from .token import Token
from .grant import Grant
from .client import Client
from sqlalchemy.orm import relationship
import strongr.core.gateways as gateways
from sqlalchemy import Column, ForeignKey, Integer, String, Enum, DateTime, func, LargeBinary, Text
from sqlalchemy.orm import relationship, synonym
from sqlalchemy import Column, ForeignKey, Integer
from strongr.schedulerdomain.model import JobState
from authlib.flask.oauth2.sqla import OAuth2ClientMixin
Base = gateways.Gateways.sqlalchemy_base()
class Client(Base):
class Client(Base, OAuth2ClientMixin):
__tablename__ = 'oauth_client'
name = Column(String(40))
client_id = Column(String(40), primary_key=True)
client_secret = Column(String(55), unique=True, index=True, nullable=False)
client_type = Column(String(20), default='public')
_redirect_uris = Column(Text)
default_scope = Column(Text, default='')
@property
def user(self):
#return User.query.get(1)
# this should link to the user table
pass
@property