diff --git a/Jenkinsfile b/Jenkinsfile
index 436462bb2e8bd41781fe3211f495667e49fb8d9e..dfcddbb84bf5d041a6b6b7478296b65de68e636b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -6,7 +6,7 @@ def job_result_url = ''
 
 pipeline {
     agent {
-        label 'python'
+        label 'python3.6'
     }
 
     environment {
diff --git a/requirements.txt b/requirements.txt
index 994a587576da5969c8f0caa61d5dacb945928f47..7a50a86d4a02f1775d85ebc9e87b17c69d95d234 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,9 @@
 Sphinx
 python-dotenv>=0.5.1
 opencv-python
-deepaas==0.5.1
+deepaas>=1.0.1
+flaat>=0.5.1
+webargs>=5.5.2
 #
 ## backwards compatibility
 #pathlib2
@@ -10,3 +12,4 @@ deepaas==0.5.1
 # info on the system:
 py-cpuinfo
 psutil
+marshmallow
diff --git a/retinopathy_test/config.py b/retinopathy_test/config.py
index e059e37cd03086ac34ef05f7a2976a83d85c39d3..ec81a4c2e181bab3868455775da6b2d8e2239d47 100644
--- a/retinopathy_test/config.py
+++ b/retinopathy_test/config.py
@@ -1,7 +1,9 @@
 # -*- coding: utf-8 -*-
 from os import path
 import tensorflow as tf
-
+from webargs import fields, validate, ValidationError
+from marshmallow import Schema, INCLUDE
+
 # identify basedir for the package
 BASE_DIR = path.dirname(path.normpath(path.dirname(__file__)))
 # Retina_RemoteStorage = 'rshare:/deep-oc-apps/retinopathy_test'
@@ -15,28 +17,128 @@ Retina_LocalModelsServe = path.join(Retina_LocalModels, 'retinopathy_serve')
 Retina_TrainingData = "retinopathy_tr.tfrecords"
 Retina_ValidationData = "retinopathy_va.tfrecords"
 
-train_args = { 'train_epochs': {'default': 10,
-                              'help': 'Number of epochs to train on',
-                              'required': False
-                             },
-               'batch_size': {'default': 16,
-                              'help': 'Global Batch size',
-                              'required': False
-                              },
-               'num_gpus':   {'default': 1 if tf.test.is_gpu_available() else 0,
-                              'help': 'Number of GPUs to use, if available (0 = CPU)',
-                              'required': False
-                             },
-               'upload_back': {'default': False,
-                               'choices': [False, True],
-                               'help': 'Either upload a trained graph back to the remote storage (True) or not (False, default)',
-                               'required': False
-                              },
+def gpus_must_exist(val):
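+    """webargs validator: reject num_gpus > 0 when TensorFlow reports no GPU available."""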
+    if val > 0 and not tf.test.is_gpu_available():
+        raise ValidationError("Requested GPUs are not available.")
+
+train_args = { 'train_epochs': fields.Int(missing=10,
+                              description='Number of epochs to train on',
+                              required=False
+                             ),
+               'batch_size': fields.Int(missing=16,
+                              description='Global Batch size',
+                              required=False
+                             ),
+               'num_gpus':   fields.Int(missing=1,
+                              validate=gpus_must_exist,
+                              description='Number of GPUs to use, if available (0 = CPU)',
+                              required=False
+                             ),
+               'upload_back': fields.Bool(missing=False,
+                               enum=[False, True],
+                               description='Either upload a trained graph back to the remote storage (True) or not (False, default)',
+                               required=False
+                              ),
 }
-predict_args = {'trained_graph':   {'default': '1540408813_cpu',
-                             'choices': ['1540408813_cpu', '1533577729_gpu'],
-                             'help': 'Pre-trained graph to use',
-                             'required': False
-                           },
+
+predict_args = {'trained_graph': fields.Str(missing='1540408813_cpu',
+                             enum=['1540408813_cpu', '1533577729_gpu'],
+                             description='Pre-trained graph to use',
+                             required=False
+                           ),
+                'files': fields.Field(
+                            required=False,
+                            missing=None,
+                            type="file",
+                            data_key="data",
+                            location="form",
+                            description="Select the image you want to classify."
+                           ),
+                'urls': fields.Url(
+                            required=False,
+                            missing=None,
+                            description="Select an URL of the image you want to classify."
+                           )
 
 }
+
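+# class / place to describe arguments for predict()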
+class PredictArgsSchema(Schema):
+    class Meta:
+        unknown = INCLUDE  # supports extra parameters
+
+    trained_graph = fields.Str(
+        required=False,
+        missing='1540408813_cpu',
+        enum=['1540408813_cpu', '1533577729_gpu'],
+        description="Pre-trained graph to use"
+    )
+
+    files = fields.Field(
+        required=False,
+        missing=None,
+        type="file",
+        data_key="data",
+        location="form",
+        description="Select the image you want to classify."
+    )
+
+    urls = fields.Url(
+        required=False,
+        missing=None,
+        description="Select an URL of the image you want to classify."
+    )
+
+
+# class / place to describe arguments for train()
+class TrainArgsSchema(Schema):
+    class Meta:
+        unknown = INCLUDE  # supports extra parameters
+
+    train_epochs = fields.Integer(
+        required=False,
+        missing=10,
+        description="Number of training epochs")
+
+    batch_size = fields.Integer(
+        missing=16,
+        description='Global Batch size',
+        required=False)
+
+    num_gpus = fields.Integer(
+        missing=1,
+        validate=gpus_must_exist,
+        description='Number of GPUs to use, if available (0 = CPU)',
+        required=False)
+
+    upload_back = fields.Boolean(
+        missing=False,
+        enum=[False, True],
+        description='Either upload a trained graph back to the remote storage (True) or not (False, default)',
+        required=False)
diff --git a/retinopathy_test/models/model.py b/retinopathy_test/models/model.py
index 85ca763fd9f01ff9fb26f73c77d8a0ddaa0085a0..4214a833292a81a1e6d26aad3ba8f6de31776520 100644
--- a/retinopathy_test/models/model.py
+++ b/retinopathy_test/models/model.py
@@ -28,7 +28,55 @@ import deepaas
 from pkg_resources import parse_version
 import subprocess
 import time
+from webargs import fields, validate, ValidationError
+from aiohttp.web import HTTPBadRequest
+import json
+import mimetypes
+## Authorization
+from flaat import Flaat
+flaat = Flaat()
+
+# Switch for debugging in this script
+debug_model = True
+
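+# Decorator: convert exceptions raised by the wrapped function into
+# aiohttp HTTPBadRequest, so the API responds with a clear 400 error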
+def _catch_error(f):
+    def wrap(*args, **kwargs):
+        try:
+            return f(*args, **kwargs)
+        except Exception as e:
+            raise HTTPBadRequest(reason=e)
+    return wrap
+
+def _fields_to_dict(fields_in):
+    """
+    Function to convert marshmallow fields to dict()
+    """
+    dict_out = {}
+
+    for key, val in fields_in.items():
+        param = {}
+        param['default'] = val.missing
+        param['type'] = type(val.missing)
+        if key == 'files' or key == 'urls':
+            param['type'] = str
+
+        val_help = val.metadata['description']
+        if 'enum' in val.metadata:
+            val_help = "{}. Choices: {}".format(val_help,
+                                                val.metadata['enum'])
+        param['help'] = val_help
+
+        try:
+            val_req = val.required
+        except Exception:
+            val_req = False
+        param['required'] = val_req
+
+        dict_out[key] = param
+    return dict_out
 
+class EmptyObj:
+    pass
 
 def rclone_copy(src_path, dest_path, cmd='copy',):
     '''
@@ -59,29 +107,108 @@ def rclone_copy(src_path, dest_path, cmd='copy',):
         output, error = None, e
     return output, error
     
 def get_metadata():
+    """
+    Function to read metadata
+    """
 
     module = __name__.split('.', 1)
 
-    pkg = pkg_resources.get_distribution(module[0])
+    try:
+        pkg = pkg_resources.get_distribution(module[0])
+    except pkg_resources.RequirementParseError:
+        # if called from CLI, try to get pkg from the path
+        distros = list(pkg_resources.find_distributions(cfg.BASE_DIR,
+                                                        only=True))
+        if len(distros) == 1:
+            pkg = distros[0]
+    except Exception as e:
+        raise HTTPBadRequest(reason=e)
+
+    # deserialize key-word arguments
+    train_args = _fields_to_dict(get_train_args())
+    # make 'type' JSON serializable
+    for key, val in train_args.items():
+        train_args[key]['type'] = str(val['type'])
+
+    predict_args = _fields_to_dict(get_predict_args())
+    # make 'type' JSON serializable
+    for key, val in predict_args.items():
+        predict_args[key]['type'] = str(val['type'])
+
     meta = {
-        'Name': None,
-        'Version': None,
-        'Summary': None,
-        'Home-page': None,
-        'Author': None,
-        'Author-email': None,
-        'License': None,
+        'name' : None,
+        'version' : None,
+        'summary' : None,
+        'home-page' : None,
+        'author' : None,
+        'author-email' : None,
+        'license' : None,
+        'help-train' : train_args,
+        'help-predict' : predict_args
     }
-
     for line in pkg.get_metadata_lines("PKG-INFO"):
+        line_low = line.lower()  # compare keys case-insensitively
         for par in meta:
-            if line.startswith(par+":"):
+            if line_low.startswith(par.lower() + ":"):
                 _, value = line.split(": ", 1)
                 meta[par] = value
-
+
     return meta
 
+
+def predict(**kwargs):
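+    """
+    Dispatch the prediction request to predict_data() or predict_url(),
+    depending on whether a file upload or a URL was provided.
+    """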
+
+    print("predict(**kwargs) - kwargs: %s" % (kwargs)) if debug_model else ''
+
+    if (not any([kwargs['urls'], kwargs['files']]) or
+            all([kwargs['urls'], kwargs['files']])):
+        raise Exception("You must provide either 'url' or 'data' in the payload")
+
+    if kwargs['files']:
+        kwargs['files'] = [kwargs['files']]  # patch until list is available
+        return predict_data(kwargs)
+    elif kwargs['urls']:
+        kwargs['urls'] = [kwargs['urls']]  # patch until list is available
+        return predict_url(kwargs)
+
+
 def predict_file(img_path, trained_graph):
     """
     Function to make prediction on a local file
@@ -119,51 +246,72 @@ def predict_file(img_path, trained_graph):
     return results
 
 
-def predict_data(*args, **kwargs):
+def predict_data(*args):
     """
     Function to make prediction on an uploaded file
     """
-    deepaas_ver_cut = parse_version('0.5.0')
-    imgs = []
-    filenames = []
     
-    deepaas_ver = parse_version(deepaas.__version__)
-    print("[INFO] deepaas_version: %s" % deepaas_ver)
-    predict_debug = False
-    if predict_debug:
-        print('[DEBUG] predict_data - args: %s' % args)
-        print('[DEBUG] predict_data - kwargs: %s' % kwargs)
-    if deepaas_ver >= deepaas_ver_cut:
-        for arg in args:
-            imgs.append(arg['files'])
-            trained_graph = str(yaml.safe_load(arg.trained_graph))
-    else:
-        imgs = args[0]
-
-    if not isinstance(imgs, list):
-        imgs = [imgs]
             
-    for image in imgs:
-        if deepaas_ver >= deepaas_ver_cut:
-            f = open("/tmp/%s" % image[0].filename, "w+")
-            image[0].save(f.name)
-        else:
-            f = tempfile.NamedTemporaryFile(delete=False)
-            f.write(image)
-        f.close()
-        filenames.append(f.name)
-        print("Stored tmp file at: {} \t Size: {}".format(f.name,
-        os.path.getsize(f.name)))
+    print("predict_data(*args) - args: %s" % (args)) if debug_model else ''
+
+    files = []
+
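+    # 'files' in each deserialized argument dict holds uploaded file objects;
+    # their .filename attribute is the on-disk path of the stored upload
+    # (the code below reads its size and removes it after prediction)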
+    for arg in args:
+        file_objs = arg['files']
+        for f in file_objs:
+            files.append(f.filename)
+            if debug_model:
+                print("file_obj: name: {}, filename: {}, content_type: {}".format(
+                                                               f.name,
+                                                               f.filename,
+                                                               f.content_type))
+                print("File for prediction is at: {} \t Size: {}".format(
+                                                  f.filename,
+                                                  os.path.getsize(f.filename)))
+        trained_graph = arg['trained_graph']
 
     prediction = []
     try:
-        for imgfile in filenames:
+        for imgfile in files:
             prediction.append(str(predict_file(imgfile, trained_graph)))
             print("image: ", imgfile)
     except Exception as e:
         raise e
     finally:
-        for imgfile in filenames:
+        for imgfile in files:
             os.remove(imgfile)
 
     return prediction
@@ -185,6 +333,7 @@ def predict_url(*args):
     Function to make prediction on a URL
     """    
     message = 'Not implemented in the model (predict_url)'
+    message = {"Error": message}
     return message
 
 def convert_bytes(num):
@@ -205,11 +354,22 @@ def file_size(file_path):
         file_info = os.stat(file_path)
         return convert_bytes(file_info.st_size)
 
-
-def train(train_args):
+@flaat.login_required()  # restrict training to authorized users
+def train(**kwargs):
     """
-    Train network
+    Train network (transfer learning).
+
+    Keyword arguments are defined by TrainArgsSchema in config.py; see
+    https://docs.deep-hybrid-datacloud.eu/projects/deepaas/en/wip-api_v2/user/v2-api.html#deepaas.model.v2.base.BaseModel.train
     """
+    print("train(**kwargs) - kwargs: %s" % (kwargs)) if debug_model else ''
+    run_results = {"status": "ok",
+                   "sys_info": [],
+                   "training": [],
+                   }
+
     # Check if necessary local directories exist:
     if not os.path.exists(cfg.Retina_LocalDataRecords):
         print("[INFO] %s is not found locally, creating..." % 
@@ -220,10 +380,23 @@ def train(train_args):
               cfg.Retina_LocalModelsServe)
         os.makedirs(cfg.Retina_LocalModelsServe)  
 
+    # use the schema
+    schema = cfg.TrainArgsSchema()
+    # deserialize key-word arguments
+    train_args = schema.load(kwargs)
+
     # Take parameters defined via deepaas by a user
-    train_epochs = yaml.safe_load(train_args.train_epochs)
-    batch_size = yaml.safe_load(train_args.batch_size)
-    num_gpus = yaml.safe_load(train_args.num_gpus)
+    train_epochs = train_args['train_epochs']
+    batch_size = train_args['batch_size']
+    num_gpus = train_args['num_gpus']
+    upload_back = train_args['upload_back']
+    if debug_model:
+        print("train_args:", train_args)
+        print(type(train_args['train_epochs']), type(train_args['batch_size']))
+        print("Number of GPUs:", train_args['num_gpus'], num_gpus)
 
     # from deep-nextcloud into the container
     # data_origin = 'rshare:/records_short/'
@@ -267,6 +440,7 @@ def train(train_args):
         delattr(FLAGS, name)
 
     tf.logging.set_verbosity(tf.logging.INFO)
+    # TF2-compatible alternative: tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
     # define default FLAGS for retinopathy_main and _run_loop
     retimain.define_retinopathy_flags(batch_size=str(batch_size),
                                       train_epochs=str(train_epochs),
@@ -313,7 +487,7 @@ def train(train_args):
 
     e3=time.time()
     # Retina_LocalModelsServe and Retina_RemoteModelsUpload are defined in config.py #vk
-    upload_back = yaml.safe_load(train_args.upload_back)
     if(upload_back and os.path.exists(graph_zip_path)):
         graph_zip_dir, graph_zip_name = os.path.split(graph_zip_path)
         print("[INFO] Uploading {} to {} ...".format(graph_zip_name, 
@@ -347,27 +521,41 @@ def train(train_args):
     return message
 
 def get_train_args():
+    """
+    https://docs.deep-hybrid-datacloud.eu/projects/deepaas/en/wip-api_v2/user/v2-api.html#deepaas.model.v2.base.BaseModel.get_train_args
+    https://marshmallow.readthedocs.io/en/latest/api_reference.html#module-marshmallow.fields
+    :return: dictionary of marshmallow fields describing the train() arguments
+    """
 
-    train_args = cfg.train_args
-
-    # convert default values and possible 'choices' into strings
-    for key, val in train_args.items():
-        val['default'] = str(val['default']) #yaml.safe_dump(val['default']) #json.dumps(val['default'])
-        if 'choices' in val:
-            val['choices'] = [str(item) for item in val['choices']]
 
-    return train_args
+    return cfg.TrainArgsSchema().fields
 
-# !!! deepaas>=0.5.0 calls get_test_args() to get args for 'predict'
+# !!! deepaas V2 calls get_predict_args() to get args for 'predict'
-def get_test_args():
-    predict_args = cfg.predict_args
+def get_predict_args():
+    """
+    https://docs.deep-hybrid-datacloud.eu/projects/deepaas/en/wip-api_v2/user/v2-api.html#deepaas.model.v2.base.BaseModel.get_predict_args
+    :return: dictionary of marshmallow fields describing the predict() arguments
+    """
 
-    # convert default values and possible 'choices' into strings
-    for key, val in predict_args.items():
-        val['default'] = str(val['default'])  # yaml.safe_dump(val['default']) #json.dumps(val['default'])
-        if 'choices' in val:
-            val['choices'] = [str(item) for item in val['choices']]
-        #print(val['default'], type(val['default']))
+    return cfg.PredictArgsSchema().fields
 
-    return predict_args
diff --git a/setup.cfg b/setup.cfg
index 023ef94417992d17591cd39ea6210b734a598b39..85d411638648311a4de4084ab982acaa6473e5fe 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@ description-file =
     README.md
 author = HMGU
 author-email = itokeiic@gmail.com
-version = 0.1
+version = 0.5
 license = MIT
 home-page = http://github.com/itokeiic/retinopathy_test
 classifier =
@@ -14,10 +14,7 @@ classifier =
     License :: OSI Approved :: Apache Software License
     Operating System :: POSIX :: Linux
     Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.4
     Programming Language :: Python :: 3.6
 
 [files]
@@ -25,7 +22,6 @@ packages =
     retinopathy_test
 
 [entry_points]
-
-deepaas.model = 
+deepaas.v2.model = 
     retinopathy_test = retinopathy_test.models.model
 
diff --git a/test-requirements.txt b/test-requirements.txt
index d63c4cf4340a71103164552e702ec3f817cdd175..93b8ac4170e202704a9d25dd7a819e1799267fc1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,5 +14,6 @@ pytest-cov
 
 #tensorflow>=1.10.0
 pydot-ng
-deepaas==0.5.1
+deepaas>=1.0.1
 flaat
+aiohttp
diff --git a/tox.ini b/tox.ini
index 7d20d21b1aa509bf265d30715f2674d4bf58da4a..fea94128113da5416ffb2e7cffd02be17a002ee5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,17 +1,18 @@
 [tox]
 minversion = 1.6
-envlist = py{35,36,27},pep8
+envlist = py36,pep8
 skipsdist = True
 
 [testenv]
 usedevelop = True
-basepython = python3
+basepython = python3.6
 whitelist_externals =
   find
 install_command = pip install -U {opts} {packages}
+# use en_US.UTF-8 as C.UTF-8 doesn't exist in RHEL7
 setenv =
    VIRTUAL_ENV={envdir}
-   LC_ALL=C.UTF-8
+   LC_ALL=en_US.UTF-8
    OS_STDOUT_CAPTURE=1
    OS_STDERR_CAPTURE=1
    OS_TEST_TIMEOUT=160
@@ -38,25 +39,13 @@ commands =
   coverage xml -o cover/coverage.xml
   coverage report
 
-[testenv:py27]
-# TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
-basepython = python2.7
-commands =
-  {[testenv]commands}
-  stestr run {posargs} 
-
-[testenv:py35]
-# TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
-basepython = python3.5
-commands =
-  {[testenv]commands}
-  stestr run {posargs}
-
 [testenv:py36]
 # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
 basepython = python3.6
 commands =
-  {[testenv:py35]commands}
+  {[testenv]commands}
+  stestr -v run {posargs}
+
 
 [testenv:pep8]
 envdir = {toxworkdir}/shared