From c137cfa8eee480ae8cda08e1f8f9937a11de1ef4 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Mon, 7 Jan 2019 20:01:14 +0900 Subject: [PATCH 01/11] return result_id in ApiEvaluate --- app/apis/api_service.py | 4 +++- app/drucker-grpc-proto | 2 +- app/models/evaluation_result.py | 17 ++++++++++++++++- app/test/test_api_service.py | 4 ++-- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/app/apis/api_service.py b/app/apis/api_service.py index 371b309..d158093 100644 --- a/app/apis/api_service.py +++ b/app/apis/api_service.py @@ -216,7 +216,7 @@ def post(self, application_id:int, service_id:int): .filter(EvaluationResult.model_id == sobj.model_id, EvaluationResult.evaluation_id == eobj.evaluation_id).one_or_none() if robj is not None and args.get('overwrite', False): - return json.loads(robj.result) + return robj.result eval_result_path = "eval-result-{0:%Y%m%d%H%M%S}.txt".format(datetime.datetime.utcnow()) drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) @@ -233,6 +233,8 @@ def post(self, application_id:int, service_id:int): else: robj.data_path = eval_result_path robj.result = result + db.session.flush() + response_body = robj.result db.session.commit() db.session.close() diff --git a/app/drucker-grpc-proto b/app/drucker-grpc-proto index ee7f712..c5d8274 160000 --- a/app/drucker-grpc-proto +++ b/app/drucker-grpc-proto @@ -1 +1 @@ -Subproject commit ee7f712d25f6929066fe710703c2edcc840f2728 +Subproject commit c5d827463f120f41bb21a101b6522678562f1f43 diff --git a/app/models/evaluation_result.py b/app/models/evaluation_result.py index 78cd386..6a8150f 100644 --- a/app/models/evaluation_result.py +++ b/app/models/evaluation_result.py @@ -1,4 +1,5 @@ import datetime +import json from sqlalchemy import ( Column, Integer, DateTime, Text, UniqueConstraint, ForeignKey, String @@ -21,9 +22,23 @@ class EvaluationResult(db.Model): model_id = Column(Integer, nullable=False) data_path = Column(String(512), nullable=False) evaluation_id = Column(Integer, ForeignKey('evaluations.evaluation_id'), nullable=False) - result = Column(Text, nullable=False) + _result = Column(Text, nullable=False) register_date = Column(DateTime, default=datetime.datetime.utcnow, nullable=False) + @property + def result(self): + res = json.loads(self._result) + res['result_id'] = self.evaluation_result_id + return res + + @result.setter + def result(self, value): + if isinstance(value, dict): + res = json.dumps(value) + else: + res = value + self._result = res + @property def serialize(self): return { diff --git a/app/test/test_api_service.py b/app/test/test_api_service.py index 812eb3f..0b1a0d6 100644 --- a/app/test/test_api_service.py +++ b/app/test/test_api_service.py @@ -20,7 +20,7 @@ def inner_method(*args, **kwargs): class ApiEvaluateTest(BaseTestCase): """Tests for ApiEvaluate. 
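Patch 01 above replaces the raw JSON column with a private `_result` Text column plus a `result` property that decodes the stored JSON on read and injects the row's `result_id`, while the setter accepts either a dict or a pre-serialized string. A minimal standalone sketch of that pattern; the table and column names here are illustrative, not the real EvaluationResult schema:

```
import json
from sqlalchemy import Column, Integer, Text
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Result(Base):
    """Illustrative table; the real model is EvaluationResult with column _result."""
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)
    _result = Column(Text, nullable=False)

    @property
    def result(self):
        # Decode the stored JSON and add the primary key, mirroring result_id above.
        res = json.loads(self._result)
        res['result_id'] = self.id
        return res

    @result.setter
    def result(self, value):
        # Accept either a dict or an already-serialized JSON string.
        self._result = json.dumps(value) if isinstance(value, dict) else value

r = Result(id=1)
r.result = {'accuracy': 0.9}
assert r.result == {'accuracy': 0.9, 'result_id': 1}
```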
""" - default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, + default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, 'result_id': 1, 'option': {}, 'precision': 0.0, 'recall': 0.0, 'status': True} def setUp(self): @@ -85,4 +85,4 @@ def test_post_duplicated(self): eobj = db.session.query(EvaluationResult)\ .filter(EvaluationResult.model_id == model_id, EvaluationResult.evaluation_id == evaluation_id).one() - self.assertEqual(json.loads(eobj.result), self.default_response) + self.assertEqual(eobj.result, self.default_response) From 48f004bacf1e7a3de1c51790ae3927447d3d1228 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 8 Jan 2019 13:03:32 +0900 Subject: [PATCH 02/11] add API for EvaluationResult grpc protocol --- app/apis/__init__.py | 2 + app/apis/api_evaluation.py | 116 +++++++++++++++++++++++++++ app/core/drucker_dashboard_client.py | 8 ++ 3 files changed, 126 insertions(+) create mode 100644 app/apis/api_evaluation.py diff --git a/app/apis/__init__.py b/app/apis/__init__.py index fbc088c..7712703 100644 --- a/app/apis/__init__.py +++ b/app/apis/__init__.py @@ -5,6 +5,7 @@ from apis.api_application import app_info_namespace from apis.api_service import srv_info_namespace from apis.api_model import mdl_info_namespace +from apis.api_evaluation import eval_info_namespace from apis.api_misc import misc_info_namespace from auth import Auth from kubernetes.client.rest import ApiException @@ -40,4 +41,5 @@ def default_error_handler(error): api.add_namespace(app_info_namespace, path='/api/applications') api.add_namespace(srv_info_namespace, path='/api/applications') api.add_namespace(mdl_info_namespace, path='/api/applications') +api.add_namespace(eval_info_namespace, path='/api/applications') api.add_namespace(misc_info_namespace, path='/api') diff --git a/app/apis/api_evaluation.py b/app/apis/api_evaluation.py new file mode 100644 index 0000000..8ec64e4 --- /dev/null +++ b/app/apis/api_evaluation.py @@ -0,0 +1,116 @@ +import datetime + +from flask_restplus import Namespace, fields, Resource, reqparse +from flask import stream_with_context, Response +from werkzeug.datastructures import FileStorage + +from app import logger +from models import db +from models import Service, Evaluation, EvaluationResult +from core.drucker_dashboard_client import DruckerDashboardClient +from utils.hash_util import HashUtil + + +eval_info_namespace = Namespace('evaluation', description='Evaluation Endpoint.') +success_or_not = eval_info_namespace.model('Success', { + 'status': fields.Boolean( + required=True + ), + 'message': fields.String( + required=True + ) +}) + + +@eval_info_namespace.route('//evaluation') +class ApiEvaluation(Resource): + upload_parser = reqparse.RequestParser() + upload_parser.add_argument('file', location='files', type=FileStorage, required=True) + + @eval_info_namespace.expect(upload_parser) + def post(self, application_id:int): + """update data to be evaluated""" + args = self.upload_parser.parse_args() + file = args['file'] + checksum = HashUtil.checksum(file) + + eobj = db.session.query(Evaluation).filter( + Evaluation.application_id == application_id, + Evaluation.checksum == checksum).one_or_none() + if eobj is not None: + return {"status": True, "evaluation_id": eobj.evaluation_id} + + eval_data_path = "eval-{0:%Y%m%d%H%M%S}.txt".format(datetime.datetime.utcnow()) + + sobj = Service.query.filter_by(application_id=application_id).first_or_404() + + drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) + response_body = 
drucker_dashboard_application.run_upload_evaluation_data(file, eval_data_path) + + if not response_body['status']: + raise Exception('Failed to upload') + eobj = Evaluation(checksum=checksum, application_id=application_id, data_path=eval_data_path) + db.session.add(eobj) + db.session.flush() + evaluation_id = eobj.evaluation_id + db.session.commit() + db.session.close() + + return {"status": True, "evaluation_id": evaluation_id} + + +@eval_info_namespace.route('//evaluation/') +class ApiEvaluation(Resource): + + @eval_info_namespace.marshal_with(success_or_not) + def delete(self, application_id:int, evaluation_id:int): + """delete data to be evaluated""" + eval_query = db.session.query(Evaluation)\ + .filter(Evaluation.application_id == application_id, + Evaluation.evaluation_id == evaluation_id) + if eval_query.one_or_none() is None: + return {"status": False, "message": "Not Found."}, 404 + + eval_query.delete() + db.session.query(EvaluationResult)\ + .filter(EvaluationResult.evaluation_id == evaluation_id).delete() + db.session.commit() + db.session.close() + + return {"status": True, "message": "Success."} + + +@eval_info_namespace.route('//evaluation_result/') +class ApiEvaluationResult(Resource): + + def get(self, application_id:int, eval_result_id:int): + """get detailed evaluation result""" + eval_with_result = db.session.query(Evaluation, EvaluationResult)\ + .filter(Evaluation.application_id == application_id, + EvaluationResult.evaluation_id == Evaluation.evaluation_id, + EvaluationResult.evaluation_result_id == eval_result_id).one_or_none() + if eval_with_result is None: + return {"status": False, "message": "Not Found."}, 404 + sobj = Service.query.filter_by(application_id=application_id).first_or_404() + drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) + eobj = eval_with_result.Evaluation + robj = eval_with_result.EvaluationResult + + return Response(stream_with_context(drucker_dashboard_application.run_evaluation_data(eobj.data_path, robj.data_path))) + + @eval_info_namespace.marshal_with(success_or_not) + def delete(self, application_id:int, eval_result_id:int): + """get detailed evaluation result""" + eval_with_result = db.session.query(Evaluation, EvaluationResult)\ + .filter(Evaluation.application_id == application_id, + EvaluationResult.evaluation_id == Evaluation.evaluation_id, + EvaluationResult.evaluation_result_id == eval_result_id).one_or_none() + if eval_with_result is None: + return {"status": False, "message": "Not Found."}, 404 + + db.session.query(EvaluationResult)\ + .filter(EvaluationResult.evaluation_result_id == eval_result_id).delete() + db.session.commit() + db.session.close() + + return {"status": True, "message": "Success."} diff --git a/app/core/drucker_dashboard_client.py b/app/core/drucker_dashboard_client.py index 405857e..4d06f1d 100644 --- a/app/core/drucker_dashboard_client.py +++ b/app/core/drucker_dashboard_client.py @@ -138,3 +138,11 @@ def run_upload_evaluation_data(self, f:FileStorage, data_path:str): response = protobuf_to_dict(self.stub.UploadEvaluationData(request_iterator), including_default_value_fields=True) return response + + @error_handling({"status": False}) + def run_evaluation_data(self, data_path:str, result_path:str): + for raw_response in self.stub.EvaluationResult(data_path, result_path): + response = protobuf_to_dict(raw_response, + including_default_value_fields=True) + response['status'] = True + yield response From bd0b804a78d1fae1a7dc73ded8a821e71f6fc9ea Mon Sep 17 00:00:00 2001 
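The `run_evaluation_data` method added to the client above wraps the server-streaming `EvaluationResult` RPC in a plain Python generator, converting each streamed protobuf message to a dict and tagging it with `status=True`. A rough sketch of that shape, with a Mock standing in for the generated stub and plain dicts standing in for the protobuf messages:

```
from unittest.mock import Mock

def run_evaluation_data(stub, request):
    """Yield one dict per streamed response; the real code converts protobuf
    messages with protobuf_to_dict and marks each record with status=True."""
    for raw_response in stub.EvaluationResult(request):
        record = dict(raw_response)   # stand-in for protobuf_to_dict(raw_response, ...)
        record['status'] = True
        yield record

# Stand-in stub whose EvaluationResult call returns an iterator of dict-like messages.
stub = Mock()
stub.EvaluationResult.return_value = iter([{'num': 1}, {'num': 2}])
assert list(run_evaluation_data(stub, request=None)) == [
    {'num': 1, 'status': True}, {'num': 2, 'status': True}]
```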
From: yuki-mt Date: Tue, 8 Jan 2019 17:13:24 +0900 Subject: [PATCH 03/11] add test --- app/README.md | 2 +- app/apis/api_application.py | 55 ------------ app/apis/api_evaluation.py | 14 +++- app/core/drucker_dashboard_client.py | 21 +++++ app/test/base.py | 1 + app/test/test_api_application.py | 50 ----------- app/test/test_api_evaluation.py | 120 +++++++++++++++++++++++++++ 7 files changed, 154 insertions(+), 109 deletions(-) delete mode 100644 app/test/test_api_application.py create mode 100644 app/test/test_api_evaluation.py diff --git a/app/README.md b/app/README.md index a85dca0..c6f6b52 100644 --- a/app/README.md +++ b/app/README.md @@ -25,5 +25,5 @@ $ sh drucker-grpc-proto/run_codegen.sh $ cp drucker-grpc-proto/protobuf/drucker_pb2.py . $ cp drucker-grpc-proto/protobuf/drucker_pb2_grpc.py . $ python -m unittest test/test_api_service.py -$ python -m unittest test/test_api_application.py +$ python -m unittest test/test_api_evaluation.py ``` diff --git a/app/apis/api_application.py b/app/apis/api_application.py index 0494319..826551a 100644 --- a/app/apis/api_application.py +++ b/app/apis/api_application.py @@ -123,58 +123,3 @@ def patch(self, application_id:int): db.session.commit() db.session.close() return response_body - -@app_info_namespace.route('//evaluation') -class ApiEvaluation(Resource): - upload_parser = reqparse.RequestParser() - upload_parser.add_argument('file', location='files', type=FileStorage, required=True) - - @app_info_namespace.expect(upload_parser) - def post(self, application_id:int): - """update data to be evaluated""" - args = self.upload_parser.parse_args() - file = args['file'] - checksum = HashUtil.checksum(file) - - eobj = db.session.query(Evaluation).filter( - Evaluation.application_id == application_id, - Evaluation.checksum == checksum).one_or_none() - if eobj is not None: - return {"status": True, "evaluation_id": eobj.evaluation_id} - - eval_data_path = "eval-{0:%Y%m%d%H%M%S}.txt".format(datetime.datetime.utcnow()) - - sobj = Service.query.filter_by(application_id=application_id).first_or_404() - - drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) - response_body = drucker_dashboard_application.run_upload_evaluation_data(file, eval_data_path) - - if not response_body['status']: - raise Exception('Failed to upload') - eobj = Evaluation(checksum=checksum, application_id=application_id, data_path=eval_data_path) - db.session.add(eobj) - db.session.flush() - evaluation_id = eobj.evaluation_id - db.session.commit() - db.session.close() - - return {"status": True, "evaluation_id": evaluation_id} - - -@app_info_namespace.route('//evaluation/') -class ApiEvaluation(Resource): - def delete(self, application_id:int, evaluation_id:int): - """delete data to be evaluated""" - eval_query = db.session.query(Evaluation)\ - .filter(Evaluation.application_id == application_id, - Evaluation.evaluation_id == evaluation_id) - if eval_query.one_or_none() is None: - return {"status": False}, 404 - - eval_query.delete() - db.session.query(EvaluationResult)\ - .filter(EvaluationResult.evaluation_id == evaluation_id).delete() - db.session.commit() - db.session.close() - - return {"status": True, "message": "Success."} diff --git a/app/apis/api_evaluation.py b/app/apis/api_evaluation.py index 8ec64e4..c506f55 100644 --- a/app/apis/api_evaluation.py +++ b/app/apis/api_evaluation.py @@ -1,7 +1,7 @@ import datetime +from itertools import chain from flask_restplus import Namespace, fields, Resource, reqparse -from flask import 
stream_with_context, Response from werkzeug.datastructures import FileStorage from app import logger @@ -80,7 +80,7 @@ def delete(self, application_id:int, evaluation_id:int): return {"status": True, "message": "Success."} -@eval_info_namespace.route('//evaluation_result/') +@eval_info_namespace.route('//evaluation_result/') class ApiEvaluationResult(Resource): def get(self, application_id:int, eval_result_id:int): @@ -96,7 +96,15 @@ def get(self, application_id:int, eval_result_id:int): eobj = eval_with_result.Evaluation robj = eval_with_result.EvaluationResult - return Response(stream_with_context(drucker_dashboard_application.run_evaluation_data(eobj.data_path, robj.data_path))) + response_body = list(drucker_dashboard_application.run_evaluation_data(eobj.data_path, robj.data_path)) + if len(response_body) == 0: + return {"status": False, "message": "Result Not Found."}, 404 + + return { + 'status': all(r['status'] for r in response_body), + 'metrics': response_body[0]['metrics'], + 'details': list(chain.from_iterable(r['detail'] for r in response_body)) + } @eval_info_namespace.marshal_with(success_or_not) def delete(self, application_id:int, eval_result_id:int): diff --git a/app/core/drucker_dashboard_client.py b/app/core/drucker_dashboard_client.py index 4d06f1d..283845c 100644 --- a/app/core/drucker_dashboard_client.py +++ b/app/core/drucker_dashboard_client.py @@ -139,10 +139,31 @@ def run_upload_evaluation_data(self, f:FileStorage, data_path:str): including_default_value_fields=True) return response + def __get_value_from_io(self, io:drucker_pb2.IO): + if io.WhichOneof('io_oneof') == 'str': + val = io.str.val + else: + val = io.tensor.val + + if len(val) == 1: + return val[0] + else: + return list(val) + @error_handling({"status": False}) def run_evaluation_data(self, data_path:str, result_path:str): for raw_response in self.stub.EvaluationResult(data_path, result_path): + details = [] + for detail in raw_response.detail: + details.append(dict( + protobuf_to_dict(detail, including_default_value_fields=True), + input=self.__get_value_from_io(detail.input), + label=self.__get_value_from_io(detail.label), + output=self.__get_value_from_io(detail.output), + score=detail.score[0] if len(detail.score) == 1 else list(detail.score) + )) response = protobuf_to_dict(raw_response, including_default_value_fields=True) + response['detail'] = details response['status'] = True yield response diff --git a/app/test/base.py b/app/test/base.py index b213a45..743fb19 100644 --- a/app/test/base.py +++ b/app/test/base.py @@ -11,6 +11,7 @@ class BaseTestCase(TestCase): def create_app(self): app = Flask(__name__) initialize_app(app) + app.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = False return app @classmethod diff --git a/app/test/test_api_application.py b/app/test/test_api_application.py deleted file mode 100644 index 64335a0..0000000 --- a/app/test/test_api_application.py +++ /dev/null @@ -1,50 +0,0 @@ -from unittest.mock import patch, Mock - -import drucker_pb2 -from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj -from io import BytesIO -from models import EvaluationResult, Evaluation - - -class ApiEvaluationTest(BaseTestCase): - """Tests for ApiEvaluation. 
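Both `__get_value_from_io` and the `score` handling above apply the same rule: a repeated protobuf field collapses to a scalar when it holds exactly one element and becomes a list otherwise. The rule in isolation, as a small sketch:

```
def scalar_or_list(values):
    """Return the single element of a length-1 sequence, otherwise a list copy."""
    vals = list(values)
    return vals[0] if len(vals) == 1 else vals

assert scalar_or_list(['input']) == 'input'
assert scalar_or_list([0.9, 1.3]) == [0.9, 1.3]
```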
- """ - - @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') - def test_post(self, mock_stub_class): - mock_stub_obj = Mock() - mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') - mock_stub_class.return_value = mock_stub_obj - aobj = create_app_obj() - create_service_obj(aobj.application_id) - - url = f'/api/applications/{aobj.application_id}/evaluation' - content_type = 'multipart/form-data' - file_content = b'my file contents' - response = self.client.post(url, - content_type=content_type, - data={'file': (BytesIO(file_content), "file.txt")}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) - - # duplication check - response = self.client.post(url, - content_type=content_type, - data={'file': (BytesIO(file_content), "file.txt")}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) - - def test_delete(self): - app_id = create_app_obj().application_id - eobj = create_eval_obj(app_id, save=True) - sobj = create_service_obj(app_id) - create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) - response = self.client.delete(f'/api/applications/{app_id}/evaluation/{eobj.evaluation_id}') - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'message': 'Success.'}) - self.assertEqual(Evaluation.query.all(), []) - self.assertEqual(EvaluationResult.query.all(), []) - - response = self.client.delete(f'/api/applications/{app_id}/evaluation/101') - self.assertEqual(404, response.status_code) - self.assertEqual(response.json, {'status': False}) diff --git a/app/test/test_api_evaluation.py b/app/test/test_api_evaluation.py new file mode 100644 index 0000000..83e361f --- /dev/null +++ b/app/test/test_api_evaluation.py @@ -0,0 +1,120 @@ +from unittest.mock import patch, Mock + +import drucker_pb2 +from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj +from io import BytesIO +from models import EvaluationResult, Evaluation + + +class ApiEvaluationTest(BaseTestCase): + """Tests for ApiEvaluation. 
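The duplication check exercised by these upload tests depends on `HashUtil.checksum`, which hashes the uploaded file in chunks; patch 06 later in this series also rewinds the stream so the same file object can still be uploaded afterwards. A small sketch of that checksum helper under those assumptions:

```
import hashlib
from io import BytesIO

def checksum(f, chunk_size=8192):
    """MD5 of a file-like object, read in chunks, rewound afterwards so the
    caller can still read the data (the rewind is what patch 06 adds)."""
    hash_md5 = hashlib.md5()
    for chunk in iter(lambda: f.read(chunk_size), b''):
        hash_md5.update(chunk)
    f.seek(0)
    return hash_md5.hexdigest()

f = BytesIO(b'my file contents')
assert checksum(f) == checksum(f)   # second call works because of the rewind
```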
+ """ + + @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') + def test_post(self, mock_stub_class): + mock_stub_obj = Mock() + mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') + mock_stub_class.return_value = mock_stub_obj + aobj = create_app_obj() + + url = f'/api/applications/{aobj.application_id}/evaluation' + content_type = 'multipart/form-data' + file_content = b'my file contents' + response = self.client.post(url, + content_type=content_type, + data={'file': (BytesIO(file_content), "file.txt")}) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) + + # duplication check + response = self.client.post(url, + content_type=content_type, + data={'file': (BytesIO(file_content), "file.txt")}) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) + + def test_delete(self): + app_id = create_app_obj().application_id + eobj = create_eval_obj(app_id, save=True) + sobj = create_service_obj(app_id) + create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) + response = self.client.delete(f'/api/applications/{app_id}/evaluation/{eobj.evaluation_id}') + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, {'status': True, 'message': 'Success.'}) + self.assertEqual(Evaluation.query.all(), []) + self.assertEqual(EvaluationResult.query.all(), []) + + response = self.client.delete(f'/api/applications/{app_id}/evaluation/101') + self.assertEqual(404, response.status_code) + self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'}) + + +class ApiEvaluationResultTest(BaseTestCase): + """Tests for ApiEvaluationResult. 
+ """ + default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, + 'option': {}, 'precision': 0.0, 'recall': 0.0} + + @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') + def test_get(self, mock_stub_class): + mock_stub_obj = Mock() + res = drucker_pb2.EvaluationResultResponse( + metrics=drucker_pb2.EvaluationMetrics(), + detail=[ + drucker_pb2.EvaluationResultResponse.Detail( + input=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['input'])), + label=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), + output=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), + is_correct=True, + score=[1.0] + ), + drucker_pb2.EvaluationResultResponse.Detail( + input=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[1], val=[0.5])), + label=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 1.3])), + output=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 0.3])), + is_correct=False, + score=[0.5, 0.5] + ) + ]) + mock_stub_obj.EvaluationResult.return_value = iter(res for _ in range(2)) + mock_stub_class.return_value = mock_stub_obj + app_id = create_app_obj().application_id + eobj = create_eval_obj(app_id, save=True) + sobj = create_service_obj(app_id) + robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) + response = self.client.get(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}') + self.assertEqual(200, response.status_code) + self.assertEqual(response.json['status'], True) + self.assertEqual(response.json['metrics'], self.default_response) + details = response.json['details'] + self.assertEqual(len(details), 4) + self.assertEqual(details[0], {'input': 'input', 'label': 'test', 'output': 'test', 'score': 1.0, 'is_correct': True}) + self.assertEqual(details[1], {'input': 0.5, 'label': [0.9, 1.3], 'output': [0.9, 0.3], 'score': [0.5, 0.5], 'is_correct': False}) + + @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') + def test_get_not_found(self, mock_stub_class): + app_id = create_app_obj().application_id + eobj = create_eval_obj(app_id, save=True) + sobj = create_service_obj(app_id) + robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) + response = self.client.get(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}') + self.assertEqual(404, response.status_code) + self.assertEqual(response.json, {'status': False, 'message': 'Result Not Found.'}) + + response = self.client.get(f'/api/applications/{app_id}/evaluation_result/101') + self.assertEqual(404, response.status_code) + self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'}) + + def test_delete(self): + app_id = create_app_obj().application_id + eobj = create_eval_obj(app_id, save=True) + sobj = create_service_obj(app_id) + robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) + response = self.client.delete(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}') + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, {'status': True, 'message': 'Success.'}) + self.assertEqual(EvaluationResult.query.all(), []) + + response = self.client.delete(f'/api/applications/{app_id}/evaluation_result/101') + self.assertEqual(404, response.status_code) + self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'}) From 6a4c51d640abd1295c05a0bbfa39108f61daa463 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 
8 Jan 2019 17:48:22 +0900 Subject: [PATCH 04/11] move ApiEvaluate to api_evaluation.py --- app/README.md | 1 - app/apis/api_application.py | 5 +- app/apis/api_evaluation.py | 57 +++++++++++++ app/apis/api_service.py | 58 +------------- app/test/test_api_evaluation.py | 136 +++++++++++++++++++++++++------- app/test/test_api_service.py | 88 --------------------- 6 files changed, 166 insertions(+), 179 deletions(-) delete mode 100644 app/test/test_api_service.py diff --git a/app/README.md b/app/README.md index c6f6b52..79cbd85 100644 --- a/app/README.md +++ b/app/README.md @@ -24,6 +24,5 @@ $ cd app $ sh drucker-grpc-proto/run_codegen.sh $ cp drucker-grpc-proto/protobuf/drucker_pb2.py . $ cp drucker-grpc-proto/protobuf/drucker_pb2_grpc.py . -$ python -m unittest test/test_api_service.py $ python -m unittest test/test_api_evaluation.py ``` diff --git a/app/apis/api_application.py b/app/apis/api_application.py index 826551a..4e2db98 100644 --- a/app/apis/api_application.py +++ b/app/apis/api_application.py @@ -1,15 +1,12 @@ import uuid -import datetime from flask_restplus import Namespace, fields, Resource, reqparse -from werkzeug.datastructures import FileStorage from app import logger from models import db -from models import Application, Service, Evaluation, EvaluationResult +from models import Application, Service from core.drucker_dashboard_client import DruckerDashboardClient from apis.common import DatetimeToTimestamp -from utils.hash_util import HashUtil app_info_namespace = Namespace('applications', description='Application Endpoint.') diff --git a/app/apis/api_evaluation.py b/app/apis/api_evaluation.py index c506f55..b3b8d0a 100644 --- a/app/apis/api_evaluation.py +++ b/app/apis/api_evaluation.py @@ -1,4 +1,5 @@ import datetime +import json from itertools import chain from flask_restplus import Namespace, fields, Resource, reqparse @@ -80,6 +81,62 @@ def delete(self, application_id:int, evaluation_id:int): return {"status": True, "message": "Success."} +@eval_info_namespace.route('//evaluate') +class ApiEvaluate(Resource): + eval_parser = reqparse.RequestParser() + eval_parser.add_argument('service_id', location='form', type=int, required=True) + eval_parser.add_argument('evaluation_id', location='form', type=int, required=False) + eval_parser.add_argument('overwrite', location='form', type=bool, required=False) + + @eval_info_namespace.expect(eval_parser) + def post(self, application_id:int): + """evaluate""" + args = self.eval_parser.parse_args() + eval_id = args.get('evaluation_id', None) + service_id = args['service_id'] + if eval_id: + eobj = Evaluation.query.filter_by( + application_id=application_id, + evaluation_id=eval_id).first_or_404() + else: + # if evaluation_id is not given, use the lastest one. 
+ eobj = Evaluation.query\ + .filter_by(application_id=application_id)\ + .order_by(Evaluation.register_date.desc()).first_or_404() + + sobj = Service.query.filter_by( + application_id=application_id, + service_id=service_id).first_or_404() + + robj = db.session.query(EvaluationResult)\ + .filter(EvaluationResult.model_id == sobj.model_id, + EvaluationResult.evaluation_id == eobj.evaluation_id).one_or_none() + if robj is not None and args.get('overwrite', False): + return robj.result + + eval_result_path = "eval-result-{0:%Y%m%d%H%M%S}.txt".format(datetime.datetime.utcnow()) + drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) + response_body = drucker_dashboard_application.run_evaluate_model(eobj.data_path, eval_result_path) + + if response_body['status']: + result = json.dumps(response_body) + if robj is None: + robj = EvaluationResult(model_id=sobj.model_id, + data_path=eval_result_path, + evaluation_id=eobj.evaluation_id, + result=result) + db.session.add(robj) + else: + robj.data_path = eval_result_path + robj.result = result + db.session.flush() + response_body = robj.result + db.session.commit() + db.session.close() + + return response_body + + @eval_info_namespace.route('//evaluation_result/') class ApiEvaluationResult(Resource): diff --git a/app/apis/api_service.py b/app/apis/api_service.py index d158093..66651ef 100644 --- a/app/apis/api_service.py +++ b/app/apis/api_service.py @@ -1,11 +1,8 @@ import datetime -import json from flask_restplus import Namespace, fields, Resource, reqparse -from app import logger -from models import db, Kubernetes, Application, Service, EvaluationResult, Evaluation -from core.drucker_dashboard_client import DruckerDashboardClient +from models import db, Kubernetes, Application, Service from apis.common import DatetimeToTimestamp from apis.api_kubernetes import update_dbs_kubernetes, switch_drucker_service_model_assignment @@ -186,56 +183,3 @@ def delete(self, application_id:int, service_id:int): db.session.commit() db.session.close() return response_body - -@srv_info_namespace.route('//services//evaluate') -class ApiEvaluate(Resource): - eval_parser = reqparse.RequestParser() - eval_parser.add_argument('evaluation_id', location='form', type=int, required=False) - eval_parser.add_argument('overwrite', location='form', type=bool, required=False) - - @srv_info_namespace.expect(eval_parser) - def post(self, application_id:int, service_id:int): - """evaluate""" - args = self.eval_parser.parse_args() - eval_id = args.get('evaluation_id', None) - if eval_id: - eobj = Evaluation.query.filter_by( - application_id=application_id, - evaluation_id=eval_id).first_or_404() - else: - # if evaluation_id is not given, use the lastest one. 
- eobj = Evaluation.query\ - .filter_by(application_id=application_id)\ - .order_by(Evaluation.register_date.desc()).first_or_404() - - sobj = Service.query.filter_by( - application_id=application_id, - service_id=service_id).first_or_404() - - robj = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == sobj.model_id, - EvaluationResult.evaluation_id == eobj.evaluation_id).one_or_none() - if robj is not None and args.get('overwrite', False): - return robj.result - - eval_result_path = "eval-result-{0:%Y%m%d%H%M%S}.txt".format(datetime.datetime.utcnow()) - drucker_dashboard_application = DruckerDashboardClient(logger=logger, host=sobj.host) - response_body = drucker_dashboard_application.run_evaluate_model(eobj.data_path, eval_result_path) - - if response_body['status']: - result = json.dumps(response_body) - if robj is None: - robj = EvaluationResult(model_id=sobj.model_id, - data_path=eval_result_path, - evaluation_id=eobj.evaluation_id, - result=result) - db.session.add(robj) - else: - robj.data_path = eval_result_path - robj.result = result - db.session.flush() - response_body = robj.result - db.session.commit() - db.session.close() - - return response_body diff --git a/app/test/test_api_evaluation.py b/app/test/test_api_evaluation.py index 83e361f..86322a4 100644 --- a/app/test/test_api_evaluation.py +++ b/app/test/test_api_evaluation.py @@ -1,20 +1,49 @@ from unittest.mock import patch, Mock +import json +from copy import deepcopy import drucker_pb2 from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj from io import BytesIO -from models import EvaluationResult, Evaluation +from models import EvaluationResult, Evaluation, db + + +def patch_stub(func): + def inner_method(*args, **kwargs): + mock_stub_obj = Mock() + mock_stub_obj.EvaluateModel.return_value = drucker_pb2.EvaluateModelResponse() + mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') + res = drucker_pb2.EvaluationResultResponse( + metrics=drucker_pb2.EvaluationMetrics(), + detail=[ + drucker_pb2.EvaluationResultResponse.Detail( + input=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['input'])), + label=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), + output=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), + is_correct=True, + score=[1.0] + ), + drucker_pb2.EvaluationResultResponse.Detail( + input=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[1], val=[0.5])), + label=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 1.3])), + output=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 0.3])), + is_correct=False, + score=[0.5, 0.5] + ) + ]) + mock_stub_obj.EvaluationResult.return_value = iter(res for _ in range(2)) + with patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub', + new=Mock(return_value=mock_stub_obj)): + return func(*args, **kwargs) + return inner_method class ApiEvaluationTest(BaseTestCase): """Tests for ApiEvaluation. 
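The `patch_stub` decorator introduced in this test file wraps each test so the generated gRPC stub class is replaced by a Mock returning canned responses. A self-contained sketch of the same decorator pattern, with a `types.SimpleNamespace` standing in for the `drucker_pb2_grpc` module and a made-up canned response:

```
import types
from unittest.mock import Mock, patch

# Stand-in for the module that holds the generated stub class (assumption).
fake_grpc = types.SimpleNamespace(DruckerDashboardStub=object)

def patch_stub(func):
    """Run the wrapped test with the stub class replaced by a Mock."""
    def inner_method(*args, **kwargs):
        mock_stub_obj = Mock()
        mock_stub_obj.EvaluateModel.return_value = {'status': True}  # canned response
        with patch.object(fake_grpc, 'DruckerDashboardStub',
                          new=Mock(return_value=mock_stub_obj)):
            return func(*args, **kwargs)
    return inner_method

@patch_stub
def test_evaluate_uses_mocked_stub():
    stub = fake_grpc.DruckerDashboardStub('host:5000')
    assert stub.EvaluateModel()['status'] is True

test_evaluate_uses_mocked_stub()
```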
""" - @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') - def test_post(self, mock_stub_class): - mock_stub_obj = Mock() - mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') - mock_stub_class.return_value = mock_stub_obj + @patch_stub + def test_post(self): aobj = create_app_obj() url = f'/api/applications/{aobj.application_id}/evaluation' @@ -55,29 +84,8 @@ class ApiEvaluationResultTest(BaseTestCase): default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, 'option': {}, 'precision': 0.0, 'recall': 0.0} - @patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') - def test_get(self, mock_stub_class): - mock_stub_obj = Mock() - res = drucker_pb2.EvaluationResultResponse( - metrics=drucker_pb2.EvaluationMetrics(), - detail=[ - drucker_pb2.EvaluationResultResponse.Detail( - input=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['input'])), - label=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), - output=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['test'])), - is_correct=True, - score=[1.0] - ), - drucker_pb2.EvaluationResultResponse.Detail( - input=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[1], val=[0.5])), - label=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 1.3])), - output=drucker_pb2.IO(tensor=drucker_pb2.Tensor(shape=[2], val=[0.9, 0.3])), - is_correct=False, - score=[0.5, 0.5] - ) - ]) - mock_stub_obj.EvaluationResult.return_value = iter(res for _ in range(2)) - mock_stub_class.return_value = mock_stub_obj + @patch_stub + def test_get(self): app_id = create_app_obj().application_id eobj = create_eval_obj(app_id, save=True) sobj = create_service_obj(app_id) @@ -118,3 +126,73 @@ def test_delete(self): response = self.client.delete(f'/api/applications/{app_id}/evaluation_result/101') self.assertEqual(404, response.status_code) self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'}) + + +class ApiEvaluateTest(BaseTestCase): + """Tests for ApiEvaluate. 
+ """ + default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, 'result_id': 1, + 'option': {}, 'precision': 0.0, 'recall': 0.0, 'status': True} + + @patch_stub + def test_post(self): + aobj = create_app_obj() + sobj = create_service_obj(aobj.application_id) + model_id = sobj.model_id + evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id + + response = self.client.post(f'/api/applications/{aobj.application_id}/evaluate', + data={'evaluation_id': evaluation_id, 'service_id': sobj.service_id}) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, self.default_response) + eobj_exists = db.session.query(EvaluationResult)\ + .filter(EvaluationResult.model_id == model_id, + EvaluationResult.evaluation_id == evaluation_id).one_or_none() is not None + self.assertEqual(eobj_exists, True) + + @patch_stub + def test_post_without_param(self): + aobj = create_app_obj() + sobj = create_service_obj(aobj.application_id) + model_id = sobj.model_id + create_eval_obj(aobj.application_id, checksum='12345', save=True) + create_eval_obj(aobj.application_id, checksum='6789', save=True) + create_eval_obj(aobj.application_id, checksum='abc', save=True) + newest_eval_id = create_eval_obj(aobj.application_id, save=True).evaluation_id + + response = self.client.post(f'/api/applications/{aobj.application_id}/evaluate', + data={'service_id': sobj.service_id}) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, self.default_response) + eobj_exists = db.session.query(EvaluationResult)\ + .filter(EvaluationResult.model_id == model_id, + EvaluationResult.evaluation_id == newest_eval_id).one_or_none() is not None + self.assertEqual(eobj_exists, True) + + @patch_stub + def test_post_duplicated(self): + saved_response = deepcopy(self.default_response) + aobj = create_app_obj() + sobj = create_service_obj(aobj.application_id) + model_id = sobj.model_id + evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id + create_eval_result_obj(model_id=model_id, + evaluation_id=evaluation_id, + result=json.dumps(saved_response), + save=True) + + url = f'/api/applications/{aobj.application_id}/evaluate' + data = {'evaluation_id': evaluation_id, 'service_id': sobj.service_id} + response = self.client.post(url, data=data) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, saved_response) + + # overwrite + response = self.client.post(url, data=dict(data, overwrite=True)) + self.assertEqual(200, response.status_code) + self.assertEqual(response.json, self.default_response) + + eobj = db.session.query(EvaluationResult)\ + .filter(EvaluationResult.model_id == model_id, + EvaluationResult.evaluation_id == evaluation_id).one() + self.assertEqual(eobj.result, self.default_response) diff --git a/app/test/test_api_service.py b/app/test/test_api_service.py deleted file mode 100644 index 0b1a0d6..0000000 --- a/app/test/test_api_service.py +++ /dev/null @@ -1,88 +0,0 @@ -from unittest.mock import patch, Mock -from copy import deepcopy -import json - -import drucker_pb2 -from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj -from models import EvaluationResult, db - - -def patch_stub(func): - def inner_method(*args, **kwargs): - mock_stub_obj = Mock() - mock_stub_obj.EvaluateModel.return_value = drucker_pb2.EvaluateModelResponse() - with patch('core.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub', - new=Mock(return_value=mock_stub_obj)): - return 
func(*args, **kwargs) - return inner_method - - -class ApiEvaluateTest(BaseTestCase): - """Tests for ApiEvaluate. - """ - default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, 'result_id': 1, - 'option': {}, 'precision': 0.0, 'recall': 0.0, 'status': True} - - def setUp(self): - super().setUp() - - @patch_stub - def test_post(self): - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - - response = self.client.post(f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate', - data={'evaluation_id': evaluation_id}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - eobj_exists = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == evaluation_id).one_or_none() is not None - self.assertEqual(eobj_exists, True) - - @patch_stub - def test_post_without_param(self): - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - create_eval_obj(aobj.application_id, checksum='12345', save=True) - create_eval_obj(aobj.application_id, checksum='6789', save=True) - create_eval_obj(aobj.application_id, checksum='abc', save=True) - newest_eval_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - - response = self.client.post(f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate') - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - eobj_exists = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == newest_eval_id).one_or_none() is not None - self.assertEqual(eobj_exists, True) - - @patch_stub - def test_post_duplicated(self): - saved_response = deepcopy(self.default_response) - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - create_eval_result_obj(model_id=model_id, - evaluation_id=evaluation_id, - result=json.dumps(saved_response), - save=True) - - url = f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate' - response = self.client.post(url, data={'evaluation_id': evaluation_id}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, saved_response) - - # overwrite - response = self.client.post(url, data={'evaluation_id': evaluation_id, 'overwrite': True}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - - eobj = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == evaluation_id).one() - self.assertEqual(eobj.result, self.default_response) From 8656d7f3ce2e893fd235c868fdba8c6c1a7e5101 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 8 Jan 2019 19:23:00 +0900 Subject: [PATCH 05/11] add marshallaring to evaluation API --- app/apis/api_evaluation.py | 24 ++++++++++++++++++------ app/test/test_api_evaluation.py | 16 ++++++++-------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/app/apis/api_evaluation.py b/app/apis/api_evaluation.py index b3b8d0a..1055793 100644 --- a/app/apis/api_evaluation.py +++ b/app/apis/api_evaluation.py @@ -14,12 +14,22 @@ eval_info_namespace = Namespace('evaluation', description='Evaluation 
Endpoint.') success_or_not = eval_info_namespace.model('Success', { - 'status': fields.Boolean( - required=True - ), - 'message': fields.String( - required=True - ) + 'status': fields.Boolean(required=True), + 'message': fields.String(required=True) +}) +eval_metrics = eval_info_namespace.model('Evaluation result', { + 'num': fields.Integer(required=True, description='number of evaluated data'), + 'accuracy': fields.Float(required=True, description='accuracy of evaluation'), + 'fvalue': fields.List(fields.Float, required=True, description='F-value of evaluation'), + 'precision': fields.List(fields.Float, required=True, description='precision of evaluation'), + 'recall': fields.List(fields.Float, required=True, description='recall of evaluation'), + 'option': fields.Raw(), + 'status': fields.Boolean(required=True), + 'result_id': fields.Integer(required=True, description='ID of evaluation result') +}) +eval_data_upload = eval_info_namespace.model('Result of uploading evaluation data', { + 'status': fields.Boolean(required=True), + 'evaluation_id': fields.Integer(required=True, description='ID of uploaded data') }) @@ -29,6 +39,7 @@ class ApiEvaluation(Resource): upload_parser.add_argument('file', location='files', type=FileStorage, required=True) @eval_info_namespace.expect(upload_parser) + @eval_info_namespace.marshal_with(eval_data_upload) def post(self, application_id:int): """update data to be evaluated""" args = self.upload_parser.parse_args() @@ -89,6 +100,7 @@ class ApiEvaluate(Resource): eval_parser.add_argument('overwrite', location='form', type=bool, required=False) @eval_info_namespace.expect(eval_parser) + @eval_info_namespace.marshal_with(eval_metrics) def post(self, application_id:int): """evaluate""" args = self.eval_parser.parse_args() diff --git a/app/test/test_api_evaluation.py b/app/test/test_api_evaluation.py index 86322a4..f902052 100644 --- a/app/test/test_api_evaluation.py +++ b/app/test/test_api_evaluation.py @@ -7,14 +7,18 @@ from io import BytesIO from models import EvaluationResult, Evaluation, db +default_metrics = {'accuracy': 0.0, 'fvalue': [0.0], 'num': 0, + 'option': {}, 'precision': [0.0], 'recall': [0.0]} + def patch_stub(func): def inner_method(*args, **kwargs): mock_stub_obj = Mock() - mock_stub_obj.EvaluateModel.return_value = drucker_pb2.EvaluateModelResponse() + metrics = drucker_pb2.EvaluationMetrics(precision=[0.0], recall=[0.0], fvalue=[0.0]) + mock_stub_obj.EvaluateModel.return_value = drucker_pb2.EvaluateModelResponse(metrics=metrics) mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') res = drucker_pb2.EvaluationResultResponse( - metrics=drucker_pb2.EvaluationMetrics(), + metrics=metrics, detail=[ drucker_pb2.EvaluationResultResponse.Detail( input=drucker_pb2.IO(str=drucker_pb2.ArrString(val=['input'])), @@ -81,9 +85,6 @@ def test_delete(self): class ApiEvaluationResultTest(BaseTestCase): """Tests for ApiEvaluationResult. 
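Patch 05 attaches flask_restplus response models so `/evaluate` and the upload endpoint marshal their output into a fixed schema (per-class metrics as lists, an integer `result_id`, and so on). The same filtering can be seen with the library's standalone `marshal` helper; the field set below is a trimmed, hypothetical subset of the `eval_metrics` model above:

```
from flask_restplus import fields, marshal

# Trimmed, illustrative subset of the eval_metrics model.
eval_metrics_fields = {
    'num': fields.Integer(),
    'accuracy': fields.Float(),
    'fvalue': fields.List(fields.Float),
    'result_id': fields.Integer(),
}

raw = {'num': 3, 'accuracy': 0.9, 'fvalue': [0.8], 'result_id': 1, 'internal': 'dropped'}
marshalled = dict(marshal(raw, eval_metrics_fields))
# Keys outside the model are dropped; declared keys keep their field types.
assert marshalled == {'num': 3, 'accuracy': 0.9, 'fvalue': [0.8], 'result_id': 1}
```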
""" - default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, - 'option': {}, 'precision': 0.0, 'recall': 0.0} - @patch_stub def test_get(self): app_id = create_app_obj().application_id @@ -93,7 +94,7 @@ def test_get(self): response = self.client.get(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}') self.assertEqual(200, response.status_code) self.assertEqual(response.json['status'], True) - self.assertEqual(response.json['metrics'], self.default_response) + self.assertEqual(response.json['metrics'], default_metrics) details = response.json['details'] self.assertEqual(len(details), 4) self.assertEqual(details[0], {'input': 'input', 'label': 'test', 'output': 'test', 'score': 1.0, 'is_correct': True}) @@ -131,8 +132,7 @@ def test_delete(self): class ApiEvaluateTest(BaseTestCase): """Tests for ApiEvaluate. """ - default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, 'result_id': 1, - 'option': {}, 'precision': 0.0, 'recall': 0.0, 'status': True} + default_response = dict(default_metrics, result_id=1, status=True) @patch_stub def test_post(self): From f3982a4b116414a7d272d9f77ada0bff3987ed37 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 8 Jan 2019 20:52:14 +0900 Subject: [PATCH 06/11] fix --- app/apis/api_evaluation.py | 3 +-- app/core/drucker_dashboard_client.py | 3 ++- app/utils/hash_util.py | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/app/apis/api_evaluation.py b/app/apis/api_evaluation.py index 1055793..967e9bf 100644 --- a/app/apis/api_evaluation.py +++ b/app/apis/api_evaluation.py @@ -105,7 +105,7 @@ def post(self, application_id:int): """evaluate""" args = self.eval_parser.parse_args() eval_id = args.get('evaluation_id', None) - service_id = args['service_id'] + service_id = args.get('service_id') if eval_id: eobj = Evaluation.query.filter_by( application_id=application_id, @@ -115,7 +115,6 @@ def post(self, application_id:int): eobj = Evaluation.query\ .filter_by(application_id=application_id)\ .order_by(Evaluation.register_date.desc()).first_or_404() - sobj = Service.query.filter_by( application_id=application_id, service_id=service_id).first_or_404() diff --git a/app/core/drucker_dashboard_client.py b/app/core/drucker_dashboard_client.py index 283845c..b024a65 100644 --- a/app/core/drucker_dashboard_client.py +++ b/app/core/drucker_dashboard_client.py @@ -152,7 +152,8 @@ def __get_value_from_io(self, io:drucker_pb2.IO): @error_handling({"status": False}) def run_evaluation_data(self, data_path:str, result_path:str): - for raw_response in self.stub.EvaluationResult(data_path, result_path): + request = drucker_pb2.EvaluationResultRequest(data_path=data_path, result_path=result_path) + for raw_response in self.stub.EvaluationResult(request): details = [] for detail in raw_response.detail: details.append(dict( diff --git a/app/utils/hash_util.py b/app/utils/hash_util.py index 9c14a17..f55fa23 100644 --- a/app/utils/hash_util.py +++ b/app/utils/hash_util.py @@ -19,5 +19,6 @@ def checksum(f: Union[str, bytes, FileStorage]) -> str: hash_md5 = hashlib.md5() for chunk in iter(lambda: f.read(chunk_size), b''): hash_md5.update(chunk) + f.seek(0) return hash_md5.hexdigest() From b4e447a468b737446de9a7e9ede58ff90afa46aa Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 8 Jan 2019 21:17:23 +0900 Subject: [PATCH 07/11] fix --- app/apis/api_application.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/app/apis/api_application.py b/app/apis/api_application.py index 87b15a0..a457a09 100644 --- 
a/app/apis/api_application.py +++ b/app/apis/api_application.py @@ -6,11 +6,7 @@ from app import logger from auth import auth from models import db -<<<<<<< HEAD -from models import Application, Service -======= -from models import Application, Service, Evaluation, EvaluationResult, ApplicationUserRole, Role, User ->>>>>>> master +from models import Application, Service, ApplicationUserRole, Role, User from core.drucker_dashboard_client import DruckerDashboardClient from apis.common import DatetimeToTimestamp From 4a26da473206abcfaaef67b7f60d83197c48d7bb Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Fri, 11 Jan 2019 18:41:26 +0900 Subject: [PATCH 08/11] remove old test files --- .../test/test_api_application.py | 50 ----------- drucker_dashboard/test/test_api_service.py | 86 ------------------- 2 files changed, 136 deletions(-) delete mode 100644 drucker_dashboard/test/test_api_application.py delete mode 100644 drucker_dashboard/test/test_api_service.py diff --git a/drucker_dashboard/test/test_api_application.py b/drucker_dashboard/test/test_api_application.py deleted file mode 100644 index c8047f8..0000000 --- a/drucker_dashboard/test/test_api_application.py +++ /dev/null @@ -1,50 +0,0 @@ -from unittest.mock import patch, Mock - -from drucker_dashboard.protobuf import drucker_pb2 -from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj -from io import BytesIO -from drucker_dashboard.models import EvaluationResult, Evaluation - - -class ApiEvaluationTest(BaseTestCase): - """Tests for ApiEvaluation. - """ - - @patch('drucker_dashboard.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub') - def test_post(self, mock_stub_class): - mock_stub_obj = Mock() - mock_stub_obj.UploadEvaluationData.return_value = drucker_pb2.UploadEvaluationDataResponse(status=1, message='success') - mock_stub_class.return_value = mock_stub_obj - aobj = create_app_obj() - create_service_obj(aobj.application_id) - - url = f'/api/applications/{aobj.application_id}/evaluation' - content_type = 'multipart/form-data' - file_content = b'my file contents' - response = self.client.post(url, - content_type=content_type, - data={'file': (BytesIO(file_content), "file.txt")}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) - - # duplication check - response = self.client.post(url, - content_type=content_type, - data={'file': (BytesIO(file_content), "file.txt")}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'evaluation_id': 1}) - - def test_delete(self): - app_id = create_app_obj().application_id - eobj = create_eval_obj(app_id, save=True) - sobj = create_service_obj(app_id) - create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) - response = self.client.delete(f'/api/applications/{app_id}/evaluation/{eobj.evaluation_id}') - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, {'status': True, 'message': 'Success.'}) - self.assertEqual(Evaluation.query.all(), []) - self.assertEqual(EvaluationResult.query.all(), []) - - response = self.client.delete(f'/api/applications/{app_id}/evaluation/101') - self.assertEqual(404, response.status_code) - self.assertEqual(response.json, {'status': False}) diff --git a/drucker_dashboard/test/test_api_service.py b/drucker_dashboard/test/test_api_service.py deleted file mode 100644 index 117d04b..0000000 --- a/drucker_dashboard/test/test_api_service.py +++ 
/dev/null @@ -1,86 +0,0 @@ -from unittest.mock import patch, Mock -from copy import deepcopy -import json - -from drucker_dashboard.protobuf import drucker_pb2 -from .base import BaseTestCase, create_app_obj, create_service_obj, create_eval_obj, create_eval_result_obj -from drucker_dashboard.models import EvaluationResult, db - - -def patch_stub(func): - def inner_method(*args, **kwargs): - mock_stub_obj = Mock() - mock_stub_obj.EvaluateModel.return_value = drucker_pb2.EvaluateModelResponse() - with patch('drucker_dashboard.drucker_dashboard_client.drucker_pb2_grpc.DruckerDashboardStub', - new=Mock(return_value=mock_stub_obj)): - return func(*args, **kwargs) - return inner_method - - -class ApiEvaluateTest(BaseTestCase): - """Tests for ApiEvaluate. - """ - - default_response = {'accuracy': 0.0, 'fvalue': 0.0, 'num': 0, - 'option': {}, 'precision': 0.0, 'recall': 0.0, 'status': True} - - @patch_stub - def test_post(self): - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - - response = self.client.post(f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate', - data={'evaluation_id': evaluation_id}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - eobj_exists = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == evaluation_id).one_or_none() is not None - self.assertEqual(eobj_exists, True) - - @patch_stub - def test_post_without_param(self): - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - create_eval_obj(aobj.application_id, checksum='12345', save=True) - create_eval_obj(aobj.application_id, checksum='6789', save=True) - create_eval_obj(aobj.application_id, checksum='abc', save=True) - newest_eval_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - - response = self.client.post(f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate') - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - eobj_exists = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == newest_eval_id).one_or_none() is not None - self.assertEqual(eobj_exists, True) - - @patch_stub - def test_post_duplicated(self): - saved_response = deepcopy(self.default_response) - aobj = create_app_obj() - sobj = create_service_obj(aobj.application_id) - model_id = sobj.model_id - evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id - create_eval_result_obj(model_id=model_id, - evaluation_id=evaluation_id, - result=json.dumps(saved_response), - save=True) - - url = f'/api/applications/{aobj.application_id}/services/{sobj.service_id}/evaluate' - response = self.client.post(url, data={'evaluation_id': evaluation_id}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, saved_response) - - # overwrite - response = self.client.post(url, data={'evaluation_id': evaluation_id, 'overwrite': True}) - self.assertEqual(200, response.status_code) - self.assertEqual(response.json, self.default_response) - - eobj = db.session.query(EvaluationResult)\ - .filter(EvaluationResult.model_id == model_id, - EvaluationResult.evaluation_id == evaluation_id).one() - self.assertEqual(json.loads(eobj.result), 
self.default_response) From 412956ab540af2a193bea1f4dfd397c6fcfdc9ed Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Wed, 16 Jan 2019 11:47:44 +0900 Subject: [PATCH 09/11] update drucker-client version --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 38994a8..d17385a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,7 @@ pytest py==1.5.4 codecov==2.0.15 grpcio-testing==1.13.0 -drucker-client==0.4.3 +drucker-client==0.4.4 numpy==1.14.3 scikit-learn==0.19.1 scipy==1.1.0 From 0a109a5cf8bd36519963734b7ef836295398ed09 Mon Sep 17 00:00:00 2001 From: yuki-mt Date: Tue, 22 Jan 2019 12:34:35 +0900 Subject: [PATCH 10/11] change API endpoint to pural form --- drucker_dashboard/apis/api_evaluation.py | 6 +++--- drucker_dashboard/test/test_api_evaluation.py | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drucker_dashboard/apis/api_evaluation.py b/drucker_dashboard/apis/api_evaluation.py index fb90025..e813ed4 100644 --- a/drucker_dashboard/apis/api_evaluation.py +++ b/drucker_dashboard/apis/api_evaluation.py @@ -32,7 +32,7 @@ }) -@eval_info_namespace.route('//evaluation') +@eval_info_namespace.route('//evaluations') class ApiEvaluation(Resource): upload_parser = reqparse.RequestParser() upload_parser.add_argument('file', location='files', type=FileStorage, required=True) @@ -70,7 +70,7 @@ def post(self, application_id:int): return {"status": True, "evaluation_id": evaluation_id} -@eval_info_namespace.route('//evaluation/') +@eval_info_namespace.route('//evaluations/') class ApiEvaluation(Resource): @eval_info_namespace.marshal_with(success_or_not) @@ -147,7 +147,7 @@ def post(self, application_id:int): return response_body -@eval_info_namespace.route('//evaluation_result/') +@eval_info_namespace.route('//evaluation_results/') class ApiEvaluationResult(Resource): def get(self, application_id:int, eval_result_id:int): diff --git a/drucker_dashboard/test/test_api_evaluation.py b/drucker_dashboard/test/test_api_evaluation.py index 3a4340f..fa392d3 100644 --- a/drucker_dashboard/test/test_api_evaluation.py +++ b/drucker_dashboard/test/test_api_evaluation.py @@ -50,7 +50,7 @@ class ApiEvaluationTest(BaseTestCase): def test_post(self): aobj = create_app_obj() - url = f'/api/applications/{aobj.application_id}/evaluation' + url = f'/api/applications/{aobj.application_id}/evaluations' content_type = 'multipart/form-data' file_content = b'my file contents' response = self.client.post(url, @@ -71,13 +71,13 @@ def test_delete(self): eobj = create_eval_obj(app_id, save=True) sobj = create_service_obj(app_id) create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True) - response = self.client.delete(f'/api/applications/{app_id}/evaluation/{eobj.evaluation_id}') + response = self.client.delete(f'/api/applications/{app_id}/evaluations/{eobj.evaluation_id}') self.assertEqual(200, response.status_code) self.assertEqual(response.json, {'status': True, 'message': 'Success.'}) self.assertEqual(Evaluation.query.all(), []) self.assertEqual(EvaluationResult.query.all(), []) - response = self.client.delete(f'/api/applications/{app_id}/evaluation/101') + response = self.client.delete(f'/api/applications/{app_id}/evaluations/101') self.assertEqual(404, response.status_code) self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'}) @@ -91,7 +91,7 @@ def test_get(self): eobj = create_eval_obj(app_id, save=True) sobj = 
         robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True)
-        response = self.client.get(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}')
+        response = self.client.get(f'/api/applications/{app_id}/evaluation_results/{robj.evaluation_result_id}')
         self.assertEqual(200, response.status_code)
         self.assertEqual(response.json['status'], True)
         self.assertEqual(response.json['metrics'], default_metrics)
@@ -106,11 +106,11 @@ def test_get_not_found(self, mock_stub_class):
         eobj = create_eval_obj(app_id, save=True)
         sobj = create_service_obj(app_id)
         robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True)
-        response = self.client.get(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}')
+        response = self.client.get(f'/api/applications/{app_id}/evaluation_results/{robj.evaluation_result_id}')
         self.assertEqual(404, response.status_code)
         self.assertEqual(response.json, {'status': False, 'message': 'Result Not Found.'})
 
-        response = self.client.get(f'/api/applications/{app_id}/evaluation_result/101')
+        response = self.client.get(f'/api/applications/{app_id}/evaluation_results/101')
         self.assertEqual(404, response.status_code)
         self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'})
@@ -119,12 +119,12 @@ def test_delete(self):
         eobj = create_eval_obj(app_id, save=True)
         sobj = create_service_obj(app_id)
         robj = create_eval_result_obj(model_id=sobj.model_id, evaluation_id=eobj.evaluation_id, save=True)
-        response = self.client.delete(f'/api/applications/{app_id}/evaluation_result/{robj.evaluation_result_id}')
+        response = self.client.delete(f'/api/applications/{app_id}/evaluation_results/{robj.evaluation_result_id}')
         self.assertEqual(200, response.status_code)
         self.assertEqual(response.json, {'status': True, 'message': 'Success.'})
         self.assertEqual(EvaluationResult.query.all(), [])
 
-        response = self.client.delete(f'/api/applications/{app_id}/evaluation_result/101')
+        response = self.client.delete(f'/api/applications/{app_id}/evaluation_results/101')
         self.assertEqual(404, response.status_code)
         self.assertEqual(response.json, {'status': False, 'message': 'Not Found.'})

From be1742f745ac56995f1af99a4e8c1fc6b725d822 Mon Sep 17 00:00:00 2001
From: yuki-mt
Date: Tue, 22 Jan 2019 13:21:32 +0900
Subject: [PATCH 11/11] evaluate based on model_id, not service_id

---
 drucker_dashboard/apis/api_evaluation.py      | 16 ++++++---
 drucker_dashboard/test/test_api_evaluation.py | 35 ++++++++++++++-----
 2 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/drucker_dashboard/apis/api_evaluation.py b/drucker_dashboard/apis/api_evaluation.py
index e813ed4..6bbdd05 100644
--- a/drucker_dashboard/apis/api_evaluation.py
+++ b/drucker_dashboard/apis/api_evaluation.py
@@ -2,6 +2,7 @@
 import json
 from itertools import chain
 
+from flask import abort
 from flask_restplus import Namespace, fields, Resource, reqparse
 from werkzeug.datastructures import FileStorage
@@ -94,7 +95,7 @@ def delete(self, application_id:int, evaluation_id:int):
 @eval_info_namespace.route('/<int:application_id>/evaluate')
 class ApiEvaluate(Resource):
     eval_parser = reqparse.RequestParser()
-    eval_parser.add_argument('service_id', location='form', type=int, required=True)
+    eval_parser.add_argument('model_id', location='form', type=int, required=True)
     eval_parser.add_argument('evaluation_id', location='form', type=int, required=False)
     eval_parser.add_argument('overwrite', location='form', type=bool, required=False)
@@ -104,7 +105,7 @@ def post(self, application_id:int):
         """evaluate"""
         args = self.eval_parser.parse_args()
         eval_id = args.get('evaluation_id', None)
-        service_id = args.get('service_id')
+        model_id = args.get('model_id')
         if eval_id:
             eobj = Evaluation.query.filter_by(
                 application_id=application_id,
@@ -114,9 +115,14 @@
             eobj = Evaluation.query\
                 .filter_by(application_id=application_id)\
                 .order_by(Evaluation.register_date.desc()).first_or_404()
-        sobj = Service.query.filter_by(
-            application_id=application_id,
-            service_id=service_id).first_or_404()
+
+        # TODO: deploy a temporary service to evaluate, not use an existing service.
+        sobj = Service.query.filter(
+            Service.application_id == application_id,
+            Service.model_id == model_id,
+            Service.service_level != 'production').one_or_none()
+        if sobj is None:
+            raise abort(404, 'The model is not used in any services or used only in production.')
 
         robj = db.session.query(EvaluationResult)\
             .filter(EvaluationResult.model_id == sobj.model_id,
diff --git a/drucker_dashboard/test/test_api_evaluation.py b/drucker_dashboard/test/test_api_evaluation.py
index fa392d3..c98e182 100644
--- a/drucker_dashboard/test/test_api_evaluation.py
+++ b/drucker_dashboard/test/test_api_evaluation.py
@@ -137,12 +137,11 @@ class ApiEvaluateTest(BaseTestCase):
     @patch_stub
     def test_post(self):
         aobj = create_app_obj()
-        sobj = create_service_obj(aobj.application_id)
-        model_id = sobj.model_id
+        model_id = create_service_obj(aobj.application_id).model_id
         evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id
 
         response = self.client.post(f'/api/applications/{aobj.application_id}/evaluate',
-                                    data={'evaluation_id': evaluation_id, 'service_id': sobj.service_id})
+                                    data={'evaluation_id': evaluation_id, 'model_id': model_id})
         self.assertEqual(200, response.status_code)
         self.assertEqual(response.json, self.default_response)
         eobj_exists = db.session.query(EvaluationResult)\
@@ -153,15 +152,14 @@ def test_post_without_param(self):
         aobj = create_app_obj()
-        sobj = create_service_obj(aobj.application_id)
-        model_id = sobj.model_id
+        model_id = create_service_obj(aobj.application_id).model_id
         create_eval_obj(aobj.application_id, checksum='12345', save=True)
         create_eval_obj(aobj.application_id, checksum='6789', save=True)
         create_eval_obj(aobj.application_id, checksum='abc', save=True)
         newest_eval_id = create_eval_obj(aobj.application_id, save=True).evaluation_id
 
         response = self.client.post(f'/api/applications/{aobj.application_id}/evaluate',
-                                    data={'service_id': sobj.service_id})
+                                    data={'model_id': model_id})
         self.assertEqual(200, response.status_code)
         self.assertEqual(response.json, self.default_response)
         eobj_exists = db.session.query(EvaluationResult)\
@@ -173,8 +171,7 @@ def test_post_duplicated(self):
         saved_response = deepcopy(self.default_response)
         aobj = create_app_obj()
-        sobj = create_service_obj(aobj.application_id)
-        model_id = sobj.model_id
+        model_id = create_service_obj(aobj.application_id).model_id
         evaluation_id = create_eval_obj(aobj.application_id, save=True).evaluation_id
         create_eval_result_obj(model_id=model_id,
                                evaluation_id=evaluation_id,
@@ -182,7 +179,7 @@ def test_post_duplicated(self):
                                save=True)
 
         url = f'/api/applications/{aobj.application_id}/evaluate'
-        data = {'evaluation_id': evaluation_id, 'service_id': sobj.service_id}
+        data = {'evaluation_id': evaluation_id, 'model_id': model_id}
         response = self.client.post(url, data=data)
         self.assertEqual(200, response.status_code)
         self.assertEqual(response.json, saved_response)
@@ -196,3 +193,23 @@ def test_post_duplicated(self):
             .filter(EvaluationResult.model_id == model_id,
                     EvaluationResult.evaluation_id == evaluation_id).one()
         self.assertEqual(eobj.result, self.default_response)
+
+    @patch_stub
+    def test_post_not_found(self):
+        application_id = create_app_obj().application_id
+        model_id = create_service_obj(application_id).model_id
+        new_sobj = create_service_obj(application_id,
+                                      service_name='drucker-test-app-production-20180628151929',
+                                      service_level='production',
+                                      model_id=model_id + 1,
+                                      save=True)
+        evaluation_id = create_eval_obj(application_id, save=True).evaluation_id
+
+        response = self.client.post(f'/api/applications/{application_id}/evaluate',
+                                    data={'evaluation_id': evaluation_id, 'model_id': new_sobj.model_id})
+        self.assertEqual(404, response.status_code)
+
+        non_exist_model_id = 100
+        response = self.client.post(f'/api/applications/{application_id}/evaluate',
+                                    data={'evaluation_id': evaluation_id, 'model_id': non_exist_model_id})
+        self.assertEqual(404, response.status_code)