Dev basis sentiment infer 20190122 #4

Open
wants to merge 3 commits into master

27 changes: 23 additions & 4 deletions docker-compose.yml
@@ -15,6 +15,7 @@ services:
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
volumes:
- ./src:/opt/src

command: ./entryPoint.sh
depends_on:
- redis
@@ -29,14 +30,33 @@ services:
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
command: ['python', 'app/models/InceptionV3/inception_inference_server.py']
volumes:
- ./src:/opt/src

networks:
- michaniki
depends_on:
- michaniki_client
- redis

sentiment_inference_server:
build:
context: ./src
dockerfile: Dockerfile-sentiment
environment:
- REDIS_URL=redis://redis
- REDIS_PORT=6379
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
command: ['python', 'app/models/SentimentV1/sentiment_infer_server.py']
volumes:
- ./src:/opt/src
networks:
- michaniki
depends_on:
- michaniki_client
- redis

celery_worker:
build: ./src
command: ['celery', '-A', 'app.celeryapp:michaniki_celery_app', 'worker', '-l', 'info']
@@ -54,14 +74,13 @@ services:
- DB_PASSWORD=michaniki
- DB_NAME=michanikidb
- BROKER_URL=redis://redis:6379/0

redis:
image: redis:4.0.5-alpine
command: ["redis-server", "--appendonly", "yes"]
hostname: redis
networks:
- michaniki

networks:
michaniki:

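Note for reviewers: the new sentiment_inference_server service mirrors the existing InceptionV3 inference service (same source volume, Redis wiring, and michaniki network). The script it runs, app/models/SentimentV1/sentiment_infer_server.py, is not included in this diff, so the following is only a minimal sketch of the consumer loop it presumably implements, based on the client code in sentimentV1.py; the queue name, sleep interval, and model call are assumptions.

import json
import os
import time

import redis

SENTIMENT_TEXT_QUEUE = "sentiment_text_queue"   # assumption: must match the client's queue name
SERVER_SLEEP = 0.25                              # assumption: polling interval

# the compose file passes REDIS_URL and REDIS_PORT; from_url accepts the URL form
db = redis.from_url(os.environ.get("REDIS_URL", "redis://redis"))

def predict_sentiment(text):
    # placeholder for the real SentimentV1 model call
    return {"label": "positive", "score": 1.0}

while True:
    item = db.lpop(SENTIMENT_TEXT_QUEUE)
    if item is None:
        time.sleep(SERVER_SLEEP)
        continue
    payload = json.loads(item)
    result = predict_sentiment(payload["text"])
    # the API side polls db.get(id), so store the prediction under the request id
    db.set(payload["id"], json.dumps(result))
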
23 changes: 23 additions & 0 deletions src/Dockerfile-sentiment
@@ -0,0 +1,23 @@
FROM continuumio/miniconda3:4.5.12

# utils
RUN apt-get update && apt-get install -y --no-install-recommends apt-utils

RUN conda install gxx_linux-64

RUN conda install python=3.6

RUN apt-get install -y --force-yes default-libmysqlclient-dev mysql-client build-essential

# Grab requirements.txt.
COPY requirementssenti.txt /tmp/requirementssenti.txt

# Install dependencies
RUN pip install -qr /tmp/requirementssenti.txt

# create a user for web server
RUN adduser --disabled-password --gecos "" foo

COPY ./ /opt/src

WORKDIR /opt/src
78 changes: 35 additions & 43 deletions src/app/apis/InceptionV3/inceptionV3.py
@@ -48,12 +48,12 @@ def label():
s3_bucket_name = request.form.get('s3_bucket_name')
s3_bucket_prefix = request.form.get('s3_bucket_prefix')
model_name = request.form.get('model_name')

# load image from s3
image_data_path = API_helpers.download_a_dir_from_s3(s3_bucket_name,
s3_bucket_prefix,
s3_bucket_prefix,
local_path = TEMP_FOLDER)

# for each images in the folder
# supports .png and .jpg
all_image_ids = []
@@ -62,28 +62,28 @@ def label():
for each_image in glob.glob(image_data_path + "/*.*"):
image_name = each_image.split('/')[-1]
this_img = image.load_img(each_image, target_size = (299, 299))

# image pre-processing
x = np.expand_dims(image.img_to_array(this_img), axis=0)
x = preprocess_input(x)
x = x.copy(order="C")

# encode
x = API_helpers.base64_encode_image(x)
# create an image id
this_id = str(uuid.uuid4())
all_image_ids.append((this_id, image_name))
d = {"id": this_id, "image": x, "model_name": model_name}

# push to the redis queue
db.rpush(INCEPTIONV3_IMAGE_QUEUE, json.dumps(d))

all_pred = []
while all_image_ids:
# pop the first one from the queue
this_id, this_image_name = all_image_ids.pop(0)
this_pred = {}

while True:
# check if the response has been returned
output = db.get(this_id)
@@ -92,43 +92,43 @@ def label():
this_pred["image name"] = this_image_name
output = output.decode('utf-8')
this_pred["prediction"] = json.loads(output)

db.delete(this_id)
break
else:
time.sleep(CLIENT_SLEEP)

all_pred.append(this_pred)

# remove the temp folder
shutil.rmtree(image_data_path, ignore_errors=True)

return jsonify({
"data": all_pred
})
})

@blueprint.route('/retrain', methods=['POST'])
def retrain():
"""
pick up a pre-trained model
resume training using more data

@args: train_bucket_url: URL pointing to the folder for training data on S3
@args: model_name: the name of the model to be retrained; the corresponding folder must already exist
"""
s3_bucket_name = request.form.get('train_bucket_name')
s3_bucket_prefix = request.form.get('train_bucket_prefix')
nb_epoch = request.form.get('nb_epoch')
batch_size = request.form.get('batch_size')

model_name = s3_bucket_prefix.split('/')[-1]
local_data_path = os.path.join('./tmp')

# create a celery task id
this_id = celery.uuid()
# download the folder in the url
# return the path of the image files
async_retrain.apply_async((model_name,
async_retrain.apply_async((model_name,
local_data_path,
s3_bucket_name,
s3_bucket_prefix,
@@ -140,46 +140,46 @@ def retrain():
"task_id": this_id,
"status": "Retraining and Fine-Tuning are Initiated"
}), 200

@blueprint.route('/transfer', methods=['POST'])
def init_new_model():
"""
init a new model based on InceptionV3
that can classify pictures into new classes.

@args: train_bucket_url: URL pointing to the folder for training data on S3
"""
# need to load the base model here
s3_bucket_name = request.form.get('train_bucket_name')
s3_bucket_prefix = request.form.get('train_bucket_prefix')
model_name = s3_bucket_prefix.split('/')[-1]

# generate a celery task id
this_id = celery.uuid()

# download the folder in the url
# kick off the transfer learning thing here
async_transfer.apply_async((model_name,
async_transfer.apply_async((model_name,
s3_bucket_name,
s3_bucket_prefix,
this_id), task_id=this_id)

return jsonify({
"task_id": this_id,
"status": "Transfer Learning and Fine-Tuning are Initiated"
}), 200

@blueprint.route('/predict', methods=['POST'])
def run_inceptionV3():
"""
Run the pre-trained base Inception V3 model
Run the pre-trained base Inception V3 model
and send image to queue
Listening user submitted images and

Listening user submitted images and
stack them in a Redis queue
"""
data = {"success": False}

# load model name
model_name = request.form.get('model_name')

@@ -189,41 +189,33 @@ def run_inceptionV3():
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)
x = x.copy(order="C")

# encode
x = API_helpers.base64_encode_image(x)
# create an image id
this_id = str(uuid.uuid4())

d = {"id": this_id, "image": x, "model_name": model_name}

# push to the redis queue
db.rpush(INCEPTIONV3_IMAGE_QUEUE, json.dumps(d))

while True:
# check if the response has been returned
output = db.get(this_id)

if output is not None:
output = output.decode('utf-8')
data["prediction"] = json.loads(output)

db.delete(this_id)
break
else:
# print "* Waiting for the Inference Server..."
time.sleep(CLIENT_SLEEP)

data['success'] = True

return jsonify({
"data": data
}), 200








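Since most of the changes in this file are whitespace cleanup, here is a short smoke-test sketch for the retrain endpoint touched above, using the form fields it reads (train_bucket_name, train_bucket_prefix, nb_epoch, batch_size). The host, port, bucket, and prefix are placeholder assumptions; the '/inceptionV3' prefix comes from the blueprint registration in apis.py below.

import requests

BASE_URL = "http://localhost:5000"   # assumption: adjust to the michaniki_client port mapping

resp = requests.post(BASE_URL + "/inceptionV3/retrain", data={
    "train_bucket_name": "my-training-bucket",   # assumption: example bucket
    "train_bucket_prefix": "datasets/my_model",  # assumption: example prefix
    "nb_epoch": 3,
    "batch_size": 32,
})
print(resp.json())   # expected: {"task_id": "...", "status": "Retraining and Fine-Tuning are Initiated"}
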
6 changes: 6 additions & 0 deletions src/app/apis/SentimentV1/__init__.py
@@ -0,0 +1,6 @@
'''
Created on Jan 23, 2019

@author: manu
'''
from sentimentV1 import *
70 changes: 70 additions & 0 deletions src/app/apis/SentimentV1/sentimentV1.py
@@ -0,0 +1,70 @@
'''
Created on Jan 22, 2019

Web service for Sentiment Analysis

@author: manu
'''
import os
import uuid
import json
import time

# flask
from flask import jsonify
from flask import Blueprint, request

from app import app
from app import db
import logging
# michaniki app
from ...tasks import *

# temp folder to save files downloaded from S3
TEMP_FOLDER = os.path.join('./tmp')

blueprint = Blueprint('sentimentV1', __name__)

@blueprint.route('/predict', methods=['POST'])
def pred_sentiment():
"""
Run the pre-trained base sentiment analysis model
and send the sentence to the queue.

Listen for user-submitted sentences and
stack them in a Redis queue.
"""

logging.info("Inside pred_Sentence")
data = {"success": False}

model_name = 'base'

message = request.form.get('textv')
print "Received message:{}".format(message)
#sentence = Sentence(message)

# create an id for this request
this_id = str(uuid.uuid4())

d = {"id": this_id, "text": message, "model_name": model_name}

# push to the redis queue
db.rpush(SENTIMENT_TEXT_QUEUE, json.dumps(d))

while True:
# check if the response has been returned
output = db.get(this_id)
if output is not None:
output = output.decode('utf-8')
data["prediction"] = json.loads(output)

db.delete(this_id)
break
else:
#print "* Waiting for the Sentiment Inference Server..."
time.sleep(CLIENT_SLEEP)

data['success'] = True
return jsonify({
"data": data
}), 200
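
A quick way to exercise the new endpoint once the stack is up. The path combines the '/sentimentV1' prefix registered in apis.py below with the '/predict' route above, and 'textv' matches the form field read by request.form.get. Host and port are assumptions, since the client container's port mapping is not part of this diff.

import requests

resp = requests.post(
    "http://localhost:5000/sentimentV1/predict",   # assumption: michaniki_client reachable on localhost:5000
    data={"textv": "the soup was cold and the service was slow"},
)
print(resp.json())   # {"data": {"prediction": ..., "success": True}}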
4 changes: 2 additions & 2 deletions src/app/apis/apis.py
@@ -9,13 +9,15 @@

from .mnist import blueprint as mnist_blueprint
from .InceptionV3 import blueprint as incept_blueprint
from .SentimentV1 import blueprint as sentiment_blueprint
from .tasks import blueprint as tasks_blueprint

from app import app

app.register_blueprint(mnist_blueprint, url_prefix = '/mnist')
app.register_blueprint(incept_blueprint, url_prefix = '/inceptionV3')
app.register_blueprint(tasks_blueprint, url_prefix='/tasks')
app.register_blueprint(sentiment_blueprint, url_prefix = '/sentimentV1')

@app.route('/')
def index():
@@ -24,5 +26,3 @@ def index():
@app.route('/add')
def add_a():
res = add.delay(3, 4)

