Commit 34d27a3

server
1 parent 0492112 commit 34d27a3

File tree

1 file changed: 55 additions, 0 deletions

python_autocomplete/serve.py

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
import threading

from flask import Flask, request, jsonify

from labml import experiment
from labml.utils.cache import cache
from labml.utils.pytorch import get_modules
from python_autocomplete.evaluate import Predictor
from python_autocomplete.train import Configs


def get_predictor():
    # Load the trained model from a saved labml experiment run and wrap it in a Predictor
    conf = Configs()
    experiment.evaluate()

    # Replace this with your training experiment UUID
    run_uuid = '39b03a1e454011ebbaff2b26e3148b3d'

    # Restore the saved configurations and model weights from that run
    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid)

    experiment.start()
    conf.model.eval()
    return Predictor(conf.model, cache('stoi', lambda: conf.text.stoi), cache('itos', lambda: conf.text.itos))


app = Flask('python_autocomplete')
predictor = get_predictor()
# A single model instance serves all requests; the lock keeps predictions from overlapping
lock = threading.Lock()


@app.route('/')
def home():
    return 'Home'


@app.route('/autocomplete', methods=['POST'])
def autocomplete():
    prompt = request.json['prompt']
    if not prompt:
        return jsonify({'success': False})

    # Non-blocking acquire: if another prediction is already running, fail fast instead of queuing
    acquired = lock.acquire(blocking=False)
    if acquired:
        res = predictor.get_token(prompt)
        lock.release()
        return jsonify({'success': True, 'prediction': res})
    else:
        return jsonify({'success': False})


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
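
For reference, a minimal client sketch for exercising the /autocomplete endpoint once the server is running. The local URL and prompt text below are illustrative; the endpoint path and the 'prompt', 'success', and 'prediction' JSON fields come from the handler above.

import requests

# Ask the running server for the next predicted token after a code prefix.
# Assumes the server above is running locally on its default port 5000.
r = requests.post('http://localhost:5000/autocomplete',
                  json={'prompt': 'import numpy as np\narr = np.'})
data = r.json()
if data['success']:
    print('prediction:', data['prediction'])
else:
    print('no prediction (empty prompt or another request is in progress)')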
