Skip to content

Commit 5318851

Browse files
authored
Merge branch 'Samagra-Development:restructure' into restructure
2 parents c444a6b + b31f28f commit 5318851

File tree

3 files changed

+39
-40
lines changed

3 files changed

+39
-40
lines changed

.github/workflows/gh-packages-parallel.yml

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -60,14 +60,21 @@ jobs:
6060
run: |
6161
image_names=$(jq -r '.models[].serviceName' ./config.json)
6262
paths=$(jq -r '.models[].modelBasePath' ./config.json)
63+
builds=$(jq -r '.models[].build' ./config.json)
6364
readarray -t image_array <<< "$image_names"
6465
readarray -t paths_array <<< "$paths"
66+
readarray -t build_array <<< "$builds"
6567
lowercase_actor=$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')
6668
job=${{ matrix.job }}
6769
image="${image_array[job-1]}"
6870
path="${paths_array[job-1]}"
69-
docker build "./$path" -t "${{ env.REGISTRY }}/$lowercase_actor/$image:latest"
70-
docker push "${{ env.REGISTRY }}/$lowercase_actor/$image:latest"
71+
build="${build_array[job-1]}"
72+
if [ "$build" = true ]; then
73+
docker build "./$path" -t "${{ env.REGISTRY }}/$lowercase_actor/$image:latest"
74+
docker push "${{ env.REGISTRY }}/$lowercase_actor/$image:latest"
75+
fi
76+
77+
7178
7279
7380

config.json

Lines changed: 29 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,17 @@
66
"apiBasePath": "/search/word_score/local",
77
"containerPort": 8000,
88
"environment": {},
9-
"nginx": []
9+
"nginx": [],
10+
"build": false
1011
},
1112
{
1213
"serviceName": "spell_check",
1314
"modelBasePath": "src/spell_check/kenlm/local/.",
1415
"apiBasePath": "spell_check/kenlm/local/",
1516
"containerPort": 8000,
1617
"environment": {},
17-
"nginx": []
18+
"nginx": [],
19+
"build": false
1820
},
1921
{
2022
"serviceName": "flow_classification",
@@ -23,7 +25,8 @@
2325
"containerPort": 8000,
2426
"environment": {},
2527
"nginx": [],
26-
"constraints": ["node.role==worker"]
28+
"constraints": ["node.role==worker"],
29+
"build": false
2730
},
2831
{
2932
"serviceName": "text_translation_azure_dict",
@@ -33,7 +36,8 @@
3336
"environment": {
3437
"AZURE_TRANSLATE_KEY": "${AZURE_TRANSLATE_KEY}"
3538
},
36-
"nginx": []
39+
"nginx": [],
40+
"build": false
3741
},
3842
{
3943
"serviceName": "dict_aug_generate_sent",
@@ -44,27 +48,10 @@
4448
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
4549
},
4650
"nginx": [],
47-
"constraints": ["node.labels.node_vm_type==gpu"]
48-
},
51+
"constraints": ["node.labels.node_vm_type==gpu"],
52+
"build": false
53+
},
4954
{
50-
"serviceName": "coref_spacy",
51-
"modelBasePath": "src/coref/spacy/local/.",
52-
"apiBasePath": "/coref/spacy/local",
53-
"containerPort": 8000,
54-
"environment": {},
55-
"nginx": [],
56-
"constraints": ["node.labels.node_vm_type==gpu"]
57-
},
58-
{
59-
"serviceName": "coref_bart",
60-
"modelBasePath": "src/coref/bart/local/.",
61-
"apiBasePath": "/coref/bart/local",
62-
"containerPort": 8000,
63-
"environment": {},
64-
"nginx": [],
65-
"constraints": ["node.labels.node_vm_type==gpu"]
66-
},
67-
{
6855
"serviceName": "text_translation_azure",
6956
"modelBasePath": "src/text_translation/azure/remote/.",
7057
"apiBasePath": "/text_translation/azure/remote",
@@ -73,7 +60,8 @@
7360
"AZURE_TRANSLATE_KEY": "${AZURE_TRANSLATE_KEY}"
7461
},
7562
"nginx": [],
76-
"constraints": ["node.labels.node_vm_type==gpu"]
63+
"constraints": ["node.labels.node_vm_type==gpu"],
64+
"build": false
7765
},
7866
{
7967
"serviceName": "asr_mms",
@@ -82,7 +70,8 @@
8270
"containerPort": 8000,
8371
"environment": {},
8472
"nginx": ["client_max_body_size 100M;", "proxy_read_timeout 600;", "proxy_connect_timeout 600;", "proxy_send_timeout 600;"],
85-
"constraints": ["node.labels.node_vm_type==gpu"]
73+
"constraints": ["node.labels.node_vm_type==gpu"],
74+
"build": false
8675
},
8776
{
8877
"serviceName": "coref_fcoref",
@@ -91,7 +80,8 @@
9180
"containerPort": 8000,
9281
"environment": {},
9382
"nginx": [],
94-
"constraints": ["node.labels.node_vm_type==gpu"]
83+
"constraints": ["node.labels.node_vm_type==gpu"],
84+
"build": false
9585
},
9686
{
9787
"serviceName": "text_translation_bhashini",
@@ -100,23 +90,26 @@
10090
"containerPort": 8000,
10191
"environment": {},
10292
"nginx": [],
103-
"constraints": ["node.labels.node_vm_type==gpu"]
93+
"constraints": ["node.labels.node_vm_type==gpu"],
94+
"build": false
10495
},
10596
{
10697
"serviceName": "text_translation_ai4bharat",
10798
"modelBasePath": "src/text_translation/ai4bharat/remote/.",
10899
"apiBasePath": "src/text_translation/ai4bharat/remote",
109100
"containerPort": 8000,
110101
"environment": {},
111-
"constraints": ["node.labels.node_vm_type==gpu"]
102+
"constraints": ["node.labels.node_vm_type==gpu"],
103+
"build": false
112104
},
113105
{
114106
"serviceName": "text_lang_detection_bhashini",
115107
"modelBasePath": "src/text_lang_detection/bhashini/remote/.",
116108
"apiBasePath": "/text_lang_detection/bhashini/remote",
117109
"containerPort": 8000,
118110
"environment": {},
119-
"constraints": ["node.labels.node_vm_type==gpu"]
111+
"constraints": ["node.labels.node_vm_type==gpu"],
112+
"build": false
120113
},
121114
{
122115
"serviceName": "chunking_mpnet",
@@ -127,7 +120,8 @@
127120
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
128121
},
129122
"nginx": ["client_max_body_size 100M;", "proxy_read_timeout 600;", "proxy_connect_timeout 600;", "proxy_send_timeout 600;"],
130-
"constraints": ["node.labels.node_vm_type==gpu"]
123+
"constraints": ["node.labels.node_vm_type==gpu"],
124+
"build": false
131125
},
132126
{
133127
"serviceName": "embedding_instructor",
@@ -138,7 +132,8 @@
138132
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
139133
},
140134
"nginx": ["client_max_body_size 100M;", "proxy_read_timeout 600;", "proxy_connect_timeout 600;", "proxy_send_timeout 600;"],
141-
"constraints": ["node.labels.node_vm_type==gpu"]
135+
"constraints": ["node.labels.node_vm_type==gpu"],
136+
"build": false
142137
},
143138
{
144139
"serviceName": "llm_openai_gpt3",
@@ -149,7 +144,8 @@
149144
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
150145
},
151146
"nginx": [],
152-
"constraints": ["node.labels.node_vm_type==gpu"]
147+
"constraints": ["node.labels.node_vm_type==gpu"],
148+
"build": true
153149
}
154150
]
155151
}

src/llm/openai/chatgpt3/model.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,4 @@ async def inference(self, request: ModelRequest):
2727
"messages": request.prompt,
2828
},
2929
)
30-
try:
31-
ans = response.json()["choices"][0]["message"]["content"]
32-
return {"ans":ans}
33-
except:
34-
return response.json()
30+
return response.json()

0 commit comments

Comments (0)