-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapps.py
More file actions
101 lines (60 loc) · 2.24 KB
/
apps.py
File metadata and controls
101 lines (60 loc) · 2.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import streamlit as st
import numpy as np
import pandas as pd
from pythainlp.tokenize import word_tokenize
import math
import joblib
import base64
import re
# Page header and intro copy rendered at the top of the Streamlit app.
st.title("Welcome to IO - Scouter Project ")
st.write("😲 or 🤖 or 🍕 ┬┴┬┴┤ ͜ʖ ͡°) ├┬┴┬┴ - Ver 0.0.2 - 20201022!!")
st.write("This project built for test how much IO you actually have !!")
## LOAD DATA
@st.cache(allow_output_mutation=True)
def load_model():
    """Load the serialized classifier and TF-IDF vectorizer from disk.

    Cached by Streamlit so the joblib files are read only once per session.
    Paths are relative to the app's working directory.

    Returns:
        tuple: (model, vectorizer) — the trained classifier and the fitted
        TF-IDF vectorizer it was trained against.
    """
    # NOTE(review): st.cache is deprecated in modern Streamlit (replaced by
    # st.cache_resource) — kept as-is for compatibility with the pinned version.
    model = joblib.load("./model/clf_log_tfidf_20201022.joblib")
    vectorizer = joblib.load("./model/vectorizer_20201022.joblib")
    return model, vectorizer
# Load the (cached) model artifacts once at startup; both are used as
# module-level globals by inference() below.
loaded_model, vectorizer = load_model()
# Free-text input box; "..." is the initial default value.
user_input = st.text_input("Put text here", "...")
def text_treatment(_text):
    """Return *_text* with http/https URLs and newline characters removed."""
    without_urls = re.sub(r"http\S+", "", _text)
    return without_urls.replace("\n", "")
def inference(_user_input):
    """Clean, tokenize and score the user's text with the loaded model.

    Args:
        _user_input (str): raw text entered in the Streamlit text box.

    Returns:
        float | int: probability of class 0 from the classifier, or 0 when
        the text contains no terms known to the vectorizer's vocabulary.
    """
    treat_text = text_treatment(_user_input)
    # Thai word segmentation; the TF-IDF vectorizer expects one
    # space-separated string per document.
    tokens = word_tokenize(treat_text)
    joined = " ".join(tokens)
    # BUG FIX: use transform(), not fit_transform(). fit_transform would refit
    # the loaded vectorizer on this single input, discarding the trained
    # vocabulary and yielding a feature matrix whose width no longer matches
    # what the classifier was trained on.
    X_test = vectorizer.transform([joined])
    if X_test.toarray().sum() == 0:
        # No in-vocabulary terms — nothing meaningful to score.
        return 0
    y_test_prob = loaded_model.predict_proba(X_test)
    return y_test_prob[0][0]
def get_io_score(_y_val):
    """Scale the raw class probability into a display score and render it.

    Scores <= 0.5 are mildly inflated by a bracket-dependent multiplier;
    scores above 0.5 are blown up ("over 9000") and accompanied by a GIF
    embedded as a base64 data URL.

    Args:
        _y_val (float): class-0 probability from inference().
    """
    if _y_val <= 0.3:
        _y_val = _y_val * 1.1
    elif _y_val <= 0.4:
        _y_val = _y_val * 1.25
    elif _y_val <= 0.5:
        _y_val = _y_val * 1.5
    else:
        _y_val = _y_val * 90000
        # 'with' guarantees the file handle is closed even if read() raises
        # (the original left it open on failure).
        with open("./pict/over9000.gif", "rb") as file_gif:
            contents = file_gif.read()
        data_url = base64.b64encode(contents).decode("utf-8")
        st.markdown(f"## Your IO Score is {_y_val:.5f} !! ")
        st.markdown(
            f'<center><img src="data:image/gif;base64,{data_url}" width="500">',
            unsafe_allow_html=True,
        )
        return
    # Common render path for the three low-score brackets.
    st.markdown(f"## Your IO Score is {_y_val:.5f} !! ")
# Score the current text-box contents and render the result on every rerun.
y_val = inference(user_input)
get_io_score(y_val)