-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
91 lines (75 loc) · 2.64 KB
/
app.py
File metadata and controls
91 lines (75 loc) · 2.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
import json
import string
import re
import functools
import operator
import pandas as pd
import emoji
from nltk import word_tokenize
from joblib import load
# CLEANSING
def cleansing(data):
    """Lowercase *data*, strip punctuation/digits/newlines, collapse whitespace.

    Returns the cleaned string; the input is not modified.
    """
    # uniform case first so every later step sees lowercase text
    data = data.lower()
    # map every ASCII punctuation character to a space in one C-level pass
    table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    data = data.translate(table)
    # newlines become ordinary spaces
    data = data.replace('\n', ' ')
    # drop ASCII digits ([0-9] deliberately, not \d, so Unicode digits are untouched)
    data = re.sub(r'[0-9]', '', data)
    # collapse runs of whitespace into single spaces and trim the ends
    return ' '.join(data.split())
# CONVERT EMOJIS
# Build the emoji -> meaning lookup from the CSV ('emoji' and 'makna' columns);
# later duplicate emojis overwrite earlier ones, same as the original loop.
df_emoji = pd.read_csv('emoji_to_text.csv')
UNICODE_EMO = dict(zip(df_emoji['emoji'], df_emoji['makna']))
def convert_emojis(text):
    """Replace each known emoji in *text* with its underscore-joined meaning.

    Emojis are first split off from surrounding text (space-separated), then
    each one found in UNICODE_EMO is substituted with its cleaned meaning.
    Returns the lowercased result.
    """
    # NOTE(review): emoji.get_emoji_regexp() was removed in emoji>=2.0 —
    # confirm the pinned emoji version still provides it.
    # split emojis so adjacent emoji/text become separate tokens
    em_split_emoji = emoji.get_emoji_regexp().split(text)
    em_split_whitespace = [substr.split() for substr in em_split_emoji]
    em_split = functools.reduce(operator.concat, em_split_whitespace, [])
    text = ' '.join(em_split)
    # Replace each emoji LITERALLY. The original used re.sub with the raw
    # emoji as the pattern, which breaks for emojis containing regex
    # metacharacters (e.g. the '*' in the keycap sequence *\ufe0f\u20e3);
    # str.replace has no such pitfall and is equivalent for literal keys.
    for emot, makna in UNICODE_EMO.items():
        replacement = "_".join(makna.replace(",", "").replace(":", "").split())
        text = text.replace(emot, replacement)
    return text.lower()
# NORMALIZE COLLOQUIAL/ALAY
# Load the colloquial->standard word dictionary. The original passed an
# anonymous open() to json.load, leaking the file handle; use a context
# manager so the file is closed deterministically.
with open('kamus_alay.json', 'r') as _kamus_file:
    kamus_alay = json.load(_kamus_file)
def normalize_text(data):
    """Replace colloquial/slang tokens with their standard form via kamus_alay.

    Tokens missing from the dictionary pass through unchanged.
    """
    tokens = word_tokenize(data)
    normalized = (kamus_alay.get(token, token) for token in tokens)
    return ' '.join(normalized)
# REMOVE STOPWORDS
# Load stopwords one per line. The original left the file handle open
# (anonymous open() with no close); a context manager fixes the leak.
with open('stop_words.txt', 'r') as _sw_file:
    stop_words = [sw.strip() for sw in _sw_file]
def remove_stopword(text, stop_words=stop_words):
    """Drop stopword tokens from *text* and return the remaining tokens joined by spaces.

    `stop_words` defaults to the module-level list loaded from stop_words.txt;
    callers may pass any iterable of words.
    """
    # Build a set once per call: O(1) membership per token instead of an
    # O(n) list scan for every word. Also use the idiomatic `not in`.
    blocked = set(stop_words)
    tokens = word_tokenize(text)
    filtered_sentence = [w for w in tokens if w not in blocked]
    return ' '.join(filtered_sentence)
# PREPROCESS PIPELINE
def preprocess(text):
    """Run the full text-cleaning pipeline.

    Steps, in order: cleanse (case/punct/digits), convert emojis to text,
    normalize colloquial words, remove stopwords.
    """
    for step in (cleansing, convert_emojis, normalize_text, remove_stopword):
        text = step(text)
    return text
# PREDICT SENTIMENT
@functools.lru_cache(maxsize=1)
def _load_models():
    """Load the TF-IDF vectorizer and tuned SVC from disk once and cache them."""
    vectorizer = load('tfidf-vectorizer.joblib')
    svc = load("tfidf_svc_tuned.joblib")
    return vectorizer, svc

def predict_sentiment(text):
    """Preprocess *text* and return class probabilities as a label dict.

    Returns {'Neutral': p0, 'Positive': p1, 'Negative': p2} from the SVC's
    predict_proba output.
    """
    text = preprocess(text)
    # Models were previously re-read from disk on EVERY request; the cached
    # loader deserializes them only on the first call.
    vectorizer, svc = _load_models()
    feature = vectorizer.transform([text])
    pred = svc.predict_proba(feature)[0]
    # class order [neutral, positive, negative] matches the original mapping —
    # presumably fixed at training time; verify against the saved model's classes_.
    return {'Neutral': pred[0], 'Positive': pred[1], 'Negative': pred[2]}
# sample_text1 = "Ayooo.. Tetep ProKes ketat..!!! Janhan lengah..!!! Semangat...!!!"
# Build the Gradio UI, then launch it (launch() blocks and serves the app).
# NOTE(review): gr.inputs.Textbox is the pre-3.x Gradio namespace — confirm
# the installed gradio version before migrating to gr.Textbox.
demo = gr.Interface(
    fn=predict_sentiment,
    title="Analisis Sentimen Komentar Instagram 🤗",
    description="Isikan kolom text dengan komentar, kemudian biarkan model machine learning memprediksikan hasil sentimen untukmu!",
    inputs=gr.inputs.Textbox(lines=7, label="Text"),
    outputs="label",
)
demo.launch()