pyproject.toml (1 addition, 2 deletions)

@@ -317,6 +317,5 @@ select = [
     "W293",
 ]
 # Some rules are ignored for now; we can consider enabling them in the future.
-# F403 and F405 are ignored due to thai2fit/ulmfit module's star imports.
 # S101 is use of assert statement, should be an easy fix.
-extend-ignore = ["E402", "E501", "E722", "F403", "F405", "S101", "S202", "S301", "S310"]
+extend-ignore = ["E402", "E501", "E722", "S101", "S202", "S301", "S310"]
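For context, F403 flags the "from module import *" statement itself (undefined names cannot be detected), while F405 flags each use of a name that may come from a star import. A minimal illustration of what the re-enabled rules catch (hypothetical snippet, not part of this PR):

    from os.path import *  # F403: star import; undefined names cannot be detected

    print(join("a", "b"))  # F405: join may be undefined, or defined from star imports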
pythainlp/generate/thai2fit.py (10 additions, 1 deletion)

@@ -17,7 +17,16 @@
 # fastai
 import fastai
 import pandas as pd
-from fastai.text import *
+from fastai.text import (
+    AWD_LSTM,
+    NumericalizeProcessor,
+    TextList,
+    TokenizeProcessor,
+    Tokenizer,
+    URLs,
+    language_model_learner,
+    untar_data,
+)
 
 # pythainlp
 from pythainlp.ulmfit import (
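Since the star import previously pulled in every public name from fastai.text, it is worth double-checking that the explicit list above covers everything the module actually uses. A minimal sanity-check sketch, assuming fastai v1 is installed (not part of this PR):

    import fastai.text

    names = [
        "AWD_LSTM", "NumericalizeProcessor", "TextList", "TokenizeProcessor",
        "Tokenizer", "URLs", "language_model_learner", "untar_data",
    ]
    # Any name missing here would have raised ImportError at import time anyway,
    # but this check pinpoints the offending name directly.
    missing = [n for n in names if not hasattr(fastai.text, n)]
    assert not missing, f"not available in fastai.text: {missing}"

Running ruff with the undefined-name rule over the touched files (ruff check --select F821 pythainlp/generate/thai2fit.py tests/extra/testx_ulmfit.py) would likewise surface any name the explicit list misses.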
pythainlp/ulmfit/core.py (2 additions, 3 deletions)

@@ -162,9 +162,8 @@ def document_vector(text: str, learn, data, agg: str = "mean"):
 
     :Example:
 
-    >>> from pythainlp.ulmfit import document_vectorr
-    >>> from fastai import *
-    >>> from fastai.text import *
+    >>> from pythainlp.ulmfit import document_vector
+    >>> from fastai.text import load_data, language_model_learner, AWD_LSTM
     >>>
     >>> # Load Data Bunch
     >>> data = load_data(MODEL_PATH, 'thwiki_lm_data.pkl')
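For reference, the corrected docstring example would continue by building the learner and computing the vector; a sketch of the remaining doctest steps, using only the names imported above (the Thai sample string is illustrative):

    >>> # Initialize learner
    >>> learn = language_model_learner(data, AWD_LSTM, pretrained=False)
    >>>
    >>> # Get document vector
    >>> document_vector('วันนี้วันดีปีใหม่', learn, data)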
tests/extra/testx_ulmfit.py (10 additions, 1 deletion)

@@ -8,7 +8,16 @@
 import fastai
 import pandas as pd
 import torch
-from fastai.text import *
+from fastai.text import (
+    AWD_LSTM,
+    NumericalizeProcessor,
+    TextList,
+    TokenizeProcessor,
+    Tokenizer,
+    URLs,
+    language_model_learner,
+    untar_data,
+)
 
 from pythainlp.tokenize import THAI2FIT_TOKENIZER
 from pythainlp.ulmfit import (