-// pub mod character_filter;
 pub mod dictionary;
 pub mod segmenter;
 pub mod token;
-// pub mod token_filter;
 pub mod tokenizer;
 pub mod util;
 
 use pyo3::prelude::*;
 
-// use crate::character_filter::japanese_iteration_mark::PyJapaneseIterationMarkCharacterFilter;
-// use crate::character_filter::mapping::PyMappingCharacterFilter;
-// use crate::character_filter::regex::PyRegexCharacterFilter;
-// use crate::character_filter::unicode_normalize::PyUnicodeNormalizeCharacterFilter;
-// use crate::character_filter::PyCharacterFilter;
 use crate::dictionary::{load_dictionary, load_user_dictionary, PyDictionary, PyUserDictionary};
 use crate::segmenter::PySegmenter;
 use crate::token::PyToken;
-// use crate::token_filter::japanese_base_form::PyJapaneseBaseFormTokenFilter;
-// use crate::token_filter::japanese_compound_word::PyJapaneseCompoundWordTokenFilter;
-// use crate::token_filter::japanese_kana::PyJapaneseKanaTokenFilter;
-// use crate::token_filter::japanese_katakana_stem::PyJapaneseKatakanaStemTokenFilter;
-// use crate::token_filter::japanese_keep_tags::PyJapaneseKeepTagsTokenFilter;
-// use crate::token_filter::japanese_number::PyJapaneseNumberTokenFilter;
-// use crate::token_filter::japanese_reading_form::PyJapaneseReadingFormTokenFilter;
-// use crate::token_filter::japanese_stop_tags::PyJapaneseStopTagsTokenFilter;
-// use crate::token_filter::keep_words::PyKeepWordsTokenFilter;
-// use crate::token_filter::korean_keep_tags::PyKoreanKeepTagsTokenFilter;
-// use crate::token_filter::korean_reading_form::PyKoreanReadingFormTokenFilter;
-// use crate::token_filter::korean_stop_tags::PyKoreanStopTagsTokenFilter;
-// use crate::token_filter::length::PyLengthTokenFilter;
-// use crate::token_filter::lowercase::PyLowercaseTokenFilter;
-// use crate::token_filter::mapping::PyMappingTokenFilter;
-// use crate::token_filter::remove_diacritical_mark::PyRemoveDiacriticalMarkTokenFilter;
-// use crate::token_filter::stop_words::PyStopWordsTokenFilter;
-// use crate::token_filter::uppercase::PyUppercaseTokenFilter;
-// use crate::token_filter::PyTokenFilter;
 use crate::tokenizer::{PyTokenizer, PyTokenizerBuilder};
 
 #[pymodule]
-fn lindera(module: &Bound<'_, PyModule>) -> PyResult<()> {
+fn lindera_py(module: &Bound<'_, PyModule>) -> PyResult<()> {
     module.add_class::<PyToken>()?;
     module.add_class::<PyDictionary>()?;
     module.add_class::<PyUserDictionary>()?;
     module.add_class::<PyTokenizerBuilder>()?;
     module.add_class::<PyTokenizer>()?;
     module.add_class::<PySegmenter>()?;
-    // module.add_class::<PyCharacterFilter>()?;
-    // module.add_class::<PyTokenFilter>()?;
-    // module.add_class::<PyJapaneseIterationMarkCharacterFilter>()?;
-    // module.add_class::<PyMappingCharacterFilter>()?;
-    // module.add_class::<PyRegexCharacterFilter>()?;
-    // module.add_class::<PyUnicodeNormalizeCharacterFilter>()?;
-    // module.add_class::<PyJapaneseBaseFormTokenFilter>()?;
-    // module.add_class::<PyJapaneseCompoundWordTokenFilter>()?;
-    // module.add_class::<PyJapaneseKanaTokenFilter>()?;
-    // module.add_class::<PyJapaneseKatakanaStemTokenFilter>()?;
-    // module.add_class::<PyJapaneseKeepTagsTokenFilter>()?;
-    // module.add_class::<PyJapaneseNumberTokenFilter>()?;
-    // module.add_class::<PyJapaneseReadingFormTokenFilter>()?;
-    // module.add_class::<PyJapaneseStopTagsTokenFilter>()?;
-    // module.add_class::<PyKeepWordsTokenFilter>()?;
-    // module.add_class::<PyKoreanKeepTagsTokenFilter>()?;
-    // module.add_class::<PyKoreanReadingFormTokenFilter>()?;
-    // module.add_class::<PyKoreanStopTagsTokenFilter>()?;
-    // module.add_class::<PyLengthTokenFilter>()?;
-    // module.add_class::<PyLowercaseTokenFilter>()?;
-    // module.add_class::<PyMappingTokenFilter>()?;
-    // module.add_class::<PyRemoveDiacriticalMarkTokenFilter>()?;
-    // module.add_class::<PyStopWordsTokenFilter>()?;
-    // module.add_class::<PyUppercaseTokenFilter>()?;
 
     module.add_function(wrap_pyfunction!(load_dictionary, module)?)?;
     module.add_function(wrap_pyfunction!(load_user_dictionary, module)?)?;
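For context, the name of the function annotated with `#[pymodule]` is the name under which the compiled extension module is imported from Python, so after this change the module is imported as `lindera_py`. A minimal usage sketch follows, assuming the classes and functions registered above are exposed under Python-visible names such as `Segmenter`, `Tokenizer`, and `load_dictionary`; the exact `#[pyclass]`/`#[pyfunction]` names, constructor arguments, and dictionary identifier are assumptions not shown in this diff.

```python
# Hypothetical usage sketch of the lindera_py extension module.
# Class names, constructor signatures, and "ipadic" below are assumptions;
# check the #[pyclass]/#[pyfunction] attributes and the project README.
from lindera_py import Segmenter, Tokenizer, load_dictionary

dictionary = load_dictionary("ipadic")       # assumed dictionary kind
segmenter = Segmenter("normal", dictionary)  # assumed segmentation mode
tokenizer = Tokenizer(segmenter)

for token in tokenizer.tokenize("関西国際空港限定トートバッグ"):
    print(token.text)
```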