Commit ec0ef8a

Merge pull request #37 from lindera/package_name
Fix package name
2 parents: f66cfdb + c9af26e

File tree

11 files changed: 13 additions & 64 deletions
Cargo.lock

Lines changed: 1 addition & 1 deletion
Generated file; diff not rendered.

Cargo.toml

Lines changed: 3 additions & 2 deletions
@@ -1,6 +1,6 @@
 [package]
 name = "lindera-py"
-version = "0.38.2"
+version = "0.38.3"
 edition = "2021"
 description = "Python binding for Lindera."
 documentation = "https://docs.rs/lindera-py"
@@ -12,7 +12,8 @@ categories = ["text-processing"]
 license = "MIT"
 
 [lib]
-name = "lindera"
+name = "lindera_py"
+path = "src/lib.rs"
 crate-type = ["cdylib"]
 
 [features]
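
In a PyO3 project built as a `cdylib`, the `[lib]` name normally determines the filename of the compiled extension and therefore the name Python imports, so renaming it from `lindera` to `lindera_py` (together with the explicit `path = "src/lib.rs"`) changes the import path for users. A minimal sketch of the effect, assuming the extension has been built and installed (for example via the Makefile's `develop` target):

```python
# The compiled module is now importable as "lindera_py"; the old top-level
# name "lindera" is no longer provided by this package.
import importlib

mod = importlib.import_module("lindera_py")
print(mod.__name__)  # -> lindera_py
```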

Makefile

Lines changed: 0 additions & 2 deletions
@@ -26,8 +26,6 @@ lint:
 	poetry run isort --check-only --diff ./examples ./tests
 	poetry run black --check ./examples ./tests
 	poetry run flake8 ./examples ./tests
-
-typecheck:
 	poetry run mypy ./examples ./tests
 
 develop:

README.md

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ This command takes a long time because it builds a library that includes all the
 ## Example code
 
 ```python
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary
 
 
 def main():
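
The README diff shows only the changed import line; the rest of the example continues in the file below the hunk. For orientation, here is a plausible shape of that `main()` under the new import. It is a sketch only: the `"ipadic"` dictionary kind, the `"normal"` segmenter mode, the `tokenize()` method, and the `token.text` attribute are assumptions not shown in this diff.

```python
from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
    # Assumed API: load a bundled dictionary by kind, build a segmenter,
    # then wrap it in a tokenizer.
    dictionary = load_dictionary("ipadic")
    segmenter = Segmenter("normal", dictionary)
    tokenizer = Tokenizer(segmenter)

    # Tokenize a Japanese sentence and print each token's surface form.
    for token in tokenizer.tokenize("関西国際空港限定トートバッグ"):
        print(token.text)


if __name__ == "__main__":
    main()
```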

examples/tokenize.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary
 
 
 def main():

examples/tokenize_with_decompose.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary
 
 
 def main():

examples/tokenize_with_filters.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary
 
 
 def main():

examples/tokenize_with_userdict.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from pathlib import Path
 
-from lindera import Segmenter, Tokenizer, load_dictionary, load_user_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary, load_user_dictionary
 
 project_root = Path(__file__).resolve().parent.parent
 

pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 [tool.poetry]
-name = "lindera"
-version = "0.38.2"
+name = "lindera_py"
+version = "0.38.3"
 description = ""
 authors = ["Minoru Osuka <minoru.osuka@gmail.com>"]
 license = "MIT"

src/lib.rs

Lines changed: 1 addition & 51 deletions
@@ -1,74 +1,24 @@
-// pub mod character_filter;
 pub mod dictionary;
 pub mod segmenter;
 pub mod token;
-// pub mod token_filter;
 pub mod tokenizer;
 pub mod util;
 
 use pyo3::prelude::*;
 
-// use crate::character_filter::japanese_iteration_mark::PyJapaneseIterationMarkCharacterFilter;
-// use crate::character_filter::mapping::PyMappingCharacterFilter;
-// use crate::character_filter::regex::PyRegexCharacterFilter;
-// use crate::character_filter::unicode_normalize::PyUnicodeNormalizeCharacterFilter;
-// use crate::character_filter::PyCharacterFilter;
 use crate::dictionary::{load_dictionary, load_user_dictionary, PyDictionary, PyUserDictionary};
 use crate::segmenter::PySegmenter;
 use crate::token::PyToken;
-// use crate::token_filter::japanese_base_form::PyJapaneseBaseFormTokenFilter;
-// use crate::token_filter::japanese_compound_word::PyJapaneseCompoundWordTokenFilter;
-// use crate::token_filter::japanese_kana::PyJapaneseKanaTokenFilter;
-// use crate::token_filter::japanese_katakana_stem::PyJapaneseKatakanaStemTokenFilter;
-// use crate::token_filter::japanese_keep_tags::PyJapaneseKeepTagsTokenFilter;
-// use crate::token_filter::japanese_number::PyJapaneseNumberTokenFilter;
-// use crate::token_filter::japanese_reading_form::PyJapaneseReadingFormTokenFilter;
-// use crate::token_filter::japanese_stop_tags::PyJapaneseStopTagsTokenFilter;
-// use crate::token_filter::keep_words::PyKeepWordsTokenFilter;
-// use crate::token_filter::korean_keep_tags::PyKoreanKeepTagsTokenFilter;
-// use crate::token_filter::korean_reading_form::PyKoreanReadingFormTokenFilter;
-// use crate::token_filter::korean_stop_tags::PyKoreanStopTagsTokenFilter;
-// use crate::token_filter::length::PyLengthTokenFilter;
-// use crate::token_filter::lowercase::PyLowercaseTokenFilter;
-// use crate::token_filter::mapping::PyMappingTokenFilter;
-// use crate::token_filter::remove_diacritical_mark::PyRemoveDiacriticalMarkTokenFilter;
-// use crate::token_filter::stop_words::PyStopWordsTokenFilter;
-// use crate::token_filter::uppercase::PyUppercaseTokenFilter;
 use crate::tokenizer::{PyTokenizer, PyTokenizerBuilder};
 
 #[pymodule]
-fn lindera(module: &Bound<'_, PyModule>) -> PyResult<()> {
+fn lindera_py(module: &Bound<'_, PyModule>) -> PyResult<()> {
     module.add_class::<PyToken>()?;
     module.add_class::<PyDictionary>()?;
     module.add_class::<PyUserDictionary>()?;
     module.add_class::<PyTokenizerBuilder>()?;
     module.add_class::<PyTokenizer>()?;
     module.add_class::<PySegmenter>()?;
-    // module.add_class::<PyCharacterFilter>()?;
-    // module.add_class::<PyTokenFilter>()?;
-    // module.add_class::<PyJapaneseIterationMarkCharacterFilter>()?;
-    // module.add_class::<PyMappingCharacterFilter>()?;
-    // module.add_class::<PyRegexCharacterFilter>()?;
-    // module.add_class::<PyUnicodeNormalizeCharacterFilter>()?;
-    // module.add_class::<PyJapaneseBaseFormTokenFilter>()?;
-    // module.add_class::<PyJapaneseCompoundWordTokenFilter>()?;
-    // module.add_class::<PyJapaneseKanaTokenFilter>()?;
-    // module.add_class::<PyJapaneseKatakanaStemTokenFilter>()?;
-    // module.add_class::<PyJapaneseKeepTagsTokenFilter>()?;
-    // module.add_class::<PyJapaneseNumberTokenFilter>()?;
-    // module.add_class::<PyJapaneseReadingFormTokenFilter>()?;
-    // module.add_class::<PyJapaneseStopTagsTokenFilter>()?;
-    // module.add_class::<PyKeepWordsTokenFilter>()?;
-    // module.add_class::<PyKoreanKeepTagsTokenFilter>()?;
-    // module.add_class::<PyKoreanReadingFormTokenFilter>()?;
-    // module.add_class::<PyKoreanStopTagsTokenFilter>()?;
-    // module.add_class::<PyLengthTokenFilter>()?;
-    // module.add_class::<PyLowercaseTokenFilter>()?;
-    // module.add_class::<PyMappingTokenFilter>()?;
-    // module.add_class::<PyRemoveDiacriticalMarkTokenFilter>()?;
-    // module.add_class::<PyStopWordsTokenFilter>()?;
-    // module.add_class::<PyUppercaseTokenFilter>()?;
 
     module.add_function(wrap_pyfunction!(load_dictionary, module)?)?;
     module.add_function(wrap_pyfunction!(load_user_dictionary, module)?)?;
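
The `#[pymodule]` function name is what PyO3 uses to generate the `PyInit_lindera_py` entry point that the interpreter looks for at import time, so it has to be renamed in step with `[lib] name` in Cargo.toml and the `from lindera_py import ...` lines in the examples. A small sanity check, limited to the names confirmed by the Python imports in this commit and assuming the extension is built and installed:

```python
import lindera_py

# Only these names are confirmed by the import statements changed in this
# commit; the classes registered above (PyToken, PyDictionary, ...) may be
# exposed under different Python names chosen in their #[pyclass] attributes.
for name in ("Segmenter", "Tokenizer", "load_dictionary", "load_user_dictionary"):
    assert hasattr(lindera_py, name), f"missing attribute: {name}"
print("lindera_py exposes the expected public names")
```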
