@@ -4,13 +4,10 @@ import {RuleHelper} from "textlint-rule-helper";
 import { getTokenizer } from "kuromojin";
 import splitSentences, { Syntax as SentenceSyntax } from "sentence-splitter";
 import StringSource from "textlint-util-to-string";
-// whether the token is a 助詞 (particle)
-const is助詞Token = token => {
-    return token.pos === "助詞";
-};
-const is読点Token = token => {
-    return token.surface_form === "、" && token.pos === "名詞";
-};
+import {
+    is助詞Token, is読点Token,
+    createKeyFromKey, restoreToSurfaceFromKey
+} from "./token-utils";
 /**
  * Create token map object
  * {
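The new `./token-utils` module added by this commit is not shown in the diff. Judging from the helpers deleted above and the `"は:助詞.係助詞"` key format used below, a minimal sketch of what it presumably exports could look like this (the bodies of `createKeyFromKey` and `restoreToSurfaceFromKey` are guesses, not the actual file):

```js
// Hypothetical sketch of ./token-utils -- not the file from this commit.
// is助詞Token / is読点Token mirror the helpers deleted above; the key helpers
// are inferred from the "surface:pos.pos_detail_1" key format in this diff.
export const is助詞Token = token => token.pos === "助詞";
export const is読点Token = token => token.surface_form === "、" && token.pos === "名詞";
// e.g. a は token (pos "助詞", pos_detail_1 "係助詞") => "は:助詞.係助詞"
export const createKeyFromKey = token => `${token.surface_form}:${token.pos}.${token.pos_detail_1}`;
// "は:助詞.係助詞" => "は"
export const restoreToSurfaceFromKey = key => key.split(":")[0];
```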
@@ -23,11 +20,12 @@ const is読点Token = token => {
 function createSurfaceKeyMap(tokens) {
     // target only 助詞 (particle) tokens
     return tokens.filter(is助詞Token).reduce((keyMap, token) => {
-        // "は" : [token]
-        if (!keyMap[token.surface_form]) {
-            keyMap[token.surface_form] = [];
+        // "は:助詞.係助詞" : [token]
+        const tokenKey = createKeyFromKey(token);
+        if (!keyMap[tokenKey]) {
+            keyMap[tokenKey] = [];
         }
-        keyMap[token.surface_form].push(token);
+        keyMap[tokenKey].push(token);
         return keyMap;
     }, {});
 }
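For illustration, with the sketch above: kuromoji tokenizes 「私は本を読んだ」 into one は (係助詞) and one を (格助詞) token, so the map groups them under distinct keys:

```js
// Hypothetical illustration -- token objects abbreviated to the fields used here.
const tokens = [
    { surface_form: "私", pos: "名詞", pos_detail_1: "代名詞" },
    { surface_form: "は", pos: "助詞", pos_detail_1: "係助詞" },
    { surface_form: "本", pos: "名詞", pos_detail_1: "一般" },
    { surface_form: "を", pos: "助詞", pos_detail_1: "格助詞" },
    { surface_form: "読ん", pos: "動詞", pos_detail_1: "自立" },
    { surface_form: "だ", pos: "助動詞", pos_detail_1: "*" }
];
createSurfaceKeyMap(tokens);
// => { "は:助詞.係助詞": [/* は token */], "を:助詞.格助詞": [/* を token */] }
```

Keying on the POS detail as well as the surface means two particles with the same surface form but different grammatical roles no longer land in the same bucket.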
@@ -100,12 +98,13 @@ export default function (context, options = {}) {
 
         joshiTokens = [tokenA, tokenB, tokenC, tokenD, tokenE, tokenF]
         joshiTokenSurfaceKeyMap = {
-            "は": [tokenA, tokenC, tokenE],
-            "で": [tokenB, tokenD, tokenF]
+            "は:助詞.係助詞": [tokenA, tokenC, tokenE],
+            "で:助詞.係助詞": [tokenB, tokenD, tokenF]
         }
         */
         Object.keys(joshiTokenSurfaceKeyMap).forEach(key => {
-            let tokens = joshiTokenSurfaceKeyMap[key];
+            const tokens = joshiTokenSurfaceKeyMap[key];
+            const joshiName = restoreToSurfaceFromKey(key);
             // when not in strict mode, filter out exception patterns
             if (!isStrict) {
                 if (matchExceptionRule(tokens)) {
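Because the map keys now carry the POS suffix, the bare surface has to be recovered before it can appear in the report message. With the token-utils sketch above:

```js
restoreToSurfaceFromKey("は:助詞.係助詞"); // => "は"
```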
@@ -117,27 +116,28 @@ export default function (context, options = {}) {
             }
             // report when the found differenceIndex is less than minInterval;
             // tokens are sorted in ascending order
-            tokens.reduce((prev, current) => {
-                let startPosition = countableTokens.indexOf(prev);
-                let otherPosition = countableTokens.indexOf(current);
-                // if difference
-                let differenceIndex = otherPosition - startPosition;
+            const reducer = (prev, current) => {
+                const startPosition = countableTokens.indexOf(prev);
+                const otherPosition = countableTokens.indexOf(current);
+                // report an error when the distance between 助詞 tokens is within the configured value
+                const differenceIndex = otherPosition - startPosition;
                 if (differenceIndex <= minInterval) {
-                    let originalPosition = source.originalPositionFor({
+                    const originalPosition = source.originalPositionFor({
                         line: sentence.loc.start.line,
                         column: sentence.loc.start.column + (current.word_position - 1)
                     });
-                    // padding position
-                    var padding = {
+                    // compute the padding position
+                    const padding = {
                         line: originalPosition.line - 1,
                         // matchLastToken.word_position starts at 1,
                         // but the padding column starts at 0 (== -1)
                         column: originalPosition.column
                     };
-                    report(node, new RuleError(`一文に二回以上利用されている助詞 "${key}" がみつかりました。`, padding));
+                    report(node, new RuleError(`一文に二回以上利用されている助詞 "${joshiName}" がみつかりました。`, padding));
                 }
                 return current;
-            });
+            };
+            tokens.reduce(reducer);
         });
     };
     sentences.forEach(checkSentence);
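Extracting the callback into a named `reducer` leaves the pairwise scan unchanged: `Array#reduce` without an initial value starts at the second element, so the callback sees each adjacent pair of same-key tokens in order. A standalone sketch of that pattern, with made-up index values:

```js
// Compare each token position with the previous one; report close pairs.
const indexes = [2, 7, 8, 15]; // hypothetical positions in countableTokens
const minInterval = 1;
indexes.reduce((prev, current) => {
    if (current - prev <= minInterval) {
        console.log(`doubled within interval: ${prev} -> ${current}`);
    }
    return current;
}); // logs "doubled within interval: 7 -> 8"
```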