This repository was archived by the owner on Nov 8, 2022. It is now read-only.

Commit 968317c

danielkorat authored and peteriz committed
Daniel/fix code style and pylint exit code (#336)
* Added HTML output for pylint and flake8
* Style fixes
* Fixed pylint exit code; more style fixes
* Disabled the subprocess.run exception on child failure
* Write style results to the external storage dir
* Create the output dir if missing
* Invoke ansi2html through Python instead of the shell
* Added flake8-html to the requirements
* Moved ansi2html.py to nlp_architect.utils
1 parent 3a36787 · commit 968317c
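Two of the fixes listed above are behavioral rather than cosmetic: pylint encodes the message categories it found in its exit status, so a style runner has to call it with check=False and read the report itself, and the ANSI-colored output can be converted to HTML in-process through the ansi2html package instead of a shell pipeline. A minimal sketch of that combination, not the commit's exact code (the lint target and output path are illustrative):

import subprocess

from ansi2html import Ansi2HTMLConverter

# check=False: pylint exits non-zero whenever it emits messages, which would
# otherwise raise CalledProcessError and abort the style run on any finding.
result = subprocess.run(
    ['pylint', 'nlp_architect', '--output-format=colorized'],
    stdout=subprocess.PIPE,
    check=False,
)

# Convert the ANSI-colored report to HTML via the Python API rather than
# shelling out to the ansi2html executable.
html = Ansi2HTMLConverter().convert(result.stdout.decode('utf-8'))
with open('pylint.html', 'w', encoding='utf-8') as report:
    report.write(html)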


65 files changed (+275 −242 lines)

.gitignore

Lines changed: 2 additions & 4 deletions
@@ -7,6 +7,7 @@
 .venv
 .styleenv
 .coverage
+flake8.txt
 /build/
 generated
 /dist/
@@ -22,7 +23,4 @@ doc/build/**
 .idea/
 .nlp_architect_env/
 src/
-pylint.html
-pylint.txt
-flake8.txt
-tests/fixtures/data/chunker/(
+tests/fixtures/data/chunker/

doc/source/api.rst

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ to train the model weights, perform inference, and save/load the model.
 nlp_architect.models.chunker.SequenceChunker
 nlp_architect.models.intent_extraction.Seq2SeqIntentModel
 nlp_architect.models.intent_extraction.MultiTaskIntentModel
-nlp_architect.models.matchlstm_ansptr.MatchLSTM_AnswerPointer
+nlp_architect.models.matchlstm_ansptr.MatchLSTMAnswerPointer
 nlp_architect.models.memn2n_dialogue.MemN2N_Dialog
 nlp_architect.models.most_common_word_sense.MostCommonWordSense
 nlp_architect.models.ner_crf.NERCRF

doc/source/cross_doc_coref.rst

Lines changed: 1 addition & 63 deletions
@@ -130,66 +130,4 @@ See code example below for running a full cross document coreference evaluation
 Code Example
 ============

-.. code:: python
-
-    # Configure which sieves you would like to run, the order, constrain and threshold,
-    event_config = EventConfig()
-
-    event_config.sieves_order = [
-        (SieveType.STRICT, RelationType.SAME_HEAD_LEMMA, 0.0),
-        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_DISAMBIGUATION, 0.1),
-        (SieveType.VERY_RELAX, RelationType.WORD_EMBEDDING_MATCH, 0.7),
-        (SieveType.RELAX, RelationType.SAME_HEAD_LEMMA_RELAX, 0.5),
-    ]
-
-    event_config.gold_mentions_file = '<Replace with your event mentions json file>'
-
-    entity_config = EntityConfig()
-
-    entity_config.sieves_order = [
-        (SieveType.STRICT, RelationType.SAME_HEAD_LEMMA, 0.0),
-        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_REDIRECT_LINK, 0.1),
-        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_DISAMBIGUATION, 0.1),
-        (SieveType.VERY_RELAX, RelationType.WORD_EMBEDDING_MATCH, 0.7),
-        (SieveType.VERY_RELAX, RelationType.REFERENT_DICT, 0.5)
-    ]
-
-    entity_config.gold_mentions_file = '<Replace with your entity mentions json file>'
-
-    # CDCResources hold default attribute values that might need to be change,
-    # (using the defaults values in this example), use to configure attributes
-    # such as resources files location, output directory, resources init methods and other.
-    # check in class and see if any attributes require change in your set-up
-    resource_location = CDCResources()
-
-    # create a new cross doc resources, with all needed semantic relation models
-    resources = CDCSettings(resource_location, event_config, entity_config)
-
-    # run event evaluation
-    event_clusters = None
-    if event_config.run_evaluation:
-        # entry point for the event evaluation process
-        event_clusters = run_event_coref(resources)
-
-    # run entity evaluation in the same way
-    entity_clusters = None
-    if entity_config.run_evaluation:
-        # entry point for the entity evaluation process
-        entity_clusters = run_entity_coref(resources)
-
-    print('-=Cross Document Coref Results=-')
-    print('-=Event Clusters Mentions=-')
-    for event_cluster in event_clusters.clusters_list:
-        print(event_cluster.coref_chain)
-        for event_mention in event_cluster.mentions:
-            print(event_mention.mention_id)
-            print(event_mention.tokens_str)
-
-    print('-=Entity Clusters Mentions=-')
-    for entity_cluster in entity_clusters.clusters_list:
-        print(entity_cluster.coref_chain)
-        for entity_mention in entity_cluster.mentions:
-            print(entity_mention.mention_id)
-            print(entity_mention.tokens_str)
-
-You can find the above example at this location: ``examples/cross_doc_coref/cross_doc_coref_sieves.py``
+You can find code example for running the system at: ``examples/cross_doc_coref/cross_doc_coref_sieves.py``

examples/memn2n_dialogue/interactive_utils.py

Lines changed: 2 additions & 2 deletions
@@ -46,12 +46,12 @@ def interactive_loop(model, babi):
         line_in = text_input('>>> ').strip().lower()
         if not line_in:
             line_in = "<SILENCE>"
-        if line_in == 'exit' or line_in == 'quit':
+        if line_in in ('exit', 'quit'):
             break
         if line_in == 'help':
             print_help()
             continue
-        if line_in == 'restart' or line_in == 'clear':
+        if line_in in ('restart', 'clear'):
             context = []
             response = None
             time_feat = 1
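The hunk above is one instance of a style fix this commit applies in several places: chained equality tests joined with `or` collapse into a single membership test. A standalone sketch of the idiom (the variable name is illustrative):

command = 'quit'

# Before: repetitive, and easy to get wrong when adding aliases.
if command == 'exit' or command == 'quit':
    print('bye')

# After: equivalent, shorter, and scales to more aliases.
if command in ('exit', 'quit'):
    print('bye')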

examples/most_common_word_sense/inference.py

Lines changed: 3 additions & 4 deletions
@@ -23,13 +23,12 @@
 from nltk.corpus import wordnet as wn
 from termcolor import colored

+from examples.most_common_word_sense.feature_extraction import extract_synset_data, \
+    extract_features_envelope
+from examples.most_common_word_sense.prepare_data import read_inference_input_examples_file
 from nlp_architect.models.most_common_word_sense import MostCommonWordSense
 from nlp_architect.utils.io import validate_existing_filepath, check_size

-from prepare_data import read_inference_input_examples_file
-from feature_extraction import extract_features_envelope
-from feature_extraction import extract_synset_data
-
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
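The import rewrites here, and the similar ones in the files below, fix the same underlying problem: bare statements such as `from prepare_data import ...` are implicit relative imports, which Python 3 resolves against sys.path rather than the containing package, so they break when the examples run as package modules. The cure is an absolute import or an explicit relative one. A minimal sketch, assuming a hypothetical package `pkg` with modules helpers.py and consumer.py (neither name comes from this commit):

# pkg/consumer.py
from pkg.helpers import util   # absolute: works regardless of the working directory
from .helpers import util     # explicit relative: resolved against pkg itself

# A bare `from helpers import util` only works when pkg/ happens to be on
# sys.path, e.g. when running `python consumer.py` from inside pkg/.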

examples/most_common_word_sense/prepare_data.py

Lines changed: 1 addition & 1 deletion
@@ -26,8 +26,8 @@
 import gensim
 import numpy as np
 from sklearn.model_selection import train_test_split
-from feature_extraction import extract_features_envelope

+from examples.most_common_word_sense.feature_extraction import extract_features_envelope
 from nlp_architect.utils.io import validate_existing_filepath, check_size, validate_parent_exists

 logger = logging.getLogger(__name__)

examples/np_semantic_segmentation/data.py

Lines changed: 1 addition & 1 deletion
@@ -244,7 +244,7 @@ def load_data_from_file(self):
         # count num of feature vectors
         num_feats = len(reader_list)
         # is_y_labels is for inference - if the inference data is labeled y_labels are extracted
-        self.is_y_labels = True if (len(reader_list[0]) == self.feature_vec_dim + 1) else False
+        self.is_y_labels = len(reader_list[0]) == self.feature_vec_dim + 1
         X_feature_matrix = numpy.zeros((num_feats, self.feature_vec_dim))
         Y_labels_vec = []
         cntr = 0
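Another recurring simplification: wrapping a comparison in `True if ... else False` is redundant, since the comparison already yields a bool. A standalone sketch (the values are illustrative):

row = [0.1, 0.2, 0.3, 1]
feature_vec_dim = 3

# Before: redundant conditional around a boolean expression.
is_y_labels = True if (len(row) == feature_vec_dim + 1) else False

# After: equivalent and idiomatic.
is_y_labels = len(row) == feature_vec_dim + 1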

examples/np_semantic_segmentation/inference.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 import csv
 import os

-from data import NpSemanticSegData, absolute_path
+from .data import NpSemanticSegData, absolute_path

 from nlp_architect.models.np_semantic_segmentation import NpSemanticSegClassifier
 from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists

examples/np_semantic_segmentation/preprocess_tratz2011.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 import csv
 import os

-from data import absolute_path
+from .data import absolute_path

 from nlp_architect.utils.io import validate_existing_directory
examples/np_semantic_segmentation/train.py

Lines changed: 3 additions & 3 deletions
@@ -22,20 +22,20 @@
 from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists, validate


-def train_mlp_classifier(dataset, model_file_path, num_epochs, callback_args=None):
+def train_mlp_classifier(dataset, model_file_path, epochs, callback_args=None):
     """
     Train the np_semantic_segmentation mlp classifier
     Args:
         model_file_path (str): model path
-        num_epochs (int): number of epochs
+        epochs (int): number of epochs
         callback_args (dict): callback_arg
         dataset: NpSemanticSegData object containing the dataset

     Returns:
         print error_rate, test_accuracy_rate and precision_recall_rate evaluation from the model

     """
-    model = NpSemanticSegClassifier(num_epochs, callback_args)
+    model = NpSemanticSegClassifier(epochs, callback_args)
     input_dim = dataset.train_set_x.shape[1]
     model.build(input_dim)
     # run fit