This repository was archived by the owner on Dec 6, 2023. It is now read-only.

Commit a327bbb

explicitly require Python 3.6 and drop six dependency (#170)
1 parent 8bf5b2c commit a327bbb

37 files changed: 221 additions, 284 deletions
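
Nearly every hunk below applies the same Python 3-only modernization: six helpers are replaced by built-ins, super() is called without arguments, and text files are opened with an explicit encoding. A minimal sketch of the pattern (illustrative only; the class and function names are not from this repository):

class Counter:
    """Toy class, not part of lightning."""

    def __init__(self, n):
        super().__init__()      # argument-free super(), Python 3 only
        self.n = n

    def values(self):
        # built-in range() replaces the removed six.moves.xrange
        return list(range(self.n))


def read_text(path):
    # explicit encoding instead of the locale-dependent default
    with open(path, encoding="utf-8") as f:
        return f.read()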

.travis.yml

Lines changed: 1 addition & 1 deletion

@@ -17,7 +17,7 @@ install:
   # Useful for debugging any issues with conda
   - conda info -a
 
-  - conda create -q -n test-environment python=$PYTHON_VERSION numpy scipy pytest cython scikit-learn six joblib
+  - conda create -q -n test-environment python=$PYTHON_VERSION numpy scipy pytest cython scikit-learn joblib
   - source activate test-environment
   - make all
 

README.rst

Lines changed: 5 additions & 5 deletions

@@ -17,9 +17,9 @@ ranking in Python.
 
 Highlights:
 
-- follows the `scikit-learn <http://scikit-learn.org>`_ API conventions
+- follows the `scikit-learn <https://scikit-learn.org>`_ API conventions
 - supports natively both dense and sparse data representations
-- computationally demanding parts implemented in `Cython <http://cython.org>`_
+- computationally demanding parts implemented in `Cython <https://cython.org>`_
 
 Solvers supported:
 
@@ -66,8 +66,8 @@ penalty on the News20 dataset (c.f., `Blondel et al. 2013
 Dependencies
 ------------
 
-lightning requires Python >= 2.7, setuptools, Numpy >= 1.3, SciPy >= 0.7 and
-scikit-learn >= 0.15. Building from source also requires Cython and a working C/C++ compiler. To run the tests you will also need pytest.
+lightning requires Python >= 3.6, setuptools, Numpy >= 1.12, SciPy >= 0.19 and
+scikit-learn >= 0.19. Building from source also requires Cython and a working C/C++ compiler. To run the tests you will also need pytest.
 
 Installation
 ------------
@@ -93,7 +93,7 @@ Documentation
 
 http://contrib.scikit-learn.org/lightning/
 
-On Github
+On GitHub
 ---------
 
 https://github.com/scikit-learn-contrib/lightning
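
The README now requires Python >= 3.6, and the commit title says the requirement is made explicit; with setuptools that is normally done via the python_requires argument. The setup.py hunk is not part of the excerpt above, so the following is only a sketch of that mechanism, not the commit's actual change:

# Hypothetical setup.py sketch; the real setup.py diff is not shown on this page.
from setuptools import setup

setup(
    name="example-package",
    python_requires=">=3.6",          # pip refuses installation on older interpreters
    install_requires=[
        "numpy>=1.12",
        "scipy>=0.19",
        "scikit-learn>=0.19",
    ],
)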

appveyor.yml

Lines changed: 1 addition & 2 deletions

@@ -1,6 +1,5 @@
 # AppVeyor.com is a Continuous Integration service to build and run tests under
 # Windows
-# https://ci.appveyor.com/project/fabianp/lightning-bpc6r
 
 image: Visual Studio 2019
 
@@ -52,7 +51,7 @@ install:
   - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
   - "python -m pip --version"
 
-  - "python -m pip install --timeout=60 numpy scipy cython pytest scikit-learn wheel six joblib"
+  - "python -m pip install --timeout=60 numpy scipy cython pytest scikit-learn wheel joblib"
   - "python setup.py bdist_wheel bdist_wininst"
 
   - ps: "ls dist"

build_tools/move-conda-package.py

Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@
 import shutil
 from conda_build.config import config
 
-with open(os.path.join(sys.argv[1], 'meta.yaml')) as f:
+with open(os.path.join(sys.argv[1], 'meta.yaml'), encoding='utf-8') as f:
     name = yaml.load(f)['package']['name']
 
 binary_package_glob = os.path.join(
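
The only change in this file is an explicit text encoding. Without the encoding argument, open() decodes with the platform's locale encoding (for example cp1252 on Windows), which can misread UTF-8 content; with it, decoding is deterministic. A small sketch, assuming a hypothetical meta.yaml in the working directory:

path = "meta.yaml"  # hypothetical file, for illustration only

with open(path) as f:                      # before: locale-dependent decoding
    text_default = f.read()

with open(path, encoding="utf-8") as f:    # after: always UTF-8
    text_utf8 = f.read()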

doc/sphinxext/gen_rst.py

Lines changed: 22 additions & 53 deletions

@@ -7,7 +7,6 @@
 Files that generate images should start with 'plot'
 
 """
-from __future__ import division, print_function
 from time import time
 import ast
 import os
@@ -20,37 +19,19 @@
 import posixpath
 import subprocess
 import warnings
-import six
 
+from io import StringIO
+import pickle
+import urllib.request
+import urllib.error
+import urllib.parse
+from urllib.error import HTTPError, URLError
 
-# Try Python 2 first, otherwise load from Python 3
-try:
-    from StringIO import StringIO
-    import cPickle as pickle
-    import urllib2 as urllib
-    from urllib2 import HTTPError, URLError
-except ImportError:
-    from io import StringIO
-    import pickle
-    import urllib.request
-    import urllib.error
-    import urllib.parse
-    from urllib.error import HTTPError, URLError
-
-
-try:
-    # Python 2 built-in
-    execfile
-except NameError:
-    def execfile(filename, global_vars=None, local_vars=None):
-        with open(filename, encoding='utf-8') as f:
-            code = compile(f.read(), filename, 'exec')
-            exec(code, global_vars, local_vars)
 
-try:
-    basestring
-except NameError:
-    basestring = str
+def execfile(filename, global_vars=None, local_vars=None):
+    with open(filename, encoding='utf-8') as f:
+        code = compile(f.read(), filename, 'exec')
+        exec(code, global_vars, local_vars)
 
 import token
 import tokenize
@@ -93,13 +74,8 @@ def flush(self):
 def _get_data(url):
     """Helper function to get data over http or from a local file"""
     if url.startswith('http://'):
-        # Try Python 2, use Python 3 on exception
-        try:
-            resp = urllib.urlopen(url)
-            encoding = resp.headers.dict.get('content-encoding', 'plain')
-        except AttributeError:
-            resp = urllib.request.urlopen(url)
-            encoding = resp.headers.get('content-encoding', 'plain')
+        resp = urllib.request.urlopen(url)
+        encoding = resp.headers.get('content-encoding', 'plain')
         data = resp.read()
         if encoding == 'plain':
             pass
@@ -427,10 +403,8 @@ def resolve(self, cobj, this_url):
 def extract_docstring(filename, ignore_heading=False):
     """ Extract a module-level docstring, if any
     """
-    if six.PY2:
-        lines = open(filename).readlines()
-    else:
-        lines = open(filename, encoding='utf-8').readlines()
+    with open(filename, encoding='utf-8') as f:
+        lines = f.readlines()
     start_row = 0
     if lines[0].startswith('#!'):
         lines.pop(0)
@@ -526,10 +500,8 @@ def generate_example_rst(app):
 def extract_line_count(filename, target_dir):
     # Extract the line count of a file
     example_file = os.path.join(target_dir, filename)
-    if six.PY2:
-        lines = open(example_file).readlines()
-    else:
-        lines = open(example_file, encoding='utf-8').readlines()
+    with open(example_file, encoding='utf-8') as f:
+        lines = f.readlines()
     start_row = 0
     if lines and lines[0].startswith('#!'):
         lines.pop(0)
@@ -620,7 +592,7 @@ def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, se
 %s
 
 
-""" % open(os.path.join(src_dir, 'README.txt')).read())
+""" % open(os.path.join(src_dir, 'README.txt'), encoding='utf-8').read())
     if not os.path.exists(target_dir):
         os.makedirs(target_dir)
     sorted_listdir = line_count_sort(os.listdir(src_dir),
@@ -676,8 +648,8 @@ def make_thumbnail(in_fname, out_fname, width, height):
     import Image
     img = Image.open(in_fname)
     width_in, height_in = img.size
-    scale_w = width / float(width_in)
-    scale_h = height / float(height_in)
+    scale_w = width / width_in
+    scale_h = height / height_in
 
     if height_in * scale_w <= height:
         scale = scale_w
@@ -727,7 +699,7 @@ class NameFinder(ast.NodeVisitor):
     """
 
     def __init__(self):
-        super(NameFinder, self).__init__()
+        super().__init__()
         self.imported_names = {}
         self.accessed_names = set()
 
@@ -964,11 +936,8 @@ def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
         f.flush()
 
     # save variables so we can later add links to the documentation
-    if six.PY2:
-        example_code_obj = identify_names(open(example_file).read())
-    else:
-        example_code_obj = \
-            identify_names(open(example_file, encoding='utf-8').read())
+    with open(example_file, encoding='utf-8') as f:
+        example_code_obj = identify_names(f.read())
     if example_code_obj:
         codeobj_fname = example_file[:-3] + '_codeobj.pickle'
         with open(codeobj_fname, 'wb') as fid:
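
The Python 2 feature-detection blocks above collapse into a single execfile() helper that compiles a script and runs it in a caller-supplied namespace. A short usage sketch, assuming a hypothetical plot_example.py on disk:

def execfile(filename, global_vars=None, local_vars=None):
    with open(filename, encoding='utf-8') as f:
        code = compile(f.read(), filename, 'exec')
        exec(code, global_vars, local_vars)

namespace = {"__name__": "__main__"}       # namespace the script executes in
execfile("plot_example.py", namespace)     # hypothetical example script
print(sorted(namespace))                   # names the executed script defined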

doc/sphinxext/numpy_ext/docscrape.py

Lines changed: 2 additions & 6 deletions

@@ -7,11 +7,7 @@
 import re
 import pydoc
 from warnings import warn
-# Try Python 2 first, otherwise load from Python 3
-try:
-    from StringIO import StringIO
-except:
-    from io import StringIO
+from io import StringIO
 
 
 class Reader(object):
@@ -466,7 +462,7 @@ def __str__(self):
             out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
                                           func_name)
 
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        out += super().__str__(func_role=self._role)
         return out
 
 
doc/sphinxext/numpy_ext/docscrape_sphinx.py

Lines changed: 1 addition & 1 deletion

@@ -117,7 +117,7 @@ def _str_section(self, name):
     def _str_see_also(self, func_role):
         out = []
         if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            see_also = super()._str_see_also(func_role)
             out = ['.. seealso::', '']
             out += self._str_indent(see_also[2:])
         return out

doc/sphinxext/numpy_ext/numpydoc.py

Lines changed: 4 additions & 14 deletions

@@ -17,9 +17,6 @@
 
 """
 
-from __future__ import unicode_literals
-
-import sys  # Only needed to check Python version
 import os
 import re
 import pydoc
@@ -41,10 +38,7 @@ def mangle_docstrings(app, what, name, obj, options, lines,
         lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
     else:
         doc = get_doc_object(obj, what, "\n".join(lines), config=cfg)
-        if sys.version_info[0] < 3:
-            lines[:] = unicode(doc).splitlines()
-        else:
-            lines[:] = str(doc).splitlines()
+        lines[:] = str(doc).splitlines()
 
     if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
             obj.__name__:
@@ -104,12 +98,8 @@ def setup(app, get_doc_object_=get_doc_object):
     global get_doc_object
     get_doc_object = get_doc_object_
 
-    if sys.version_info[0] < 3:
-        app.connect(b'autodoc-process-docstring', mangle_docstrings)
-        app.connect(b'autodoc-process-signature', mangle_signature)
-    else:
-        app.connect('autodoc-process-docstring', mangle_docstrings)
-        app.connect('autodoc-process-signature', mangle_signature)
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('autodoc-process-signature', mangle_signature)
     app.add_config_value('numpydoc_edit_link', None, False)
     app.add_config_value('numpydoc_use_plots', None, False)
     app.add_config_value('numpydoc_show_class_members', True, True)
@@ -135,7 +125,7 @@ class ManglingDomainBase(object):
     directive_mangling_map = {}
 
     def __init__(self, *a, **kw):
-        super(ManglingDomainBase, self).__init__(*a, **kw)
+        super().__init__(*a, **kw)
         self.wrap_mangling_directives()
 
     def wrap_mangling_directives(self):

examples/plot_sparse_non_linear.py

Lines changed: 6 additions & 6 deletions

@@ -33,20 +33,20 @@ class SparseNonlinearClassifier(CDClassifier):
 
     def __init__(self, gamma=1e-2, C=1, alpha=1):
         self.gamma = gamma
-        super(SparseNonlinearClassifier, self).__init__(C=C,
-                                                        alpha=alpha,
-                                                        loss="squared_hinge",
-                                                        penalty="l1")
+        super().__init__(C=C,
+                         alpha=alpha,
+                         loss="squared_hinge",
+                         penalty="l1")
 
     def fit(self, X, y):
         K = rbf_kernel(X, gamma=self.gamma)
         self.X_train_ = X
-        super(SparseNonlinearClassifier, self).fit(K, y)
+        super().fit(K, y)
         return self
 
     def decision_function(self, X):
         K = rbf_kernel(X, self.X_train_, gamma=self.gamma)
-        return super(SparseNonlinearClassifier, self).decision_function(K)
+        return super().decision_function(K)
 
 
 def gen_non_lin_separable_data():
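
After the change, the example class relies on argument-free super() for the kernelized fit and decision_function calls. Roughly how the example exercises it, with synthetic data standing in for the example's own gen_non_lin_separable_data() (a sketch, not the file's exact code):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
y = np.where(X[:, 0] ** 2 + X[:, 1] ** 2 > 1.0, 1, -1)   # non-linear decision boundary

clf = SparseNonlinearClassifier(gamma=0.1, C=1, alpha=1)
clf.fit(X, y)                        # builds the RBF kernel and fits CDClassifier on it
scores = clf.decision_function(X)    # kernel expansion against the stored training set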

lightning/impl/adagrad.py

Lines changed: 1 addition & 2 deletions

@@ -4,7 +4,6 @@
 import numpy as np
 
 from sklearn.utils import check_random_state
-from six.moves import xrange
 
 from .base import BaseClassifier, BaseRegressor
 from .dataset_fast import get_dataset
@@ -38,7 +37,7 @@ def _fit(self, X, Y):
         loss = self._get_loss()
         n_calls = n_samples if self.n_calls is None else self.n_calls
 
-        for i in xrange(n_vectors):
+        for i in range(n_vectors):
             _adagrad_fit(self, ds, Y[:, i], self.coef_[i], self.g_sum_[i],
                          self.g_norms_[i], loss, self.eta, delta, alpha1,
                          alpha2, self.n_iter, self.shuffle, self.callback,
