Skip to content

Commit 788135e

Browse files
authored
Merge branch 'Linusp:master' into master
2 parents 24537bf + 0bff781 commit 788135e

File tree

13 files changed

+336
-226
lines changed

13 files changed

+336
-226
lines changed

.github/workflows/pythonpackage.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ jobs:
99
strategy:
1010
max-parallel: 4
1111
matrix:
12-
python-version: [2.7, 3.5, 3.6, 3.7]
12+
python-version: [3.x]
1313

1414
steps:
1515
- uses: actions/checkout@v1

CHANGELOG.md

Lines changed: 61 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,70 @@
11
# CHANGELOG
22

3+
## v0.4.6
4+
5+
Added
6+
7+
- New methods:
8+
9+
- `InoreaderClient.remove_general_label`
10+
- `InoreaderClient.remove_tag`
11+
- `InoreaderClient.remove_read`
12+
- `InoreaderClient.remove_starred`
13+
- `InoreaderClient.remove_liked`
14+
15+
thanks to [tianchen zhong](https://github.com/cczhong11)
16+
17+
Changed
18+
19+
- Add param to `inoreader.main.get_client` for customizing the config file path, thanks to [tianchen zhong](https://github.com/cczhong11)
20+
- Command filter supported a new action `unstar`
21+
22+
Fixed
23+
24+
- Fix token in refresh_access_token method, thanks to [Torikova](https://github.com/Torikova)
25+
326
## v0.4.5
427

528
Changed
629

7-
- fix an error in `client.py`
30+
- Fix `InoreaderClient.__get_stream_contents`, thanks to [BeautyYuYanli](https://github.com/BeautyYuYanli)
31+
32+
## v0.4.4
33+
34+
Changed
35+
36+
- Disable default app id and key due to abuse
37+
38+
## v0.4.3
39+
40+
Fixed
41+
42+
- Fix endless loop bug in `InoreaderClient.fetch_articles`
43+
44+
## v0.4.2
45+
46+
Added
47+
48+
- New functions:
49+
50+
- `inoreader.utils.download_image`
51+
52+
- New methods:
53+
54+
- `InoreaderClient.fetch_articles`
55+
- `InoreaderClient.fetch_starred`
56+
57+
- New command: `fetch-starred`
58+
59+
Changed
60+
61+
- Optimized article content parsing
62+
63+
## v0.4.1
64+
65+
Added
66+
67+
- New config `proxies`
868

969
## v0.4.0
1070

inoreader/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
# coding: utf-8
22
from .client import InoreaderClient
33

4-
54
__all__ = ['InoreaderClient']

inoreader/article.py

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,23 @@
11
# coding: utf-8
22
from __future__ import print_function, unicode_literals
33

4-
from .utils import normalize_whitespace, extract_text
4+
from .utils import extract_text, normalize_whitespace
55

66

77
class Article(object):
8-
def __init__(self, id, title, categories, link,
9-
published=None, content=None, author=None,
10-
feed_id=None, feed_title=None, feed_link=None):
8+
def __init__(
9+
self,
10+
id,
11+
title,
12+
categories,
13+
link,
14+
published=None,
15+
content=None,
16+
author=None,
17+
feed_id=None,
18+
feed_title=None,
19+
feed_link=None,
20+
):
1121
self.id = id
1222
self.title = normalize_whitespace(title)
1323
self.categories = categories
@@ -34,10 +44,12 @@ def from_json(cls, data):
3444
article_data['link'] = links[0] if links else ''
3545

3646
# feed info
37-
article_data.update({
38-
'feed_id': data['origin']['streamId'],
39-
'feed_title': normalize_whitespace(data['origin']['title']),
40-
'feed_link': data['origin']['htmlUrl'],
41-
})
47+
article_data.update(
48+
{
49+
'feed_id': data['origin']['streamId'],
50+
'feed_title': normalize_whitespace(data['origin']['title']),
51+
'feed_link': data['origin']['htmlUrl'],
52+
}
53+
)
4254

4355
return cls(**article_data)

inoreader/client.py

Lines changed: 42 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,24 @@
22
from __future__ import print_function, unicode_literals
33

44
import logging
5-
from uuid import uuid4
65
from datetime import datetime
76
from operator import itemgetter
8-
try: # python2
9-
from urlparse import urljoin
7+
from uuid import uuid4
8+
9+
try: # python2
1010
from urllib import quote_plus
11-
except ImportError: # python3
11+
12+
from urlparse import urljoin
13+
except ImportError: # python3
1214
from urllib.parse import urljoin, quote_plus
1315

1416
import requests
1517

16-
from .consts import BASE_URL
17-
from .exception import NotLoginError, APIError
1818
from .article import Article
19+
from .consts import BASE_URL
20+
from .exception import APIError, NotLoginError
1921
from .subscription import Subscription
2022

21-
2223
LOGGER = logging.getLogger(__name__)
2324

2425

@@ -39,19 +40,22 @@ class InoreaderClient(object):
3940
LIKED_TAG = 'user/-/state/com.google/like'
4041
BROADCAST_TAG = 'user/-/state/com.google/broadcast'
4142

42-
def __init__(self, app_id, app_key, access_token, refresh_token,
43-
expires_at, config_manager=None):
43+
def __init__(
44+
self, app_id, app_key, access_token, refresh_token, expires_at, config_manager=None
45+
):
4446
self.app_id = app_id
4547
self.app_key = app_key
4648
self.access_token = access_token
4749
self.refresh_token = refresh_token
4850
self.expires_at = float(expires_at)
4951
self.session = requests.Session()
50-
self.session.headers.update({
51-
'AppId': self.app_id,
52-
'AppKey': self.app_key,
53-
'Authorization': 'Bearer {}'.format(self.access_token)
54-
})
52+
self.session.headers.update(
53+
{
54+
'AppId': self.app_id,
55+
'AppKey': self.app_key,
56+
'Authorization': 'Bearer {}'.format(self.access_token),
57+
}
58+
)
5559
self.config_manager = config_manager
5660
self.proxies = self.config_manager.proxies if config_manager else None
5761

@@ -81,7 +85,7 @@ def refresh_access_token(self):
8185
self.access_token = response['access_token']
8286
self.refresh_token = response['refresh_token']
8387
self.expires_at = datetime.now().timestamp() + response['expires_in']
84-
self.session.headers['Authorization'] = 'Bear {}'.format(self.access_token)
88+
self.session.headers['Authorization'] = 'Bearer {}'.format(self.access_token)
8589

8690
if self.config_manager:
8791
self.config_manager.access_token = self.access_token
@@ -151,12 +155,7 @@ def __get_stream_contents(self, stream_id, continuation=''):
151155
self.check_token()
152156

153157
url = urljoin(BASE_URL, self.STREAM_CONTENTS_PATH + quote_plus(stream_id))
154-
params = {
155-
'n': 50, # default 20, max 1000
156-
'r': '',
157-
'c': continuation,
158-
'output': 'json'
159-
}
158+
params = {'n': 50, 'r': '', 'c': continuation, 'output': 'json'} # default 20, max 1000
160159
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))
161160
if 'continuation' in response:
162161
return response['items'], response['continuation']
@@ -168,10 +167,7 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, lim
168167

169168
url = urljoin(BASE_URL, self.STREAM_CONTENTS_PATH)
170169
if folder:
171-
url = urljoin(
172-
url,
173-
quote_plus(self.GENERAL_TAG_TEMPLATE.format(folder))
174-
)
170+
url = urljoin(url, quote_plus(self.GENERAL_TAG_TEMPLATE.format(folder)))
175171

176172
params = {'c': str(uuid4())}
177173
if unread:
@@ -183,10 +179,13 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, lim
183179
fetched_count = 0
184180
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))
185181
for data in response['items']:
186-
categories = set([
187-
category.split('/')[-1] for category in data.get('categories', [])
188-
if category.find('label') > 0
189-
])
182+
categories = set(
183+
[
184+
category.split('/')[-1]
185+
for category in data.get('categories', [])
186+
if category.find('label') > 0
187+
]
188+
)
190189
if tags and not categories.issuperset(set(tags)):
191190
continue
192191

@@ -202,10 +201,13 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, lim
202201
self.session.post(url, params=params, proxies=self.proxies)
203202
)
204203
for data in response['items']:
205-
categories = set([
206-
category.split('/')[-1] for category in data.get('categories', [])
207-
if category.find('label') > 0
208-
])
204+
categories = set(
205+
[
206+
category.split('/')[-1]
207+
for category in data.get('categories', [])
208+
if category.find('label') > 0
209+
]
210+
)
209211
if tags and not categories.issuperset(set(tags)):
210212
continue
211213
yield Article.from_json(data)
@@ -229,30 +231,22 @@ def add_general_label(self, articles, label):
229231
url = urljoin(BASE_URL, self.EDIT_TAG_PATH)
230232
for start in range(0, len(articles), 10):
231233
end = min(start + 10, len(articles))
232-
params = {
233-
'a': label,
234-
'i': [articles[idx].id for idx in range(start, end)]
235-
}
234+
params = {'a': label, 'i': [articles[idx].id for idx in range(start, end)]}
236235
self.parse_response(
237-
self.session.post(url, params=params, proxies=self.proxies),
238-
json_data=False
236+
self.session.post(url, params=params, proxies=self.proxies), json_data=False
239237
)
240-
238+
241239
def remove_general_label(self, articles, label):
242240
self.check_token()
243241

244242
url = urljoin(BASE_URL, self.EDIT_TAG_PATH)
245243
for start in range(0, len(articles), 10):
246244
end = min(start + 10, len(articles))
247-
params = {
248-
'r': label,
249-
'i': [articles[idx].id for idx in range(start, end)]
250-
}
245+
params = {'r': label, 'i': [articles[idx].id for idx in range(start, end)]}
251246
self.parse_response(
252-
self.session.post(url, params=params, proxies=self.proxies),
253-
json_data=False
247+
self.session.post(url, params=params, proxies=self.proxies), json_data=False
254248
)
255-
249+
256250
def add_tag(self, articles, tag):
257251
self.add_general_label(articles, self.GENERAL_TAG_TEMPLATE.format(tag))
258252

@@ -276,6 +270,6 @@ def remove_starred(self, articles):
276270

277271
def remove_liked(self, articles):
278272
self.remove_general_label(articles, self.LIKED_TAG)
279-
273+
280274
def broadcast(self, articles):
281275
self.add_general_label(articles, self.BROADCAST_TAG)

inoreader/config.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,12 @@
11
# coding: utf-8
22
from __future__ import print_function, unicode_literals
33

4+
import codecs
45
import os
56
from configparser import ConfigParser
67

7-
import codecs
8-
9-
10-
class InoreaderConfigManager():
118

9+
class InoreaderConfigManager:
1210
def __init__(self, config_file):
1311
self.config_file = config_file
1412
self.data = {}

inoreader/filter.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import re
22

3-
43
_FILTERS = {}
54

65

@@ -17,7 +16,6 @@ def wrap(cls):
1716

1817
@register_filter('include_any')
1918
class IncludeAnyFilter(object):
20-
2119
def __init__(self, rules):
2220
self.rules = [re.compile(regexp, re.IGNORECASE) for regexp in rules]
2321

@@ -31,7 +29,6 @@ def validate(self, text):
3129

3230
@register_filter('include_all')
3331
class IncludeAllFilter(object):
34-
3532
def __init__(self, rules):
3633
self.rules = [re.compile(regexp, re.IGNORECASE) for regexp in rules]
3734

@@ -45,7 +42,6 @@ def validate(self, text):
4542

4643
@register_filter('exclude')
4744
class ExcludeFilter(object):
48-
4945
def __init__(self, rules):
5046
self.rules = [re.compile(regexp, re.IGNORECASE) for regexp in rules]
5147

0 commit comments

Comments
 (0)