diff options
-rw-r--r-- | Pipfile | 1 | ||||
-rw-r--r-- | Pipfile.lock | 8 | ||||
-rw-r--r-- | docs/index.rst | 25 | ||||
-rw-r--r-- | mastodon/Mastodon.py | 23 | ||||
-rw-r--r-- | setup.py | 2 | ||||
-rw-r--r-- | tests/test_errors.py | 20 | ||||
-rw-r--r-- | tests/test_pagination.py | 20 |
7 files changed, 89 insertions, 10 deletions
@@ -13,3 +13,4 @@ pytest-cov = "*" | |||
13 | vcrpy = "*" | 13 | vcrpy = "*" |
14 | pytest-vcr = "<1" | 14 | pytest-vcr = "<1" |
15 | pytest-mock = "*" | 15 | pytest-mock = "*" |
16 | requests-mock = "*" | ||
diff --git a/Pipfile.lock b/Pipfile.lock index d1939fc..db1193b 100644 --- a/Pipfile.lock +++ b/Pipfile.lock | |||
@@ -313,6 +313,14 @@ | |||
313 | ], | 313 | ], |
314 | "version": "==2.20.1" | 314 | "version": "==2.20.1" |
315 | }, | 315 | }, |
316 | "requests-mock": { | ||
317 | "hashes": [ | ||
318 | "sha256:7a5fa99db5e3a2a961b6f20ed40ee6baeff73503cf0a553cc4d679409e6170fb", | ||
319 | "sha256:8ca0628dc66d3f212878932fd741b02aa197ad53fd2228164800a169a4a826af" | ||
320 | ], | ||
321 | "index": "pypi", | ||
322 | "version": "==1.5.2" | ||
323 | }, | ||
316 | "six": { | 324 | "six": { |
317 | "hashes": [ | 325 | "hashes": [ |
318 | "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", | 326 | "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", |
diff --git a/docs/index.rst b/docs/index.rst index 808e563..515e427 100644 --- a/docs/index.rst +++ b/docs/index.rst | |||
@@ -71,6 +71,26 @@ for applications that need to handle all rate limiting themselves (i.e. interact | |||
71 | or applications wanting to use Mastodon.py in a multi-threaded context ("wait" and "pace" | 71 | or applications wanting to use Mastodon.py in a multi-threaded context ("wait" and "pace" |
72 | modes are not thread safe). | 72 | modes are not thread safe). |
73 | 73 | ||
74 | .. note:: | ||
75 | Rate limit information is available on the `Mastodon` object for applications that | ||
76 | implement their own rate limit handling. | ||
77 | |||
78 | .. attribute:: Mastodon.ratelimit_remaining | ||
79 | |||
80 | Number of requests allowed until the next reset. | ||
81 | |||
82 | .. attribute:: Mastodon.ratelimit_reset | ||
83 | |||
84 | Time at which the rate limit will next be reset, as a POSIX timestamp. | ||
85 | |||
86 | .. attribute:: Mastodon.ratelimit_limit | ||
87 | |||
88 | Total number of requests allowed between resets. Typically 300. | ||
89 | |||
90 | .. attribute:: Mastodon.ratelimit_lastcall | ||
91 | |||
92 | Time at which these values were last seen and updated, as a POSIX timestamp. | ||
93 | |||
74 | In "wait" mode, once a request hits the rate limit, Mastodon.py will wait until | 94 | In "wait" mode, once a request hits the rate limit, Mastodon.py will wait until |
75 | the rate limit resets and then try again, until the request succeeds or an error | 95 | the rate limit resets and then try again, until the request succeeds or an error |
76 | is encountered. This mode is for applications that would rather just not worry about rate limits | 96 | is encountered. This mode is for applications that would rather just not worry about rate limits |
@@ -91,9 +111,8 @@ minute time slot, and tighter limits on logins. Mastodon.py does not make any ef | |||
91 | to respect these. | 111 | to respect these. |
92 | 112 | ||
93 | If your application requires many hits to endpoints that are available without logging | 113 | If your application requires many hits to endpoints that are available without logging |
94 | in, do consider using Mastodon.py without authenticating to get the full per-IP limit. In | 114 | in, do consider using Mastodon.py without authenticating to get the full per-IP limit. |
95 | this case, you can set the Mastodon objects `ratelimit_limit` and `ratelimit_remaining` | 115 | |
96 | properties appropriately if you want to use advanced rate limit handling. | ||
97 | 116 | ||
98 | A note about pagination | 117 | A note about pagination |
99 | ----------------------- | 118 | ----------------------- |
diff --git a/mastodon/Mastodon.py b/mastodon/Mastodon.py index 27c98af..f55d4bb 100644 --- a/mastodon/Mastodon.py +++ b/mastodon/Mastodon.py | |||
@@ -2214,9 +2214,12 @@ class Mastodon: | |||
2214 | if not response_object.ok: | 2214 | if not response_object.ok: |
2215 | try: | 2215 | try: |
2216 | response = response_object.json(object_hook=self.__json_hooks) | 2216 | response = response_object.json(object_hook=self.__json_hooks) |
2217 | if not isinstance(response, dict) or 'error' not in response: | 2217 | if isinstance(response, dict) and 'error' in response: |
2218 | error_msg = response['error'] | ||
2219 | elif isinstance(response, str): | ||
2220 | error_msg = response | ||
2221 | else: | ||
2218 | error_msg = None | 2222 | error_msg = None |
2219 | error_msg = response['error'] | ||
2220 | except ValueError: | 2223 | except ValueError: |
2221 | error_msg = None | 2224 | error_msg = None |
2222 | 2225 | ||
@@ -2274,13 +2277,17 @@ class Mastodon: | |||
2274 | if url['rel'] == 'next': | 2277 | if url['rel'] == 'next': |
2275 | # Be paranoid and extract max_id specifically | 2278 | # Be paranoid and extract max_id specifically |
2276 | next_url = url['url'] | 2279 | next_url = url['url'] |
2277 | matchgroups = re.search(r"max_id=([0-9]*)", next_url) | 2280 | matchgroups = re.search(r"[?&]max_id=([^&]+)", next_url) |
2278 | 2281 | ||
2279 | if matchgroups: | 2282 | if matchgroups: |
2280 | next_params = copy.deepcopy(params) | 2283 | next_params = copy.deepcopy(params) |
2281 | next_params['_pagination_method'] = method | 2284 | next_params['_pagination_method'] = method |
2282 | next_params['_pagination_endpoint'] = endpoint | 2285 | next_params['_pagination_endpoint'] = endpoint |
2283 | next_params['max_id'] = int(matchgroups.group(1)) | 2286 | max_id = matchgroups.group(1) |
2287 | if max_id.isdigit(): | ||
2288 | next_params['max_id'] = int(max_id) | ||
2289 | else: | ||
2290 | next_params['max_id'] = max_id | ||
2284 | if "since_id" in next_params: | 2291 | if "since_id" in next_params: |
2285 | del next_params['since_id'] | 2292 | del next_params['since_id'] |
2286 | response[-1]._pagination_next = next_params | 2293 | response[-1]._pagination_next = next_params |
@@ -2288,13 +2295,17 @@ class Mastodon: | |||
2288 | if url['rel'] == 'prev': | 2295 | if url['rel'] == 'prev': |
2289 | # Be paranoid and extract since_id specifically | 2296 | # Be paranoid and extract since_id specifically |
2290 | prev_url = url['url'] | 2297 | prev_url = url['url'] |
2291 | matchgroups = re.search(r"since_id=([0-9]*)", prev_url) | 2298 | matchgroups = re.search(r"[?&]since_id=([^&]+)", prev_url) |
2292 | 2299 | ||
2293 | if matchgroups: | 2300 | if matchgroups: |
2294 | prev_params = copy.deepcopy(params) | 2301 | prev_params = copy.deepcopy(params) |
2295 | prev_params['_pagination_method'] = method | 2302 | prev_params['_pagination_method'] = method |
2296 | prev_params['_pagination_endpoint'] = endpoint | 2303 | prev_params['_pagination_endpoint'] = endpoint |
2297 | prev_params['since_id'] = int(matchgroups.group(1)) | 2304 | since_id = matchgroups.group(1) |
2305 | if since_id.isdigit(): | ||
2306 | prev_params['since_id'] = int(since_id) | ||
2307 | else: | ||
2308 | prev_params['since_id'] = since_id | ||
2298 | if "max_id" in prev_params: | 2309 | if "max_id" in prev_params: |
2299 | del prev_params['max_id'] | 2310 | del prev_params['max_id'] |
2300 | response[0]._pagination_prev = prev_params | 2311 | response[0]._pagination_prev = prev_params |
@@ -1,6 +1,6 @@ | |||
1 | from setuptools import setup | 1 | from setuptools import setup |
2 | 2 | ||
3 | test_deps = ['pytest', 'pytest-runner', 'pytest-cov', 'vcrpy', 'pytest-vcr', 'pytest-mock'] | 3 | test_deps = ['pytest', 'pytest-runner', 'pytest-cov', 'vcrpy', 'pytest-vcr', 'pytest-mock', 'requests-mock'] |
4 | extras = { | 4 | extras = { |
5 | "test": test_deps | 5 | "test": test_deps |
6 | } | 6 | } |
diff --git a/tests/test_errors.py b/tests/test_errors.py new file mode 100644 index 0000000..7329507 --- /dev/null +++ b/tests/test_errors.py | |||
@@ -0,0 +1,20 @@ | |||
1 | import pytest | ||
2 | from mastodon.Mastodon import MastodonAPIError | ||
3 | |||
4 | try: | ||
5 | from mock import MagicMock | ||
6 | except ImportError: | ||
7 | from unittest.mock import MagicMock | ||
8 | |||
9 | def test_nonstandard_errors(api): | ||
10 | response = MagicMock() | ||
11 | response.json = MagicMock(return_value= | ||
12 | "I am a non-standard instance and this error is a plain string.") | ||
13 | response.ok = False | ||
14 | session = MagicMock() | ||
15 | session.request = MagicMock(return_value=response) | ||
16 | |||
17 | api.session = session | ||
18 | with pytest.raises(MastodonAPIError): | ||
19 | api.instance() | ||
20 | |||
diff --git a/tests/test_pagination.py b/tests/test_pagination.py index 599b2f4..d2c0bd5 100644 --- a/tests/test_pagination.py +++ b/tests/test_pagination.py | |||
@@ -1,5 +1,10 @@ | |||
1 | import pytest | 1 | import pytest |
2 | from contextlib import contextmanager | 2 | from contextlib import contextmanager |
3 | try: | ||
4 | from mock import MagicMock | ||
5 | except ImportError: | ||
6 | from unittest.mock import MagicMock | ||
7 | import requests_mock | ||
3 | 8 | ||
4 | UNLIKELY_HASHTAG = "fgiztsshwiaqqiztpmmjbtvmescsculuvmgjgopwoeidbcrixp" | 9 | UNLIKELY_HASHTAG = "fgiztsshwiaqqiztpmmjbtvmescsculuvmgjgopwoeidbcrixp" |
5 | 10 | ||
@@ -44,3 +49,18 @@ def test_fetch_remaining(api): | |||
44 | hashtag_remaining = api.fetch_remaining(hashtag) | 49 | hashtag_remaining = api.fetch_remaining(hashtag) |
45 | assert hashtag_remaining | 50 | assert hashtag_remaining |
46 | assert len(hashtag_remaining) >= 30 | 51 | assert len(hashtag_remaining) >= 30 |
52 | |||
53 | def test_link_headers(api): | ||
54 | rmock = requests_mock.Adapter() | ||
55 | api.session.mount(api.api_base_url, rmock) | ||
56 | |||
57 | _id='abc1234' | ||
58 | |||
59 | rmock.register_uri('GET', requests_mock.ANY, json=[{"foo": "bar"}], headers={"link":""" | ||
60 | <{base}/api/v1/timelines/tag/{tag}?max_id={_id}>; rel="next", <{base}/api/v1/timelines/tag/{tag}?since_id={_id}>; rel="prev" | ||
61 | """.format(base=api.api_base_url, tag=UNLIKELY_HASHTAG, _id=_id).strip() | ||
62 | }) | ||
63 | |||
64 | resp = api.timeline_hashtag(UNLIKELY_HASHTAG) | ||
65 | assert resp[0]._pagination_next['max_id'] == _id | ||
66 | assert resp[0]._pagination_prev['since_id'] == _id | ||