diff options
author | Lorenz Diener <[email protected]> | 2017-06-16 01:23:19 +0200 |
---|---|---|
committer | Lorenz Diener <[email protected]> | 2017-06-16 01:23:19 +0200 |
commit | d4b37995fea40320c1971ea8bd747fc9ece9c368 (patch) | |
tree | 0ab58a45134b99e93c9302f6bd13a39a1641f5d3 /mastodon | |
parent | 8e202fbdc069bcb8171b69b73c8ecac3dd043707 (diff) | |
download | mastodon.py-d4b37995fea40320c1971ea8bd747fc9ece9c368.tar.gz |
Pagination
Diffstat (limited to 'mastodon')
-rw-r--r-- | mastodon/Mastodon.py | 104 |
1 files changed, 102 insertions, 2 deletions
diff --git a/mastodon/Mastodon.py b/mastodon/Mastodon.py index be6ea3f..26294f7 100644 --- a/mastodon/Mastodon.py +++ b/mastodon/Mastodon.py | |||
@@ -14,10 +14,12 @@ import requests | |||
14 | from requests.models import urlencode | 14 | from requests.models import urlencode |
15 | import dateutil | 15 | import dateutil |
16 | import dateutil.parser | 16 | import dateutil.parser |
17 | import re | ||
18 | import copy | ||
17 | 19 | ||
18 | class Mastodon: | 20 | class Mastodon: |
19 | """ | 21 | """ |
20 | Super basic but thorough and easy to use mastodon.social | 22 | Super basic but thorough and easy to use Mastodon |
21 | api wrapper in python. | 23 | api wrapper in python. |
22 | 24 | ||
23 | If anything is unclear, check the official API docs at | 25 | If anything is unclear, check the official API docs at |
@@ -744,6 +746,76 @@ class Mastodon: | |||
744 | return self.__api_request('DELETE', '/api/v1/domain_blocks', params) | 746 | return self.__api_request('DELETE', '/api/v1/domain_blocks', params) |
745 | 747 | ||
746 | ### | 748 | ### |
749 | # Pagination | ||
750 | ### | ||
751 | def fetch_next(self, previous_page): | ||
752 | """ | ||
753 | Fetches the next page of results of a paginated request. Pass in the | ||
754 | previous page in its entirety, or the pagination information dict | ||
755 | returned as a part of that page's last status ('_pagination_next'). | ||
756 | |||
757 | Returns the next page or None if no further data is available. | ||
758 | """ | ||
759 | if isinstance(previous_page, list): | ||
760 | if '_pagination_next' in previous_page[-1]: | ||
761 | params = previous_page[-1]['_pagination_next'] | ||
762 | else: | ||
763 | return None | ||
764 | else: | ||
765 | params = previous_page | ||
766 | |||
767 | method = params['_pagination_method'] | ||
768 | del params['_pagination_method'] | ||
769 | |||
770 | endpoint = params['_pagination_endpoint'] | ||
771 | del params['_pagination_endpoint'] | ||
772 | |||
773 | return self.__api_request(method, endpoint, params) | ||
774 | |||
775 | def fetch_previous(self, next_page): | ||
776 | """ | ||
777 | Fetches the previous page of results of a paginated request. Pass in the | ||
778 | previous page in its entirety, or the pagination information dict | ||
779 | returned as a part of that page's first status ('_pagination_prev'). | ||
780 | |||
781 | Returns the previous page or None if no further data is available. | ||
782 | """ | ||
783 | if isinstance(next_page, list): | ||
784 | if '_pagination_prev' in next_page[0]: | ||
785 | params = next_page[0]['_pagination_prev'] | ||
786 | else: | ||
787 | return None | ||
788 | else: | ||
789 | params = next_page | ||
790 | |||
791 | method = params['_pagination_method'] | ||
792 | del params['_pagination_method'] | ||
793 | |||
794 | endpoint = params['_pagination_endpoint'] | ||
795 | del params['_pagination_endpoint'] | ||
796 | |||
797 | return self.__api_request(method, endpoint, params) | ||
798 | |||
799 | def fetch_remaining(self, first_page): | ||
800 | """ | ||
801 | Fetches all the remaining pages of a paginated request starting from a | ||
802 | first page and returns the entire set of results (including the first page | ||
803 | that was passed in) as a big list. | ||
804 | |||
805 | Be careful, as this might generate a lot of requests, depending on what you are | ||
806 | fetching, and might cause you to run into rate limits very quickly. | ||
807 | """ | ||
808 | first_page = copy.deepcopy(first_page) | ||
809 | |||
810 | all_pages = [] | ||
811 | current_page = first_page | ||
812 | while current_page != None: | ||
813 | all_pages.extend(current_page) | ||
814 | current_page = self.fetch_next(current_page) | ||
815 | |||
816 | return all_pages | ||
817 | |||
818 | ### | ||
747 | # Streaming | 819 | # Streaming |
748 | ### | 820 | ### |
749 | def user_stream(self, listener): | 821 | def user_stream(self, listener): |
@@ -786,7 +858,7 @@ class Mastodon: | |||
786 | incoming events. | 858 | incoming events. |
787 | """ | 859 | """ |
788 | return self.__stream('/api/v1/streaming/hashtag', listener, params={'tag': tag}) | 860 | return self.__stream('/api/v1/streaming/hashtag', listener, params={'tag': tag}) |
789 | 861 | ||
790 | ### | 862 | ### |
791 | # Internal helpers, dragons probably | 863 | # Internal helpers, dragons probably |
792 | ### | 864 | ### |
@@ -884,6 +956,34 @@ class Mastodon: | |||
884 | except: | 956 | except: |
885 | raise MastodonAPIError("Could not parse response as JSON, response code was %s, bad json content was '%s'" % (response_object.status_code, response_object.content)) | 957 | raise MastodonAPIError("Could not parse response as JSON, response code was %s, bad json content was '%s'" % (response_object.status_code, response_object.content)) |
886 | 958 | ||
959 | # Parse link headers | ||
960 | if isinstance(response, list) and 'Link' in response_object.headers: | ||
961 | tmp_urls = requests.utils.parse_header_links(response_object.headers['Link'].rstrip('>').replace('>,<', ',<')) | ||
962 | for url in tmp_urls: | ||
963 | if url['rel'] == 'next': | ||
964 | # Be paranoid and extract max_id specifically | ||
965 | next_url = url['url'] | ||
966 | matchgroups = re.search(r"max_id=([0-9]*)", next_url) | ||
967 | |||
968 | if matchgroups: | ||
969 | next_params = copy.deepcopy(params) | ||
970 | next_params['_pagination_method'] = method | ||
971 | next_params['_pagination_endpoint'] = endpoint | ||
972 | next_params['max_id'] = int(matchgroups.group(1)) | ||
973 | response[-1]['_pagination_next'] = next_params | ||
974 | |||
975 | if url['rel'] == 'prev': | ||
976 | # Be paranoid and extract since_id specifically | ||
977 | prev_url = url['url'] | ||
978 | matchgroups = re.search(r"since_id=([0-9]*)", prev_url) | ||
979 | |||
980 | if matchgroups: | ||
981 | prev_params = copy.deepcopy(params) | ||
982 | prev_params['_pagination_method'] = method | ||
983 | prev_params['_pagination_endpoint'] = endpoint | ||
984 | prev_params['since_id'] = int(matchgroups.group(1)) | ||
985 | response[0]['_pagination_prev'] = prev_params | ||
986 | |||
887 | # Handle rate limiting | 987 | # Handle rate limiting |
888 | if 'X-RateLimit-Remaining' in response_object.headers and do_ratelimiting: | 988 | if 'X-RateLimit-Remaining' in response_object.headers and do_ratelimiting: |
889 | self.ratelimit_remaining = int(response_object.headers['X-RateLimit-Remaining']) | 989 | self.ratelimit_remaining = int(response_object.headers['X-RateLimit-Remaining']) |