@@ -27,17 +27,23 @@ def request(self, *args, **kwargs):
2727
2828
2929class ScrapinghubClient (object ):
30- """Main class to work with Scrapinghub API.
30+ """Main class to work with the Scrapy Cloud API.
3131
32- :param auth: (optional) Scrapinghub APIKEY or other SH auth credentials.
33- If not provided, it will read, respectively, from
32+ :param auth: (optional) Scrapy Cloud API key or other Scrapy Cloud auth
33+ credentials. If not provided, it will read, respectively, from
3434 ``SH_APIKEY`` or ``SHUB_JOBAUTH`` environment variables.
3535    ``SHUB_JOBAUTH`` is available by default in *Scrapy Cloud*, but it does
3636    not provide access to all endpoints (e.g. job scheduling); it only
3737    allows access to job data, collections, and the crawl frontier.
3838 If you need full access to *Scrapy Cloud* features, you'll need to
39- provide a Scrapinghub APIKEY through this argument or deploying ``SH_APIKEY``.
40- :param dash_endpoint: (optional) Scrapinghub Dash panel url.
39+ provide a Scrapy Cloud API key through this argument or by deploying
40+ ``SH_APIKEY``.
41+ :param dash_endpoint: (optional) Scrapy Cloud API URL.
42+ If not provided, it will be read from the ``SHUB_APIURL`` environment
43+ variable, or fall back to ``"https://app.zyte.com/api/"``.
44+ :param endpoint: (optional) Scrapy Cloud storage API URL.
45+ If not provided, it will be read from the ``SHUB_STORAGE`` environment
46+ variable, or fall back to ``"https://storage.scrapinghub.com/"``.
4147 :param \*\*kwargs: (optional) Additional arguments for
4248 :class:`~scrapinghub.hubstorage.HubstorageClient` constructor.
4349
0 commit comments