diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml
index 4d08483e..1f1c959f 100644
--- a/.github/workflows/create-releases.yml
+++ b/.github/workflows/create-releases.yml
@@ -1,5 +1,7 @@
name: Create releases
on:
+ schedule:
+ - cron: '0 5 * * *' # every day at 5am UTC
push:
branches:
- main
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 34dc535b..6d78745c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.8.2"
+ ".": "0.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index a78b7ab0..c125dfb2 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 19
+configured_endpoints: 23
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9038084..e5825e3a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,51 @@
# Changelog
+## 0.9.0 (2023-12-17)
+
+Full Changelog: [v0.8.2...v0.9.0](https://github.com/Finch-API/finch-api-python/compare/v0.8.2...v0.9.0)
+
+### Features
+
+* **api:** add `/jobs` endpoints ([#210](https://github.com/Finch-API/finch-api-python/issues/210)) ([cd3cc6f](https://github.com/Finch-API/finch-api-python/commit/cd3cc6febbb46537d6694303c52fbfa266e762cd))
+* **api:** add `client_type` and `connection_type` to introspection ([#211](https://github.com/Finch-API/finch-api-python/issues/211)) ([d354f0a](https://github.com/Finch-API/finch-api-python/commit/d354f0a501deeea8090c0e2a6200302fe40a7638))
+* **api:** add `lp` tax payer type enum value ([#218](https://github.com/Finch-API/finch-api-python/issues/218)) ([d0f1493](https://github.com/Finch-API/finch-api-python/commit/d0f1493c4bbbabe9fa9b7c5c3ef55588820ba87b))
+* **pagination:** remove unused types ([#215](https://github.com/Finch-API/finch-api-python/issues/215)) ([9adf490](https://github.com/Finch-API/finch-api-python/commit/9adf490d6b23ddb642eb630be867690a8f8a8693))
+
+
+### Bug Fixes
+
+* avoid leaking memory when Client.with_options is used ([#220](https://github.com/Finch-API/finch-api-python/issues/220)) ([c634771](https://github.com/Finch-API/finch-api-python/commit/c6347717feebc43eb69d191aeee3369aa010e0a0))
+* **client:** correct base_url setter implementation ([#207](https://github.com/Finch-API/finch-api-python/issues/207)) ([5e04fa7](https://github.com/Finch-API/finch-api-python/commit/5e04fa773d4603d0be73aa67268b17fdc4f6fed2))
+* **client:** ensure retried requests are closed ([#204](https://github.com/Finch-API/finch-api-python/issues/204)) ([0659932](https://github.com/Finch-API/finch-api-python/commit/0659932aaaa69850982e6acbe076a4af7980efcd))
+* **errors:** properly assign APIError.body ([#219](https://github.com/Finch-API/finch-api-python/issues/219)) ([ee8fb39](https://github.com/Finch-API/finch-api-python/commit/ee8fb3977610c5a7578353042553665a07943b42))
+
+
+### Chores
+
+* **ci:** run release workflow once per day ([#226](https://github.com/Finch-API/finch-api-python/issues/226)) ([7147466](https://github.com/Finch-API/finch-api-python/commit/7147466ca5ce520e88fb392bd2d84be99a03e2b7))
+* **internal:** add tests for proxy change ([#203](https://github.com/Finch-API/finch-api-python/issues/203)) ([b5b9f79](https://github.com/Finch-API/finch-api-python/commit/b5b9f79c19e846e2e8fc3fd1faf905a65a6aa7c4))
+* **internal:** enable more lint rules ([#217](https://github.com/Finch-API/finch-api-python/issues/217)) ([61018d4](https://github.com/Finch-API/finch-api-python/commit/61018d4794ec858953dc0a3746054dbc04807ff9))
+* **internal:** reformat imports ([#213](https://github.com/Finch-API/finch-api-python/issues/213)) ([81a6e2f](https://github.com/Finch-API/finch-api-python/commit/81a6e2f22a5c5fe862c693a0b5b2affda78d88af))
+* **internal:** reformat imports ([#216](https://github.com/Finch-API/finch-api-python/issues/216)) ([14c8df0](https://github.com/Finch-API/finch-api-python/commit/14c8df00568bba59acab45ff6cdef2cb04599b43))
+* **internal:** replace string concatenation with f-strings ([#206](https://github.com/Finch-API/finch-api-python/issues/206)) ([7c920a3](https://github.com/Finch-API/finch-api-python/commit/7c920a33c103fadee08cd784f5d028f2ab19411f))
+* **internal:** update formatting ([#214](https://github.com/Finch-API/finch-api-python/issues/214)) ([cd5253c](https://github.com/Finch-API/finch-api-python/commit/cd5253c9a8417eec88dc8c30fdfa817ec1519a9e))
+* **internal:** update lock file ([#201](https://github.com/Finch-API/finch-api-python/issues/201)) ([42de23d](https://github.com/Finch-API/finch-api-python/commit/42de23d5c7a3b9253396b94af7884e7a9300b841))
+* **internal:** updates to proxy helper ([#202](https://github.com/Finch-API/finch-api-python/issues/202)) ([2049c50](https://github.com/Finch-API/finch-api-python/commit/2049c50e723ec6ec4b4d46a18fa87800a58d581a))
+* **package:** lift anyio v4 restriction ([#208](https://github.com/Finch-API/finch-api-python/issues/208)) ([e1ed4a5](https://github.com/Finch-API/finch-api-python/commit/e1ed4a53591362f5e2579e301b43529b64f2fc8b))
+
+
+### Documentation
+
+* improve README timeout comment ([#221](https://github.com/Finch-API/finch-api-python/issues/221)) ([1c946dd](https://github.com/Finch-API/finch-api-python/commit/1c946dd97fc43bfdda3346311282086af8cac6cb))
+* **readme:** update example snippets ([#205](https://github.com/Finch-API/finch-api-python/issues/205)) ([4ff1a6b](https://github.com/Finch-API/finch-api-python/commit/4ff1a6b04ba8e3301ac6ff4a612bb8aab801abd0))
+
+
+### Refactors
+
+* **client:** simplify cleanup ([#222](https://github.com/Finch-API/finch-api-python/issues/222)) ([7f116d2](https://github.com/Finch-API/finch-api-python/commit/7f116d2665c22e08988fb01cbbb692c8c0b79fea))
+* remove unused model types used in params ([#224](https://github.com/Finch-API/finch-api-python/issues/224)) ([f0b3fb7](https://github.com/Finch-API/finch-api-python/commit/f0b3fb7fceb4cdfe42abc5e49e04de30633cc55c))
+* simplify internal error handling ([#223](https://github.com/Finch-API/finch-api-python/issues/223)) ([481dc7d](https://github.com/Finch-API/finch-api-python/commit/481dc7de11733b2493e820ff241b7999d873fd16))
+
## 0.8.2 (2023-11-28)
Full Changelog: [v0.8.1...v0.8.2](https://github.com/Finch-API/finch-api-python/compare/v0.8.1...v0.8.2)
diff --git a/README.md b/README.md
index e2b76e88..89067751 100644
--- a/README.md
+++ b/README.md
@@ -236,7 +236,7 @@ from finch import Finch
# Configure the default for all requests:
client = Finch(
- # default is 60s
+ # 20 seconds (default is 1 minute)
timeout=20.0,
)
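The README change above clarifies that the client-level default timeout is one minute, not an unnamed "60s". A minimal sketch of how the two levels interact, assuming a placeholder access token; the per-request `timeout` override mirrors the `timeout` parameter that appears on the generated methods later in this diff.

```python
# Sketch only: the access token is a placeholder.
import httpx
from finch import Finch

# Client-level default of 20 seconds instead of the 1 minute default.
client = Finch(access_token="my-access-token", timeout=20.0)

# Override for a single call; granular httpx.Timeout values are also accepted.
client.jobs.automated.retrieve(
    "job-id",  # placeholder id
    timeout=httpx.Timeout(60.0, connect=5.0),
)
```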
diff --git a/api.md b/api.md
index 0fb9d472..05326915 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
# Shared Types
```python
-from finch.types import OperationSupport, OperationSupportMatrix
+from finch.types import OperationSupport, OperationSupportMatrix, Paging
```
# Finch
@@ -16,7 +16,7 @@ Methods:
Types:
```python
-from finch.types import Income, Location, Money, Paging
+from finch.types import Income, Location, Money
```
## CompanyResource
@@ -186,3 +186,31 @@ from finch.types import RequestForwardingForwardResponse
Methods:
- client.request_forwarding.forward(\*\*params) -> RequestForwardingForwardResponse
+
+# Jobs
+
+## Automated
+
+Types:
+
+```python
+from finch.types.jobs import AutomatedAsyncJob, AutomatedCreateResponse
+```
+
+Methods:
+
+- client.jobs.automated.create(\*\*params) -> AutomatedCreateResponse
+- client.jobs.automated.retrieve(job_id) -> AutomatedAsyncJob
+- client.jobs.automated.list(\*\*params) -> SyncPage[AutomatedAsyncJob]
+
+## Manual
+
+Types:
+
+```python
+from finch.types.jobs import ManualAsyncJob
+```
+
+Methods:
+
+- client.jobs.manual.retrieve(job_id) -> ManualAsyncJob
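The new `/jobs` entries in api.md map onto the `jobs` resources added later in this diff. A hedged usage sketch of the three automated-job calls and the manual-job lookup; the token and job ids are placeholders, and `created.job_id` assumes the create response exposes the enqueued job's id as described in the endpoint docstring.

```python
from finch import Finch

client = Finch(access_token="my-access-token")  # placeholder token

# Enqueue a full data re-sync (the only supported type is `data_sync_all`).
created = client.jobs.automated.create(type="data_sync_all")

# Poll the automated job by id.
job = client.jobs.automated.retrieve(created.job_id)

# List automated jobs with offset-based pagination parameters.
page = client.jobs.automated.list(limit=10, offset=0)

# Manual (Assisted) jobs are looked up the same way.
manual_job = client.jobs.manual.retrieve("manual-job-id")  # placeholder id
```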
diff --git a/pyproject.toml b/pyproject.toml
index 7b3b5304..b477e34b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "finch-api"
-version = "0.8.2"
+version = "0.9.0"
description = "The official Python library for the Finch API"
readme = "README.md"
license = "Apache-2.0"
@@ -11,7 +11,7 @@ dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
"typing-extensions>=4.5, <5",
- "anyio>=3.5.0, <4",
+ "anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
@@ -45,17 +45,18 @@ Repository = "https://github.com/Finch-API/finch-api-python"
[tool.rye]
managed = true
+# version pins are in requirements-dev.lock
dev-dependencies = [
- "pyright==1.1.332",
- "mypy==1.7.1",
- "black==23.3.0",
- "respx==0.19.2",
- "pytest==7.1.1",
- "pytest-asyncio==0.21.1",
- "ruff==0.0.282",
- "isort==5.10.1",
- "time-machine==2.9.0",
- "nox==2023.4.22",
+ "pyright",
+ "mypy",
+ "black",
+ "respx",
+ "pytest",
+ "pytest-asyncio",
+ "ruff",
+ "isort",
+ "time-machine",
+ "nox",
"dirty-equals>=0.6.0",
]
@@ -80,7 +81,7 @@ typecheck = { chain = [
]}
"typecheck:pyright" = "pyright"
"typecheck:verify-types" = "pyright --verifytypes finch --ignoreexternal"
-"typecheck:mypy" = "mypy --enable-incomplete-feature=Unpack ."
+"typecheck:mypy" = "mypy ."
[build-system]
requires = ["hatchling"]
@@ -132,9 +133,11 @@ extra_standard_library = ["typing_extensions"]
[tool.ruff]
line-length = 120
-format = "grouped"
+output-format = "grouped"
target-version = "py37"
select = [
+ # bugbear rules
+ "B",
# remove unused imports
"F401",
# bare except statements
@@ -145,6 +148,10 @@ select = [
"T201",
"T203",
]
+ignore = [
+ # mutable defaults
+ "B006",
+]
unfixable = [
# disable auto fix for print statements
"T201",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index fd275d2e..2ad33ff6 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -8,7 +8,7 @@
-e file:.
annotated-types==0.6.0
-anyio==3.7.1
+anyio==4.1.0
argcomplete==3.1.2
attrs==23.1.0
black==23.3.0
@@ -20,9 +20,9 @@ distlib==0.3.7
distro==1.8.0
exceptiongroup==1.1.3
filelock==3.12.4
-h11==0.12.0
-httpcore==0.15.0
-httpx==0.23.0
+h11==0.14.0
+httpcore==1.0.2
+httpx==0.25.2
idna==3.4
iniconfig==2.0.0
isort==5.10.1
@@ -42,9 +42,8 @@ pytest==7.1.1
pytest-asyncio==0.21.1
python-dateutil==2.8.2
pytz==2023.3.post1
-respx==0.19.2
-rfc3986==1.5.0
-ruff==0.0.282
+respx==0.20.2
+ruff==0.1.7
six==1.16.0
sniffio==1.3.0
time-machine==2.9.0
diff --git a/requirements.lock b/requirements.lock
index 0c8c2c2e..2022a5c5 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -8,16 +8,15 @@
-e file:.
annotated-types==0.6.0
-anyio==3.7.1
+anyio==4.1.0
certifi==2023.7.22
distro==1.8.0
exceptiongroup==1.1.3
-h11==0.12.0
-httpcore==0.15.0
-httpx==0.23.0
+h11==0.14.0
+httpcore==1.0.2
+httpx==0.25.2
idna==3.4
pydantic==2.4.2
pydantic-core==2.10.1
-rfc3986==1.5.0
sniffio==1.3.0
typing-extensions==4.8.0
diff --git a/src/finch/__init__.py b/src/finch/__init__.py
index c720161f..37904fae 100644
--- a/src/finch/__init__.py
+++ b/src/finch/__init__.py
@@ -75,7 +75,7 @@
for __name in __all__:
if not __name.startswith("__"):
try:
- setattr(__locals[__name], "__module__", "finch")
+ __locals[__name].__module__ = "finch"
except (TypeError, AttributeError):
# Some of our exported symbols are builtins which we can't set attributes for.
pass
diff --git a/src/finch/_base_client.py b/src/finch/_base_client.py
index a168301f..92189617 100644
--- a/src/finch/_base_client.py
+++ b/src/finch/_base_client.py
@@ -5,6 +5,7 @@
import time
import uuid
import email
+import asyncio
import inspect
import logging
import platform
@@ -72,6 +73,7 @@
DEFAULT_TIMEOUT,
DEFAULT_MAX_RETRIES,
RAW_RESPONSE_HEADER,
+ STREAMED_RAW_RESPONSE_HEADER,
)
from ._streaming import Stream, AsyncStream
from ._exceptions import (
@@ -363,14 +365,21 @@ def _make_status_error_from_response(
self,
response: httpx.Response,
) -> APIStatusError:
- err_text = response.text.strip()
- body = err_text
+ if response.is_closed and not response.is_stream_consumed:
+ # We can't read the response body as it has been closed
+ # before it was read. This can happen if an event hook
+ # raises a status error.
+ body = None
+ err_msg = f"Error code: {response.status_code}"
+ else:
+ err_text = response.text.strip()
+ body = err_text
- try:
- body = json.loads(err_text)
- err_msg = f"Error code: {response.status_code} - {body}"
- except Exception:
- err_msg = err_text or f"Error code: {response.status_code}"
+ try:
+ body = json.loads(err_text)
+ err_msg = f"Error code: {response.status_code} - {body}"
+ except Exception:
+ err_msg = err_text or f"Error code: {response.status_code}"
return self._make_status_error(err_msg, body=body, response=response)
@@ -395,14 +404,12 @@ def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers:
headers_dict = _merge_mappings(self.default_headers, custom_headers)
self._validate_headers(headers_dict, custom_headers)
+ # headers are case-insensitive while dictionaries are not.
headers = httpx.Headers(headers_dict)
idempotency_header = self._idempotency_header
if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
- if not options.idempotency_key:
- options.idempotency_key = self._idempotency_key()
-
- headers[idempotency_header] = options.idempotency_key
+ headers[idempotency_header] = options.idempotency_key or self._idempotency_key()
return headers
@@ -534,6 +541,12 @@ def _process_response_data(
except pydantic.ValidationError as err:
raise APIResponseValidationError(response=response, body=data) from err
+ def _should_stream_response_body(self, *, request: httpx.Request) -> bool:
+ if request.headers.get(STREAMED_RAW_RESPONSE_HEADER) == "true":
+ return True
+
+ return False
+
@property
def qs(self) -> Querystring:
return Querystring()
@@ -578,18 +591,10 @@ def base_url(self) -> URL:
@base_url.setter
def base_url(self, url: URL | str) -> None:
- self._client.base_url = url if isinstance(url, URL) else URL(url)
+ self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url))
- @lru_cache(maxsize=None)
def platform_headers(self) -> Dict[str, str]:
- return {
- "X-Stainless-Lang": "python",
- "X-Stainless-Package-Version": self._version,
- "X-Stainless-OS": str(get_platform()),
- "X-Stainless-Arch": str(get_architecture()),
- "X-Stainless-Runtime": platform.python_implementation(),
- "X-Stainless-Runtime-Version": platform.python_version(),
- }
+ return platform_headers(self._version)
def _calculate_retry_timeout(
self,
@@ -606,7 +611,7 @@ def _calculate_retry_timeout(
if response_headers is not None:
retry_header = response_headers.get("retry-after")
try:
- retry_after = int(retry_header)
+ retry_after = float(retry_header)
except Exception:
retry_date_tuple = email.utils.parsedate_tz(retry_header)
if retry_date_tuple is None:
@@ -668,9 +673,16 @@ def _idempotency_key(self) -> str:
return f"stainless-python-retry-{uuid.uuid4()}"
+class SyncHttpxClientWrapper(httpx.Client):
+ def __del__(self) -> None:
+ try:
+ self.close()
+ except Exception:
+ pass
+
+
class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
_client: httpx.Client
- _has_custom_http_client: bool
_default_stream_cls: type[Stream[Any]] | None = None
def __init__(
@@ -743,7 +755,7 @@ def __init__(
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
)
- self._client = http_client or httpx.Client(
+ self._client = http_client or SyncHttpxClientWrapper(
base_url=base_url,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
@@ -751,7 +763,6 @@ def __init__(
transport=transport,
limits=limits,
)
- self._has_custom_http_client = bool(http_client)
def is_closed(self) -> bool:
return self._client.is_closed
@@ -863,27 +874,24 @@ def _request(
self._prepare_request(request)
try:
- response = self._client.send(request, auth=self.custom_auth, stream=stream)
- log.debug(
- 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
+ response = self._client.send(
+ request,
+ auth=self.custom_auth,
+ stream=stream or self._should_stream_response_body(request=request),
)
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- if retries > 0 and self._should_retry(err.response):
+ except httpx.TimeoutException as err:
+ if retries > 0:
return self._retry_request(
options,
cast_to,
retries,
- err.response.headers,
stream=stream,
stream_cls=stream_cls,
+ response_headers=None,
)
- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- err.response.read()
- raise self._make_status_error_from_response(err.response) from None
- except httpx.TimeoutException as err:
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
if retries > 0:
return self._retry_request(
options,
@@ -891,18 +899,35 @@ def _request(
retries,
stream=stream,
stream_cls=stream_cls,
+ response_headers=None,
)
- raise APITimeoutError(request=request) from err
- except Exception as err:
- if retries > 0:
+
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
+ )
+
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ if retries > 0 and self._should_retry(err.response):
+ err.response.close()
return self._retry_request(
options,
cast_to,
retries,
+ err.response.headers,
stream=stream,
stream_cls=stream_cls,
)
- raise APIConnectionError(request=request) from err
+
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ err.response.read()
+
+ raise self._make_status_error_from_response(err.response) from None
return self._process_response(
cast_to=cast_to,
@@ -917,7 +942,7 @@ def _retry_request(
options: FinalRequestOptions,
cast_to: Type[ResponseT],
remaining_retries: int,
- response_headers: Optional[httpx.Headers] = None,
+ response_headers: httpx.Headers | None,
*,
stream: bool,
stream_cls: type[_StreamT] | None,
@@ -1112,9 +1137,17 @@ def get_api_list(
return self._request_api_list(model, page, opts)
+class AsyncHttpxClientWrapper(httpx.AsyncClient):
+ def __del__(self) -> None:
+ try:
+ # TODO(someday): support non asyncio runtimes here
+ asyncio.get_running_loop().create_task(self.aclose())
+ except Exception:
+ pass
+
+
class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
_client: httpx.AsyncClient
- _has_custom_http_client: bool
_default_stream_cls: type[AsyncStream[Any]] | None = None
def __init__(
@@ -1187,7 +1220,7 @@ def __init__(
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
)
- self._client = http_client or httpx.AsyncClient(
+ self._client = http_client or AsyncHttpxClientWrapper(
base_url=base_url,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
@@ -1195,7 +1228,6 @@ def __init__(
transport=transport,
limits=limits,
)
- self._has_custom_http_client = bool(http_client)
def is_closed(self) -> bool:
return self._client.is_closed
@@ -1304,13 +1336,45 @@ async def _request(
await self._prepare_request(request)
try:
- response = await self._client.send(request, auth=self.custom_auth, stream=stream)
- log.debug(
- 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
+ response = await self._client.send(
+ request,
+ auth=self.custom_auth,
+ stream=stream or self._should_stream_response_body(request=request),
)
+ except httpx.TimeoutException as err:
+ if retries > 0:
+ return await self._retry_request(
+ options,
+ cast_to,
+ retries,
+ stream=stream,
+ stream_cls=stream_cls,
+ response_headers=None,
+ )
+
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ if retries > 0:
+ return await self._retry_request(
+ options,
+ cast_to,
+ retries,
+ stream=stream,
+ stream_cls=stream_cls,
+ response_headers=None,
+ )
+
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
+ )
+
+ try:
response.raise_for_status()
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
if retries > 0 and self._should_retry(err.response):
+ await err.response.aclose()
return await self._retry_request(
options,
cast_to,
@@ -1322,20 +1386,10 @@ async def _request(
# If the response is streamed then we need to explicitly read the response
# to completion before attempting to access the response text.
- await err.response.aread()
+ if not err.response.is_closed:
+ await err.response.aread()
+
raise self._make_status_error_from_response(err.response) from None
- except httpx.ConnectTimeout as err:
- if retries > 0:
- return await self._retry_request(options, cast_to, retries, stream=stream, stream_cls=stream_cls)
- raise APITimeoutError(request=request) from err
- except httpx.TimeoutException as err:
- if retries > 0:
- return await self._retry_request(options, cast_to, retries, stream=stream, stream_cls=stream_cls)
- raise APITimeoutError(request=request) from err
- except Exception as err:
- if retries > 0:
- return await self._retry_request(options, cast_to, retries, stream=stream, stream_cls=stream_cls)
- raise APIConnectionError(request=request) from err
return self._process_response(
cast_to=cast_to,
@@ -1350,7 +1404,7 @@ async def _retry_request(
options: FinalRequestOptions,
cast_to: Type[ResponseT],
remaining_retries: int,
- response_headers: Optional[httpx.Headers] = None,
+ response_headers: httpx.Headers | None,
*,
stream: bool,
stream_cls: type[_AsyncStreamT] | None,
@@ -1631,6 +1685,18 @@ def get_platform() -> Platform:
return "Unknown"
+@lru_cache(maxsize=None)
+def platform_headers(version: str) -> Dict[str, str]:
+ return {
+ "X-Stainless-Lang": "python",
+ "X-Stainless-Package-Version": version,
+ "X-Stainless-OS": str(get_platform()),
+ "X-Stainless-Arch": str(get_architecture()),
+ "X-Stainless-Runtime": platform.python_implementation(),
+ "X-Stainless-Runtime-Version": platform.python_version(),
+ }
+
+
class OtherArch:
def __init__(self, name: str) -> None:
self.name = name
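Among the `_base_client.py` changes above, `_calculate_retry_timeout` now parses numeric `retry-after` values with `float(...)` instead of `int(...)`, so fractional delays such as `0.5` are honored before falling back to HTTP-date parsing. A standalone sketch of that parsing order, using the same stdlib helpers as the code above:

```python
# Standalone sketch of the retry-after parsing order used above:
# numeric seconds (now including fractions) first, HTTP-date second.
from __future__ import annotations

import email.utils
import time


def parse_retry_after(retry_header: str) -> float | None:
    try:
        return float(retry_header)  # e.g. "2" or "0.5"
    except Exception:
        retry_date_tuple = email.utils.parsedate_tz(retry_header)
        if retry_date_tuple is None:
            return None
        retry_date = email.utils.mktime_tz(retry_date_tuple)
        return retry_date - time.time()


print(parse_retry_after("0.5"))                            # 0.5
print(parse_retry_after("Fri, 31 Dec 1999 23:59:59 GMT"))  # negative: date is in the past
```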
diff --git a/src/finch/_client.py b/src/finch/_client.py
index d5579f9f..404f7fa4 100644
--- a/src/finch/_client.py
+++ b/src/finch/_client.py
@@ -3,7 +3,6 @@
from __future__ import annotations
import os
-import asyncio
from typing import Any, Union, Mapping
from typing_extensions import Self, override
@@ -32,6 +31,8 @@
DEFAULT_MAX_RETRIES,
SyncAPIClient,
AsyncAPIClient,
+ SyncHttpxClientWrapper,
+ AsyncHttpxClientWrapper,
)
__all__ = [
@@ -53,6 +54,7 @@ class Finch(SyncAPIClient):
account: resources.Account
webhooks: resources.Webhooks
request_forwarding: resources.RequestForwarding
+ jobs: resources.Jobs
with_raw_response: FinchWithRawResponse
# client options
@@ -136,6 +138,7 @@ def __init__(
self.account = resources.Account(self)
self.webhooks = resources.Webhooks(self)
self.request_forwarding = resources.RequestForwarding(self)
+ self.jobs = resources.Jobs(self)
self.with_raw_response = FinchWithRawResponse(self)
@property
@@ -215,7 +218,7 @@ def copy(
if http_client is not None:
raise ValueError("The 'http_client' argument is mutually exclusive with 'connection_pool_limits'")
- if self._has_custom_http_client:
+ if not isinstance(self._client, SyncHttpxClientWrapper):
raise ValueError(
"A custom HTTP client has been set and is mutually exclusive with the 'connection_pool_limits' argument"
)
@@ -234,7 +237,7 @@ def copy(
client_id=client_id or self.client_id,
client_secret=client_secret or self.client_secret,
webhook_secret=webhook_secret or self.webhook_secret,
- base_url=base_url or str(self.base_url),
+ base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
connection_pool_limits=connection_pool_limits,
@@ -248,16 +251,6 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
- def __del__(self) -> None:
- if not hasattr(self, "_has_custom_http_client") or not hasattr(self, "close"):
- # this can happen if the '__init__' method raised an error
- return
-
- if self._has_custom_http_client:
- return
-
- self.close()
-
def get_access_token(
self,
code: str,
@@ -357,6 +350,7 @@ class AsyncFinch(AsyncAPIClient):
account: resources.AsyncAccount
webhooks: resources.AsyncWebhooks
request_forwarding: resources.AsyncRequestForwarding
+ jobs: resources.AsyncJobs
with_raw_response: AsyncFinchWithRawResponse
# client options
@@ -440,6 +434,7 @@ def __init__(
self.account = resources.AsyncAccount(self)
self.webhooks = resources.AsyncWebhooks(self)
self.request_forwarding = resources.AsyncRequestForwarding(self)
+ self.jobs = resources.AsyncJobs(self)
self.with_raw_response = AsyncFinchWithRawResponse(self)
@property
@@ -519,7 +514,7 @@ def copy(
if http_client is not None:
raise ValueError("The 'http_client' argument is mutually exclusive with 'connection_pool_limits'")
- if self._has_custom_http_client:
+ if not isinstance(self._client, AsyncHttpxClientWrapper):
raise ValueError(
"A custom HTTP client has been set and is mutually exclusive with the 'connection_pool_limits' argument"
)
@@ -538,7 +533,7 @@ def copy(
client_id=client_id or self.client_id,
client_secret=client_secret or self.client_secret,
webhook_secret=webhook_secret or self.webhook_secret,
- base_url=base_url or str(self.base_url),
+ base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
connection_pool_limits=connection_pool_limits,
@@ -552,19 +547,6 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
- def __del__(self) -> None:
- if not hasattr(self, "_has_custom_http_client") or not hasattr(self, "close"):
- # this can happen if the '__init__' method raised an error
- return
-
- if self._has_custom_http_client:
- return
-
- try:
- asyncio.get_running_loop().create_task(self.close())
- except Exception:
- pass
-
async def get_access_token(
self,
code: str,
@@ -664,6 +646,7 @@ def __init__(self, client: Finch) -> None:
self.providers = resources.ProvidersWithRawResponse(client.providers)
self.account = resources.AccountWithRawResponse(client.account)
self.request_forwarding = resources.RequestForwardingWithRawResponse(client.request_forwarding)
+ self.jobs = resources.JobsWithRawResponse(client.jobs)
class AsyncFinchWithRawResponse:
@@ -672,6 +655,7 @@ def __init__(self, client: AsyncFinch) -> None:
self.providers = resources.AsyncProvidersWithRawResponse(client.providers)
self.account = resources.AsyncAccountWithRawResponse(client.account)
self.request_forwarding = resources.AsyncRequestForwardingWithRawResponse(client.request_forwarding)
+ self.jobs = resources.AsyncJobsWithRawResponse(client.jobs)
Client = Finch
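With the `__del__`-based cleanup moved off the `Finch`/`AsyncFinch` classes and into the httpx client wrappers, `copy`/`with_options` no longer keeps extra clients alive. A hedged usage sketch (placeholder token and job id):

```python
# Sketch only: derive a one-off client with a different timeout.
from finch import Finch

client = Finch(access_token="my-access-token")  # placeholder token

# The copied client shares configuration; the wrapped httpx client now
# closes itself on garbage collection instead of relying on Finch.__del__.
job = client.with_options(timeout=10.0).jobs.automated.retrieve("job-id")  # placeholder id
```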
diff --git a/src/finch/_constants.py b/src/finch/_constants.py
index 0c3f31df..39b46eb0 100644
--- a/src/finch/_constants.py
+++ b/src/finch/_constants.py
@@ -3,6 +3,7 @@
import httpx
RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
+STREAMED_RAW_RESPONSE_HEADER = "X-Stainless-Streamed-Raw-Response"
# default timeout is 1 minute
DEFAULT_TIMEOUT = httpx.Timeout(timeout=60.0, connect=5.0)
diff --git a/src/finch/_exceptions.py b/src/finch/_exceptions.py
index 8d69569f..ba7b00fa 100644
--- a/src/finch/_exceptions.py
+++ b/src/finch/_exceptions.py
@@ -41,6 +41,7 @@ def __init__(self, message: str, request: httpx.Request, *, body: object | None)
super().__init__(message)
self.request = request
self.message = message
+ self.body = body
class APIResponseValidationError(APIError):
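`APIError.body` was previously accepted by `__init__` but never assigned; with this fix the payload decoded in `_make_status_error_from_response` is reachable from handlers. A hedged sketch, assuming `APIStatusError` is exported from the package root as elsewhere in the SDK; the token and job id are placeholders.

```python
# Hedged sketch: APIStatusError is raised for 4xx/5xx responses and now
# carries the decoded JSON body (or raw text, or None) assigned above.
from finch import Finch, APIStatusError

client = Finch(access_token="my-access-token")  # placeholder token

try:
    client.jobs.automated.retrieve("nonexistent-job-id")
except APIStatusError as err:
    print(err.status_code)  # e.g. 404
    print(err.body)         # decoded error payload, if the body could be read
```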
diff --git a/src/finch/_streaming.py b/src/finch/_streaming.py
index 913159fd..e816ca74 100644
--- a/src/finch/_streaming.py
+++ b/src/finch/_streaming.py
@@ -51,7 +51,7 @@ def __stream__(self) -> Iterator[ResponseT]:
yield process_data(data=sse.json(), cast_to=cast_to, response=response)
# Ensure the entire stream is consumed
- for sse in iterator:
+ for _sse in iterator:
...
@@ -94,7 +94,7 @@ async def __stream__(self) -> AsyncIterator[ResponseT]:
yield process_data(data=sse.json(), cast_to=cast_to, response=response)
# Ensure the entire stream is consumed
- async for sse in iterator:
+ async for _sse in iterator:
...
diff --git a/src/finch/_types.py b/src/finch/_types.py
index e12f064d..c4f652b4 100644
--- a/src/finch/_types.py
+++ b/src/finch/_types.py
@@ -44,6 +44,7 @@
class BinaryResponseContent(ABC):
+ @abstractmethod
def __init__(
self,
response: Any,
diff --git a/src/finch/_utils/_proxy.py b/src/finch/_utils/_proxy.py
index aa934a3f..3c9e790a 100644
--- a/src/finch/_utils/_proxy.py
+++ b/src/finch/_utils/_proxy.py
@@ -18,25 +18,43 @@ class LazyProxy(Generic[T], ABC):
def __init__(self) -> None:
self.__proxied: T | None = None
+ # Note: we have to special case proxies that themselves return proxies
+ # to support using a proxy as a catch-all for any random access, e.g. `proxy.foo.bar.baz`
+
def __getattr__(self, attr: str) -> object:
- return getattr(self.__get_proxied__(), attr)
+ proxied = self.__get_proxied__()
+ if isinstance(proxied, LazyProxy):
+ return proxied # pyright: ignore
+ return getattr(proxied, attr)
@override
def __repr__(self) -> str:
+ proxied = self.__get_proxied__()
+ if isinstance(proxied, LazyProxy):
+ return proxied.__class__.__name__
return repr(self.__get_proxied__())
@override
def __str__(self) -> str:
- return str(self.__get_proxied__())
+ proxied = self.__get_proxied__()
+ if isinstance(proxied, LazyProxy):
+ return proxied.__class__.__name__
+ return str(proxied)
@override
def __dir__(self) -> Iterable[str]:
- return self.__get_proxied__().__dir__()
+ proxied = self.__get_proxied__()
+ if isinstance(proxied, LazyProxy):
+ return []
+ return proxied.__dir__()
@property # type: ignore
@override
def __class__(self) -> type:
- return self.__get_proxied__().__class__
+ proxied = self.__get_proxied__()
+ if issubclass(type(proxied), LazyProxy):
+ return type(proxied)
+ return proxied.__class__
def __get_proxied__(self) -> T:
if not self.should_cache:
diff --git a/src/finch/_utils/_utils.py b/src/finch/_utils/_utils.py
index d2bfc91a..c874d368 100644
--- a/src/finch/_utils/_utils.py
+++ b/src/finch/_utils/_utils.py
@@ -194,8 +194,8 @@ def extract_type_arg(typ: type, index: int) -> type:
args = get_args(typ)
try:
return cast(type, args[index])
- except IndexError:
- raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not")
+ except IndexError as err:
+ raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err
def deepcopy_minimal(item: _T) -> _T:
@@ -230,7 +230,7 @@ def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> s
def quote(string: str) -> str:
"""Add single quotation marks around the given string. Does *not* do any escaping."""
- return "'" + string + "'"
+ return f"'{string}'"
def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
@@ -275,7 +275,9 @@ def wrapper(*args: object, **kwargs: object) -> object:
try:
given_params.add(positional[i])
except IndexError:
- raise TypeError(f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given")
+ raise TypeError(
+ f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given"
+ ) from None
for key in kwargs.keys():
given_params.add(key)
diff --git a/src/finch/_version.py b/src/finch/_version.py
index d085a27d..cfd63a12 100644
--- a/src/finch/_version.py
+++ b/src/finch/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless.
__title__ = "finch"
-__version__ = "0.8.2" # x-release-please-version
+__version__ = "0.9.0" # x-release-please-version
diff --git a/src/finch/pagination.py b/src/finch/pagination.py
index 3cb9ed96..cd513eb1 100644
--- a/src/finch/pagination.py
+++ b/src/finch/pagination.py
@@ -5,11 +5,11 @@
from httpx import Response
-from .types import Paging
from ._types import ModelT
from ._utils import is_mapping
from ._models import BaseModel
from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
+from .types.shared import Paging
__all__ = [
"SyncSinglePage",
@@ -18,6 +18,8 @@
"AsyncResponsesPage",
"SyncIndividualsPage",
"AsyncIndividualsPage",
+ "SyncPage",
+ "AsyncPage",
]
_BaseModelT = TypeVar("_BaseModelT", bound=BaseModel)
@@ -28,7 +30,10 @@ class SyncSinglePage(BaseSyncPage[ModelT], BasePage[ModelT], Generic[ModelT]):
@override
def _get_page_items(self) -> List[ModelT]:
- return self.items
+ items = self.items
+ if not items:
+ return []
+ return items
@override
def next_page_info(self) -> None:
@@ -53,7 +58,10 @@ class AsyncSinglePage(BaseAsyncPage[ModelT], BasePage[ModelT], Generic[ModelT]):
@override
def _get_page_items(self) -> List[ModelT]:
- return self.items
+ items = self.items
+ if not items:
+ return []
+ return items
@override
def next_page_info(self) -> None:
@@ -78,7 +86,10 @@ class SyncResponsesPage(BaseSyncPage[ModelT], BasePage[ModelT], Generic[ModelT])
@override
def _get_page_items(self) -> List[ModelT]:
- return self.responses
+ responses = self.responses
+ if not responses:
+ return []
+ return responses
@override
def next_page_info(self) -> None:
@@ -94,7 +105,10 @@ class AsyncResponsesPage(BaseAsyncPage[ModelT], BasePage[ModelT], Generic[ModelT
@override
def _get_page_items(self) -> List[ModelT]:
- return self.responses
+ responses = self.responses
+ if not responses:
+ return []
+ return responses
@override
def next_page_info(self) -> None:
@@ -105,32 +119,35 @@ def next_page_info(self) -> None:
return None
-IndividualsPagePaging = Paging
-"""This is deprecated, Paging should be used instead"""
-
-
class SyncIndividualsPage(BaseSyncPage[ModelT], BasePage[ModelT], Generic[ModelT]):
individuals: List[ModelT]
paging: Paging
@override
def _get_page_items(self) -> List[ModelT]:
- return self.individuals
+ individuals = self.individuals
+ if not individuals:
+ return []
+ return individuals
@override
def next_page_info(self) -> Optional[PageInfo]:
- offset = self.paging.offset
+ offset = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ offset = self.paging.offset
if offset is None:
return None
- length = len(self.individuals)
+ length = len(self._get_page_items())
current_count = offset + length
- total_count = self.paging.count
- if total_count is None:
+ count = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ count = self.paging.count
+ if count is None:
return None
- if current_count < total_count:
+ if current_count < count:
return PageInfo(params={"offset": current_count})
return None
@@ -142,22 +159,97 @@ class AsyncIndividualsPage(BaseAsyncPage[ModelT], BasePage[ModelT], Generic[Mode
@override
def _get_page_items(self) -> List[ModelT]:
- return self.individuals
+ individuals = self.individuals
+ if not individuals:
+ return []
+ return individuals
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ offset = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ offset = self.paging.offset
+ if offset is None:
+ return None
+
+ length = len(self._get_page_items())
+ current_count = offset + length
+
+ count = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ count = self.paging.count
+ if count is None:
+ return None
+
+ if current_count < count:
+ return PageInfo(params={"offset": current_count})
+
+ return None
+
+
+class SyncPage(BaseSyncPage[ModelT], BasePage[ModelT], Generic[ModelT]):
+ data: List[ModelT]
+ paging: Paging
+
+ @override
+ def _get_page_items(self) -> List[ModelT]:
+ data = self.data
+ if not data:
+ return []
+ return data
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ offset = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ offset = self.paging.offset
+ if offset is None:
+ return None
+
+ length = len(self._get_page_items())
+ current_count = offset + length
+
+ count = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ count = self.paging.count
+ if count is None:
+ return None
+
+ if current_count < count:
+ return PageInfo(params={"offset": current_count})
+
+ return None
+
+
+class AsyncPage(BaseAsyncPage[ModelT], BasePage[ModelT], Generic[ModelT]):
+ data: List[ModelT]
+ paging: Paging
+
+ @override
+ def _get_page_items(self) -> List[ModelT]:
+ data = self.data
+ if not data:
+ return []
+ return data
@override
def next_page_info(self) -> Optional[PageInfo]:
- offset = self.paging.offset
+ offset = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ offset = self.paging.offset
if offset is None:
return None
- length = len(self.individuals)
+ length = len(self._get_page_items())
current_count = offset + length
- total_count = self.paging.count
- if total_count is None:
+ count = None
+ if self.paging is not None: # pyright: ignore[reportUnnecessaryComparison]
+ count = self.paging.count
+ if count is None:
return None
- if current_count < total_count:
+ if current_count < count:
return PageInfo(params={"offset": current_count})
return None
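The new `SyncPage`/`AsyncPage` classes follow the same offset-based `paging` contract as the individuals pages. A hedged sketch of iterating them; auto-pagination is assumed to behave like the existing page types built on `BaseSyncPage`/`BaseAsyncPage`, and the token is a placeholder.

```python
# Hedged sketch: iterating SyncPage / AsyncPage of automated jobs.
import asyncio

from finch import Finch, AsyncFinch

client = Finch(access_token="my-access-token")  # placeholder token

# Synchronous iteration walks pages using the offset-based Paging info.
for job in client.jobs.automated.list(limit=50):
    print(job.job_id)


async def main() -> None:
    async_client = AsyncFinch(access_token="my-access-token")  # placeholder token
    async for job in async_client.jobs.automated.list(limit=50):
        print(job.job_id)


asyncio.run(main())
```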
diff --git a/src/finch/resources/__init__.py b/src/finch/resources/__init__.py
index b87ddc17..dcab5333 100644
--- a/src/finch/resources/__init__.py
+++ b/src/finch/resources/__init__.py
@@ -1,6 +1,7 @@
# File generated from our OpenAPI spec by Stainless.
from .hris import HRIS, AsyncHRIS, HRISWithRawResponse, AsyncHRISWithRawResponse
+from .jobs import Jobs, AsyncJobs, JobsWithRawResponse, AsyncJobsWithRawResponse
from .account import (
Account,
AsyncAccount,
@@ -40,4 +41,8 @@
"AsyncRequestForwarding",
"RequestForwardingWithRawResponse",
"AsyncRequestForwardingWithRawResponse",
+ "Jobs",
+ "AsyncJobs",
+ "JobsWithRawResponse",
+ "AsyncJobsWithRawResponse",
]
diff --git a/src/finch/resources/jobs/__init__.py b/src/finch/resources/jobs/__init__.py
new file mode 100644
index 00000000..f1f7692d
--- /dev/null
+++ b/src/finch/resources/jobs/__init__.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from .jobs import Jobs, AsyncJobs, JobsWithRawResponse, AsyncJobsWithRawResponse
+from .manual import (
+ Manual,
+ AsyncManual,
+ ManualWithRawResponse,
+ AsyncManualWithRawResponse,
+)
+from .automated import (
+ Automated,
+ AsyncAutomated,
+ AutomatedWithRawResponse,
+ AsyncAutomatedWithRawResponse,
+)
+
+__all__ = [
+ "Automated",
+ "AsyncAutomated",
+ "AutomatedWithRawResponse",
+ "AsyncAutomatedWithRawResponse",
+ "Manual",
+ "AsyncManual",
+ "ManualWithRawResponse",
+ "AsyncManualWithRawResponse",
+ "Jobs",
+ "AsyncJobs",
+ "JobsWithRawResponse",
+ "AsyncJobsWithRawResponse",
+]
diff --git a/src/finch/resources/jobs/automated.py b/src/finch/resources/jobs/automated.py
new file mode 100644
index 00000000..9f43c60a
--- /dev/null
+++ b/src/finch/resources/jobs/automated.py
@@ -0,0 +1,318 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_raw_response_wrapper, async_to_raw_response_wrapper
+from ...pagination import SyncPage, AsyncPage
+from ...types.jobs import (
+ AutomatedAsyncJob,
+ AutomatedCreateResponse,
+ automated_list_params,
+ automated_create_params,
+)
+from ..._base_client import AsyncPaginator, make_request_options
+
+if TYPE_CHECKING:
+ from ..._client import Finch, AsyncFinch
+
+__all__ = ["Automated", "AsyncAutomated"]
+
+
+class Automated(SyncAPIResource):
+ with_raw_response: AutomatedWithRawResponse
+
+ def __init__(self, client: Finch) -> None:
+ super().__init__(client)
+ self.with_raw_response = AutomatedWithRawResponse(self)
+
+ def create(
+ self,
+ *,
+ type: Literal["data_sync_all"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AutomatedCreateResponse:
+ """Enqueue an automated job.
+
+ Currently, only the `data_sync_all` job type is
+ supported, which will enqueue a job to re-sync all data for a connection.
+ `data_sync_all` has a concurrency limit of 1 job at a time per connection. This
+ means that if this endpoint is called while a job is already in progress for
+ this connection, Finch will return the `job_id` of the job that is currently in
+ progress. Finch allows a fixed window rate limit of 1 forced refresh per hour
+ per connection.
+
+ This endpoint is available for _Scale_ tier customers as an add-on. To request
+ access to this endpoint, please contact your Finch account manager.
+
+ Args:
+ type: The type of job to start. Currently the only supported type is `data_sync_all`
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/jobs/automated",
+ body=maybe_transform({"type": type}, automated_create_params.AutomatedCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutomatedCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AutomatedAsyncJob:
+ """
+ Get an automated job by `job_id`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/jobs/automated/{job_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutomatedAsyncJob,
+ )
+
+ def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ offset: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncPage[AutomatedAsyncJob]:
+ """Get all automated jobs.
+
+ Automated jobs are completed by a machine. By default,
+ jobs are sorted in descending order by submission time. For scheduled jobs such
+ as data syncs, only the next scheduled job is shown.
+
+ Args:
+ limit: Number of items to return
+
+ offset: Index to start from (defaults to 0)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/jobs/automated",
+ page=SyncPage[AutomatedAsyncJob],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "offset": offset,
+ },
+ automated_list_params.AutomatedListParams,
+ ),
+ ),
+ model=AutomatedAsyncJob,
+ )
+
+
+class AsyncAutomated(AsyncAPIResource):
+ with_raw_response: AsyncAutomatedWithRawResponse
+
+ def __init__(self, client: AsyncFinch) -> None:
+ super().__init__(client)
+ self.with_raw_response = AsyncAutomatedWithRawResponse(self)
+
+ async def create(
+ self,
+ *,
+ type: Literal["data_sync_all"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AutomatedCreateResponse:
+ """Enqueue an automated job.
+
+ Currently, only the `data_sync_all` job type is
+ supported, which will enqueue a job to re-sync all data for a connection.
+ `data_sync_all` has a concurrency limit of 1 job at a time per connection. This
+ means that if this endpoint is called while a job is already in progress for
+ this connection, Finch will return the `job_id` of the job that is currently in
+ progress. Finch allows a fixed window rate limit of 1 forced refresh per hour
+ per connection.
+
+ This endpoint is available for _Scale_ tier customers as an add-on. To request
+ access to this endpoint, please contact your Finch account manager.
+
+ Args:
+ type: The type of job to start. Currently the only supported type is `data_sync_all`
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/jobs/automated",
+ body=maybe_transform({"type": type}, automated_create_params.AutomatedCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutomatedCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AutomatedAsyncJob:
+ """
+ Get an automated job by `job_id`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/jobs/automated/{job_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutomatedAsyncJob,
+ )
+
+ def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ offset: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[AutomatedAsyncJob, AsyncPage[AutomatedAsyncJob]]:
+ """Get all automated jobs.
+
+ Automated jobs are completed by a machine. By default,
+ jobs are sorted in descending order by submission time. For scheduled jobs such
+ as data syncs, only the next scheduled job is shown.
+
+ Args:
+ limit: Number of items to return
+
+ offset: Index to start from (defaults to 0)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/jobs/automated",
+ page=AsyncPage[AutomatedAsyncJob],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "offset": offset,
+ },
+ automated_list_params.AutomatedListParams,
+ ),
+ ),
+ model=AutomatedAsyncJob,
+ )
+
+
+class AutomatedWithRawResponse:
+ def __init__(self, automated: Automated) -> None:
+ self.create = to_raw_response_wrapper(
+ automated.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ automated.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ automated.list,
+ )
+
+
+class AsyncAutomatedWithRawResponse:
+ def __init__(self, automated: AsyncAutomated) -> None:
+ self.create = async_to_raw_response_wrapper(
+ automated.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ automated.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ automated.list,
+ )
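Each new resource also wires up a `*WithRawResponse` companion via `to_raw_response_wrapper`. A hedged sketch of using it; the `.parse()` and `.headers` accessors are assumptions based on the SDK's raw-response wrapper, and the token, job id, and header name are placeholders.

```python
# Hedged sketch of the raw-response variant added above.
from finch import Finch

client = Finch(access_token="my-access-token")  # placeholder token

response = client.jobs.automated.with_raw_response.retrieve("job-id")  # placeholder id
print(response.headers.get("X-Request-Id"))  # header name is illustrative
job = response.parse()  # assumed to yield the AutomatedAsyncJob model
```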
diff --git a/src/finch/resources/jobs/jobs.py b/src/finch/resources/jobs/jobs.py
new file mode 100644
index 00000000..f522c9c5
--- /dev/null
+++ b/src/finch/resources/jobs/jobs.py
@@ -0,0 +1,60 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from .manual import (
+ Manual,
+ AsyncManual,
+ ManualWithRawResponse,
+ AsyncManualWithRawResponse,
+)
+from .automated import (
+ Automated,
+ AsyncAutomated,
+ AutomatedWithRawResponse,
+ AsyncAutomatedWithRawResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+
+if TYPE_CHECKING:
+ from ..._client import Finch, AsyncFinch
+
+__all__ = ["Jobs", "AsyncJobs"]
+
+
+class Jobs(SyncAPIResource):
+ automated: Automated
+ manual: Manual
+ with_raw_response: JobsWithRawResponse
+
+ def __init__(self, client: Finch) -> None:
+ super().__init__(client)
+ self.automated = Automated(client)
+ self.manual = Manual(client)
+ self.with_raw_response = JobsWithRawResponse(self)
+
+
+class AsyncJobs(AsyncAPIResource):
+ automated: AsyncAutomated
+ manual: AsyncManual
+ with_raw_response: AsyncJobsWithRawResponse
+
+ def __init__(self, client: AsyncFinch) -> None:
+ super().__init__(client)
+ self.automated = AsyncAutomated(client)
+ self.manual = AsyncManual(client)
+ self.with_raw_response = AsyncJobsWithRawResponse(self)
+
+
+class JobsWithRawResponse:
+ def __init__(self, jobs: Jobs) -> None:
+ self.automated = AutomatedWithRawResponse(jobs.automated)
+ self.manual = ManualWithRawResponse(jobs.manual)
+
+
+class AsyncJobsWithRawResponse:
+ def __init__(self, jobs: AsyncJobs) -> None:
+ self.automated = AsyncAutomatedWithRawResponse(jobs.automated)
+ self.manual = AsyncManualWithRawResponse(jobs.manual)
diff --git a/src/finch/resources/jobs/manual.py b/src/finch/resources/jobs/manual.py
new file mode 100644
index 00000000..9334f2da
--- /dev/null
+++ b/src/finch/resources/jobs/manual.py
@@ -0,0 +1,114 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_raw_response_wrapper, async_to_raw_response_wrapper
+from ...types.jobs import ManualAsyncJob
+from ..._base_client import make_request_options
+
+if TYPE_CHECKING:
+ from ..._client import Finch, AsyncFinch
+
+__all__ = ["Manual", "AsyncManual"]
+
+
+class Manual(SyncAPIResource):
+ with_raw_response: ManualWithRawResponse
+
+ def __init__(self, client: Finch) -> None:
+ super().__init__(client)
+ self.with_raw_response = ManualWithRawResponse(self)
+
+ def retrieve(
+ self,
+ job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ManualAsyncJob:
+ """Get a manual job by `job_id`.
+
+ Manual jobs are completed by a human and include
+ Assisted Benefits jobs.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/jobs/manual/{job_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ManualAsyncJob,
+ )
+
+
+class AsyncManual(AsyncAPIResource):
+ with_raw_response: AsyncManualWithRawResponse
+
+ def __init__(self, client: AsyncFinch) -> None:
+ super().__init__(client)
+ self.with_raw_response = AsyncManualWithRawResponse(self)
+
+ async def retrieve(
+ self,
+ job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ManualAsyncJob:
+ """Get a manual job by `job_id`.
+
+ Manual jobs are completed by a human and include
+ Assisted Benefits jobs.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/jobs/manual/{job_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ManualAsyncJob,
+ )
+
+
+class ManualWithRawResponse:
+ def __init__(self, manual: Manual) -> None:
+ self.retrieve = to_raw_response_wrapper(
+ manual.retrieve,
+ )
+
+
+class AsyncManualWithRawResponse:
+ def __init__(self, manual: AsyncManual) -> None:
+ self.retrieve = async_to_raw_response_wrapper(
+ manual.retrieve,
+ )
diff --git a/src/finch/resources/webhooks.py b/src/finch/resources/webhooks.py
index 13b88d61..40c7678b 100644
--- a/src/finch/resources/webhooks.py
+++ b/src/finch/resources/webhooks.py
@@ -56,8 +56,8 @@ def verify_signature(
try:
parsedSecret = base64.b64decode(secret)
- except Exception:
- raise ValueError("Bad secret")
+ except Exception as err:
+ raise ValueError("Bad secret") from err
msg_id = get_required_header(headers, "finch-event-id")
msg_timestamp = get_required_header(headers, "finch-timestamp")
@@ -68,8 +68,8 @@ def verify_signature(
try:
timestamp = datetime.fromtimestamp(float(msg_timestamp), tz=timezone.utc)
- except Exception:
- raise ValueError("Invalid timestamp header: " + msg_timestamp + ". Could not convert to timestamp")
+ except Exception as err:
+ raise ValueError("Invalid timestamp header: " + msg_timestamp + ". Could not convert to timestamp") from err
# too old
if timestamp < (now - webhook_tolerance):
@@ -152,8 +152,8 @@ def verify_signature(
try:
parsedSecret = base64.b64decode(secret)
- except Exception:
- raise ValueError("Bad secret")
+ except Exception as err:
+ raise ValueError("Bad secret") from err
msg_id = get_required_header(headers, "finch-event-id")
msg_timestamp = get_required_header(headers, "finch-timestamp")
@@ -164,8 +164,8 @@ def verify_signature(
try:
timestamp = datetime.fromtimestamp(float(msg_timestamp), tz=timezone.utc)
- except Exception:
- raise ValueError("Invalid timestamp header: " + msg_timestamp + ". Could not convert to timestamp")
+ except Exception as err:
+ raise ValueError("Invalid timestamp header: " + msg_timestamp + ". Could not convert to timestamp") from err
# too old
if timestamp < (now - webhook_tolerance):
diff --git a/src/finch/types/__init__.py b/src/finch/types/__init__.py
index eee945a5..f6a15f80 100644
--- a/src/finch/types/__init__.py
+++ b/src/finch/types/__init__.py
@@ -4,7 +4,7 @@
from .money import Money as Money
from .income import Income as Income
-from .paging import Paging as Paging
+from .shared import Paging as Paging
from .shared import OperationSupport as OperationSupport
from .shared import OperationSupportMatrix as OperationSupportMatrix
from .location import Location as Location
diff --git a/src/finch/types/hris/benfit_contribution.py b/src/finch/types/hris/benfit_contribution.py
index 522d5602..676c3b8e 100644
--- a/src/finch/types/hris/benfit_contribution.py
+++ b/src/finch/types/hris/benfit_contribution.py
@@ -1,6 +1,5 @@
# File generated from our OpenAPI spec by Stainless.
-
from .benefit_contribution import BenefitContribution
BenfitContribution = BenefitContribution
diff --git a/src/finch/types/hris/company.py b/src/finch/types/hris/company.py
index fb0ea41f..6823a150 100644
--- a/src/finch/types/hris/company.py
+++ b/src/finch/types/hris/company.py
@@ -46,7 +46,9 @@ class Entity(BaseModel):
subtype: Optional[Literal["s_corporation", "c_corporation", "b_corporation"]] = None
"""The tax payer subtype of the company."""
- type: Optional[Literal["llc", "corporation", "sole_proprietor", "non_profit", "partnership", "cooperative"]] = None
+ type: Optional[
+ Literal["llc", "lp", "corporation", "sole_proprietor", "non_profit", "partnership", "cooperative"]
+ ] = None
"""The tax payer type of the company."""
diff --git a/src/finch/types/hris/pay_statement_response_body.py b/src/finch/types/hris/pay_statement_response_body.py
index 712e442d..c7563aac 100644
--- a/src/finch/types/hris/pay_statement_response_body.py
+++ b/src/finch/types/hris/pay_statement_response_body.py
@@ -2,7 +2,7 @@
from typing import List, Optional
-from ..paging import Paging
+from ..shared import Paging
from ..._models import BaseModel
from .pay_statement import PayStatement
diff --git a/src/finch/types/introspection.py b/src/finch/types/introspection.py
index b9d15202..c4e6178d 100644
--- a/src/finch/types/introspection.py
+++ b/src/finch/types/introspection.py
@@ -1,6 +1,7 @@
# File generated from our OpenAPI spec by Stainless.
from typing import List
+from typing_extensions import Literal
from .._models import BaseModel
@@ -14,9 +15,18 @@ class Introspection(BaseModel):
client_id: str
"""The client id of the application associated with the `access_token`."""
+ client_type: Literal["production", "development", "sandbox"]
+ """The type of application associated with a token."""
+
company_id: str
"""The Finch uuid of the company associated with the `access_token`."""
+ connection_type: Literal["provider", "finch"]
+ """
+ The type of the connection associated with the token. `provider` -
+ connection to an external provider `finch` - finch-generated data.
+ """
+
manual: bool
"""
Whether the connection associated with the `access_token` uses the Assisted
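
With `client_type` and `connection_type` on the `Introspection` model, token introspection now distinguishes sandbox/development/production applications and provider-backed versus Finch-generated connections. A hedged sketch, assuming the existing `client.account.introspect()` helper (not part of this diff) returns this model:

```python
from finch import Finch

client = Finch(access_token="my-access-token")  # placeholder credentials

info = client.account.introspect()
print(info.client_type)      # "production", "development", or "sandbox"
print(info.connection_type)  # "provider" or "finch"

if info.connection_type == "finch":
    print("Token is backed by Finch-generated data rather than an external provider.")
```
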
diff --git a/src/finch/types/jobs/__init__.py b/src/finch/types/jobs/__init__.py
new file mode 100644
index 00000000..25ad24cf
--- /dev/null
+++ b/src/finch/types/jobs/__init__.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from .manual_async_job import ManualAsyncJob as ManualAsyncJob
+from .automated_async_job import AutomatedAsyncJob as AutomatedAsyncJob
+from .automated_list_params import AutomatedListParams as AutomatedListParams
+from .automated_create_params import AutomatedCreateParams as AutomatedCreateParams
+from .automated_create_response import (
+ AutomatedCreateResponse as AutomatedCreateResponse,
+)
diff --git a/src/finch/types/jobs/automated_async_job.py b/src/finch/types/jobs/automated_async_job.py
new file mode 100644
index 00000000..0b3e727d
--- /dev/null
+++ b/src/finch/types/jobs/automated_async_job.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["AutomatedAsyncJob"]
+
+
+class AutomatedAsyncJob(BaseModel):
+ completed_at: Optional[datetime]
+ """The datetime the job completed."""
+
+ created_at: datetime
+ """The datetime when the job was created.
+
+ For scheduled jobs, this will be the initial connection time. For ad-hoc jobs,
+ this will be the time the creation request was received.
+ """
+
+ job_id: str
+ """The id of the job that has been created."""
+
+ job_url: str
+ """The url that can be used to retrieve the job status"""
+
+ scheduled_at: Optional[datetime]
+ """The datetime a job is scheduled to be run.
+
+ For scheduled jobs, this datetime can be in the future if the job has not yet
+ been enqueued. For ad-hoc jobs, this field will be null.
+ """
+
+ started_at: Optional[datetime]
+ """The datetime a job entered into the job queue."""
+
+ status: Literal["pending", "in_progress", "complete", "error", "reauth_error", "permissions_error"]
+
+ type: Literal["data_sync_all"]
+ """Only `data_sync_all` currently supported"""
diff --git a/src/finch/types/jobs/automated_create_params.py b/src/finch/types/jobs/automated_create_params.py
new file mode 100644
index 00000000..ea39170f
--- /dev/null
+++ b/src/finch/types/jobs/automated_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["AutomatedCreateParams"]
+
+
+class AutomatedCreateParams(TypedDict, total=False):
+ type: Required[Literal["data_sync_all"]]
+ """The type of job to start. Currently the only supported type is `data_sync_all`"""
diff --git a/src/finch/types/jobs/automated_create_response.py b/src/finch/types/jobs/automated_create_response.py
new file mode 100644
index 00000000..1341d52b
--- /dev/null
+++ b/src/finch/types/jobs/automated_create_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from ..._models import BaseModel
+
+__all__ = ["AutomatedCreateResponse"]
+
+
+class AutomatedCreateResponse(BaseModel):
+ allowed_refreshes: int
+ """The number of allowed refreshes per hour (per hour, fixed window)"""
+
+ job_id: str
+ """The id of the job that has been created."""
+
+ job_url: str
+ """The url that can be used to retrieve the job status"""
+
+ remaining_refreshes: int
+ """The number of remaining refreshes available (per hour, fixed window)"""
diff --git a/src/finch/types/jobs/automated_list_params.py b/src/finch/types/jobs/automated_list_params.py
new file mode 100644
index 00000000..4c24874f
--- /dev/null
+++ b/src/finch/types/jobs/automated_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AutomatedListParams"]
+
+
+class AutomatedListParams(TypedDict, total=False):
+ limit: int
+ """Number of items to return"""
+
+ offset: int
+ """Index to start from (defaults to 0)"""
diff --git a/src/finch/types/jobs/manual_async_job.py b/src/finch/types/jobs/manual_async_job.py
new file mode 100644
index 00000000..0f831fb0
--- /dev/null
+++ b/src/finch/types/jobs/manual_async_job.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ManualAsyncJob"]
+
+
+class ManualAsyncJob(BaseModel):
+ body: Optional[List[object]]
+ """Specific information about the job, such as individual statuses for batch jobs."""
+
+ job_id: str
+
+ status: Literal["pending", "in_progress", "error", "complete"]
diff --git a/src/finch/types/shared/__init__.py b/src/finch/types/shared/__init__.py
index 3be971aa..6c699bd7 100644
--- a/src/finch/types/shared/__init__.py
+++ b/src/finch/types/shared/__init__.py
@@ -1,4 +1,5 @@
# File generated from our OpenAPI spec by Stainless.
+from .paging import Paging as Paging
from .operation_support import OperationSupport as OperationSupport
from .operation_support_matrix import OperationSupportMatrix as OperationSupportMatrix
diff --git a/src/finch/types/paging.py b/src/finch/types/shared/paging.py
similarity index 91%
rename from src/finch/types/paging.py
rename to src/finch/types/shared/paging.py
index 296a46f6..726478fa 100644
--- a/src/finch/types/paging.py
+++ b/src/finch/types/shared/paging.py
@@ -2,7 +2,7 @@
from typing import Optional
-from .._models import BaseModel
+from ..._models import BaseModel
__all__ = ["Paging"]
diff --git a/src/finch/types/shared_params/__init__.py b/src/finch/types/shared_params/__init__.py
deleted file mode 100644
index 3be971aa..00000000
--- a/src/finch/types/shared_params/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from .operation_support import OperationSupport as OperationSupport
-from .operation_support_matrix import OperationSupportMatrix as OperationSupportMatrix
diff --git a/src/finch/types/shared_params/operation_support.py b/src/finch/types/shared_params/operation_support.py
deleted file mode 100644
index 290a5214..00000000
--- a/src/finch/types/shared_params/operation_support.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing_extensions import Literal
-
-__all__ = ["OperationSupport"]
-
-OperationSupport = Literal["supported", "not_supported_by_finch", "not_supported_by_provider", "client_access_only"]
diff --git a/src/finch/types/shared_params/operation_support_matrix.py b/src/finch/types/shared_params/operation_support_matrix.py
deleted file mode 100644
index 4fa6df6b..00000000
--- a/src/finch/types/shared_params/operation_support_matrix.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-from ..shared import OperationSupport
-from .operation_support import OperationSupport
-
-__all__ = ["OperationSupportMatrix"]
-
-
-class OperationSupportMatrix(TypedDict, total=False):
- create: OperationSupport
- """
- - `supported`: This operation is supported by both the provider and Finch
- - `not_supported_by_finch`: This operation is not supported by Finch but
- supported by the provider
- - `not_supported_by_provider`: This operation is not supported by the provider,
- so Finch cannot support
- - `client_access_only`: This behavior is supported by the provider, but only
- available to the client and not to Finch
- """
-
- delete: OperationSupport
- """
- - `supported`: This operation is supported by both the provider and Finch
- - `not_supported_by_finch`: This operation is not supported by Finch but
- supported by the provider
- - `not_supported_by_provider`: This operation is not supported by the provider,
- so Finch cannot support
- - `client_access_only`: This behavior is supported by the provider, but only
- available to the client and not to Finch
- """
-
- read: OperationSupport
- """
- - `supported`: This operation is supported by both the provider and Finch
- - `not_supported_by_finch`: This operation is not supported by Finch but
- supported by the provider
- - `not_supported_by_provider`: This operation is not supported by the provider,
- so Finch cannot support
- - `client_access_only`: This behavior is supported by the provider, but only
- available to the client and not to Finch
- """
-
- update: OperationSupport
- """
- - `supported`: This operation is supported by both the provider and Finch
- - `not_supported_by_finch`: This operation is not supported by Finch but
- supported by the provider
- - `not_supported_by_provider`: This operation is not supported by the provider,
- so Finch cannot support
- - `client_access_only`: This behavior is supported by the provider, but only
- available to the client and not to Finch
- """
diff --git a/tests/api_resources/jobs/__init__.py b/tests/api_resources/jobs/__init__.py
new file mode 100644
index 00000000..1016754e
--- /dev/null
+++ b/tests/api_resources/jobs/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless.
diff --git a/tests/api_resources/jobs/test_automated.py b/tests/api_resources/jobs/test_automated.py
new file mode 100644
index 00000000..ae4ff4fa
--- /dev/null
+++ b/tests/api_resources/jobs/test_automated.py
@@ -0,0 +1,132 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from finch import Finch, AsyncFinch
+from tests.utils import assert_matches_type
+from finch._client import Finch, AsyncFinch
+from finch.pagination import SyncPage, AsyncPage
+from finch.types.jobs import AutomatedAsyncJob, AutomatedCreateResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+access_token = "My Access Token"
+
+
+class TestAutomated:
+ strict_client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
+ loose_client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=False)
+ parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
+
+ @parametrize
+ def test_method_create(self, client: Finch) -> None:
+ automated = client.jobs.automated.create(
+ type="data_sync_all",
+ )
+ assert_matches_type(AutomatedCreateResponse, automated, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: Finch) -> None:
+ response = client.jobs.automated.with_raw_response.create(
+ type="data_sync_all",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(AutomatedCreateResponse, automated, path=["response"])
+
+ @parametrize
+ def test_method_retrieve(self, client: Finch) -> None:
+ automated = client.jobs.automated.retrieve(
+ "string",
+ )
+ assert_matches_type(AutomatedAsyncJob, automated, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Finch) -> None:
+ response = client.jobs.automated.with_raw_response.retrieve(
+ "string",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(AutomatedAsyncJob, automated, path=["response"])
+
+ @parametrize
+ def test_method_list(self, client: Finch) -> None:
+ automated = client.jobs.automated.list()
+ assert_matches_type(SyncPage[AutomatedAsyncJob], automated, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: Finch) -> None:
+ automated = client.jobs.automated.list(
+ limit=0,
+ offset=0,
+ )
+ assert_matches_type(SyncPage[AutomatedAsyncJob], automated, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Finch) -> None:
+ response = client.jobs.automated.with_raw_response.list()
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(SyncPage[AutomatedAsyncJob], automated, path=["response"])
+
+
+class TestAsyncAutomated:
+ strict_client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
+ loose_client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=False)
+ parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
+
+ @parametrize
+ async def test_method_create(self, client: AsyncFinch) -> None:
+ automated = await client.jobs.automated.create(
+ type="data_sync_all",
+ )
+ assert_matches_type(AutomatedCreateResponse, automated, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, client: AsyncFinch) -> None:
+ response = await client.jobs.automated.with_raw_response.create(
+ type="data_sync_all",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(AutomatedCreateResponse, automated, path=["response"])
+
+ @parametrize
+ async def test_method_retrieve(self, client: AsyncFinch) -> None:
+ automated = await client.jobs.automated.retrieve(
+ "string",
+ )
+ assert_matches_type(AutomatedAsyncJob, automated, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, client: AsyncFinch) -> None:
+ response = await client.jobs.automated.with_raw_response.retrieve(
+ "string",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(AutomatedAsyncJob, automated, path=["response"])
+
+ @parametrize
+ async def test_method_list(self, client: AsyncFinch) -> None:
+ automated = await client.jobs.automated.list()
+ assert_matches_type(AsyncPage[AutomatedAsyncJob], automated, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, client: AsyncFinch) -> None:
+ automated = await client.jobs.automated.list(
+ limit=0,
+ offset=0,
+ )
+ assert_matches_type(AsyncPage[AutomatedAsyncJob], automated, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, client: AsyncFinch) -> None:
+ response = await client.jobs.automated.with_raw_response.list()
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ automated = response.parse()
+ assert_matches_type(AsyncPage[AutomatedAsyncJob], automated, path=["response"])
diff --git a/tests/api_resources/jobs/test_manual.py b/tests/api_resources/jobs/test_manual.py
new file mode 100644
index 00000000..9df08a02
--- /dev/null
+++ b/tests/api_resources/jobs/test_manual.py
@@ -0,0 +1,59 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from finch import Finch, AsyncFinch
+from tests.utils import assert_matches_type
+from finch._client import Finch, AsyncFinch
+from finch.types.jobs import ManualAsyncJob
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+access_token = "My Access Token"
+
+
+class TestManual:
+ strict_client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
+ loose_client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=False)
+ parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
+
+ @parametrize
+ def test_method_retrieve(self, client: Finch) -> None:
+ manual = client.jobs.manual.retrieve(
+ "string",
+ )
+ assert_matches_type(ManualAsyncJob, manual, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Finch) -> None:
+ response = client.jobs.manual.with_raw_response.retrieve(
+ "string",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ manual = response.parse()
+ assert_matches_type(ManualAsyncJob, manual, path=["response"])
+
+
+class TestAsyncManual:
+ strict_client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
+ loose_client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=False)
+ parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
+
+ @parametrize
+ async def test_method_retrieve(self, client: AsyncFinch) -> None:
+ manual = await client.jobs.manual.retrieve(
+ "string",
+ )
+ assert_matches_type(ManualAsyncJob, manual, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, client: AsyncFinch) -> None:
+ response = await client.jobs.manual.with_raw_response.retrieve(
+ "string",
+ )
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ manual = response.parse()
+ assert_matches_type(ManualAsyncJob, manual, path=["response"])
diff --git a/tests/test_client.py b/tests/test_client.py
index 5c5b16b1..b2d64aee 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -2,11 +2,13 @@
from __future__ import annotations
+import gc
import os
import json
import asyncio
import inspect
-from typing import Any, Dict, Union, cast
+import tracemalloc
+from typing import Any, Union, cast
from unittest import mock
import httpx
@@ -174,6 +176,67 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ def test_copy_build_request(self) -> None:
+ options = FinalRequestOptions(method="get", url="/foo")
+
+ def build_request(options: FinalRequestOptions) -> None:
+ client = self.client.copy()
+ client._build_request(options)
+
+ # ensure that the machinery is warmed up before tracing starts.
+ build_request(options)
+ gc.collect()
+
+ tracemalloc.start(1000)
+
+ snapshot_before = tracemalloc.take_snapshot()
+
+ ITERATIONS = 10
+ for _ in range(ITERATIONS):
+ build_request(options)
+ gc.collect()
+
+ snapshot_after = tracemalloc.take_snapshot()
+
+ tracemalloc.stop()
+
+ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
+ if diff.count == 0:
+ # Avoid false positives by considering only leaks (i.e. allocations that persist).
+ return
+
+ if diff.count % ITERATIONS != 0:
+ # Avoid false positives by considering only leaks that appear per iteration.
+ return
+
+ for frame in diff.traceback:
+ if any(
+ frame.filename.endswith(fragment)
+ for fragment in [
+ # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
+ #
+ # removing the decorator fixes the leak for reasons we don't understand.
+ "finch/_response.py",
+ # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
+ "finch/_compat.py",
+ # Standard library leaks we don't care about.
+ "/logging/__init__.py",
+ ]
+ ):
+ return
+
+ leaks.append(diff)
+
+ leaks: list[tracemalloc.StatisticDiff] = []
+ for diff in snapshot_after.compare_to(snapshot_before, "traceback"):
+ add_leak(leaks, diff)
+ if leaks:
+ for leak in leaks:
+ print("MEMORY LEAK:", leak)
+ for frame in leak.traceback:
+ print(frame)
+ raise AssertionError()
+
def test_request_timeout(self) -> None:
request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -353,7 +416,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
@@ -367,7 +430,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
@@ -381,7 +444,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"foo": "2"}
@pytest.mark.respx(base_url=base_url)
@@ -441,6 +504,16 @@ class Model(BaseModel):
assert isinstance(response, Model)
assert response.foo == 2
+ def test_base_url_setter(self) -> None:
+ client = Finch(
+ base_url="https://example.com/from_init", access_token=access_token, _strict_response_validation=True
+ )
+ assert client.base_url == "https://example.com/from_init/"
+
+ client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
+
+ assert client.base_url == "https://example.com/from_setter/"
+
def test_base_url_env(self) -> None:
with update_env(FINCH_BASE_URL="http://localhost:5000/from/env"):
client = Finch(access_token=access_token, _strict_response_validation=True)
@@ -532,7 +605,9 @@ def test_transport_option_is_deprecated(self) -> None:
DeprecationWarning,
match="The `transport` argument is deprecated. The `http_client` argument should be passed instead",
):
- transport = httpx.MockTransport(lambda: None)
+ transport = httpx.MockTransport(
+ lambda: None, # type: ignore
+ )
client = Finch(
base_url=base_url, access_token=access_token, _strict_response_validation=True, transport=transport
@@ -548,7 +623,9 @@ def test_transport_option_mutually_exclusive_with_http_client(self) -> None:
base_url=base_url,
access_token=access_token,
_strict_response_validation=True,
- transport=httpx.MockTransport(lambda: None),
+ transport=httpx.MockTransport(
+ lambda: None, # type: ignore
+ ),
http_client=http_client,
)
@@ -618,14 +695,6 @@ def test_proxies_option_mutually_exclusive_with_http_client(self) -> None:
http_client=http_client,
)
- def test_client_del(self) -> None:
- client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
- assert not client.is_closed()
-
- client.__del__()
-
- assert client.is_closed()
-
def test_copied_client_does_not_close_http(self) -> None:
client = Finch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
assert not client.is_closed()
@@ -633,9 +702,8 @@ def test_copied_client_does_not_close_http(self) -> None:
copied = client.copy()
assert copied is not client
- copied.__del__()
+ del copied
- assert not copied.is_closed()
assert not client.is_closed()
def test_client_context_manager(self) -> None:
@@ -843,6 +911,67 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ def test_copy_build_request(self) -> None:
+ options = FinalRequestOptions(method="get", url="/foo")
+
+ def build_request(options: FinalRequestOptions) -> None:
+ client = self.client.copy()
+ client._build_request(options)
+
+ # ensure that the machinery is warmed up before tracing starts.
+ build_request(options)
+ gc.collect()
+
+ tracemalloc.start(1000)
+
+ snapshot_before = tracemalloc.take_snapshot()
+
+ ITERATIONS = 10
+ for _ in range(ITERATIONS):
+ build_request(options)
+ gc.collect()
+
+ snapshot_after = tracemalloc.take_snapshot()
+
+ tracemalloc.stop()
+
+ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
+ if diff.count == 0:
+ # Avoid false positives by considering only leaks (i.e. allocations that persist).
+ return
+
+ if diff.count % ITERATIONS != 0:
+ # Avoid false positives by considering only leaks that appear per iteration.
+ return
+
+ for frame in diff.traceback:
+ if any(
+ frame.filename.endswith(fragment)
+ for fragment in [
+ # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
+ #
+ # removing the decorator fixes the leak for reasons we don't understand.
+ "finch/_response.py",
+ # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
+ "finch/_compat.py",
+ # Standard library leaks we don't care about.
+ "/logging/__init__.py",
+ ]
+ ):
+ return
+
+ leaks.append(diff)
+
+ leaks: list[tracemalloc.StatisticDiff] = []
+ for diff in snapshot_after.compare_to(snapshot_before, "traceback"):
+ add_leak(leaks, diff)
+ if leaks:
+ for leak in leaks:
+ print("MEMORY LEAK:", leak)
+ for frame in leak.traceback:
+ print(frame)
+ raise AssertionError()
+
async def test_request_timeout(self) -> None:
request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1022,7 +1151,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
@@ -1036,7 +1165,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
@@ -1050,7 +1179,7 @@ def test_request_extra_query(self) -> None:
),
),
)
- params = cast(Dict[str, str], dict(request.url.params))
+ params = dict(request.url.params)
assert params == {"foo": "2"}
@pytest.mark.respx(base_url=base_url)
@@ -1110,6 +1239,16 @@ class Model(BaseModel):
assert isinstance(response, Model)
assert response.foo == 2
+ def test_base_url_setter(self) -> None:
+ client = AsyncFinch(
+ base_url="https://example.com/from_init", access_token=access_token, _strict_response_validation=True
+ )
+ assert client.base_url == "https://example.com/from_init/"
+
+ client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
+
+ assert client.base_url == "https://example.com/from_setter/"
+
def test_base_url_env(self) -> None:
with update_env(FINCH_BASE_URL="http://localhost:5000/from/env"):
client = AsyncFinch(access_token=access_token, _strict_response_validation=True)
@@ -1201,7 +1340,9 @@ def test_transport_option_is_deprecated(self) -> None:
DeprecationWarning,
match="The `transport` argument is deprecated. The `http_client` argument should be passed instead",
):
- transport = httpx.MockTransport(lambda: None)
+ transport = httpx.MockTransport(
+ lambda: None, # type: ignore
+ )
client = AsyncFinch(
base_url=base_url, access_token=access_token, _strict_response_validation=True, transport=transport
@@ -1217,7 +1358,9 @@ async def test_transport_option_mutually_exclusive_with_http_client(self) -> Non
base_url=base_url,
access_token=access_token,
_strict_response_validation=True,
- transport=httpx.MockTransport(lambda: None),
+ transport=httpx.MockTransport(
+ lambda: None, # type: ignore
+ ),
http_client=http_client,
)
@@ -1287,15 +1430,6 @@ async def test_proxies_option_mutually_exclusive_with_http_client(self) -> None:
http_client=http_client,
)
- async def test_client_del(self) -> None:
- client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
- assert not client.is_closed()
-
- client.__del__()
-
- await asyncio.sleep(0.2)
- assert client.is_closed()
-
async def test_copied_client_does_not_close_http(self) -> None:
client = AsyncFinch(base_url=base_url, access_token=access_token, _strict_response_validation=True)
assert not client.is_closed()
@@ -1303,10 +1437,9 @@ async def test_copied_client_does_not_close_http(self) -> None:
copied = client.copy()
assert copied is not client
- copied.__del__()
+ del copied
await asyncio.sleep(0.2)
- assert not copied.is_closed()
assert not client.is_closed()
async def test_client_context_manager(self) -> None:
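
The new `test_copy_build_request` checks follow a general tracemalloc pattern: take a snapshot, run N identical iterations, take another snapshot, and treat allocation diffs whose counts are an exact multiple of N as suspected per-iteration leaks. Stripped of the Finch-specific allow-list of known non-leaks, the pattern looks roughly like this:

```python
from __future__ import annotations

import gc
import tracemalloc
from typing import Callable


def find_suspected_leaks(fn: Callable[[], None], iterations: int = 10) -> list[tracemalloc.StatisticDiff]:
    """Return allocation diffs that grow once per iteration of `fn`."""
    fn()  # warm up caches so one-time allocations are not counted
    gc.collect()

    tracemalloc.start(1000)  # keep deep tracebacks for attribution
    before = tracemalloc.take_snapshot()

    for _ in range(iterations):
        fn()
    gc.collect()

    after = tracemalloc.take_snapshot()
    tracemalloc.stop()

    return [
        diff
        for diff in after.compare_to(before, "traceback")
        if diff.count != 0 and diff.count % iterations == 0
    ]


if __name__ == "__main__":
    leaked: list[bytes] = []
    for suspect in find_suspected_leaks(lambda: leaked.append(b"x" * 1024)):
        print("suspected leak:", suspect)
```
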
diff --git a/tests/test_required_args.py b/tests/test_required_args.py
index e839289d..38007450 100644
--- a/tests/test_required_args.py
+++ b/tests/test_required_args.py
@@ -43,7 +43,7 @@ def foo(*, a: str | None = None) -> str | None:
def test_multiple_params() -> None:
@required_args(["a", "b", "c"])
def foo(a: str = "", *, b: str = "", c: str = "") -> str | None:
- return a + " " + b + " " + c
+ return f"{a} {b} {c}"
assert foo(a="a", b="b", c="c") == "a b c"
diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py
new file mode 100644
index 00000000..f5a7028d
--- /dev/null
+++ b/tests/test_utils/test_proxy.py
@@ -0,0 +1,23 @@
+import operator
+from typing import Any
+from typing_extensions import override
+
+from finch._utils import LazyProxy
+
+
+class RecursiveLazyProxy(LazyProxy[Any]):
+ @override
+ def __load__(self) -> Any:
+ return self
+
+ def __call__(self, *_args: Any, **_kwds: Any) -> Any:
+ raise RuntimeError("This should never be called!")
+
+
+def test_recursive_proxy() -> None:
+ proxy = RecursiveLazyProxy()
+ assert repr(proxy) == "RecursiveLazyProxy"
+ assert str(proxy) == "RecursiveLazyProxy"
+ assert dir(proxy) == []
+ assert type(proxy).__name__ == "RecursiveLazyProxy"
+ assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy"
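
The proxy test exercises `finch._utils.LazyProxy`, whose single required hook is `__load__`; attribute access, `repr`, and `dir` on the proxy are forwarded to whatever `__load__` returns. A hedged sketch of deferring an expensive object behind a proxy (exactly when and how often `__load__` is invoked is an implementation detail of `LazyProxy`):

```python
from typing_extensions import override

from finch._utils import LazyProxy


class ExpensiveThing:
    def __init__(self) -> None:
        # Imagine slow setup here (loading config, opening connections, ...).
        self.answer = 42


class LazyExpensiveThing(LazyProxy[ExpensiveThing]):
    @override
    def __load__(self) -> ExpensiveThing:
        # Invoked when the proxy is used; caching is up to the proxy implementation.
        return ExpensiveThing()


thing = LazyExpensiveThing()  # no ExpensiveThing constructed yet
print(thing.answer)           # attribute access goes through __load__ -> 42
```
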
diff --git a/tests/utils.py b/tests/utils.py
index bc62203d..3cd9cf08 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -91,7 +91,7 @@ def assert_matches_type(
traceback.print_exc()
continue
- assert False, "Did not match any variants"
+ raise AssertionError("Did not match any variants")
elif issubclass(origin, BaseModel):
assert isinstance(value, type_)
assert assert_matches_model(type_, cast(Any, value), path=path)