From aa59f2cdb8aa85695abf6498fe8d007a0ec516f6 Mon Sep 17 00:00:00 2001
From: Hassieb Pakzad <68423100+hassiebp@users.noreply.github.com>
Date: Thu, 7 Nov 2024 15:45:46 +0100
Subject: [PATCH 01/42] update fern
---
langfuse/api/README.md | 44 +-
langfuse/api/__init__.py | 42 +-
langfuse/api/client.py | 47 +-
langfuse/api/reference.md | 541 ++++++++++++++++++
langfuse/api/resources/__init__.py | 46 +-
langfuse/api/resources/comments/__init__.py | 5 +
langfuse/api/resources/comments/client.py | 534 +++++++++++++++++
.../api/resources/comments/types/__init__.py | 7 +
.../comments/types/create_comment_request.py | 69 +++
.../comments/types/create_comment_response.py | 45 ++
.../types/get_comments_response.py} | 27 +-
langfuse/api/resources/commons/__init__.py | 12 +-
.../api/resources/commons/types/__init__.py | 4 +
.../api/resources/commons/types/base_score.py | 34 +-
.../api/resources/commons/types/comment.py | 54 ++
.../commons/types/comment_object_type.py | 29 +
langfuse/api/resources/commons/types/model.py | 2 +-
langfuse/api/resources/commons/types/score.py | 102 +++-
langfuse/api/resources/media/__init__.py | 15 +
langfuse/api/resources/media/client.py | 509 ++++++++++++++++
.../api/resources/media/types/__init__.py | 13 +
.../media/types/get_media_response.py | 72 +++
.../types/get_media_upload_url_request.py | 74 +++
.../types/get_media_upload_url_response.py | 54 ++
.../resources/media/types/patch_media_body.py | 59 ++
langfuse/api/resources/score/__init__.py | 28 +-
langfuse/api/resources/score/client.py | 240 ++++++--
.../api/resources/score/types/__init__.py | 26 +-
.../score/types/get_scores_response.py | 45 ++
.../score/types/get_scores_response_data.py | 191 +++++++
.../types/get_scores_response_data_boolean.py | 46 ++
.../get_scores_response_data_categorical.py | 46 ++
.../types/get_scores_response_data_numeric.py | 46 ++
.../types/get_scores_response_trace_data.py | 52 ++
langfuse/api/tests/utils/test_http_client.py | 5 +-
.../api/tests/utils/test_query_encoding.py | 3 +-
36 files changed, 3028 insertions(+), 140 deletions(-)
create mode 100644 langfuse/api/resources/comments/__init__.py
create mode 100644 langfuse/api/resources/comments/client.py
create mode 100644 langfuse/api/resources/comments/types/__init__.py
create mode 100644 langfuse/api/resources/comments/types/create_comment_request.py
create mode 100644 langfuse/api/resources/comments/types/create_comment_response.py
rename langfuse/api/resources/{score/types/scores.py => comments/types/get_comments_response.py} (50%)
create mode 100644 langfuse/api/resources/commons/types/comment.py
create mode 100644 langfuse/api/resources/commons/types/comment_object_type.py
create mode 100644 langfuse/api/resources/media/__init__.py
create mode 100644 langfuse/api/resources/media/client.py
create mode 100644 langfuse/api/resources/media/types/__init__.py
create mode 100644 langfuse/api/resources/media/types/get_media_response.py
create mode 100644 langfuse/api/resources/media/types/get_media_upload_url_request.py
create mode 100644 langfuse/api/resources/media/types/get_media_upload_url_response.py
create mode 100644 langfuse/api/resources/media/types/patch_media_body.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response_data.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response_data_boolean.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response_data_categorical.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response_data_numeric.py
create mode 100644 langfuse/api/resources/score/types/get_scores_response_trace_data.py
diff --git a/langfuse/api/README.md b/langfuse/api/README.md
index 5a483dc3e..4087db553 100644
--- a/langfuse/api/README.md
+++ b/langfuse/api/README.md
@@ -16,7 +16,7 @@ pip install finto
Instantiate and use the client with the following:
```python
-from finto import CreateDatasetItemRequest, DatasetStatus
+from finto import CreateCommentRequest
from finto.client import FernLangfuse
client = FernLangfuse(
@@ -27,16 +27,13 @@ client = FernLangfuse(
password="YOUR_PASSWORD",
base_url="https://yourhost.com/path/to/api",
)
-client.dataset_items.create(
- request=CreateDatasetItemRequest(
- dataset_name="string",
- input={"key": "value"},
- expected_output={"key": "value"},
- metadata={"key": "value"},
- source_trace_id="string",
- source_observation_id="string",
- id="string",
- status=DatasetStatus.ACTIVE,
+client.comments.create(
+ request=CreateCommentRequest(
+ project_id="string",
+ object_type="string",
+ object_id="string",
+ content="string",
+ author_user_id="string",
),
)
```
@@ -48,7 +45,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t
```python
import asyncio
-from finto import CreateDatasetItemRequest, DatasetStatus
+from finto import CreateCommentRequest
from finto.client import AsyncFernLangfuse
client = AsyncFernLangfuse(
@@ -62,16 +59,13 @@ client = AsyncFernLangfuse(
async def main() -> None:
- await client.dataset_items.create(
- request=CreateDatasetItemRequest(
- dataset_name="string",
- input={"key": "value"},
- expected_output={"key": "value"},
- metadata={"key": "value"},
- source_trace_id="string",
- source_observation_id="string",
- id="string",
- status=DatasetStatus.ACTIVE,
+ await client.comments.create(
+ request=CreateCommentRequest(
+ project_id="string",
+ object_type="string",
+ object_id="string",
+ content="string",
+ author_user_id="string",
),
)
@@ -88,7 +82,7 @@ will be thrown.
from .api_error import ApiError
try:
- client.dataset_items.create(...)
+ client.comments.create(...)
except ApiError as e:
print(e.status_code)
print(e.body)
@@ -111,7 +105,7 @@ A request is deemed retriable when any of the following HTTP status codes is ret
Use the `max_retries` request option to configure this behavior.
```python
-client.dataset_items.create(...,{
+client.comments.create(...,{
max_retries=1
})
```
@@ -128,7 +122,7 @@ client = FernLangfuse(..., { timeout=20.0 }, )
# Override timeout for a specific method
-client.dataset_items.create(...,{
+client.comments.create(...,{
timeout_in_seconds=1
})
```
diff --git a/langfuse/api/__init__.py b/langfuse/api/__init__.py
index 0530b2c76..89d3df091 100644
--- a/langfuse/api/__init__.py
+++ b/langfuse/api/__init__.py
@@ -9,8 +9,12 @@
CategoricalScore,
ChatMessage,
ChatPrompt,
+ Comment,
+ CommentObjectType,
ConfigCategory,
CreateChatPromptRequest,
+ CreateCommentRequest,
+ CreateCommentResponse,
CreateDatasetItemRequest,
CreateDatasetRequest,
CreateDatasetRunItemRequest,
@@ -39,6 +43,19 @@
DatasetRunWithItems,
DatasetStatus,
Error,
+ GetCommentsResponse,
+ GetMediaResponse,
+ GetMediaUploadUrlRequest,
+ GetMediaUploadUrlResponse,
+ GetScoresResponse,
+ GetScoresResponseData,
+ GetScoresResponseDataBoolean,
+ GetScoresResponseDataCategorical,
+ GetScoresResponseDataNumeric,
+ GetScoresResponseData_Boolean,
+ GetScoresResponseData_Categorical,
+ GetScoresResponseData_Numeric,
+ GetScoresResponseTraceData,
HealthResponse,
IngestionError,
IngestionEvent,
@@ -75,6 +92,7 @@
PaginatedDatasets,
PaginatedModels,
PaginatedSessions,
+ PatchMediaBody,
Project,
Projects,
Prompt,
@@ -92,7 +110,6 @@
Score_Boolean,
Score_Categorical,
Score_Numeric,
- Scores,
SdkLogBody,
SdkLogEvent,
ServiceUnavailableError,
@@ -115,12 +132,14 @@
UpdateSpanEvent,
Usage,
UsageByModel,
+ comments,
commons,
dataset_items,
dataset_run_items,
datasets,
health,
ingestion,
+ media,
metrics,
models,
observations,
@@ -142,8 +161,12 @@
"CategoricalScore",
"ChatMessage",
"ChatPrompt",
+ "Comment",
+ "CommentObjectType",
"ConfigCategory",
"CreateChatPromptRequest",
+ "CreateCommentRequest",
+ "CreateCommentResponse",
"CreateDatasetItemRequest",
"CreateDatasetRequest",
"CreateDatasetRunItemRequest",
@@ -172,6 +195,19 @@
"DatasetRunWithItems",
"DatasetStatus",
"Error",
+ "GetCommentsResponse",
+ "GetMediaResponse",
+ "GetMediaUploadUrlRequest",
+ "GetMediaUploadUrlResponse",
+ "GetScoresResponse",
+ "GetScoresResponseData",
+ "GetScoresResponseDataBoolean",
+ "GetScoresResponseDataCategorical",
+ "GetScoresResponseDataNumeric",
+ "GetScoresResponseData_Boolean",
+ "GetScoresResponseData_Categorical",
+ "GetScoresResponseData_Numeric",
+ "GetScoresResponseTraceData",
"HealthResponse",
"IngestionError",
"IngestionEvent",
@@ -208,6 +244,7 @@
"PaginatedDatasets",
"PaginatedModels",
"PaginatedSessions",
+ "PatchMediaBody",
"Project",
"Projects",
"Prompt",
@@ -225,7 +262,6 @@
"Score_Boolean",
"Score_Categorical",
"Score_Numeric",
- "Scores",
"SdkLogBody",
"SdkLogEvent",
"ServiceUnavailableError",
@@ -248,12 +284,14 @@
"UpdateSpanEvent",
"Usage",
"UsageByModel",
+ "comments",
"commons",
"dataset_items",
"dataset_run_items",
"datasets",
"health",
"ingestion",
+ "media",
"metrics",
"models",
"observations",
diff --git a/langfuse/api/client.py b/langfuse/api/client.py
index 4df0f19e0..da24da20f 100644
--- a/langfuse/api/client.py
+++ b/langfuse/api/client.py
@@ -5,11 +5,16 @@
import httpx
from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .resources.comments.client import AsyncCommentsClient, CommentsClient
from .resources.dataset_items.client import AsyncDatasetItemsClient, DatasetItemsClient
-from .resources.dataset_run_items.client import AsyncDatasetRunItemsClient, DatasetRunItemsClient
+from .resources.dataset_run_items.client import (
+ AsyncDatasetRunItemsClient,
+ DatasetRunItemsClient,
+)
from .resources.datasets.client import AsyncDatasetsClient, DatasetsClient
from .resources.health.client import AsyncHealthClient, HealthClient
from .resources.ingestion.client import AsyncIngestionClient, IngestionClient
+from .resources.media.client import AsyncMediaClient, MediaClient
from .resources.metrics.client import AsyncMetricsClient, MetricsClient
from .resources.models.client import AsyncModelsClient, ModelsClient
from .resources.observations.client import AsyncObservationsClient, ObservationsClient
@@ -69,9 +74,11 @@ def __init__(
password: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
timeout: typing.Optional[float] = None,
follow_redirects: typing.Optional[bool] = True,
- httpx_client: typing.Optional[httpx.Client] = None
+ httpx_client: typing.Optional[httpx.Client] = None,
):
- _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
+ _defaulted_timeout = (
+ timeout if timeout is not None else 60 if httpx_client is None else None
+ )
self._client_wrapper = SyncClientWrapper(
base_url=base_url,
x_langfuse_sdk_name=x_langfuse_sdk_name,
@@ -81,16 +88,22 @@ def __init__(
password=password,
httpx_client=httpx_client
if httpx_client is not None
- else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+ else httpx.Client(
+ timeout=_defaulted_timeout, follow_redirects=follow_redirects
+ )
if follow_redirects is not None
else httpx.Client(timeout=_defaulted_timeout),
timeout=_defaulted_timeout,
)
+ self.comments = CommentsClient(client_wrapper=self._client_wrapper)
self.dataset_items = DatasetItemsClient(client_wrapper=self._client_wrapper)
- self.dataset_run_items = DatasetRunItemsClient(client_wrapper=self._client_wrapper)
+ self.dataset_run_items = DatasetRunItemsClient(
+ client_wrapper=self._client_wrapper
+ )
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.health = HealthClient(client_wrapper=self._client_wrapper)
self.ingestion = IngestionClient(client_wrapper=self._client_wrapper)
+ self.media = MediaClient(client_wrapper=self._client_wrapper)
self.metrics = MetricsClient(client_wrapper=self._client_wrapper)
self.models = ModelsClient(client_wrapper=self._client_wrapper)
self.observations = ObservationsClient(client_wrapper=self._client_wrapper)
@@ -150,9 +163,11 @@ def __init__(
password: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
timeout: typing.Optional[float] = None,
follow_redirects: typing.Optional[bool] = True,
- httpx_client: typing.Optional[httpx.AsyncClient] = None
+ httpx_client: typing.Optional[httpx.AsyncClient] = None,
):
- _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None
+ _defaulted_timeout = (
+ timeout if timeout is not None else 60 if httpx_client is None else None
+ )
self._client_wrapper = AsyncClientWrapper(
base_url=base_url,
x_langfuse_sdk_name=x_langfuse_sdk_name,
@@ -162,22 +177,32 @@ def __init__(
password=password,
httpx_client=httpx_client
if httpx_client is not None
- else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+ else httpx.AsyncClient(
+ timeout=_defaulted_timeout, follow_redirects=follow_redirects
+ )
if follow_redirects is not None
else httpx.AsyncClient(timeout=_defaulted_timeout),
timeout=_defaulted_timeout,
)
- self.dataset_items = AsyncDatasetItemsClient(client_wrapper=self._client_wrapper)
- self.dataset_run_items = AsyncDatasetRunItemsClient(client_wrapper=self._client_wrapper)
+ self.comments = AsyncCommentsClient(client_wrapper=self._client_wrapper)
+ self.dataset_items = AsyncDatasetItemsClient(
+ client_wrapper=self._client_wrapper
+ )
+ self.dataset_run_items = AsyncDatasetRunItemsClient(
+ client_wrapper=self._client_wrapper
+ )
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.health = AsyncHealthClient(client_wrapper=self._client_wrapper)
self.ingestion = AsyncIngestionClient(client_wrapper=self._client_wrapper)
+ self.media = AsyncMediaClient(client_wrapper=self._client_wrapper)
self.metrics = AsyncMetricsClient(client_wrapper=self._client_wrapper)
self.models = AsyncModelsClient(client_wrapper=self._client_wrapper)
self.observations = AsyncObservationsClient(client_wrapper=self._client_wrapper)
self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
- self.score_configs = AsyncScoreConfigsClient(client_wrapper=self._client_wrapper)
+ self.score_configs = AsyncScoreConfigsClient(
+ client_wrapper=self._client_wrapper
+ )
self.score = AsyncScoreClient(client_wrapper=self._client_wrapper)
self.sessions = AsyncSessionsClient(client_wrapper=self._client_wrapper)
self.trace = AsyncTraceClient(client_wrapper=self._client_wrapper)
diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md
index 05a51edf5..07103519d 100644
--- a/langfuse/api/reference.md
+++ b/langfuse/api/reference.md
@@ -1,4 +1,273 @@
# Reference
+## Comments
+client.comments.create(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create a comment. Comments may be attached to different object types (trace, observation, session, prompt).
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from finto import CreateCommentRequest
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.comments.create(
+ request=CreateCommentRequest(
+ project_id="string",
+ object_type="string",
+ object_id="string",
+ content="string",
+ author_user_id="string",
+ ),
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request:** `CreateCommentRequest`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.comments.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get all comments
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.comments.get(
+ page=1,
+ limit=1,
+ object_type="string",
+ object_id="string",
+ author_user_id="string",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number, starts at 1.
+
+
+
+
+
+-
+
+**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit
+
+
+
+
+
+-
+
+**object_type:** `typing.Optional[str]` — Filter comments by object type (trace, observation, session, prompt).
+
+
+
+
+
+-
+
+**object_id:** `typing.Optional[str]` — Filter comments by object id. If objectType is not provided, an error will be thrown.
+
+
+
+
+
+-
+
+**author_user_id:** `typing.Optional[str]` — Filter comments by author user id.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.comments.get_by_id(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a comment by id
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.comments.get_by_id(
+ comment_id="string",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**comment_id:** `str` — The unique langfuse identifier of a comment
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
## DatasetItems
client.dataset_items.create(...)
@@ -949,6 +1218,258 @@ client.ingestion.batch(
+
+
+
+
+## Media
+client.media.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a media record
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.media.get(
+ media_id="string",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**media_id:** `str` — The unique langfuse identifier of a media record
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.media.patch(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Patch a media record
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from finto import PatchMediaBody
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.media.patch(
+ media_id="string",
+ request=PatchMediaBody(
+ uploaded_at=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ upload_http_status=1,
+ upload_http_error="string",
+ ),
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**media_id:** `str` — The unique langfuse identifier of a media record
+
+
+
+
+
+-
+
+**request:** `PatchMediaBody`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.media.get_upload_url(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a presigned upload URL for a media record
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from finto import GetMediaUploadUrlRequest
+from finto.client import FernLangfuse
+
+client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+)
+client.media.get_upload_url(
+ request=GetMediaUploadUrlRequest(
+ trace_id="string",
+ observation_id="string",
+ content_type="string",
+ content_length=1,
+ sha_256_hash="string",
+ field="string",
+ ),
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**request:** `GetMediaUploadUrlRequest`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
@@ -2419,7 +2940,9 @@ client.score.get(
value=1.1,
score_ids="string",
config_id="string",
+ queue_id="string",
data_type=ScoreDataType.NUMERIC,
+ trace_tags=["string"],
)
```
@@ -2524,6 +3047,14 @@ client.score.get(
-
+**queue_id:** `typing.Optional[str]` — Retrieve only scores with a specific annotation queueId.
+
+
+
+
+
+-
+
**data_type:** `typing.Optional[ScoreDataType]` — Retrieve only scores with a specific dataType.
@@ -2532,6 +3063,16 @@ client.score.get(
-
+**trace_tags:** `typing.Optional[
+ typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]
+]` — Only scores linked to traces that include all of these tags will be returned.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/langfuse/api/resources/__init__.py b/langfuse/api/resources/__init__.py
index 330d54aaa..c75efc510 100644
--- a/langfuse/api/resources/__init__.py
+++ b/langfuse/api/resources/__init__.py
@@ -1,12 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from . import (
+ comments,
commons,
dataset_items,
dataset_run_items,
datasets,
health,
ingestion,
+ media,
metrics,
models,
observations,
@@ -18,11 +20,14 @@
trace,
utils,
)
+from .comments import CreateCommentRequest, CreateCommentResponse, GetCommentsResponse
from .commons import (
AccessDeniedError,
BaseScore,
BooleanScore,
CategoricalScore,
+ Comment,
+ CommentObjectType,
ConfigCategory,
CreateScoreValue,
Dataset,
@@ -101,6 +106,12 @@
UpdateSpanBody,
UpdateSpanEvent,
)
+from .media import (
+ GetMediaResponse,
+ GetMediaUploadUrlRequest,
+ GetMediaUploadUrlResponse,
+ PatchMediaBody,
+)
from .metrics import DailyMetrics, DailyMetricsDetails, UsageByModel
from .models import CreateModelRequest, PaginatedModels
from .observations import Observations, ObservationsViews
@@ -121,7 +132,19 @@
Prompt_Text,
TextPrompt,
)
-from .score import CreateScoreRequest, CreateScoreResponse, Scores
+from .score import (
+ CreateScoreRequest,
+ CreateScoreResponse,
+ GetScoresResponse,
+ GetScoresResponseData,
+ GetScoresResponseDataBoolean,
+ GetScoresResponseDataCategorical,
+ GetScoresResponseDataNumeric,
+ GetScoresResponseData_Boolean,
+ GetScoresResponseData_Categorical,
+ GetScoresResponseData_Numeric,
+ GetScoresResponseTraceData,
+)
from .score_configs import CreateScoreConfigRequest, ScoreConfigs
from .sessions import PaginatedSessions
from .trace import Sort, Traces
@@ -135,8 +158,12 @@
"CategoricalScore",
"ChatMessage",
"ChatPrompt",
+ "Comment",
+ "CommentObjectType",
"ConfigCategory",
"CreateChatPromptRequest",
+ "CreateCommentRequest",
+ "CreateCommentResponse",
"CreateDatasetItemRequest",
"CreateDatasetRequest",
"CreateDatasetRunItemRequest",
@@ -165,6 +192,19 @@
"DatasetRunWithItems",
"DatasetStatus",
"Error",
+ "GetCommentsResponse",
+ "GetMediaResponse",
+ "GetMediaUploadUrlRequest",
+ "GetMediaUploadUrlResponse",
+ "GetScoresResponse",
+ "GetScoresResponseData",
+ "GetScoresResponseDataBoolean",
+ "GetScoresResponseDataCategorical",
+ "GetScoresResponseDataNumeric",
+ "GetScoresResponseData_Boolean",
+ "GetScoresResponseData_Categorical",
+ "GetScoresResponseData_Numeric",
+ "GetScoresResponseTraceData",
"HealthResponse",
"IngestionError",
"IngestionEvent",
@@ -201,6 +241,7 @@
"PaginatedDatasets",
"PaginatedModels",
"PaginatedSessions",
+ "PatchMediaBody",
"Project",
"Projects",
"Prompt",
@@ -218,7 +259,6 @@
"Score_Boolean",
"Score_Categorical",
"Score_Numeric",
- "Scores",
"SdkLogBody",
"SdkLogEvent",
"ServiceUnavailableError",
@@ -241,12 +281,14 @@
"UpdateSpanEvent",
"Usage",
"UsageByModel",
+ "comments",
"commons",
"dataset_items",
"dataset_run_items",
"datasets",
"health",
"ingestion",
+ "media",
"metrics",
"models",
"observations",
diff --git a/langfuse/api/resources/comments/__init__.py b/langfuse/api/resources/comments/__init__.py
new file mode 100644
index 000000000..e40c8546f
--- /dev/null
+++ b/langfuse/api/resources/comments/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import CreateCommentRequest, CreateCommentResponse, GetCommentsResponse
+
+__all__ = ["CreateCommentRequest", "CreateCommentResponse", "GetCommentsResponse"]
diff --git a/langfuse/api/resources/comments/client.py b/langfuse/api/resources/comments/client.py
new file mode 100644
index 000000000..5c17f1a7c
--- /dev/null
+++ b/langfuse/api/resources/comments/client.py
@@ -0,0 +1,534 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.pydantic_utilities import pydantic_v1
+from ...core.request_options import RequestOptions
+from ..commons.errors.access_denied_error import AccessDeniedError
+from ..commons.errors.error import Error
+from ..commons.errors.method_not_allowed_error import MethodNotAllowedError
+from ..commons.errors.not_found_error import NotFoundError
+from ..commons.errors.unauthorized_error import UnauthorizedError
+from ..commons.types.comment import Comment
+from .types.create_comment_request import CreateCommentRequest
+from .types.create_comment_response import CreateCommentResponse
+from .types.get_comments_response import GetCommentsResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class CommentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def create(
+ self,
+ *,
+ request: CreateCommentRequest,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateCommentResponse:
+ """
+ Create a comment. Comments may be attached to different object types (trace, observation, session, prompt).
+
+ Parameters
+ ----------
+ request : CreateCommentRequest
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateCommentResponse
+
+ Examples
+ --------
+ from finto import CreateCommentRequest
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.comments.create(
+ request=CreateCommentRequest(
+ project_id="string",
+ object_type="string",
+ object_id="string",
+ content="string",
+ author_user_id="string",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/public/comments",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(CreateCommentResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ object_type: typing.Optional[str] = None,
+ object_id: typing.Optional[str] = None,
+ author_user_id: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetCommentsResponse:
+ """
+ Get all comments
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number, starts at 1.
+
+ limit : typing.Optional[int]
+ Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit
+
+ object_type : typing.Optional[str]
+ Filter comments by object type (trace, observation, session, prompt).
+
+ object_id : typing.Optional[str]
+ Filter comments by object id. If objectType is not provided, an error will be thrown.
+
+ author_user_id : typing.Optional[str]
+ Filter comments by author user id.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetCommentsResponse
+
+ Examples
+ --------
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.comments.get(
+ page=1,
+ limit=1,
+ object_type="string",
+ object_id="string",
+ author_user_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/public/comments",
+ method="GET",
+ params={
+ "page": page,
+ "limit": limit,
+ "objectType": object_type,
+ "objectId": object_id,
+ "authorUserId": author_user_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(GetCommentsResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_by_id(
+ self,
+ comment_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Comment:
+ """
+ Get a comment by id
+
+ Parameters
+ ----------
+ comment_id : str
+ The unique langfuse identifier of a comment
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Comment
+
+ Examples
+ --------
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.comments.get_by_id(
+ comment_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/public/comments/{jsonable_encoder(comment_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(Comment, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncCommentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def create(
+ self,
+ *,
+ request: CreateCommentRequest,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateCommentResponse:
+ """
+ Create a comment. Comments may be attached to different object types (trace, observation, session, prompt).
+
+ Parameters
+ ----------
+ request : CreateCommentRequest
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateCommentResponse
+
+ Examples
+ --------
+ import asyncio
+
+ from finto import CreateCommentRequest
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.comments.create(
+ request=CreateCommentRequest(
+ project_id="string",
+ object_type="string",
+ object_id="string",
+ content="string",
+ author_user_id="string",
+ ),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/public/comments",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(CreateCommentResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ object_type: typing.Optional[str] = None,
+ object_id: typing.Optional[str] = None,
+ author_user_id: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetCommentsResponse:
+ """
+ Get all comments
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number, starts at 1.
+
+ limit : typing.Optional[int]
+ Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit
+
+ object_type : typing.Optional[str]
+ Filter comments by object type (trace, observation, session, prompt).
+
+ object_id : typing.Optional[str]
+ Filter comments by object id. If objectType is not provided, an error will be thrown.
+
+ author_user_id : typing.Optional[str]
+ Filter comments by author user id.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetCommentsResponse
+
+ Examples
+ --------
+ import asyncio
+
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.comments.get(
+ page=1,
+ limit=1,
+ object_type="string",
+ object_id="string",
+ author_user_id="string",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/public/comments",
+ method="GET",
+ params={
+ "page": page,
+ "limit": limit,
+ "objectType": object_type,
+ "objectId": object_id,
+ "authorUserId": author_user_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(GetCommentsResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_by_id(
+ self,
+ comment_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Comment:
+ """
+ Get a comment by id
+
+ Parameters
+ ----------
+ comment_id : str
+ The unique langfuse identifier of a comment
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Comment
+
+ Examples
+ --------
+ import asyncio
+
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.comments.get_by_id(
+ comment_id="string",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/public/comments/{jsonable_encoder(comment_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(Comment, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/langfuse/api/resources/comments/types/__init__.py b/langfuse/api/resources/comments/types/__init__.py
new file mode 100644
index 000000000..13dc1d8d9
--- /dev/null
+++ b/langfuse/api/resources/comments/types/__init__.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .create_comment_request import CreateCommentRequest
+from .create_comment_response import CreateCommentResponse
+from .get_comments_response import GetCommentsResponse
+
+__all__ = ["CreateCommentRequest", "CreateCommentResponse", "GetCommentsResponse"]
diff --git a/langfuse/api/resources/comments/types/create_comment_request.py b/langfuse/api/resources/comments/types/create_comment_request.py
new file mode 100644
index 000000000..98e25e2e1
--- /dev/null
+++ b/langfuse/api/resources/comments/types/create_comment_request.py
@@ -0,0 +1,69 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class CreateCommentRequest(pydantic_v1.BaseModel):
+ project_id: str = pydantic_v1.Field(alias="projectId")
+ """
+ The id of the project to attach the comment to.
+ """
+
+ object_type: str = pydantic_v1.Field(alias="objectType")
+ """
+ The type of the object to attach the comment to (trace, observation, session, prompt).
+ """
+
+ object_id: str = pydantic_v1.Field(alias="objectId")
+ """
+ The id of the object to attach the comment to. If this does not reference a valid existing object, an error will be thrown.
+ """
+
+ content: str = pydantic_v1.Field()
+ """
+ The content of the comment. May include markdown. Currently limited to 500 characters.
+ """
+
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
+ """
+ The id of the user who created the comment.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/comments/types/create_comment_response.py b/langfuse/api/resources/comments/types/create_comment_response.py
new file mode 100644
index 000000000..d7708f798
--- /dev/null
+++ b/langfuse/api/resources/comments/types/create_comment_response.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class CreateCommentResponse(pydantic_v1.BaseModel):
+ id: str = pydantic_v1.Field()
+ """
+ The id of the created object in Langfuse
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/types/scores.py b/langfuse/api/resources/comments/types/get_comments_response.py
similarity index 50%
rename from langfuse/api/resources/score/types/scores.py
rename to langfuse/api/resources/comments/types/get_comments_response.py
index 998ae12e9..66a8b9527 100644
--- a/langfuse/api/resources/score/types/scores.py
+++ b/langfuse/api/resources/comments/types/get_comments_response.py
@@ -5,24 +5,37 @@
from ....core.datetime_utils import serialize_datetime
from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
-from ...commons.types.score import Score
+from ...commons.types.comment import Comment
from ...utils.resources.pagination.types.meta_response import MetaResponse
-class Scores(pydantic_v1.BaseModel):
- data: typing.List[Score]
+class GetCommentsResponse(pydantic_v1.BaseModel):
+ data: typing.List[Comment]
meta: MetaResponse
def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
return deep_union_pydantic_dicts(
- super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
)
class Config:
diff --git a/langfuse/api/resources/commons/__init__.py b/langfuse/api/resources/commons/__init__.py
index d27b95745..e3e919f41 100644
--- a/langfuse/api/resources/commons/__init__.py
+++ b/langfuse/api/resources/commons/__init__.py
@@ -4,6 +4,8 @@
BaseScore,
BooleanScore,
CategoricalScore,
+ Comment,
+ CommentObjectType,
ConfigCategory,
CreateScoreValue,
Dataset,
@@ -33,13 +35,21 @@
TraceWithFullDetails,
Usage,
)
-from .errors import AccessDeniedError, Error, MethodNotAllowedError, NotFoundError, UnauthorizedError
+from .errors import (
+ AccessDeniedError,
+ Error,
+ MethodNotAllowedError,
+ NotFoundError,
+ UnauthorizedError,
+)
__all__ = [
"AccessDeniedError",
"BaseScore",
"BooleanScore",
"CategoricalScore",
+ "Comment",
+ "CommentObjectType",
"ConfigCategory",
"CreateScoreValue",
"Dataset",
diff --git a/langfuse/api/resources/commons/types/__init__.py b/langfuse/api/resources/commons/types/__init__.py
index b380e4863..fcec85214 100644
--- a/langfuse/api/resources/commons/types/__init__.py
+++ b/langfuse/api/resources/commons/types/__init__.py
@@ -3,6 +3,8 @@
from .base_score import BaseScore
from .boolean_score import BooleanScore
from .categorical_score import CategoricalScore
+from .comment import Comment
+from .comment_object_type import CommentObjectType
from .config_category import ConfigCategory
from .create_score_value import CreateScoreValue
from .dataset import Dataset
@@ -33,6 +35,8 @@
"BaseScore",
"BooleanScore",
"CategoricalScore",
+ "Comment",
+ "CommentObjectType",
"ConfigCategory",
"CreateScoreValue",
"Dataset",
diff --git a/langfuse/api/resources/commons/types/base_score.py b/langfuse/api/resources/commons/types/base_score.py
index 0f2ce7018..71bed6ef4 100644
--- a/langfuse/api/resources/commons/types/base_score.py
+++ b/langfuse/api/resources/commons/types/base_score.py
@@ -13,27 +13,49 @@ class BaseScore(pydantic_v1.BaseModel):
trace_id: str = pydantic_v1.Field(alias="traceId")
name: str
source: ScoreSource
- observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None)
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
timestamp: dt.datetime
created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
- author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None)
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
comment: typing.Optional[str] = None
config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
"""
Reference a score config on a score. When set, config and score name must be equal and value must comply to optionally defined numerical range
"""
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ """
+ Reference an annotation queue on a score. Populated if the score was initially created in an annotation queue.
+ """
+
def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
return deep_union_pydantic_dicts(
- super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
)
class Config:
diff --git a/langfuse/api/resources/commons/types/comment.py b/langfuse/api/resources/commons/types/comment.py
new file mode 100644
index 000000000..4d8b1916a
--- /dev/null
+++ b/langfuse/api/resources/commons/types/comment.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .comment_object_type import CommentObjectType
+
+
+class Comment(pydantic_v1.BaseModel):
+ id: str
+ project_id: str = pydantic_v1.Field(alias="projectId")
+ created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
+ updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
+ object_type: CommentObjectType = pydantic_v1.Field(alias="objectType")
+ object_id: str = pydantic_v1.Field(alias="objectId")
+ content: str
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/commons/types/comment_object_type.py b/langfuse/api/resources/commons/types/comment_object_type.py
new file mode 100644
index 000000000..9c6c134c6
--- /dev/null
+++ b/langfuse/api/resources/commons/types/comment_object_type.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class CommentObjectType(str, enum.Enum):
+ TRACE = "TRACE"
+ OBSERVATION = "OBSERVATION"
+ SESSION = "SESSION"
+ PROMPT = "PROMPT"
+
+ def visit(
+ self,
+ trace: typing.Callable[[], T_Result],
+ observation: typing.Callable[[], T_Result],
+ session: typing.Callable[[], T_Result],
+ prompt: typing.Callable[[], T_Result],
+ ) -> T_Result:
+ if self is CommentObjectType.TRACE:
+ return trace()
+ if self is CommentObjectType.OBSERVATION:
+ return observation()
+ if self is CommentObjectType.SESSION:
+ return session()
+ if self is CommentObjectType.PROMPT:
+ return prompt()
diff --git a/langfuse/api/resources/commons/types/model.py b/langfuse/api/resources/commons/types/model.py
index 8e9449272..18b26c478 100644
--- a/langfuse/api/resources/commons/types/model.py
+++ b/langfuse/api/resources/commons/types/model.py
@@ -24,7 +24,7 @@ class Model(pydantic_v1.BaseModel):
Regex pattern which matches this model definition to generation.model. Useful in case of fine-tuned models. If you want to exact match, use `(?i)^modelname$`
"""
- start_date: typing.Optional[dt.datetime] = pydantic_v1.Field(
+ start_date: typing.Optional[dt.date] = pydantic_v1.Field(
alias="startDate", default=None
)
"""
diff --git a/langfuse/api/resources/commons/types/score.py b/langfuse/api/resources/commons/types/score.py
index 946de3092..e39221084 100644
--- a/langfuse/api/resources/commons/types/score.py
+++ b/langfuse/api/resources/commons/types/score.py
@@ -16,25 +16,45 @@ class Score_Numeric(pydantic_v1.BaseModel):
trace_id: str = pydantic_v1.Field(alias="traceId")
name: str
source: ScoreSource
- observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None)
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
timestamp: dt.datetime
created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
- author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None)
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
comment: typing.Optional[str] = None
config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
- data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field(alias="dataType", default="NUMERIC")
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field(
+ alias="dataType", default="NUMERIC"
+ )
def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
return deep_union_pydantic_dicts(
- super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
)
class Config:
@@ -53,25 +73,45 @@ class Score_Categorical(pydantic_v1.BaseModel):
trace_id: str = pydantic_v1.Field(alias="traceId")
name: str
source: ScoreSource
- observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None)
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
timestamp: dt.datetime
created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
- author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None)
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
comment: typing.Optional[str] = None
config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
- data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field(alias="dataType", default="CATEGORICAL")
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field(
+ alias="dataType", default="CATEGORICAL"
+ )
def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
return deep_union_pydantic_dicts(
- super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
)
class Config:
@@ -90,25 +130,45 @@ class Score_Boolean(pydantic_v1.BaseModel):
trace_id: str = pydantic_v1.Field(alias="traceId")
name: str
source: ScoreSource
- observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None)
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
timestamp: dt.datetime
created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
- author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None)
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
comment: typing.Optional[str] = None
config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
- data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field(alias="dataType", default="BOOLEAN")
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field(
+ alias="dataType", default="BOOLEAN"
+ )
def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
return deep_union_pydantic_dicts(
- super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
)
class Config:
diff --git a/langfuse/api/resources/media/__init__.py b/langfuse/api/resources/media/__init__.py
new file mode 100644
index 000000000..d66dfeb2c
--- /dev/null
+++ b/langfuse/api/resources/media/__init__.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ GetMediaResponse,
+ GetMediaUploadUrlRequest,
+ GetMediaUploadUrlResponse,
+ PatchMediaBody,
+)
+
+__all__ = [
+ "GetMediaResponse",
+ "GetMediaUploadUrlRequest",
+ "GetMediaUploadUrlResponse",
+ "PatchMediaBody",
+]
diff --git a/langfuse/api/resources/media/client.py b/langfuse/api/resources/media/client.py
new file mode 100644
index 000000000..8b7b9e93a
--- /dev/null
+++ b/langfuse/api/resources/media/client.py
@@ -0,0 +1,509 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.pydantic_utilities import pydantic_v1
+from ...core.request_options import RequestOptions
+from ..commons.errors.access_denied_error import AccessDeniedError
+from ..commons.errors.error import Error
+from ..commons.errors.method_not_allowed_error import MethodNotAllowedError
+from ..commons.errors.not_found_error import NotFoundError
+from ..commons.errors.unauthorized_error import UnauthorizedError
+from .types.get_media_response import GetMediaResponse
+from .types.get_media_upload_url_request import GetMediaUploadUrlRequest
+from .types.get_media_upload_url_response import GetMediaUploadUrlResponse
+from .types.patch_media_body import PatchMediaBody
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class MediaClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def get(
+ self, media_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetMediaResponse:
+ """
+ Get a media record
+
+ Parameters
+ ----------
+ media_id : str
+ The unique langfuse identifier of a media record
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetMediaResponse
+
+ Examples
+ --------
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.media.get(
+ media_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/public/media/{jsonable_encoder(media_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(GetMediaResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch(
+ self,
+ media_id: str,
+ *,
+ request: PatchMediaBody,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Patch a media record
+
+ Parameters
+ ----------
+ media_id : str
+ The unique langfuse identifier of a media record
+
+ request : PatchMediaBody
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import datetime
+
+ from finto import PatchMediaBody
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.media.patch(
+ media_id="string",
+ request=PatchMediaBody(
+ uploaded_at=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ upload_http_status=1,
+ upload_http_error="string",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/public/media/{jsonable_encoder(media_id)}",
+ method="PATCH",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_upload_url(
+ self,
+ *,
+ request: GetMediaUploadUrlRequest,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetMediaUploadUrlResponse:
+ """
+ Get a presigned upload URL for a media record
+
+ Parameters
+ ----------
+ request : GetMediaUploadUrlRequest
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetMediaUploadUrlResponse
+
+ Examples
+ --------
+ from finto import GetMediaUploadUrlRequest
+ from finto.client import FernLangfuse
+
+ client = FernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+ client.media.get_upload_url(
+ request=GetMediaUploadUrlRequest(
+ trace_id="string",
+ observation_id="string",
+ content_type="string",
+ content_length=1,
+ sha_256_hash="string",
+ field="string",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/public/media/upload-url",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(
+ GetMediaUploadUrlResponse, _response.json()
+ ) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncMediaClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def get(
+ self, media_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetMediaResponse:
+ """
+ Get a media record
+
+ Parameters
+ ----------
+ media_id : str
+ The unique langfuse identifier of a media record
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetMediaResponse
+
+ Examples
+ --------
+ import asyncio
+
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.media.get(
+ media_id="string",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/public/media/{jsonable_encoder(media_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(GetMediaResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch(
+ self,
+ media_id: str,
+ *,
+ request: PatchMediaBody,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Patch a media record
+
+ Parameters
+ ----------
+ media_id : str
+ The unique langfuse identifier of a media record
+
+ request : PatchMediaBody
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+ import datetime
+
+ from finto import PatchMediaBody
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.media.patch(
+ media_id="string",
+ request=PatchMediaBody(
+ uploaded_at=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ upload_http_status=1,
+ upload_http_error="string",
+ ),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/public/media/{jsonable_encoder(media_id)}",
+ method="PATCH",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_upload_url(
+ self,
+ *,
+ request: GetMediaUploadUrlRequest,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetMediaUploadUrlResponse:
+ """
+ Get a presigned upload URL for a media record
+
+ Parameters
+ ----------
+ request : GetMediaUploadUrlRequest
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetMediaUploadUrlResponse
+
+ Examples
+ --------
+ import asyncio
+
+ from finto import GetMediaUploadUrlRequest
+ from finto.client import AsyncFernLangfuse
+
+ client = AsyncFernLangfuse(
+ x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+ x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+ x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+ username="YOUR_USERNAME",
+ password="YOUR_PASSWORD",
+ base_url="https://yourhost.com/path/to/api",
+ )
+
+
+ async def main() -> None:
+ await client.media.get_upload_url(
+ request=GetMediaUploadUrlRequest(
+ trace_id="string",
+ observation_id="string",
+ content_type="string",
+ content_length=1,
+ sha_256_hash="string",
+ field="string",
+ ),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/public/media/upload-url",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(
+ GetMediaUploadUrlResponse, _response.json()
+ ) # type: ignore
+ if _response.status_code == 400:
+ raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 401:
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 403:
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 405:
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/langfuse/api/resources/media/types/__init__.py b/langfuse/api/resources/media/types/__init__.py
new file mode 100644
index 000000000..1e1cba980
--- /dev/null
+++ b/langfuse/api/resources/media/types/__init__.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .get_media_response import GetMediaResponse
+from .get_media_upload_url_request import GetMediaUploadUrlRequest
+from .get_media_upload_url_response import GetMediaUploadUrlResponse
+from .patch_media_body import PatchMediaBody
+
+__all__ = [
+ "GetMediaResponse",
+ "GetMediaUploadUrlRequest",
+ "GetMediaUploadUrlResponse",
+ "PatchMediaBody",
+]
diff --git a/langfuse/api/resources/media/types/get_media_response.py b/langfuse/api/resources/media/types/get_media_response.py
new file mode 100644
index 000000000..fa5368872
--- /dev/null
+++ b/langfuse/api/resources/media/types/get_media_response.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class GetMediaResponse(pydantic_v1.BaseModel):
+ media_id: str = pydantic_v1.Field(alias="mediaId")
+ """
+ The unique langfuse identifier of a media record
+ """
+
+ content_type: str = pydantic_v1.Field(alias="contentType")
+ """
+ The MIME type of the media record
+ """
+
+ content_length: int = pydantic_v1.Field(alias="contentLength")
+ """
+ The size of the media record in bytes
+ """
+
+ uploaded_at: dt.datetime = pydantic_v1.Field(alias="uploadedAt")
+ """
+ The date and time when the media record was uploaded
+ """
+
+ url: str = pydantic_v1.Field()
+ """
+ The download URL of the media record
+ """
+
+ url_expiry: str = pydantic_v1.Field(alias="urlExpiry")
+ """
+ The expiry date and time of the media record download URL
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/media/types/get_media_upload_url_request.py b/langfuse/api/resources/media/types/get_media_upload_url_request.py
new file mode 100644
index 000000000..8415c0a2d
--- /dev/null
+++ b/langfuse/api/resources/media/types/get_media_upload_url_request.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class GetMediaUploadUrlRequest(pydantic_v1.BaseModel):
+ trace_id: str = pydantic_v1.Field(alias="traceId")
+ """
+ The trace ID associated with the media record
+ """
+
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
+ """
+ The observation ID associated with the media record. If the media record is associated directly with a trace, this will be null.
+ """
+
+ content_type: str = pydantic_v1.Field(alias="contentType")
+ """
+ The MIME type of the media record
+ """
+
+ content_length: int = pydantic_v1.Field(alias="contentLength")
+ """
+ The size of the media record in bytes
+ """
+
+ sha_256_hash: str = pydantic_v1.Field(alias="sha256Hash")
+ """
+ The SHA-256 hash of the media record
+ """
+
+ field: str = pydantic_v1.Field()
+ """
+    The trace / observation field the media record is associated with. This can be one of `input`, `output`, or `metadata`.
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/media/types/get_media_upload_url_response.py b/langfuse/api/resources/media/types/get_media_upload_url_response.py
new file mode 100644
index 000000000..fadc76c01
--- /dev/null
+++ b/langfuse/api/resources/media/types/get_media_upload_url_response.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class GetMediaUploadUrlResponse(pydantic_v1.BaseModel):
+ upload_url: typing.Optional[str] = pydantic_v1.Field(
+ alias="uploadUrl", default=None
+ )
+ """
+    The presigned upload URL. If the asset is already uploaded, this will be null.
+ """
+
+ media_id: str = pydantic_v1.Field(alias="mediaId")
+ """
+ The unique langfuse identifier of a media record
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/media/types/patch_media_body.py b/langfuse/api/resources/media/types/patch_media_body.py
new file mode 100644
index 000000000..8c8d6b336
--- /dev/null
+++ b/langfuse/api/resources/media/types/patch_media_body.py
@@ -0,0 +1,59 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class PatchMediaBody(pydantic_v1.BaseModel):
+ uploaded_at: dt.datetime = pydantic_v1.Field(alias="uploadedAt")
+ """
+ The date and time when the media record was uploaded
+ """
+
+ upload_http_status: int = pydantic_v1.Field(alias="uploadHttpStatus")
+ """
+ The HTTP status code of the upload
+ """
+
+ upload_http_error: typing.Optional[str] = pydantic_v1.Field(
+ alias="uploadHttpError", default=None
+ )
+ """
+ The HTTP error message of the upload
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/__init__.py b/langfuse/api/resources/score/__init__.py
index 8a9975510..97fd51ffa 100644
--- a/langfuse/api/resources/score/__init__.py
+++ b/langfuse/api/resources/score/__init__.py
@@ -1,5 +1,29 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import CreateScoreRequest, CreateScoreResponse, Scores
+from .types import (
+ CreateScoreRequest,
+ CreateScoreResponse,
+ GetScoresResponse,
+ GetScoresResponseData,
+ GetScoresResponseDataBoolean,
+ GetScoresResponseDataCategorical,
+ GetScoresResponseDataNumeric,
+ GetScoresResponseData_Boolean,
+ GetScoresResponseData_Categorical,
+ GetScoresResponseData_Numeric,
+ GetScoresResponseTraceData,
+)
-__all__ = ["CreateScoreRequest", "CreateScoreResponse", "Scores"]
+__all__ = [
+ "CreateScoreRequest",
+ "CreateScoreResponse",
+ "GetScoresResponse",
+ "GetScoresResponseData",
+ "GetScoresResponseDataBoolean",
+ "GetScoresResponseDataCategorical",
+ "GetScoresResponseDataNumeric",
+ "GetScoresResponseData_Boolean",
+ "GetScoresResponseData_Categorical",
+ "GetScoresResponseData_Numeric",
+ "GetScoresResponseTraceData",
+]
diff --git a/langfuse/api/resources/score/client.py b/langfuse/api/resources/score/client.py
index 29054bf0d..4408ae195 100644
--- a/langfuse/api/resources/score/client.py
+++ b/langfuse/api/resources/score/client.py
@@ -20,7 +20,7 @@
from ..commons.types.score_source import ScoreSource
from .types.create_score_request import CreateScoreRequest
from .types.create_score_response import CreateScoreResponse
-from .types.scores import Scores
+from .types.get_scores_response import GetScoresResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -31,7 +31,10 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def create(
- self, *, request: CreateScoreRequest, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ request: CreateScoreRequest,
+ request_options: typing.Optional[RequestOptions] = None,
) -> CreateScoreResponse:
"""
Create a score
@@ -69,7 +72,11 @@ def create(
)
"""
_response = self._client_wrapper.httpx_client.request(
- "api/public/scores", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/public/scores",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
@@ -77,13 +84,21 @@ def create(
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -103,9 +118,13 @@ def get(
value: typing.Optional[float] = None,
score_ids: typing.Optional[str] = None,
config_id: typing.Optional[str] = None,
+ queue_id: typing.Optional[str] = None,
data_type: typing.Optional[ScoreDataType] = None,
+ trace_tags: typing.Optional[
+ typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]
+ ] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> Scores:
+ ) -> GetScoresResponse:
"""
Get a list of scores
@@ -144,15 +163,21 @@ def get(
config_id : typing.Optional[str]
Retrieve only scores with a specific configId.
+ queue_id : typing.Optional[str]
+ Retrieve only scores with a specific annotation queueId.
+
data_type : typing.Optional[ScoreDataType]
Retrieve only scores with a specific dataType.
+ trace_tags : typing.Optional[typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]]
+ Only scores linked to traces that include all of these tags will be returned.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Scores
+ GetScoresResponse
Examples
--------
@@ -185,7 +210,9 @@ def get(
value=1.1,
score_ids="string",
config_id="string",
+ queue_id="string",
data_type=ScoreDataType.NUMERIC,
+ trace_tags=["string"],
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -196,36 +223,52 @@ def get(
"limit": limit,
"userId": user_id,
"name": name,
- "fromTimestamp": serialize_datetime(from_timestamp) if from_timestamp is not None else None,
- "toTimestamp": serialize_datetime(to_timestamp) if to_timestamp is not None else None,
+ "fromTimestamp": serialize_datetime(from_timestamp)
+ if from_timestamp is not None
+ else None,
+ "toTimestamp": serialize_datetime(to_timestamp)
+ if to_timestamp is not None
+ else None,
"source": source,
"operator": operator,
"value": value,
"scoreIds": score_ids,
"configId": config_id,
+ "queueId": queue_id,
"dataType": data_type,
+ "traceTags": jsonable_encoder(trace_tags),
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
- return pydantic_v1.parse_obj_as(Scores, _response.json()) # type: ignore
+ return pydantic_v1.parse_obj_as(GetScoresResponse, _response.json()) # type: ignore
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Score:
+ def get_by_id(
+ self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Score:
"""
Get a score
@@ -258,7 +301,9 @@ def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOp
)
"""
_response = self._client_wrapper.httpx_client.request(
- f"api/public/scores/{jsonable_encoder(score_id)}", method="GET", request_options=request_options
+ f"api/public/scores/{jsonable_encoder(score_id)}",
+ method="GET",
+ request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
@@ -266,19 +311,29 @@ def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOp
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def delete(
+ self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
"""
Delete a score
@@ -311,7 +366,9 @@ def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptio
)
"""
_response = self._client_wrapper.httpx_client.request(
- f"api/public/scores/{jsonable_encoder(score_id)}", method="DELETE", request_options=request_options
+ f"api/public/scores/{jsonable_encoder(score_id)}",
+ method="DELETE",
+ request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
@@ -319,13 +376,21 @@ def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptio
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -337,7 +402,10 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def create(
- self, *, request: CreateScoreRequest, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ request: CreateScoreRequest,
+ request_options: typing.Optional[RequestOptions] = None,
) -> CreateScoreResponse:
"""
Create a score
@@ -383,7 +451,11 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- "api/public/scores", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/public/scores",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
@@ -391,13 +463,21 @@ async def main() -> None:
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -417,9 +497,13 @@ async def get(
value: typing.Optional[float] = None,
score_ids: typing.Optional[str] = None,
config_id: typing.Optional[str] = None,
+ queue_id: typing.Optional[str] = None,
data_type: typing.Optional[ScoreDataType] = None,
+ trace_tags: typing.Optional[
+ typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]
+ ] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> Scores:
+ ) -> GetScoresResponse:
"""
Get a list of scores
@@ -458,15 +542,21 @@ async def get(
config_id : typing.Optional[str]
Retrieve only scores with a specific configId.
+ queue_id : typing.Optional[str]
+ Retrieve only scores with a specific annotation queueId.
+
data_type : typing.Optional[ScoreDataType]
Retrieve only scores with a specific dataType.
+ trace_tags : typing.Optional[typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]]
+ Only scores linked to traces that include all of these tags will be returned.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- Scores
+ GetScoresResponse
Examples
--------
@@ -503,7 +593,9 @@ async def main() -> None:
value=1.1,
score_ids="string",
config_id="string",
+ queue_id="string",
data_type=ScoreDataType.NUMERIC,
+ trace_tags=["string"],
)
@@ -517,36 +609,52 @@ async def main() -> None:
"limit": limit,
"userId": user_id,
"name": name,
- "fromTimestamp": serialize_datetime(from_timestamp) if from_timestamp is not None else None,
- "toTimestamp": serialize_datetime(to_timestamp) if to_timestamp is not None else None,
+ "fromTimestamp": serialize_datetime(from_timestamp)
+ if from_timestamp is not None
+ else None,
+ "toTimestamp": serialize_datetime(to_timestamp)
+ if to_timestamp is not None
+ else None,
"source": source,
"operator": operator,
"value": value,
"scoreIds": score_ids,
"configId": config_id,
+ "queueId": queue_id,
"dataType": data_type,
+ "traceTags": jsonable_encoder(trace_tags),
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
- return pydantic_v1.parse_obj_as(Scores, _response.json()) # type: ignore
+ return pydantic_v1.parse_obj_as(GetScoresResponse, _response.json()) # type: ignore
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Score:
+ async def get_by_id(
+ self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Score:
"""
Get a score
@@ -587,7 +695,9 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- f"api/public/scores/{jsonable_encoder(score_id)}", method="GET", request_options=request_options
+ f"api/public/scores/{jsonable_encoder(score_id)}",
+ method="GET",
+ request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
@@ -595,19 +705,29 @@ async def main() -> None:
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def delete(
+ self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
"""
Delete a score
@@ -648,7 +768,9 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._client_wrapper.httpx_client.request(
- f"api/public/scores/{jsonable_encoder(score_id)}", method="DELETE", request_options=request_options
+ f"api/public/scores/{jsonable_encoder(score_id)}",
+ method="DELETE",
+ request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
@@ -656,13 +778,21 @@ async def main() -> None:
if _response.status_code == 400:
raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
if _response.status_code == 401:
- raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise UnauthorizedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 403:
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise AccessDeniedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 405:
- raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise MethodNotAllowedError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
if _response.status_code == 404:
- raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ raise NotFoundError(
+ pydantic_v1.parse_obj_as(typing.Any, _response.json())
+ ) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/langfuse/api/resources/score/types/__init__.py b/langfuse/api/resources/score/types/__init__.py
index 2917472ee..b627bad8f 100644
--- a/langfuse/api/resources/score/types/__init__.py
+++ b/langfuse/api/resources/score/types/__init__.py
@@ -2,6 +2,28 @@
from .create_score_request import CreateScoreRequest
from .create_score_response import CreateScoreResponse
-from .scores import Scores
+from .get_scores_response import GetScoresResponse
+from .get_scores_response_data import (
+ GetScoresResponseData,
+ GetScoresResponseData_Boolean,
+ GetScoresResponseData_Categorical,
+ GetScoresResponseData_Numeric,
+)
+from .get_scores_response_data_boolean import GetScoresResponseDataBoolean
+from .get_scores_response_data_categorical import GetScoresResponseDataCategorical
+from .get_scores_response_data_numeric import GetScoresResponseDataNumeric
+from .get_scores_response_trace_data import GetScoresResponseTraceData
-__all__ = ["CreateScoreRequest", "CreateScoreResponse", "Scores"]
+__all__ = [
+ "CreateScoreRequest",
+ "CreateScoreResponse",
+ "GetScoresResponse",
+ "GetScoresResponseData",
+ "GetScoresResponseDataBoolean",
+ "GetScoresResponseDataCategorical",
+ "GetScoresResponseDataNumeric",
+ "GetScoresResponseData_Boolean",
+ "GetScoresResponseData_Categorical",
+ "GetScoresResponseData_Numeric",
+ "GetScoresResponseTraceData",
+]
diff --git a/langfuse/api/resources/score/types/get_scores_response.py b/langfuse/api/resources/score/types/get_scores_response.py
new file mode 100644
index 000000000..777bb799b
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from ...utils.resources.pagination.types.meta_response import MetaResponse
+from .get_scores_response_data import GetScoresResponseData
+
+
+class GetScoresResponse(pydantic_v1.BaseModel):
+ data: typing.List[GetScoresResponseData]
+ meta: MetaResponse
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/types/get_scores_response_data.py b/langfuse/api/resources/score/types/get_scores_response_data.py
new file mode 100644
index 000000000..e1b317975
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response_data.py
@@ -0,0 +1,191 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from ...commons.types.score_source import ScoreSource
+from .get_scores_response_trace_data import GetScoresResponseTraceData
+
+
+class GetScoresResponseData_Numeric(pydantic_v1.BaseModel):
+ trace: GetScoresResponseTraceData
+ value: float
+ id: str
+ trace_id: str = pydantic_v1.Field(alias="traceId")
+ name: str
+ source: ScoreSource
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
+ timestamp: dt.datetime
+ created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
+ updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
+ comment: typing.Optional[str] = None
+ config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field(
+ alias="dataType", default="NUMERIC"
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+class GetScoresResponseData_Categorical(pydantic_v1.BaseModel):
+ trace: GetScoresResponseTraceData
+ value: typing.Optional[float] = None
+ string_value: str = pydantic_v1.Field(alias="stringValue")
+ id: str
+ trace_id: str = pydantic_v1.Field(alias="traceId")
+ name: str
+ source: ScoreSource
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
+ timestamp: dt.datetime
+ created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
+ updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
+ comment: typing.Optional[str] = None
+ config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field(
+ alias="dataType", default="CATEGORICAL"
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+class GetScoresResponseData_Boolean(pydantic_v1.BaseModel):
+ trace: GetScoresResponseTraceData
+ value: float
+ string_value: str = pydantic_v1.Field(alias="stringValue")
+ id: str
+ trace_id: str = pydantic_v1.Field(alias="traceId")
+ name: str
+ source: ScoreSource
+ observation_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="observationId", default=None
+ )
+ timestamp: dt.datetime
+ created_at: dt.datetime = pydantic_v1.Field(alias="createdAt")
+ updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt")
+ author_user_id: typing.Optional[str] = pydantic_v1.Field(
+ alias="authorUserId", default=None
+ )
+ comment: typing.Optional[str] = None
+ config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None)
+ queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None)
+ data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field(
+ alias="dataType", default="BOOLEAN"
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
+
+
+GetScoresResponseData = typing.Union[
+ GetScoresResponseData_Numeric,
+ GetScoresResponseData_Categorical,
+ GetScoresResponseData_Boolean,
+]
diff --git a/langfuse/api/resources/score/types/get_scores_response_data_boolean.py b/langfuse/api/resources/score/types/get_scores_response_data_boolean.py
new file mode 100644
index 000000000..4dbf85af2
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response_data_boolean.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from ...commons.types.boolean_score import BooleanScore
+from .get_scores_response_trace_data import GetScoresResponseTraceData
+
+
+class GetScoresResponseDataBoolean(BooleanScore):
+ trace: GetScoresResponseTraceData
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/types/get_scores_response_data_categorical.py b/langfuse/api/resources/score/types/get_scores_response_data_categorical.py
new file mode 100644
index 000000000..3c619779f
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response_data_categorical.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from ...commons.types.categorical_score import CategoricalScore
+from .get_scores_response_trace_data import GetScoresResponseTraceData
+
+
+class GetScoresResponseDataCategorical(CategoricalScore):
+ trace: GetScoresResponseTraceData
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/types/get_scores_response_data_numeric.py b/langfuse/api/resources/score/types/get_scores_response_data_numeric.py
new file mode 100644
index 000000000..127d8f028
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response_data_numeric.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from ...commons.types.numeric_score import NumericScore
+from .get_scores_response_trace_data import GetScoresResponseTraceData
+
+
+class GetScoresResponseDataNumeric(NumericScore):
+ trace: GetScoresResponseTraceData
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/resources/score/types/get_scores_response_trace_data.py b/langfuse/api/resources/score/types/get_scores_response_trace_data.py
new file mode 100644
index 000000000..efbafadf4
--- /dev/null
+++ b/langfuse/api/resources/score/types/get_scores_response_trace_data.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ....core.datetime_utils import serialize_datetime
+from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class GetScoresResponseTraceData(pydantic_v1.BaseModel):
+ user_id: typing.Optional[str] = pydantic_v1.Field(alias="userId", default=None)
+ """
+ The user ID associated with the trace referenced by score
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None)
+ """
+ A list of tags associated with the trace referenced by score
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ kwargs_with_defaults_exclude_none: typing.Any = {
+ "by_alias": True,
+ "exclude_none": True,
+ **kwargs,
+ }
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset),
+ super().dict(**kwargs_with_defaults_exclude_none),
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ populate_by_name = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/langfuse/api/tests/utils/test_http_client.py b/langfuse/api/tests/utils/test_http_client.py
index 4a37a5236..21b37b58f 100644
--- a/langfuse/api/tests/utils/test_http_client.py
+++ b/langfuse/api/tests/utils/test_http_client.py
@@ -1,8 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
-
-from langfuse.api.core.http_client import get_request_body
-from langfuse.api.core.request_options import RequestOptions
+from finto.core.http_client import get_request_body
+from finto.core.request_options import RequestOptions
def get_request_options() -> RequestOptions:
diff --git a/langfuse/api/tests/utils/test_query_encoding.py b/langfuse/api/tests/utils/test_query_encoding.py
index fdbcf6b76..a5a366eb4 100644
--- a/langfuse/api/tests/utils/test_query_encoding.py
+++ b/langfuse/api/tests/utils/test_query_encoding.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
-
-from langfuse.api.core.query_encoder import encode_query
+from finto.core.query_encoder import encode_query
def test_query_encoding() -> None:
From e1536b3ccc24544ab33e0f28a478112edbb5d358 Mon Sep 17 00:00:00 2001
From: Hassieb Pakzad <68423100+hassiebp@users.noreply.github.com>
Date: Tue, 12 Nov 2024 14:38:48 +0100
Subject: [PATCH 02/42] add media upload
---
.../ingestion_consumer.py} | 268 +++++-------------
langfuse/_task_manager/media_manager.py | 245 ++++++++++++++++
.../_task_manager/media_upload_consumer.py | 41 +++
langfuse/_task_manager/media_upload_queue.py | 14 +
langfuse/_task_manager/task_manager.py | 195 +++++++++++++
langfuse/client.py | 42 ++-
langfuse/openai.py | 56 ++--
langfuse/types/__init__.py | 6 +-
poetry.lock | 10 +-
static/joke_prompt.wav | Bin 0 -> 157774 bytes
static/puton.jpg | Bin 0 -> 650780 bytes
tests/test_core_sdk.py | 28 +-
tests/test_langchain.py | 66 +++--
tests/test_llama_index.py | 14 +-
tests/test_task_manager.py | 40 +--
15 files changed, 698 insertions(+), 327 deletions(-)
rename langfuse/{task_manager.py => _task_manager/ingestion_consumer.py} (51%)
create mode 100644 langfuse/_task_manager/media_manager.py
create mode 100644 langfuse/_task_manager/media_upload_consumer.py
create mode 100644 langfuse/_task_manager/media_upload_queue.py
create mode 100644 langfuse/_task_manager/task_manager.py
create mode 100644 static/joke_prompt.wav
create mode 100644 static/puton.jpg
diff --git a/langfuse/task_manager.py b/langfuse/_task_manager/ingestion_consumer.py
similarity index 51%
rename from langfuse/task_manager.py
rename to langfuse/_task_manager/ingestion_consumer.py
index a507785fe..4fa704847 100644
--- a/langfuse/task_manager.py
+++ b/langfuse/_task_manager/ingestion_consumer.py
@@ -1,56 +1,45 @@
-"""@private"""
-
-import atexit
import json
import logging
-import queue
import threading
-from queue import Empty, Queue
import time
-from typing import List, Any, Optional
import typing
+from queue import Empty, Queue
+from typing import Any, List, Optional
-from langfuse.Sampler import Sampler
-from langfuse.parse_error import handle_exception
-from langfuse.request import APIError
-from langfuse.utils import _get_timestamp
-from langfuse.types import MaskFunction
+import backoff
try:
- import pydantic.v1 as pydantic # type: ignore
+ import pydantic.v1 as pydantic
except ImportError:
- import pydantic # type: ignore
-
-
-import backoff
+ import pydantic
-from langfuse.request import LangfuseClient
+from langfuse.api.client import FernLangfuse
+from langfuse.parse_error import handle_exception
+from langfuse.request import APIError, LangfuseClient
+from langfuse.Sampler import Sampler
from langfuse.serializer import EventSerializer
+from langfuse.types import MaskFunction
-# largest message size in db is 331_000 bytes right now
-MAX_MSG_SIZE = 1_000_000
-
-# https://vercel.com/docs/functions/serverless-functions/runtimes#request-body-size
-# The maximum payload size for the request body or the response body of a Serverless Function is 4.5 MB
-# 4_500_000 Bytes = 4.5 MB
-# configured to be 3 MB to be safe
+from .media_manager import MediaManager
-BATCH_SIZE_LIMIT = 2_500_000
+MAX_EVENT_SIZE_BYTES = 1_000_000
+MAX_BATCH_SIZE_BYTES = 2_500_000
-class LangfuseMetadata(pydantic.BaseModel):
+class IngestionMetadata(pydantic.BaseModel):
batch_size: int
- sdk_integration: typing.Optional[str] = None
- sdk_name: str = None
- sdk_version: str = None
- public_key: str = None
+ sdk_integration: Optional[str] = None
+ sdk_name: Optional[str] = None
+ sdk_version: Optional[str] = None
+ public_key: Optional[str] = None
-class Consumer(threading.Thread):
+class IngestionConsumer(threading.Thread):
_log = logging.getLogger("langfuse")
- _queue: Queue
+ _ingestion_queue: Queue
_identifier: int
_client: LangfuseClient
+ _api_client: FernLangfuse
_flush_at: int
_flush_interval: float
_max_retries: int
@@ -60,16 +49,19 @@ class Consumer(threading.Thread):
_sdk_integration: str
_mask: Optional[MaskFunction]
_sampler: Sampler
+ _media_manager: MediaManager
def __init__(
self,
- queue: Queue,
+ *,
+ ingestion_queue: Queue,
identifier: int,
client: LangfuseClient,
flush_at: int,
flush_interval: float,
max_retries: int,
public_key: str,
+ media_manager: MediaManager,
sdk_name: str,
sdk_version: str,
sdk_integration: str,
@@ -77,15 +69,15 @@ def __init__(
mask: Optional[MaskFunction] = None,
):
"""Create a consumer thread."""
- threading.Thread.__init__(self)
- # Make consumer a daemon thread so that it doesn't block program exit
- self.daemon = True
- self._queue = queue
+ super().__init__()
# It's important to set running in the constructor: if we are asked to
# pause immediately after construction, we might set running to True in
# run() *after* we set it to False in pause... and keep running
# forever.
self.running = True
+ # Make consumer a daemon thread so that it doesn't block program exit
+ self.daemon = True
+ self._ingestion_queue = ingestion_queue
self._identifier = identifier
self._client = client
self._flush_at = flush_at
@@ -97,73 +89,81 @@ def __init__(
self._sdk_integration = sdk_integration
self._mask = mask
self._sampler = Sampler(sample_rate)
+ self._media_manager = media_manager
def _next(self):
"""Return the next batch of items to upload."""
- queue = self._queue
- items = []
+ events = []
start_time = time.monotonic()
total_size = 0
- while len(items) < self._flush_at:
+ while len(events) < self._flush_at:
elapsed = time.monotonic() - start_time
if elapsed >= self._flush_interval:
break
try:
- item = queue.get(block=True, timeout=self._flush_interval - elapsed)
+ event = self._ingestion_queue.get(
+ block=True, timeout=self._flush_interval - elapsed
+ )
# convert pydantic models to dicts
- if "body" in item and isinstance(item["body"], pydantic.BaseModel):
- item["body"] = item["body"].dict(exclude_none=True)
+ if "body" in event and isinstance(event["body"], pydantic.BaseModel):
+ event["body"] = event["body"].dict(exclude_none=True)
# sample event
- if not self._sampler.sample_event(item):
- queue.task_done()
+ if not self._sampler.sample_event(event):
+ self._ingestion_queue.task_done()
continue
+ # handle multimodal data
+ self._media_manager.process_multimodal_event_in_place(event)
+
# truncate item if it exceeds size limit
item_size = self._truncate_item_in_place(
- item=item,
- max_size=MAX_MSG_SIZE,
+ event=event,
+ max_size=MAX_EVENT_SIZE_BYTES,
log_message="",
)
# apply mask
- self._apply_mask_in_place(item)
+ self._apply_mask_in_place(event)
# check for serialization errors
try:
- json.dumps(item, cls=EventSerializer)
+ json.dumps(event, cls=EventSerializer)
except Exception as e:
self._log.error(f"Error serializing item, skipping: {e}")
- queue.task_done()
+ self._ingestion_queue.task_done()
continue
- items.append(item)
+ events.append(event)
total_size += item_size
- if total_size >= BATCH_SIZE_LIMIT:
+ if total_size >= MAX_BATCH_SIZE_BYTES:
self._log.debug("hit batch size limit (size: %d)", total_size)
break
except Empty:
break
- self._log.debug("~%d items in the Langfuse queue", self._queue.qsize())
- return items
+ self._log.debug(
+ "~%d items in the Langfuse queue", self._ingestion_queue.qsize()
+ )
+
+ return events
def _truncate_item_in_place(
self,
*,
- item: typing.Any,
+ event: typing.Any,
max_size: int,
log_message: typing.Optional[str] = None,
) -> int:
"""Truncate the item in place to fit within the size limit."""
- item_size = self._get_item_size(item)
+ item_size = self._get_item_size(event)
self._log.debug(f"item size {item_size}")
if item_size > max_size:
@@ -172,14 +172,14 @@ def _truncate_item_in_place(
item_size,
)
- if "body" in item:
+ if "body" in event:
drop_candidates = ["input", "output", "metadata"]
sorted_field_sizes = sorted(
[
(
field,
- self._get_item_size((item["body"][field]))
- if field in item["body"]
+ self._get_item_size((event["body"][field]))
+ if field in event["body"]
else 0,
)
for field in drop_candidates
@@ -191,10 +191,10 @@ def _truncate_item_in_place(
for _ in range(len(sorted_field_sizes)):
field_to_drop, size_to_drop = sorted_field_sizes.pop()
- if field_to_drop not in item["body"]:
+ if field_to_drop not in event["body"]:
continue
- item["body"][field_to_drop] = log_message
+ event["body"][field_to_drop] = log_message
item_size -= size_to_drop
self._log.debug(
@@ -205,16 +205,16 @@ def _truncate_item_in_place(
break
# if item does not have body or input/output fields, drop the event
- if "body" not in item or (
- "input" not in item["body"] and "output" not in item["body"]
+ if "body" not in event or (
+ "input" not in event["body"] and "output" not in event["body"]
):
self._log.warning(
"Item does not have body or input/output fields, dropping item."
)
- self._queue.task_done()
+ self._ingestion_queue.task_done()
return 0
- return self._get_item_size(item)
+ return self._get_item_size(event)
def _get_item_size(self, item: typing.Any) -> int:
"""Return the size of the item in bytes."""
@@ -235,7 +235,7 @@ def _apply_mask_in_place(self, event: dict):
body[key] = ""
def run(self):
- """Runs the consumer."""
+ """Run the consumer."""
self._log.debug("consumer is running...")
while self.running:
self.upload()
@@ -253,7 +253,7 @@ def upload(self):
finally:
# mark items as acknowledged from queue
for _ in batch:
- self._queue.task_done()
+ self._ingestion_queue.task_done()
def pause(self):
"""Pause the consumer."""
@@ -262,7 +262,7 @@ def pause(self):
def _upload_batch(self, batch: List[Any]):
self._log.debug("uploading batch of %d items", len(batch))
- metadata = LangfuseMetadata(
+ metadata = IngestionMetadata(
batch_size=len(batch),
sdk_integration=self._sdk_integration,
sdk_name=self._sdk_name,
@@ -287,134 +287,4 @@ def execute_task_with_backoff(batch: List[Any]):
raise e
execute_task_with_backoff(batch)
- self._log.debug("successfully uploaded batch of %d items", len(batch))
-
-
-class TaskManager(object):
- _log = logging.getLogger("langfuse")
- _consumers: List[Consumer]
- _enabled: bool
- _threads: int
- _max_task_queue_size: int
- _queue: Queue
- _client: LangfuseClient
- _flush_at: int
- _flush_interval: float
- _max_retries: int
- _public_key: str
- _sdk_name: str
- _sdk_version: str
- _sdk_integration: str
- _sample_rate: float
- _mask: Optional[MaskFunction]
-
- def __init__(
- self,
- client: LangfuseClient,
- flush_at: int,
- flush_interval: float,
- max_retries: int,
- threads: int,
- public_key: str,
- sdk_name: str,
- sdk_version: str,
- sdk_integration: str,
- enabled: bool = True,
- max_task_queue_size: int = 100_000,
- sample_rate: float = 1,
- mask: Optional[MaskFunction] = None,
- ):
- self._max_task_queue_size = max_task_queue_size
- self._threads = threads
- self._queue = queue.Queue(self._max_task_queue_size)
- self._consumers = []
- self._client = client
- self._flush_at = flush_at
- self._flush_interval = flush_interval
- self._max_retries = max_retries
- self._public_key = public_key
- self._sdk_name = sdk_name
- self._sdk_version = sdk_version
- self._sdk_integration = sdk_integration
- self._enabled = enabled
- self._sample_rate = sample_rate
- self._mask = mask
-
- self.init_resources()
-
- # cleans up when the python interpreter closes
- atexit.register(self.join)
-
- def init_resources(self):
- for i in range(self._threads):
- consumer = Consumer(
- queue=self._queue,
- identifier=i,
- client=self._client,
- flush_at=self._flush_at,
- flush_interval=self._flush_interval,
- max_retries=self._max_retries,
- public_key=self._public_key,
- sdk_name=self._sdk_name,
- sdk_version=self._sdk_version,
- sdk_integration=self._sdk_integration,
- sample_rate=self._sample_rate,
- mask=self._mask,
- )
- consumer.start()
- self._consumers.append(consumer)
-
- def add_task(self, event: dict):
- if not self._enabled:
- return
-
- try:
- event["timestamp"] = _get_timestamp()
-
- self._queue.put(event, block=False)
- except queue.Full:
- self._log.warning("analytics-python queue is full")
- return False
- except Exception as e:
- self._log.exception(f"Exception in adding task {e}")
-
- return False
-
- def flush(self):
- """Force a flush from the internal queue to the server."""
- self._log.debug("flushing queue")
- queue = self._queue
- size = queue.qsize()
- queue.join()
- # Note that this message may not be precise, because of threading.
- self._log.debug("successfully flushed about %s items.", size)
-
- def join(self):
- """End the consumer threads once the queue is empty.
-
- Blocks execution until finished
- """
- self._log.debug(f"joining {len(self._consumers)} consumer threads")
-
- # pause all consumers before joining them so we don't have to wait for multiple
- # flush intervals to join them all.
- for consumer in self._consumers:
- consumer.pause()
-
- for consumer in self._consumers:
- try:
- consumer.join()
- except RuntimeError:
- # consumer thread has not started
- pass
-
- self._log.debug(f"consumer thread {consumer._identifier} joined")
-
- def shutdown(self):
- """Flush all messages and cleanly shutdown the client."""
- self._log.debug("shutdown initiated")
-
- self.flush()
- self.join()
-
- self._log.debug("shutdown completed")
+ self._log.debug("successfully uploaded batch of %d events", len(batch))
diff --git a/langfuse/_task_manager/media_manager.py b/langfuse/_task_manager/media_manager.py
new file mode 100644
index 000000000..9874ff874
--- /dev/null
+++ b/langfuse/_task_manager/media_manager.py
@@ -0,0 +1,245 @@
+import base64
+import hashlib
+import logging
+from queue import Empty
+from typing import Literal
+
+import requests
+
+from langfuse.api import GetMediaUploadUrlRequest, PatchMediaBody
+from langfuse.api.client import FernLangfuse
+from langfuse.utils import _get_timestamp
+
+from .media_upload_queue import MediaUploadQueue, UploadMediaJob
+
+
+class MediaManager:
+ _log = logging.getLogger(__name__)
+
+ def __init__(
+ self, *, api_client: FernLangfuse, media_upload_queue: MediaUploadQueue
+ ):
+ self._api_client = api_client
+ self._queue = media_upload_queue
+
+ def process_next_media_upload(self):
+ try:
+ data = self._queue.get(block=True, timeout=1)
+ self._process_upload_media_job(data=data)
+
+ self._queue.task_done()
+ except Empty:
+ pass
+ except Exception as e:
+ self._log.error(f"Error uploading media: {e}")
+ self._queue.task_done()
+
+ def process_multimodal_event_in_place(self, event: dict):
+ try:
+ if "body" not in event:
+ return
+
+ body = event["body"]
+ multimodal_fields = ["input", "output"]
+
+ for field in multimodal_fields:
+ if field in body:
+ field_data = body[field]
+
+ if field == "output":
+ self._process_multimodal_message(
+ event=event, body=body, field=field, message=field_data
+ )
+
+ if isinstance(field_data, list):
+ for message in field_data:
+ self._process_multimodal_message(
+ event=event, body=body, field=field, message=message
+ )
+
+ except Exception as e:
+ self._log.error(f"Error processing multimodal event: {e}")
+
+ def _process_multimodal_message(
+ self, *, event: dict, body: dict, field: str, message: dict
+ ):
+ if isinstance(message, dict) and message.get("content", None) is not None:
+ content = message["content"]
+
+ for content_part in content:
+ if isinstance(content_part, dict):
+ if content_part.get("image_url", None) is not None:
+ base64_data_uri = content_part["image_url"]["url"]
+ if base64_data_uri.startswith("data:"):
+ media_reference_string = self._enqueue_media_upload(
+ event=event,
+ body=body,
+ field=field,
+ base64_data_uri=base64_data_uri,
+ )
+
+ if media_reference_string:
+ content_part["image_url"]["url"] = (
+ media_reference_string
+ )
+
+ if content_part.get("input_audio", None) is not None:
+ base64_data_uri = (
+ f"data:audio/{content_part['input_audio']['format']};base64,"
+ + content_part["input_audio"]["data"]
+ )
+
+ media_reference_string = self._enqueue_media_upload(
+ event=event,
+ body=body,
+ field=field,
+ base64_data_uri=base64_data_uri,
+ )
+
+ if media_reference_string:
+ content_part["input_audio"]["data"] = media_reference_string
+
+ if content_part.get("output_audio", None) is not None:
+ base64_data_uri = (
+ f"data:audio/{content_part['output_audio']['format']};base64,"
+ + content_part["output_audio"]["data"]
+ )
+
+ media_reference_string = self._enqueue_media_upload(
+ event=event,
+ body=body,
+ field=field,
+ base64_data_uri=base64_data_uri,
+ )
+
+ if media_reference_string:
+ content_part["output_audio"]["data"] = (
+ media_reference_string
+ )
+
+ def _enqueue_media_upload(
+ self, *, event: dict, body: dict, field: str, base64_data_uri: str
+ ):
+ parsed_content = self._parse_base64_data_uri(base64_data_uri)
+ trace_id = body.get("traceId", None) or (
+ body.get("id", None)
+ if "type" in event and "trace" in event["type"]
+ else None
+ )
+
+ if trace_id is None:
+ raise ValueError("trace_id is required for media upload")
+
+ observation_id = (
+ body.get("id", None)
+ if "type" in event
+ and ("generation" in event["type"] or "span" in event["type"])
+ else None
+ )
+
+ if parsed_content:
+ content_length = parsed_content["content_length"]
+ content_type = parsed_content["content_type"]
+ content_sha256_hash = parsed_content["content_sha256_hash"]
+ content_bytes = parsed_content["content_bytes"]
+
+ upload_url_response = self._api_client.media.get_upload_url(
+ request=GetMediaUploadUrlRequest(
+ field=field,
+ contentLength=content_length,
+ contentType=content_type,
+ sha256Hash=content_sha256_hash,
+ traceId=trace_id,
+ observationId=observation_id,
+ )
+ )
+
+ upload_url = upload_url_response.upload_url
+ media_id = upload_url_response.media_id
+
+ if upload_url is not None:
+ self._queue.put(
+ item={
+ "content_bytes": content_bytes,
+ "content_type": content_type,
+ "content_sha256_hash": content_sha256_hash,
+ "upload_url": upload_url,
+ "media_id": media_id,
+ },
+ block=True,
+ )
+
+ return self._format_media_reference_string(
+ content_type=content_type,
+ media_id=media_id,
+ source="base64",
+ )
+
+ def _process_upload_media_job(
+ self,
+ *,
+ data: UploadMediaJob,
+ ):
+ upload_response = requests.put(
+ data["upload_url"],
+ headers={
+ "Content-Type": data["content_type"],
+ "x-amz-checksum-sha256": data["content_sha256_hash"],
+ },
+ data=data["content_bytes"],
+ )
+
+ self._api_client.media.patch(
+ media_id=data["media_id"],
+ request=PatchMediaBody(
+ uploadedAt=_get_timestamp(),
+ uploadHttpStatus=upload_response.status_code,
+ uploadHttpError=upload_response.text,
+ ),
+ )
+
+ def _format_media_reference_string(
+ self, *, content_type: str, media_id: str, source: Literal["base64"]
+ ) -> str:
+ return f"@@@langfuseMedia:type={content_type}|id={media_id}|source={source}@@@"
+
+ def _parse_base64_data_uri(self, data: str):
+ if not data or not isinstance(data, str):
+ return None
+
+ if not data.startswith("data:"):
+ return None
+
+ try:
+ # Split the data into metadata and actual data
+ header, _, actual_data = data[5:].partition(",")
+ if not header or not actual_data:
+ return None
+
+ # Determine if the data is base64 encoded
+ is_base64 = header.endswith(";base64")
+ if not is_base64:
+ return None
+
+ content_type = header[:-7]
+ if not content_type:
+ return None
+
+ try:
+ content_bytes = base64.b64decode(actual_data)
+ except Exception:
+ return None
+
+ content_length = len(content_bytes)
+
+ sha256_hash_bytes = hashlib.sha256(content_bytes).digest()
+ sha256_hash_base64 = base64.b64encode(sha256_hash_bytes).decode("utf-8")
+
+ return {
+ "content_type": content_type,
+ "content_bytes": content_bytes,
+ "content_length": content_length,
+ "content_sha256_hash": sha256_hash_base64,
+ }
+ except Exception:
+ return None
diff --git a/langfuse/_task_manager/media_upload_consumer.py b/langfuse/_task_manager/media_upload_consumer.py
new file mode 100644
index 000000000..c4720165d
--- /dev/null
+++ b/langfuse/_task_manager/media_upload_consumer.py
@@ -0,0 +1,41 @@
+import logging
+import threading
+
+from .media_manager import MediaManager
+
+
+class MediaUploadConsumer(threading.Thread):
+ _log = logging.getLogger(__name__)
+ _identifier: int
+ _max_retries: int
+ _media_manager: MediaManager
+
+ def __init__(
+ self,
+ *,
+ identifier: int,
+ max_retries: int,
+ media_manager: MediaManager,
+ ):
+ """Create a consumer thread."""
+ super().__init__()
+ # Make consumer a daemon thread so that it doesn't block program exit
+ self.daemon = True
+ # It's important to set running in the constructor: if we are asked to
+ # pause immediately after construction, we might set running to True in
+ # run() *after* we set it to False in pause... and keep running
+ # forever.
+ self.running = True
+ self._identifier = identifier
+ self._max_retries = max_retries
+ self._media_manager = media_manager
+
+ def run(self):
+ """Run the media upload consumer."""
+ self._log.debug("consumer is running...")
+ while self.running:
+ self._media_manager.process_next_media_upload()
+
+ def pause(self):
+ """Pause the media upload consumer."""
+ self.running = False
diff --git a/langfuse/_task_manager/media_upload_queue.py b/langfuse/_task_manager/media_upload_queue.py
new file mode 100644
index 000000000..81bd9d331
--- /dev/null
+++ b/langfuse/_task_manager/media_upload_queue.py
@@ -0,0 +1,14 @@
+from queue import Queue
+from typing import TypedDict
+
+
+class UploadMediaJob(TypedDict):
+ upload_url: str
+ media_id: str
+ content_type: str
+ content_bytes: bytes
+ content_sha256_hash: str
+
+
+class MediaUploadQueue(Queue[UploadMediaJob]):
+ pass
diff --git a/langfuse/_task_manager/task_manager.py b/langfuse/_task_manager/task_manager.py
new file mode 100644
index 000000000..76df2ca66
--- /dev/null
+++ b/langfuse/_task_manager/task_manager.py
@@ -0,0 +1,195 @@
+"""@private"""
+
+import atexit
+import logging
+import queue
+from queue import Queue
+from typing import List, Optional
+
+from langfuse.api.client import FernLangfuse
+from langfuse.request import LangfuseClient
+from langfuse.types import MaskFunction
+from langfuse.utils import _get_timestamp
+
+from .ingestion_consumer import IngestionConsumer
+from .media_manager import MediaManager
+from .media_upload_consumer import MediaUploadConsumer
+from .media_upload_queue import MediaUploadQueue
+
+
+class TaskManager(object):
+ _log = logging.getLogger(__name__)
+ _ingestion_consumers: List[IngestionConsumer]
+ _enabled: bool
+ _threads: int
+ _media_upload_threads: int
+ _max_task_queue_size: int
+ _ingestion_queue: Queue
+ _media_upload_queue: MediaUploadQueue
+ _client: LangfuseClient
+ _api_client: FernLangfuse
+ _flush_at: int
+ _flush_interval: float
+ _max_retries: int
+ _public_key: str
+ _sdk_name: str
+ _sdk_version: str
+ _sdk_integration: str
+ _sample_rate: float
+ _mask: Optional[MaskFunction]
+
+ def __init__(
+ self,
+ *,
+ client: LangfuseClient,
+ api_client: FernLangfuse,
+ flush_at: int,
+ flush_interval: float,
+ max_retries: int,
+ threads: int,
+ media_upload_threads: int = 1,
+ public_key: str,
+ sdk_name: str,
+ sdk_version: str,
+ sdk_integration: str,
+ enabled: bool = True,
+ max_task_queue_size: int = 100_000,
+ sample_rate: float = 1,
+ mask: Optional[MaskFunction] = None,
+ ):
+ self._max_task_queue_size = max_task_queue_size
+ self._threads = threads
+ self._media_upload_threads = media_upload_threads
+ self._ingestion_queue = queue.Queue(self._max_task_queue_size)
+ self._media_upload_queue = MediaUploadQueue(self._max_task_queue_size)
+ self._media_manager = MediaManager(
+ api_client=api_client, media_upload_queue=self._media_upload_queue
+ )
+ self._ingestion_consumers = []
+ self._media_upload_consumers = []
+ self._client = client
+ self._api_client = api_client
+ self._flush_at = flush_at
+ self._flush_interval = flush_interval
+ self._max_retries = max_retries
+ self._public_key = public_key
+ self._sdk_name = sdk_name
+ self._sdk_version = sdk_version
+ self._sdk_integration = sdk_integration
+ self._enabled = enabled
+ self._sample_rate = sample_rate
+ self._mask = mask
+
+ self.init_resources()
+
+ # cleans up when the python interpreter closes
+ atexit.register(self.join)
+
+ def init_resources(self):
+ for i in range(self._threads):
+ ingestion_consumer = IngestionConsumer(
+ ingestion_queue=self._ingestion_queue,
+ identifier=i,
+ client=self._client,
+ media_manager=self._media_manager,
+ flush_at=self._flush_at,
+ flush_interval=self._flush_interval,
+ max_retries=self._max_retries,
+ public_key=self._public_key,
+ sdk_name=self._sdk_name,
+ sdk_version=self._sdk_version,
+ sdk_integration=self._sdk_integration,
+ sample_rate=self._sample_rate,
+ mask=self._mask,
+ )
+ ingestion_consumer.start()
+ self._ingestion_consumers.append(ingestion_consumer)
+
+ for i in range(self._media_upload_threads):
+ media_upload_consumer = MediaUploadConsumer(
+ identifier=i,
+ max_retries=self._max_retries,
+ media_manager=self._media_manager,
+ )
+ media_upload_consumer.start()
+ self._media_upload_consumers.append(media_upload_consumer)
+
+ def add_task(self, event: dict):
+ if not self._enabled:
+ return
+
+ try:
+ event["timestamp"] = _get_timestamp()
+
+ self._ingestion_queue.put(event, block=False)
+ except queue.Full:
+ self._log.warning("analytics-python queue is full")
+ return False
+ except Exception as e:
+ self._log.exception(f"Exception in adding task {e}")
+
+ return False
+
+ def flush(self):
+ """Force a flush from the internal queue to the server."""
+ self._log.debug("flushing ingestion and media upload queues")
+
+ # Ingestion queue
+ ingestion_queue_size = self._ingestion_queue.qsize()
+ self._ingestion_queue.join()
+ self._log.debug(
+ f"Successfully flushed ~{ingestion_queue_size} items from ingestion queue"
+ )
+
+ # Media upload queue
+ media_upload_queue_size = self._media_upload_queue.qsize()
+ self._media_upload_queue.join()
+ self._log.debug(
+ f"Successfully flushed ~{media_upload_queue_size} items from media upload queue"
+ )
+
+ def join(self):
+ """End the consumer threads once the queue is empty.
+
+ Blocks execution until finished
+ """
+ self._log.debug(f"joining {len(self._ingestion_consumers)} consumer threads")
+
+ # pause all consumers before joining them so we don't have to wait for multiple
+ # flush intervals to join them all.
+ for ingestion_consumer in self._ingestion_consumers:
+ ingestion_consumer.pause()
+
+ for media_upload_consumer in self._media_upload_consumers:
+ media_upload_consumer.pause()
+
+ for ingestion_consumer in self._ingestion_consumers:
+ try:
+ ingestion_consumer.join()
+ except RuntimeError:
+ # consumer thread has not started
+ pass
+
+ self._log.debug(
+ f"IngestionConsumer thread {ingestion_consumer._identifier} joined"
+ )
+
+ for media_upload_consumer in self._media_upload_consumers:
+ try:
+ media_upload_consumer.join()
+ except RuntimeError:
+ # consumer thread has not started
+ pass
+
+ self._log.debug(
+ f"MediaUploadConsumer thread {media_upload_consumer._identifier} joined"
+ )
+
+ def shutdown(self):
+ """Flush all messages and cleanly shutdown the client."""
+ self._log.debug("shutdown initiated")
+
+ self.flush()
+ self.join()
+
+ self._log.debug("shutdown completed")
diff --git a/langfuse/client.py b/langfuse/client.py
index f9e6a2380..74a2f53d4 100644
--- a/langfuse/client.py
+++ b/langfuse/client.py
@@ -1,28 +1,19 @@
-from contextlib import contextmanager
import datetime as dt
import logging
import os
-import typing
-import uuid
-import backoff
-import httpx
-from enum import Enum
import time
import tracemalloc
-from typing import (
- Any,
- Dict,
- Optional,
- Literal,
- Union,
- List,
- Sequence,
- overload,
-)
+import typing
import urllib.parse
+import uuid
import warnings
+from contextlib import contextmanager
from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union, overload
+import backoff
+import httpx
from langfuse.api.resources.commons.types.dataset_run_with_items import (
DatasetRunWithItems,
@@ -39,8 +30,8 @@
)
from langfuse.api.resources.ingestion.types.create_span_body import CreateSpanBody
from langfuse.api.resources.ingestion.types.score_body import ScoreBody
-from langfuse.api.resources.ingestion.types.trace_body import TraceBody
from langfuse.api.resources.ingestion.types.sdk_log_body import SdkLogBody
+from langfuse.api.resources.ingestion.types.trace_body import TraceBody
from langfuse.api.resources.ingestion.types.update_generation_body import (
UpdateGenerationBody,
)
@@ -51,28 +42,26 @@
from langfuse.api.resources.prompts.types import (
CreatePromptRequest_Chat,
CreatePromptRequest_Text,
- Prompt_Text,
Prompt_Chat,
+ Prompt_Text,
)
from langfuse.api.resources.trace.types.traces import Traces
from langfuse.api.resources.utils.resources.pagination.types.meta_response import (
MetaResponse,
)
from langfuse.model import (
+ ChatMessageDict,
+ ChatPromptClient,
CreateDatasetItemRequest,
CreateDatasetRequest,
CreateDatasetRunItemRequest,
- ChatMessageDict,
DatasetItem,
DatasetStatus,
ModelUsage,
PromptClient,
- ChatPromptClient,
TextPromptClient,
)
-from langfuse.parse_error import (
- handle_fern_exception,
-)
+from langfuse.parse_error import handle_fern_exception
from langfuse.prompt_cache import PromptCache
try:
@@ -80,13 +69,13 @@
except ImportError:
import pydantic # type: ignore
+from langfuse._task_manager.task_manager import TaskManager
from langfuse.api.client import FernLangfuse
from langfuse.environment import get_common_release_envs
from langfuse.logging import clean_logger
from langfuse.model import Dataset, MapValue, Observation, TraceWithFullDetails
from langfuse.request import LangfuseClient
-from langfuse.task_manager import TaskManager
-from langfuse.types import SpanLevel, ScoreDataType, MaskFunction
+from langfuse.types import MaskFunction, ScoreDataType, SpanLevel
from langfuse.utils import _convert_usage_input, _create_prompt_context, _get_timestamp
from .version import __version__ as version
@@ -308,6 +297,7 @@ def __init__(
"flush_interval": flush_interval,
"max_retries": max_retries,
"client": langfuse_client,
+ "api_client": self.client,
"public_key": public_key,
"sdk_name": "python",
"sdk_version": version,
@@ -1383,7 +1373,7 @@ def _log_memory_usage(self):
:top_k_items
],
"total_usage": f"{total_memory_usage:.2f} MB",
- "langfuse_queue_length": self.task_manager._queue.qsize(),
+ "langfuse_queue_length": self.task_manager._ingestion_queue.qsize(),
}
self.log.debug("Memory usage: ", logged_memory_usage)
diff --git a/langfuse/openai.py b/langfuse/openai.py
index d3e66f9e5..ccd917fd6 100644
--- a/langfuse/openai.py
+++ b/langfuse/openai.py
@@ -17,18 +17,17 @@
See docs for more details: https://langfuse.com/docs/integrations/openai
"""
-import copy
import logging
-from inspect import isclass
import types
-
from collections import defaultdict
from dataclasses import dataclass
-from typing import List, Optional
+from inspect import isclass
+from typing import Optional
import openai.resources
from openai._types import NotGiven
from packaging.version import Version
+from pydantic import BaseModel
from wrapt import wrap_function_wrapper
from langfuse import Langfuse
@@ -36,7 +35,6 @@
from langfuse.decorators import langfuse_context
from langfuse.utils import _get_timestamp
from langfuse.utils.langfuse_singleton import LangfuseSingleton
-from pydantic import BaseModel
try:
import openai
@@ -200,13 +198,13 @@ def _extract_chat_prompt(kwargs: any):
# uf user provided functions, we need to send these together with messages to langfuse
prompt.update(
{
- "messages": _filter_image_data(kwargs.get("messages", [])),
+ "messages": kwargs.get("messages", []),
}
)
return prompt
else:
# vanilla case, only send messages in openai format to langfuse
- return _filter_image_data(kwargs.get("messages", []))
+ return kwargs.get("messages", [])
def _extract_chat_response(kwargs: any):
@@ -215,15 +213,31 @@ def _extract_chat_response(kwargs: any):
"role": kwargs.get("role", None),
}
+ audio_content = None
+
if kwargs.get("function_call") is not None:
response.update({"function_call": kwargs["function_call"]})
if kwargs.get("tool_calls") is not None:
response.update({"tool_calls": kwargs["tool_calls"]})
+ if kwargs.get("audio") is not None:
+ audio = kwargs["audio"].__dict__
+
+ audio_content = [
+ {"type": "text", "text": audio.get("transcript", None)},
+ {
+ "type": "output_audio",
+ "output_audio": {
+ "data": audio.get("data", None),
+ "format": audio.get("format", "wav"),
+ },
+ },
+ ]
+
response.update(
{
- "content": kwargs.get("content", None),
+ "content": kwargs.get("content", None) or audio_content,
}
)
return response
@@ -727,32 +741,6 @@ def auth_check():
return modifier._langfuse.auth_check()
-def _filter_image_data(messages: List[dict]):
- """https://platform.openai.com/docs/guides/vision?lang=python
-
- The messages array remains the same, but the 'image_url' is removed from the 'content' array.
- It should only be removed if the value starts with 'data:image/jpeg;base64,'
-
- """
- output_messages = copy.deepcopy(messages)
-
- for message in output_messages:
- content = (
- message.get("content", None)
- if isinstance(message, dict)
- else getattr(message, "content", None)
- )
-
- if content is not None:
- for index, item in enumerate(content):
- if isinstance(item, dict) and item.get("image_url", None) is not None:
- url = item["image_url"]["url"]
- if url.startswith("data:image/"):
- del content[index]["image_url"]
-
- return output_messages
-
-
class LangfuseResponseGeneratorSync:
def __init__(
self,
diff --git a/langfuse/types/__init__.py b/langfuse/types/__init__.py
index 896557489..1836561be 100644
--- a/langfuse/types/__init__.py
+++ b/langfuse/types/__init__.py
@@ -1,10 +1,12 @@
"""@private"""
from datetime import datetime
-from langfuse.client import PromptClient, ModelUsage, MapValue
-from typing import Any, List, Optional, TypedDict, Literal, Dict, Union, Protocol
+from typing import Any, Dict, List, Literal, Optional, Protocol, TypedDict, Union
+
from pydantic import BaseModel
+from langfuse.model import MapValue, ModelUsage, PromptClient
+
SpanLevel = Literal["DEBUG", "DEFAULT", "WARNING", "ERROR"]
ScoreDataType = Literal["NUMERIC", "CATEGORICAL", "BOOLEAN"]
diff --git a/poetry.lock b/poetry.lock
index 5b11c6aa1..76de20c89 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -3058,13 +3058,13 @@ sympy = "*"
[[package]]
name = "openai"
-version = "1.45.0"
+version = "1.54.3"
description = "The official Python library for the openai API"
optional = false
-python-versions = ">=3.7.1"
+python-versions = ">=3.8"
files = [
- {file = "openai-1.45.0-py3-none-any.whl", hash = "sha256:2f1f7b7cf90f038a9f1c24f0d26c0f1790c102ec5acd07ffd70a9b7feac1ff4e"},
- {file = "openai-1.45.0.tar.gz", hash = "sha256:731207d10637335413aa3c0955f8f8df30d7636a4a0f9c381f2209d32cf8de97"},
+ {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"},
+ {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"},
]
[package.dependencies]
diff --git a/static/joke_prompt.wav b/static/joke_prompt.wav
new file mode 100644
index 0000000000000000000000000000000000000000..3923b1b9eccc2e0c1758cbc5e6f047beda9f1f52
GIT binary patch
literal 157774
zcmeEvb(|H|8~5~$xw*yC-Q6G|p_Hg7DH0+f3KF7%gdim-DT0WAD50Q8BOoH(9ZNTC
z*X^8{_dFcWZ{}_gKmEP$`^V0HnDd+`zE7N(xp!xW*W0&g(`GP3b#K}2#X;|XSjt9G
z6pel+HAln*iee}m)v~7c-(e_WO8HgW5G3)~H>p7DWx{_hCN@
zp%fei6)31cK?MpbP*8z_3KUeJpaKOID5yX|1qv!qP=SI96jY#~0tFQ)s6asl3Mx=g
zfr1JYRG^>&1r;c$KtTlxDo{{?f(jH=pr8T;6)31cK?MpbP*8z_3KUeJpaKOID5yX|
z1qv!qP=SI9{Qq46_^Sc(^E8nve;-<1Voz(C|3M|v2;u*65tvx#GA8Ch>=Hp@(x0ep~{Neeu(rnNDs*^{h@cY=@mq(@pD2RQ?mM1^@OnraUe9w+T+
z;eAaTry8HSTGX7druMAr=cKe+K*ND`)Y*)Ad24EAIkB~U6&JX9AEZ`K*FvSBqVNyT
ze!#`(TF-=rd~)AQ5NUof-sR&)V2j
zTj1ikNaT37L62zVZm|?1)aqdij7Bnlq<~J5nm8lkLB${xk>vIq$x+cUU+ohrXRE#9
zK;|Sa>=DkS8X&=OYDXf{f6gBErp;YT=ZppVIT1*o=#qY3E!NRW$cPYY(qbLb%iU@*
z@}jYYR+5*eoVnr(65m*=#Q^&0Ynu}tVp4g;7s-wEiFd+}Y(e_JJaT3W`Qo4465v2J
za77~JIL1g`-aR!Nyys~9>J2s&
z%lkKIGN+$}Mf^X$B`8=+o73a%v{HicZ?z;Af~WNb^y7CMqD`#-ot9KxZSl8j1YPAn
zJ4G;XtFk!wK1;4@HpB{ZNH4#&lUt5C
zDjuP7^Hp1*h+f`pq;Ae0v5DjpZq7DROWPxwTHcc)BR2WBfTooabS)3lL_%6sBR5Bt
zJ)Vm6ol_TZ@!BCCps&)8qjF;9-GAJ@6IeX$p^Z}O3GB7~r=@fIRd)+5l~XS_eq;oZ
z`nk39=0sxT)KM{2iORzi88uQTwbg23do3rD1{BptW>>;N{&PXG*aF3jG+G=r$&i}I2`X_#J#sGbMkQqd*fJ^iRxrV8EfG)l!<%|#e%*}lo4qiX_Gmg5JRO)Fk9#;>o
zOp68l+(*uOs2W
zE7evjBbY===7p+*=LU&NjSF)~2&%q{L}btsuG*?ROc9CJibx`P`SsM;Km$gAXaWs$
zk@Rs39?4ZG_XrbRtOePsl-8bbb84vkNQ&TUTZj)1EiZpM5)b!O?YyMGd-9=1@F*A*
zjTSp6F2N(%q{UVsg?v;l(bu*rCb`OkMdVmueN_`vn2RG;WyBw@B&jEfLu;v`YU6o4
zt@_c9HReWMb@S2$Y~bUAw1kWMkt6W8>VF!o&3~BxA+$bo)BjO>BB91eR8=&>$-B)z
zJ~9&83SbOkgInxdjX=vKw)k$2^t3IRaez!P00EDGB$psRPKxg}RV1MBkBX(0MtTJu
z!Xa8(M9fq1h%U6MCGHV>tPOI)Q4t9ek!ZCrB0+uxpX;RBJ&iUo$5!M_qJcK@huo3l
zsE8iX!TDgn#5*1tIFkdONQCFEU@r&%zz1!0b^#5TL=qV#ej*0s>S`r4i9~JH8k4wG
z8BlPju}0Fw?~~d{#DA-opI0^7{5<7n34Exjwk8A(w|ER9M}pJ@uUK{6YWH{5BgVm%
zRX>Gc41x|)H42=C7EK*Zup_e!TF`<6bIJU~8o-60och3H|6`}hNu(GLdV~WlsR6E+
z+6Qf-NhmcJOyL-{pImWdk4zs$2x|Z$AqZMI#1AV2q&NzYLL4}IsH$2A7HP>y!aiD6
zkSG51i>mGZgGcfZ
z>I=x)hcu0d6a1TFL{t*uRH4$9i%My#1a(u%Qc6))>VXna;t`Tl2`e0BL}-2`QxT}D
zD9fV?Pc?hgrYaSwdQ`Tu9|hZ(ilMG64;33~^~iG~LMTi{A-hx*g%NqTP?C~@JQqRK
zUB$1IMEzuhSqkNN8)3#!4#lHbC^O|kS|W;;MZ7FzQ-&%=olp{#=cu|U!n2AFd9_g&
zk&kF94p~DMeiXk*1r!&mV;suah5R~D1hEJsh~hF*Iuunp$}bv4#Zex`jM{9JpI33B
ze-uKrA~c!`DL&+%L(#|xm7(m&l0&w3RMAXiPf>+Xy}YPjh%!-0C`vQ(oT1!OqEKcW
zir%dhqvBBvw-8Ptga>))Q5-Zvu^_Z8gbO1UM4A-Bh<}7EPzJ=wdX&2kS<8=NhIqUv
z8U|G|h$?A9)x|SHL8B=lZ3%I8=m!p%h45sr7wPIy)u~5SG^6SVA7#ZN4GAH`JTV}g
zAfjOoNvIACiXD(RWFsSM8dWoZ;?yDE6pBJZIY144$U;E2A=JWb22Bs5^^ZmZGBu%C
zLuh1qB!St9XDH+ZHUjbhR2cctAs;?8dklyR913wb%8UGR$Qo7`tO|HynMIxg0@%ua~Z~tM;urYFebPTFk`TP$cl_0?8bQKgqaF+3|BdX
zeBgY^9t@ShV+<<j06kv@<0^$Zw9P$X+K+VAe
z#H;@s19V{(z?u_~6h=ruo&X!>EUa}J?F0Hp_J9s?!JGm-n0v$@Xaf&+E65Q>AAG}1
zfRTfFhgT#-iz^J{0COIXKc@AMqJenvwH?L=?;kM62E>KE1N=kIWM2RtL;yBWGZ;TS
zZcu561T?V>uPQu}czr{@kPV(eP<6-!<`UEzXgnhU2e9#s1v$(JEQ67O5rPP?9^?ga
z;#~{Z6t6yP1r`t$-VZ?!bRY_-9o~UJ6W0r5*cZrQZor(ts~4{uybnW6Pywh8-WhN;
zusu`&Mih2H=Fxb=SVD$ye}J4miWVvj7&s=#2SzsdcZM)CV23iTp64v
zXyDm_buk8x7~%pg;6V)FmBfm3#uyM2#Efw;4|~V^EshTGAs^Kb^uZ2z#G2qhJ)p8A
zJK*DdaEogJGY!@dSmV)x7+`OOwS=R_e(}7+p74keM6ku91o?qAtPOmP!#W^^D&TtH
zF~PF{&s>a3;=-9j72tp#@q{xcdLV-Xv@kZ#kBk7!O1Qs*c}3<2t}|HT@yBDMdclZ*
zL&hF^!+bIq!2=v1gF2F#h}RGBf@oFXIR;U{Js_-Eb#I0W!8IH#$!vnF2k2qnWFF((
zNfg*4SdmeOxuec9SfRKUfQsijMuN=oT7!9yJ!9Ww{2sY}bi#83dmwdEJ>uC2=rFrT
zeQ{06Ou%ar{6U^@K$JKJJl8-UBRvv7swKn>*`u=jeSi&K*`)R)FN_Jf;2MG*!6)m9
za4;$vGZF_N;87-e&{sK7Rfv&91ri(^)**Z?7cAfaAF3RZfr^ldLQQe~poLWm`nYxw
z0f`6i7NAMcaE1gKa38t*PZiYK;#{yTi5Eu&5;$=6$sU3A0U6ui^$D7=m*JQmnf>R4
zBgL_*kpLb@ARFw5=wVHg8*p(Rcw8`z{gYTgmt+nS=)-{{2TGkAstov%UPvS$!3U9(
zKAs`Kg&&9w+mP5X4|2leg+0I-k0r#0v&C@&t@c$K{6Wmn;@O4g1lGmAL8iulW5RMZ
zE^Lp-3j4y90UC2jgk&yiTWmqlu)bPnu*5jvg+xoZ+B`|txO&(#)>EqoeSBRbH6^)0
zi)rvfxPVVIF&4xE6pj$jczk{&$aNyogAV>jv~gA>1K^T8iA^LJ<`R8~5NCrUS0g5I
z5>4C(ENv#(o{R(bj&sABI2J7*d%!w?4L@X#08LgMZb=W&-~bI=0qChx;uAapm*9hz
zR;I=aTr43ulgO|S>-`q32l&&(jY*i!2i*OKG_mTHXoSz!$<#Wm1!R9f{0JhHB|
z-U&@Sk(TgDTxuUEau6=*=iOopb&ld3$*fS(aLq7AUdR}SNOHHiy<;8V6I-ZuBJ
z#on+siGZ})dIFNlkCYIf+7{2g{LX-ewbd5K@GNI_j%XtzJ1drI@glW|L>sl1R%6$4
zpN%Fy@^1l8d#L@~@oRnOZKLI4T5E%8oL6rCvvP9#!ego3rSsAuapf<|DIs%@R0<*`
z^M*v7vrqU>(-MDA(j|H%PqkGggu+J2^FI%Msjg4w>Nd9Kto#>Vvk$MC6YsW1e5d$
zBH^oT&YZ~lB^tz1UB7@yq)*$baS-b9k%LzLBSkRsZb?j7ihH>uR&9v}aEL9bInmPU
zX!VF3x2knceXSnR((>}QCz@)T+a@>X*|dqhYNe&}mJ)7$Ek;nKTEx673mQ*8o=z)o
z95sf#<#9F99twR?nUmqxxK4CRcn>^Pbn4i
z$<(v?dNSVAYCMbI$78BUk$Ng0C@hQQ{ga;Ri(qSKs>&mN$yJ}&5#OXw^dkHD_taHD
z{HnTuO?t$S${`YHwVH&lT3{*e6DhI6Tyj4E*}-oq@u5YC<3D}myxY;n
z_VhII_jDFSo3tda+^tqaOJhyKffhL0KG07W&A+XMqliS!&BL6$W6Z6sq7gf_)mBaA
zYAMzF-=MU}KoiWId?JbD6Yc-DjYK28C!rC&r)~4cQxQ}h!owWgQ>A22$-5q)9cf3<
zuteKaX|Txu$Sot$sMk`$QNIz_QUpO88SzIfBm1~VWZD)aq)%*~+=9iwapd+)I8WQ=
z+#yL{Tg%8u)yP#Jks8|G)97gRA4jWLc~g(GeY`AhJmBJbM9Q@_Qt3#Yy!=IKJie#e
z6FrrqULVx4P^EcaEwu7yr%4RBRcE~FU)3P!z{#&VsvXf*TjE3I1*
z%gaftI)wUfTRf}tvd-!A-|DNrw1_HKrL^*YPOG@NsmIO6+@2plgS6rXBCCBhOYoucA4idBv>6kgilK5;N=1$2slEIt
z)$-{nf~B<}yk~7OhH9l^5}wM*k0P4S(h{t^Y={inoJXWg+tbouh5I?})jrH++|yb<
zj#h1-gs!4!ZStm7U7+xune&Wwaz>js9*GTX$U%IN{=eU9z2%2a_BK-Y{Pcj2BPMqV
zB>vnP5xxAcHL7iHDnF|yN0u8)%OUnEk8u8Pw`v6WQKZ)S$^TtGv58VIuz=%(89T`Dhb~$4q7P;HJtp7V6V8b{=oA;sh
zu4?4gCwi(*Bt_&}ODzqf{49qSk65a_{|*UTVHAQuFiHRET4Dw5vm9Ex$I(y5)LK7T
zJ5nPuW-XFRV+*wp)YBi4c6s%3;;Y#CYlF_?r7FtPQcsJg>OC#;)9PVlRXevQm6MxN
z?H*6z>j&O10p)Rr>g91-dCOE=Ek$%y9^vF}LFefXjHcBkS2gmx6nF|2{%#)n@He33
z2?nqMYqB$vD~1;9Nwk`aHeQuWd=O6Vx%(s!kC&^s`RQSe{G=+MAZhc+9j&Sb)IT}Y
zxFRvNJ?vlWg-C&?B4{;o(x8P8Vym`!^MMOG`Q3LCImW>lxzEH4mXKclEkTIXAl!e}
zMq=jfspwi6;c9s*O{D*Qn;T2z>uej5EkDp8WI6&jRzoPFrQt&l?Xk^OoezRej_yClb&ldjhs2
zXRQYCNM(o|^CIV+D$8lDO2~X78o1TUwKPbz2f1qJ=U1znHy!CWvX|2$Qkt`uQycn`
z(Q0$WQWZmMt#VZ=FMTW}*Jy&E+T^5&Mq~?G+9Rh`WM8Y9lg?{S*2?B1(x{lN#y~uqq
za$kfyTl_<`@Vzd6OB~c5$A#~D$-OUl#pu{S?8|V63~#!^KjiIY@a{qQf%h(vyJj3M
z-pjFXcvBp_sSw^73U3z0Z~uX}gu#2F@S8p1EmH6U@3#dc90UG4cX$UPejg#cO$NW?
zjYn@P1x$E1DByCaZ$we!x4pr;QvunB-qz|tZ&U?bczYAPjSb!{>O)-oj!^@81FRLj
zKQ;=zZ8r81Wkx9Y&8G0~+rm_7Buhqbvb}@em3vV+p&UW~b|`nzI}$sm
zj;e(GGw5F`dN*x3^mf^L=$*0ID3d$NY2}KtT3MlNME}kp>qAP0au2P_`RNIj_6s7}-i)az7B>OOjFZ-TN1y(#x+WRsw}Wr4C8{grEm)v!~~Q;pGB
z8K^tTee^b{qEvMhRa5j%;yWm+XoUPas$CSSgcFUN1HCU;QqoY?(dezi*HP?y5L&u&
z4#iX!d4*Y50li{lYx9}R}P{KiXe1&tLGi`rd}O&8R5lJP<;o&Oh;oW
zqqilOM76I&)k3w=p~%akcNFVTwc@F3C~^;a!<$GwR2rZ$i9=cMM$uG8@3-ZV-Z|x_
zas$=uwz3nA%Q0mOit-8?Tb8x5{ou~9vdMO>0u4p9hA=JI7R%_9mxrfGdJhh4X1pSPndQu(GzbVu&)HkR;dz5Wx
zw7x~7b`;?V=ncLXmEVx17vXZqb6ctuwMiMR%s?|F8IADI2yHX+v<{8#Zj@Pf%0XG^
zgVa(qd;6esI#rmSi{@W3>I|yRDKzWCNPi}ZW4iL1;*{6P4VAgd7s`00q4KiwCGygp
z8ieNPd(;Z*81)CrY9RF%H4??Ph#HOFx%@k-fdS3`>S)H?PvnIxh(Sk
zo6<^2m0wT_Des`YVhqY-UM^+#FVLYe%6Mun$_
zE7cTQ`Bg5C-cel~y}5dyQUPJLQf8xm6Qs$a3hYtZDK*jDn1ztOr~A=Hx&w7UIZL%c
znC0n_XiSDs9hH$v7xZPpRy6*@mDiQ`<;L@kh#jw@(sDF!YE!eTDRmn%2ef5
zYO(T_JXi?(WjK-%2oM{+#k*L6=<}JBTpmczH&%TQs&YZ=+|g3+IJeFN)%G2P|fHB
z^sevchGSl5(wO5+EIWr8LT{#4q1n=3*&|humW8*6qhv-tBCU}u(subx
zG|!yW6f{cnX)|MIGH8Z=pV~v2&^(%tc7%}9k$RgxOAn@7&|gyXk)K5L{_i!)18Jr-
zR2nWdl}Af!!^OkyPzNa_m!d8yd*s^kRe3A*1;erTnK|qzt{dM~_*v*E9OfHxCz;jA
z%M7|DwMkkK9OrX*gTBFm--5>A`+>5-=RyW)wQ`#-&A!YYWorw`y0*Gmd>^(B!?Gi}
z8{Abki89FN!;8Z|$T8G;`4y>B_)xgGEJ^3WRA_6UbZ}1iBc&V_kj{n5hu)KJF(>)9
zLSrG0U&^21pXX-_wR9JRu0jKDJX3-mC|kmV1JnJJf}=vELT!VW1Ac!iUwQwD&`_lh
zbBqlzAFw~LKQI$GiSMMFtZyt#=hE4gv_&Z)oeE70-;@?h4J0wVEO69!#{Y7-p;Ddh
zNIj5^^jmBtE`=S){>ENnjqD(HAvc{ZLD!R`q^k0ZXn*`xnh=T!MTb10M$#0;L{F!_
zLRZp@R1$rfz0S5`*Rs3Wcev{O7H$fg$xNbmDt(m&@%zAJ!}X-vNUOuF
z6eE2uEs$Q9nxc9Qmm5)uX#L(n|N1i%*`4fjY)j@O{U>c@eq-Kce`1#~m+6VjL1rPn
zkvghSie33j3Wwj4s!6S-$>FTfvG8IkUE<^n$tM++2g*k3ELt^xNfkrgf+fR?eKY+Up%A-WelNT`
z#7Vd1fl3DT4PBn;!Stj{GGo}b`~tzMOA>ksd-*2pG-?dG$xD<*OWoxk9=qN
z>6$cFu`{XcM~qAE9^B-8&wJTB!B@?<$UDM!-WTn!>fh|Y65KB}qHk~&bg9BFZXn~K
z8lme;Ke|61M;$}^&px?2+H)4m>*b3|U54fbU3uMM{vdago65%sErn@9Z{clzIrkG=
zgy})mm&b%J1Yh-k?``HirKE>~v070Mc&Cc)mcQ2A1eE1s@UxcE?O8cR!M{SvB7
z#>3xk{dM{3x$jSXeWKAx?(A2W*W6o|dDi!5_=s|cjWeFHFDYED)XH+dR`{jD{c`V@
zZdj~xENx*7fAK#uFUe*624`G`oP0E?eQM>*w$23a!_a(ssBVmDv2Ae7tU@Q^Z^c!O
znHBR@tS738X%kyasu$3C?QXZr<2vPhnEhqehgns#&t;EtOm{8zH4T5RG^D?$cTk_A
zowPZ9h})#=r9UI=J(=FBfsUa$QblSq+e~LL
zyrX|ZMEL`Z@YehA#|eLlxajZYwP+CV7ec0lE@;<<8RWkTv8EE(rO;Bb1R$
zkhd5sSt>-w6`547e67~?lB%qXJ4e0kO3utmZ3Sffa_`4!m;cPiekD3^T0c1G^%NJ#$a
zUhSJ(uk=dzN5}6eS*Bd!@?FYGB_|fW6Z@HEDKCdd
zc>csSRNJYjW`*O+m#^~Nfx=43ugaDy|58O`)w?ytdJpQ&t4&v3U3zHjRDtmpPAYbzL&E$^%P-fu
ze(RR!?(B!Jr`C0B3hof1V=9!IT)AlN5siY)`ZilsPpLS+MC&3&<7}os!f$4N`0$TA
zf7}>zrNhOCXDgi9b$0LNk@tIMe;<0AuVGexl@{GB1@COMhK#SX@ubVQz3>
zRfhAvc;n-Q?iXuZDthI?^+|W8CEiPa=-d?Gncs{h>}+h!_!~uL6gyDVRVX#8hUKKO
zr9Q-+q_%~p2X^@0^Syx@Vq?X_3^tSi>KyZz%smvDYk(LWffh-~RaK
z#hb&gO9`15c3&89X~N}oH)g?&&NczpYC$eP-=s?-$)9W?FQ!sBdk(tk*0}Ew7k+i>q{A
z=Crig-_tYSEqg-#qT#dhA9Po41=4gP}<(%`@;LH?>n^T>YnZUnjZ3#fR
zHr-r2u1@hGWm;5-t1`6upqeFX##i4|wLz6o*@|)BvsGOcQa-yc-Hf@i?&9uC2d-Va
z)9zvWluDV)oV~nfg7I`cL*J-tg?}x!qlgk)&ECwklDinJ=KL)Emy{z(s~=`OFeElf
z>Xnq3$R>|YpWvz!v?^WLG5R>m>roYBY%zJ4M9k-Qq^RIu#qd>x@sU^`PDDE;qlsHR--9t+MwFb=BB9W?}IL2MM0eaZX@P35jFnr--^eLS
zW5&wQ)L#&fS!UUL$1E%KR?$kOLgi;xol$*SrMO}ZOvj}Ajvo^nT>IlxmxJ+pi|ie^
zui5_e{i_Z(I+}2*;iXS*6-vo)y{xnpt?@g`S!?vFbGr7O8cnN4R|u7uA2-OdR#%;g
zmePGLXY0%pDPt1vJgD{X)08@n{eg|lJh5*~&tk{Rtf|2QuTc@ZI
z(OJ>mq9xm#mYJq;qQ_X)I9K0SIKs}Sdr=9>66J05O@u?aPxWQW@vj(uw@i)AD1N0v
zk?MD={Z(#Rp*w~v(ov_Bn01Xg*YU&;$L<`falHBQ501Zd;;T~|&;4{c{`Q5WIHw{#
zCzg+IS>}Glg_VD;w6Mb5vR@a!7`xQcNI#PORM`^h;VZ(N%<+MQquC|m(uoS
z&UN}d@A|t0kA@3T4cQUgo2){umrF=LhUh?3&oxK)?Ah6A*!_QnE3cRJj4{*r
zn)tw2+PKS*YM5k{jBQ1`sj+29)I0I7m9UpCT4GA9(YRk;;69f6*4>R)A6_hbA>%^I
zCH<9t*V1lWxZU>tTZu6#U!?EOUgfzQy1|Sz^tQ~iuZmWpPuuI8%jz1?--gEpGJQ$D
zLV-@9cjfJLX|5fAlwT~&)hCK(+vu2t_|?Vkm)uY$v+Rm8-9IiyqeY1vD$gs^_8cH|4i^z34I2mU*;AIlk|xOk8zv0QXFqo^mlbdb*~E}
z_*2{tZaUvv_k&@Ym~Ij+A(7uyfkan{#u&qu9^c@VchzD1FdMSO+CsMGpb
zd5mj8itX;AYY!40T(T$3PUwDR&-DejkKJ4IV9UcvI=U(eXi+{t>$
zI?`ego%{-FX!x5zMgK+LpJ)zOlV{S$xDGnK;YZ^N6Kx%D|2?`?T>Zj5i!CehPRWfW
zrWH$$e>rB3wS>4p_=esVZW8G1ZyjKQ_X3py3;pN)oBaEIO?+?oQhh}O-v*C`%7;G)
zH4kp|XLwt9D!A@sSIX+0`F7@#%*@Q0S*x?#Ix}1!cxw4ff%U<}(DiUFximGOX~SRE
zWf`W3znCcVG}Cf%jj^_|rSS)2HE|1?4J}QSsg79LSk>@{{v&-IeJA}Y{a8aov9Tp&
zPmO&y{(PYpv4d=b4Snfm{^nVolBV3PerxuPUvE&iggeFWCEb7hp^*4m;+n+$No!IU
zW$p9m!avhJh4#ko=8$Ev-Q0RyC?~v^VN?`(P_&X>00k*vfy&ETYOQFG%Nto&4{6?H<})(pAG*%h4oz
zZq}x(ML-fDt
z-!<$rE-?Ac)6Kn%PVQ&VdU^yXcoQF9dG{r-mwpz2TDbIAtGIn%T_u
z;?D@jbgT5=8TJ~li6cy#O&0WN=z#f>d6jvDSvDOK+Zs)Vd%FF?AwGpG#kFU*q3`EL
zvR`uzh3fhPhOZ5O3C)-TQfA;qZxd(B%zkN0Qc5OoNTd=wC2mVxlvpeAQ*>YCNI9Ao
z$|&vVlEh+9pa%-@)An2TB3THdsjvCK68V45WMH7+vLHZ(Fk
zFqp&%rc##O*828NQCp)rM2)pywhgrnu(IZ#3>}5`>`7&C=%%-cD>(p;1E95y{T_wC~s^ot}?Z>gsd;vFWZYpO^=eJ
zVxzZ3Rf*bVf5U#zcGCKtWrBH<=}WPNv8=ucAID5q(xh!+Aru$*#b@(f@y2=gxHr4n
zxpc09&QBat_N$Kjj%cUiXynXxE_1DMRdJ;_KXLu$7Q8R{w)(#bUJPH7>r!>;Mod?>
z7Z>J+@kP=1K-KgCeU!n6B06F)8abnEcoFTKhmGfqjQEL|EfzEBOzXu)Vj;1kI2~Pe
zMu=yOrwoF=8SkL)$t%Lwf`9m%dE?y2od#!7$GNQ8nOic%jE3or(_Tv5ow7bfNc}Z+
zO&XJNIP+$<$LVn=dawBJ1$|*jzDJ#9?sH9a2?o1qpE<@_+*ZV%W&bQHDXMUE!|3|a
zVstPnF1kjv5WO_Y9`&|;l+9;3W@>7@ri&6za*NQnM%}5^a#d-0=xo62>+UsquDFIe
z4UU-XSy`R4UdbAjm6)|CyQiayGs!vJb=NiC-N)12Tg#W`TkBsESQlIu`ZQctIwgIM
zezzb}$<#yI!35cI{Fg#${T_o|OcocLJ~J=2thRQsjkC3~U9`Szy=a+-_WUcBbTp4I
znEifVOet!+)Fo6Y(ABro6YYN0IXgQmb6iHn^xLVwr_gAI{Ft&UH9mc7#)PcS
zjxw$r?k~M9{f@xokX<^0zKg!d__!=#rM{4{r#Qs)qS=YQrxBTIWb{RZ$y_=1G~I)`AHSAUNu%1UlZRH+llewRpTt<^TsG+W#bZKEAfh0*VNc_O#H)WHPq9s;G1#Z
zF(au|X;NrJV7#vn+OZ6d&ROR(UQ16-O-h-cax&$E)Nj(-X7tTk<#^z-ddvAO!Ly;S
zq~1yu`X)1iD=b{p4Kyq@juic3d($M-QB!U6Pv&S#Kg(>(&z9dTKUt<*qAb15OHB8~
zYGPUAw|bxOJAa2O&c(62=%&;WxtEk3`XM+yFw_5yZ>{&X=R?nn9*27ns=zqcOxGUQ
zbyvXE#J$=kzYl>Bq;M-78?jrmS&Pr9-4ApCW3f&Yj%
z>~83q>2RWT&?{qTx+m>c8l65Oy=F$o%!^rHIvTo;yK8#q_~HX=gB8Qeq;iUbva`+j
z4!Y8YmBwU|Hv7!?EGw*Wwzq7{Y`1O2?1k*xY~^j!(C6=pmY2*8OdjJ9!vVMO>#CsfNU)bH#^@p>yQ*t;Qtkdi)=X~Gkb#8Ti
z?VjS9<2~tX9@riXg(g
zbxi8P)IMn?()AflGxuf9b&Per>M{D3`)%lIT|!Eb_fYegK3sdjreA@+hpKLBXKrpO
zWW8=(ZhOZr*=t0(?33*=_O-TpwzbwoOSbu)Owfmg@6gQK$F!l7l}++usb824
z{T7(yAMG3HZSAS-uIu{LndSJ{5$!mTy(oJz`mVOF~qc=?wj7T{!_t}uuo2;4lti{)r8Hu8isF;x5c38jJdC6x5aM#*m~6J
zLqDgi+pRmT-&tR=-n6u`%r&nyeIh<@4C<%qQu#*QZ1mf>AC=KjeQ9phC66qoqS=
zF1$xCVm7eXIUD*6Ru}y@w(AXTjAxAv#Npy{@h@=~I*$^Yi&ezu#DU^w@w#|YTq;J1
z7UPfTzpow4-(+JLx3W)K8Cn=v;M?Z0y1#Z-aQv7RpLr-fA+19ipB9xiBkk?h#>08e
zxxaRgaSwA3agTG)cOP+=@Qn3b@HF<$^QL$|_1XRV{a*&255|Ppa7pPU`9oy`b&2*d
zrMcexOkuLFpT4Z&mSL{3lsH4&A)ZD*Tg4%w&-kG+!!X;>-|(iPm!Y*G#&ASmNpICn
zK>F@(Bs$={Cc6a{+6MaxWe><`D06C>s@Of+bUFt-L}=XNwzk&Th@Bk
z-j?y^Z%hlt{>B)?S>1QS%lrwp3bT}|hen`6cv{dHIE;3Ko}Qv^&Q;Yp+Y#e9oc(?F
zs%&TWV6-c&a*jv4!4c0c?|k34{v!cbFgtWGyk7cKzO6h&_e%X4E4zlR#U14)@*RZo
zI!X7Jev+Y`(ID;>Uo=fXJM~agXHzNDPO+jm-54@VHxxE((Rb9J)y3(8{CMtn<^fe&
z=`6h;ni1INYvT2|cetiIhdFv@56@benUv8dqj*N4j1Cz;Wi-evmeo0Xt;6hE>t5zP
z<7Y!LQZc1H{T+LszoQ#txN59r8gE`^nP9DHJ7XJc57`GqO^SLis#TP1A8G%>Hr(3L
zQq#>Uwz%87b~$@EGab_%Egi+s
zzfF!(&RNbvuFbBlZpHnlXOs7!@0Q;fY#y2vejxQzZc+`H(d;JfEPqs3isnKM17rNl
z*h&0dj52+Mu2o+2sWZ*=hpCt8l-NSNWc68breqeVqOS-BMu+if00|jyfdY
zMfXcv{Ik51+}oWoj?c52WZE<2w9088r+QNEr4&PZWjeZ2o=N{TGby{iYl7#3uUT+)
zm{NMvm)SnTpZe3r)uyqQS8SK<_oHS++he|q$&T^GT!{HHrclg#(eFmpvfr~Vvbj-*3$XVh@d^N2d#^4o-*z-uJO(?&UD9*j+Y%(9d#Xz9L-QI
zMma3bi_ST&D()3%kBs*Y^riV{1oh#r(j@duLV5Z-v_7r;1%8ImOV?NbiD8PdJGxJ5
zkFJdOO>djS<`I?!mT8s`EuAbj%W88`a~As4`G<)${bqdKa9+2Ozs_VTccq2ld%>@K
zm0aIs1u|aED4V`J_2ra`$sZhpv(PvQK7xoE>u9
zciwj7PuTR`scXAj`q$$?#iAl=drAHnU}I&
zaEwLsf^mN38td-i{?0wbO9kc!_XI=13E`u1KlG%`6uLXRnm;Ve76uB#bsqgTgWIst
z=o7y-ImCM6L2<+!Nt#eq`+_XT-JE`STKTh73bR(sBMoLCzS~%63+1j(IAYN^qeRmP;!el#l68*ooXlZa4piZiIfgeuh5Vpf|;t
z-!{D_t`aMoMavuL4$dWZHI+9N6~`GHij|S%uD-i2T3^$!NjH^sqi0>x(X(Se1%vJl
z&IzuCu3gz@(=Vo`rhb?DF!@4Ke9FYMp6P|s#;4jc<~r`UdG96f=YeI?8LB7q6^p(w
z()-Zo?YB(R&8saoYpNyMdd6DLR?j-m{GDmH=?3}+=3C=o!wADpeOp~^VTkY?`kk|d
z-^bKtmaxBbE7=o6P2_=EbIq;$ZPzbZ@!TG!fm`Hxq{$zcG|Fv@k3&^fowkCHaf=Jms8RL|G^Q
z5xV9b=6pAMQ?}9ZQ&zK#nrV)dswr!dE2bo-u1oKbQ7$7nKIJ%WCY&QzAmYvobOZCMPj^>;+y$SwBA`4+m5I%_*GI1;nR
zXHP_Tmir#*!R@#eI*S`N#p38&qfDb-tRYSl<4lE3?M1)gvOZaV8C`KE=y&l;7!t=N~Y9GS($P)b$$BpnI*C-I&M4Lc~<(;15?8@l$lI#
z?l$)W|D&$6ah$1*xtiH*PBu+8y@sw|FPYO#--;>d85?wML*KIR(KXez5}NUoxn<}(
zDYjS3$OoeC}vRPyij7Wc+@?|ICg
zuU*5OqntHd4_q&}C!@P9)_dQZ<$E{KICLfa7rMi0sEnr8(o-2fGmUM)eZ?)|rgIB9
z9lruS88SxrLpUZ}7pm*-p>OWKFl;w`XIN+O8d{?7@J6FCL*Frqn~XaRdc!__PyH}m
ztndXlpE<4k8g3VC8d&1r^|Xof9ovSW{Hi|4Gb
zLhzID2XYJQBj!4nAQaXw(x1_HM00eikwu?&HybY+HtPS-bwa;;7^I8Q^%u7C1Nol(
zt9(a(K5r3v2zFrxKZ?K1+lBpn3qFo7&Clj|b}0QPb(H#wnx^pbsjv{P5o#aI3P^zy
z=$pzlzU$rx=zH4@-nYF|yqVsTzCXQ#y;_F}kSA$>1*G6NwM!3P-(epl8TogZyGqQ`&bD0a2mGVc@
z$ndq$#L&IqzQ9)h%l_GZJNm82l)(8wTu_gAhXQkh7eaf&55m*Jjl$1|$A`~^t4lvh
zPV|J>ROtn&r{tE-%G;IMlt5c(C#9#qp;s`Q*pA%mTq-+^ZNZLbd2St7iEqfC=U(RG
zx#3(%{!cWs(6^bwUH(13DnEzsC)^OG3SaPxI3N2KdaCOh-4=Zle?e{#{x%Tub@L^7
zH+o9A6P?YRHyqO(!yK{BX|5PgJ#UmR&Oa;ACsa|oDQ~7`GG2Bk=i%ZJVtXM;sHcn7
zjS(jD)6jZZ!>(rgqUR=GWbV)_&@M8FzC!O{Cb4(fWHy~`&h6%^p!r^1m@5nwmhxk{
zU)fP?NA?%={9zAjnbKalB^Qy0NHwG?()sX)P|0AT|3yFLKkG~Iy@@`p3=YXsYx)~5
zgudDRSQip@3hf1%yU#2~--(r$Du*4R^$9nx+A)Yx?hA9{Bf>5cbT=Z
zKKgg+Ii;hN7~Jfi;PZHUcx!l8xjuI~9E%)N9fGry>oj^q>X3K0uVLUMdI~T>`c+v^
z7h|`vCs;SS+CJdk;(zA%@o(@Wxp&wf(et1$GaXQkx-vg7TbNIo8cZjq1N#@}=Bo);
z_?>(SVUOSwWTB+4t1eR*$scBqAshnh!wuk$NhlH1h7lnEBE!n7Gx8Q`}!r+qNpTR(o4|$Mf{qPiNmK=|s
ze*T5pP0wYDqUQvcaO>xaf+J6caK
zbIrMS9Qss-W>;yp96OZV!6u=ReV$9_4)AXZ?+DSt8MHU)g~Gy4v;r2gZ!vS|RP=ni
zRXHFX4x7S5Lnnjv(9U!^up{teU`611z!a<&Y!mz_xG#7ucs_V0NQDN6HiY(tzDMiy
z`;a&ELHJr&3Ug9z^rWN}eWNa*zphx0p6AY{hcG7eROpvnDi_PQ;y>rN@xSn6`DT1H
z&+^*)JcJ
zenHoisE{RC%>R|=Pgf_`pRU8MOU~8klgbB8up3;zONlfR0tL@T>xYH}dylR_C!)X4@W~ye8sY7s*Fvk%?<@xe$_8Qr69NN+
zv7x2GDuLGiVgWYPIP41=gmf4^*DOdKEjy1nL6<pCtp3hvH
zT-)7u+#k5EItDrJIaWH`xfZ$idGGj#1lNZ?mi8*M>Gzqstio;4QO4uOHpWSYa_IM9
ztaviw7Y*VH_U75a3&tq0FI_5BKV18yAu(R1#TycIo+BFt(mAQIc
zGp-@~E?pA+h0TZ5Y4p2_!=ZV>{y}HpMfBTmBbrg~2S@s^c%Spx0u#|HZ5nJ4Tob&E
zu6)7Z_RwtfP5c&Vp*#rv{lN)&m`p3RDINVLJ&RsK_ojX5De#^2YGyZja(pB+7VWq!
z-=5pWn$XUh&CeB%3qJ}9--u7(LR%sCz?qN4UTr
zXZO%c(KE59sEgE_=ShRnCL#
zO37d8=BO$jwEN?KhWMyDX2fL0jy1XuoOm{nLCy$WayZ-SX7%^z!EOYyL0(*4hib
zhnWCYx0Q9l?jx4V?Gz0wa9N3$x5!zQl}bltmi$><7l%b2vg9Y}Dmf#K6E=CBWjy-I
z&E)&?F=Ya~zzuain*XmlK`o-5Qq47^-VI5m2G~FJUC5}Av%#_MFJub3vul=+7x?L@)xWr!Rl|0{(os9TMr4#o)L3(6l)+HkhTzF3Z
zQ%@I<%hNCAkEC%)6;iU3@5|;r=sgYwVUhou<^*q2PN?!8xq;GCDX64Vnkh%Ir6tvI
zYIC)-VhM+MDrFVp(D@5;G3Aro8>?|sE~^B*vKTemk<*#%$l~nftmvHN_|fsr5yQ^;
zR&AzulE%7T&^C65Cn>z}Zi-#><0T6NB55dFhlcDEwpi
z?(m@SHDT?;Du;~-`y*^^x||`qUB^J6jaRlPU*&P4ytIo>o=%q2>|?w$HsD89wI0|v
zM3iW4uK)o*TB@oXmn%up)@Xa0{k!>E3)K#37IX6wc8yw
zzj;o0ulalGJ&j@JJ8QfYBu`RKsCyjE@IfM7-CRRm^IQ*HpIp;i*PNQ8v*WYcLyb}v
zDtWQKF8PQUDDsPJ;t$b89xJ~RfvH=0&ps^_~OT3M~Vx36e#3W7zf5*%4XuG!e2@G$d`D7q
zCHs=%Q$~3Q(#KJaPKxu|PkLMPgOpLd?%3#*-2VoTPd6j1M!Ml4r9)CeM}+T>h!2kl
zbBDI1BA(CP!qv^02cIKZ-YAF3o29?3^TrW6Tte6jubTtSGx~LZ8~<2slfFt{qBYRk
z>opB9ESevD!ae^x|78CX|8Lqyt)=#p|Aeonf2fvIZ>N9IbD6{F`RPuF+cv3`+*9!o
zD_>AYI+7hn9k0|eYOK1o$92Jj*
zpWcKkBBSUi1$q(J*te}GW+$_^InK;)&NZ^J-{+$P=8pbe&u=USM(hQB?j{Zl%0X
z3aV4^7p^<15K9fi6JOv;am9i2TId>v{)}_gaGiDLa@KcrQP-luRq3{PAU(2ATW`#Q
z=4fNRzDT>}p9iMuq%YRn%^~m0u-q~KSccw4F*Uc{&Rg&&EE}8wT
zy!3iqlkSMWt1n##dvCj^){xB1bH%0-oy6G~{7<=%JVvGyMF7kPwjXLIWQ!_&>Va1p~
z(&_b+y^apQP$0_$Ud;J)e_ap*T0GlivPj&!vJsSQLMkmYD;zP7TF&WM1&_0wYpiRB>kyWqfh(QsmGgpgpL3`)#nHuaR1H=;Dre=I
z@^O(t^roY*t{rVnwqBb*n%h|O$BdIk3*(-?N57{(*LUfI@J#pU@w&s14L7=-!>DHT
zrEe^gIl_Ey7Pi`0{jJ5;J*57RorV6+?sP#8r2B9p9eB&6f2Dc!dX}c=@h9oH-I$)f
z74&7Ew+FEn|D+4mrdM^9q|n7TMeJm!oG8DOgOs0@q4YdmC;M?od8Xu23#m7izC^wK
zl(zT*ujD22D7l>cRcsOcMI{l#HOkTx7{|P~uubbfdN;aRDdsKno%zVzVm2`)^C)v%
z(#T@OFvs)tMtU~gs~ypX@HnmI&>QH#==tfZyQu$SY%#pXSmyn`wGiZIHEAdHxr)sC
z1*IIj^>H=AG1>9i@v}3(v%YgJeo%j>?A*e0iyRXjPO=j1)gZO0vPP~b$BBPM196-<
z;E6rTzCy3nQxGyH{c3}Z;l^~MwGpk4)!Xp31^HGq>KJ(#VRd4sV@6i9jycM_Y!B5{M=8Lgng19V-%Wd&oKZ#gTS{{HG+fgncYjiZ0
z73oE9UYD1+NJ#hSL0v^>W?|c)Gctx8YI!@EE~L{|1l^qt80A9T&XI$KYy;*9Jk
zy9(Adr?iIN$Y`mUm?iFu5UlV|ax3|;d|Q4bf0aMUPvmF(JTCuEzf}hLpje7lEE4TR
zgxG`U*N=|e6z=5}z2dj+wRSUlNzYnDJl0-oJ0n_T4YNvG4(pXES?R0?W-L8BbT^!NTqhjonI3qQ9V*v8AqtQnoG
z)1@fsg_LOTvKC{-Epsh4qc_oU6Qpp@J}K=IiD*VVpL`P%i0gD
zf2=XcXQ#Ems&4&7zgi9JwUuDavm&haRybYzQTDHN3J;>gdIFYqGTqn>xVz<2oaiI>
zz`ER#=3z09;~OuR@5(Rb5pol}k9$Et$L(uq#&LRT(Q-SR
z6@`yI!z#v#5zNv;>pxcUCc6AeSvtGvQnV)7JY(&)DqCmJpDpaFUVEc$S*7r&_DCf}
zh>@fW|Fs=D?)|W
z&QHJKWUT%R`z!lIN9zx}x>U@LGFO^U%{JCnx*o5YS=c#3&4cD}GSxw5R^z@=&KhqY
zM7Kwn9+2s>l-_=1owkojdBvYn9(tP>i}d(iUBqjq!My6BLRK);eRj6-PhmGOuM)>5mM{6K--!KkAbu-?j_$+$Nqf8;X^GfJu__R{@4&CF(`GgjLp<(krX
z>x)^>Tx=U+mi^31C#^Q-=#}ho@)_l(IA_lA&(-RSTk1;XNAcDwWS%r-X^+UL+{Tlv
zDp}?->1V2{jg;(GBP-adD@Hr+DMRGk_89$((MW2hTvU(99nI3(9kagtSl%G-vYq5+
z&q&)uRlMRv^Nu#sEGtsv%VG|F&121dQeI_{l+OHWd?oi7Vl9*=Vy!IshBeywgD&=9
z`oH~VK4Ya-QGAil$V)_jyP-8gs=@BCLd=v}(KXZDYC|-eNaVNC$gW?t56DZEI&vN>
z%xpoVTa>=^`Phx0qW)ei6VhIK&vA+^*cD;4`QSs5Y9Tc1tMdPyJuUTK(RSeeLy
z50_eq4E7lO(O>P`lAyM;nrP%FGoN|gEL&`Z)Atp7*^6&T56CzrFcU-2xFlkXCFU;Tsl!%2Yd%ylRqQ_aI@P4B!jRLc
zQ$xl$fVfR_~B0UIUlLIr+QP6=s#A>@j_;k)|6f8zK_yWIUPIMrrFu
zcr}vPb=q2{ofF%%6O5%t`jwGH;z7-(13J+jVHB}*$-gQC#TZkvzlx*sZE1nA%MVvi)WT@uM;U!gwU;Sr{IR32`LLsT-xO1EH+NwB7~GtPpwAvgB@1KwjU>4Y7F
zjrhc>d4LD8TuQQ<^8Yi^aPgyEn$GmaRtj96V7G95E#VvNEY0?{O7AxwMSS*_J`ZL^bU839eFl8jzL1Yyg!AWt=)}>>3V-f7K
zHRLYn&v&Z^_~`9K2IT12Ez_~X##+m*3^0#;z(2c)H;|V#^Bd9cHLFS*f6yY{bw>J=
z+RWn3C6RS_2sV_LCiDTjx9>Ef#r9oR
zkRzQb#d1f3r4_92Q}D`+AXm2p-|iMZ_HerZo}Fp!vo6t(-_#msEwMIRq1;~{yi6Yz
z+3(z8PsY)YcZ`!lM6LkijtH`h^ktVm-&OAZ;Z9(XR9$<@ZElE-Z1_!9vT;ot@>LnQR_yoCXe1l
zYoi_22N-Mg&U$w;_&cn@c0T))omKitJfrqGiVWWxCBIscs{LQEU96|hdr=*xoMG1}
zA>R>S@qV{U-9)m8Bm>!18VDCpB|PeuSdfF#0dZ13Ag_`$D76(;86p3Mr>T-XzeBA{
zCQ=+tbTR@KrdRmF$IT*kXZfr;Q_Rq^rz}lukW$-Q%i~FEkkBS!MAEzD%E`|Xe@IGB
z9-L~XZt-mOU$pl*BHfc+ok6nv7W{MA!icDdf%Hp6r0X02or~F!fS#q;I{abv4eKEdd|0}($eV?p;Zn~b!TQ@|a+SPf}QPiP1;vFN@
zIr2+Z)p0XKU+B%6s;BPuF84-wz9ltFc$RQC=|oB=Pfzb1-wJJ&(a}y7idq2q40c32
zqMTLS9fL&hwxECAYh5p#igT4(T#1l<=u>0XdrvAyS6C0F;koGqBTY5-$#rB^Cz8=x
zs}v_wp*w0h{-QJeAIDBd9rc9#hR9_G*1Hik@rqJ1*`y-YYICl+*PL&yhi{^_)xuXI
zsb)gWR6Gh7B)We=XeY1=);zy_7Jv-=8(8Q2_VeiuC
z&2%GU`3$Agk4Rr8!`$@$gl9-c?}KZFD$5t`U#xZJVxyM6%U{L6oN;WUn-$c8l
zHHFutrXZ?zBq`(MNTs(^bI)?ub{olCOy%
zX$>)jWJMAa7RMrAz(yxaeZ)DkrG?dwRFI>{rFC-Lb9QcW!n>lX)rOSg!u2_|eho
z>?vblgL0ykJ*3{~o7Y+dhgb&~&^bGNiQvT=>Ftc1=3L8=CW37Jq$=u2wZ3zMJ5P|)bwV9Z
z4RgQpr~Ich$Es`|(R2B)cy6ZbN-mcCJ~1fazqtJI#}fJ{rKb`z*V|AlZTgJ`U*LIif+LE9+oqvZ9{ZUIU%R=o
zOkLyfIc_-mIvP3VI<|oB*iGGUm^zLAr`k$B`51BWKkRz{Slh{xM3HsRKsbpWWQu>5q@wMUyC)Q3bo-#Fcl+R)PrId7q
z2Hgmr6Q*aVm?cNnYnf+cipjV2rjSBiB62-Oib;G!RX#ReE>-4R5Sxvgf_$
zhWDE9t$#l~8g;FPM53Fedh#6QvQk@}<;VZ5>yt>o<2*_fgk8
z=OsrH2#TKaJS2h39=pBH
zqRvG1q7qBiBfC66w8J;PY1d;{m;z^38!})QAs-ygZb$}Mx+Im6o2k3iL}{MpOwE~m
zF*$egFY)!h^^3mztzXT~Bjw=;B6`UzPsMg9~y
zEX%FTa+dm;k7uYFo-_1o&`|b=436gVDSMmwx4zx~r#HRlXzJ`kDsZ&y>rnFAElk_^_
zbNrx$9*LEcN+st99bDfh>61-K+A90ief0Io^x!^m-EqxyU3Cs{d{&-ebDi>2*wB8k
znguw_K+&ib^09To#|brUHz#XRVQ-`Dg}?Zy1a*&p#ZVm
zX)+5rt^H)F=1}i=Ztk=i!w+`>Pkn%>MlHCqazuUM7_Gjt^Jt?zpHjD{E=kg2x<39MiFdJoc|6U0h0MP4Mu+CC81y;a$c!VhZjWplIW)3Jw#$)9mT&2c
zhb;I*!3@ZD}sBUk^C^RZv2th)^Xz!W`hpdo7^UK
zh7X<#YYCphOZl-{m7chAuHVVP$B>8b1Bzv{GDYqqGD)kf!Nk-n%s%+=ePBg1iGTLW
z@2S7lqoe$)V}>&fnXhuJcaC)hyK*_lI8xLS>UQ~*RNOuUM!{_lwceXQfFXQ^S7TVm
ztU30dL_Q_NU=b|mQsR|<>R?A%S6+85XFst^AMU&5^`-7lSobyYQ;*MIqf@_q__{rM
z%eQ&)-sBCb*Hia-^Bez4xs@7fl5=k8#|%Gb?H74Ja((vqIUD3^mAz@^4-pU2xkH+|
zZpfFd*#@Wy-RsZovptHps&}ntS!%`9q8`Q9*DqL5oqM`d-UtcViQc)PPzpM3f}qb9!IIhgrQNoT1Fv;O&7v+P*X>XrQcoQ#y27-UY`r$3wP^ont(K7!-nekF
zUB2L`6x=OreZ=I50pW+j9;P!xB7?d(YKrovQ%m+%OO+G;``Yl!wJ$xQe~T_3^)+hH
z_m%O(l7>=^GxfiuLrMw92j`^V4&mi9e#kT|^QkO$0|v3nVa3-#oqk%GpzEo^S1X1&=`5OTl!pUrFgFV!i&TS>cO
zhKQcZ8FhzagYy?xq^paw4?Af_^(b{^1I)t&{FLnSLh`F)$q=r#E|bM_Q8g$J$5$_T
zi28k3M1Y%`s%)pbE*BYjxB8i!@oDO5lGGND^LH5dzDpw@
zrdm$zB&*UzX`yCyIAABMq=$Ot)RW0E3866+qk=zsKgUPii@qCu=xg7Y2k~8!%45&(
zYv-)SB2{kb=od66>`aER%pJ27i+qtiD(CZ@SF_*EnwY74249#Ia>toV=_K{Crm(Kp
z_{vemS>fy8{UTP<6|MTdcuD+^zFYAG{QXZg&x$Xq@qWkkqa9{fVUIwjlJyFXm
z8oBLO;+A#Q@au1=6#WG@r&e;_q|S+t<6p$?kBNxA6ZauuebTCwt={wcGTRn^DnlGs
zU0Z`r2X(&ZVa4KN{!Q*;6SMQ!jU
zm5qX~fv(!l1?o)L*NU?{_g5~<-`Ibnsj>GJL!{QQS$=~v@R<20(d}iJG~L9Xm8oH-
zL;8c&!|=$JGQRkFcvDhmCY6sJ68$&5{6_Sm=<89>qNm1uNVu9jJav-yo;K3DDJ6=(
zm71<9p;aT?8OmndpSg0jZ#hclT$O!o}NBn;onhwntv-eBd4qiuDM+*JnfYpz-uU#Go7DLbe>n
zC%uTVQ~T5VC}lPn^I1v3358>?e(xV+(=BQwR8HRE*{QX%`io-fBWFg~1pW<<4=NUP
z&0Wo%+11u@TN$SmWiP9y{;JFbBh*HY1`+w4JW?CjRAR`#_J%n$3hu>ZwX(B}YmKXw
z>mO&5V>-4XpPE_e3l5|ko%+#KxSE0asHY5|pDrU^_=m{jNY+a;(mG;2w7Wyv`$>5!
zUmMVF1ul-MS@e(Gpn8>6zl14$lLgMxlc
zcO|@c`b`;|WX+eOL#{5l;&Uv`UMJh_EOv%gVS9oe)7RX~`k>DPTeZ|L`GIX-A=WExnU=f-J-260GU)OE;5~oRDz=gIniyCA7
z*F1YtquKY>O1T5;RSWj8`$U*egMM%?
zbLMtlq`T)CXvb`>)~>BEF=ld9Q~HXH>>|CTfpV(Sz_~j3YuM-TcIm1HmvYSzmhV`?
z?XPn_{QC02(`hd%y?-3NIH9Nil^h#
zYjQbrz0Gzu%l1r$I-Q}nOSr4i2}+vXXW@4yeqx$;FGj;QKxF1^v>yn{RgQV*t-NDcQ+
z^bgbT7{Z!OZL6hO)3~VJ_QoZbPV^=uB^FOQnm8Z)e!G+u_OfPLHLIQ6#1T#Rt(PwU
zd_jh@rdmM$4+`dTM!avgCpxt+_9ry)8ogR-T)McmbZS+If0$S~B_{Q>cds@J0+v?l
zddD5-UiY!!Jt4JW1?m<2Aoy7D_Mn08WLIu?c6TZG3@i}9SsmY%9P&P?7TAvZu;jh6
z%D^U^fy&q&kj@XpE;)Xx_*Lytql_=Br)+B15%nf&Pt=7k@4vXeHi)Sjzb>(PN>*PTW0`$U
z_>?xTejyvfibqsPUoK;l082^sx7q4s>y~YMwzH8xXL+9CSopV4f6xf$Zh4#i%$#jp
z((d{0daJ`NaoT6nRq9U(@p!3-|B!klWtQiRf4cF_{9t{SQj`_+ZaUoC$Xo0UYU!@x
zJfnuIl@zx)WIoV(`LFpVd##k-Nm&vLCHAKyb#=nSgq%s=lgFlB^NjJG)~cIrs8Y0&
z=Yr?m<8Bl*A;=wUfGw*ZGA=kOC;_&BDEi6V?$NHwt^>}oj`Y~$$JCGv>9n1RI46mk
zT@CA#={G0Bf>{QB$xn8)6hr=T2^Fv6%70*Q&ndmZ7x(3RMJ0wB(`zwE1c@!M!9JpE
zb)v=V;2{B!67{?@7^3qGv}pk6surGB_fv
zd3d#mdl}AUIhd_R_RZODXZx7FT8q}SG$LiRr=qXFf1CEuNQEz|fm+=WgXE
z_hFjNV%{@uYh%38Da}$gr!-7)C7X#-(owo(@~6B9pSQ`o(?3Z6Zag3#K3pB`Tkh99$`=kh_8FgtI4IAEVR~N;Fy0bdqhgwXTD<-bOY2
zi*d_bX@yXKu0R&9r)(j?ve?EtiU2inNES*N_MU}u?KA}GUOBh?SbAwKH@awzeJQEY
zN!{Zs$29*o?(4Pa(C9}|7o-3Cb|GeV-13CU$tfPUR>}Cua*I1^8~4`W$j~ZbJJXlX
zJU8py$a2|+X5XG;UyiQXyGM4-GB(q<^qs?Y2H$cWb1YGgO7G1|#v%O&eXDBun^MnC5O}yq7s8
zYhvW`$f&G4v)0SHGxL)SB0^8sIW$Y~2j@~HrvSTST{n(v-+kx2rYEOIPK~82x--c2
z&u~N>PTHKDA@!r@gm1Do$hcq?0+~6044_cYkx?0>Cdj*~Llk7a%(iD+b*!mYOL!Od
z8V!ts#(ce>*3F;AKhgitf7ZX74Chn*o;lQBE*%wR$nXR^cR3Ab8xSxjLBKR}RRRUm
z80MzK&OY${)^ff8=@k$9aK3y4w4njFbWWI4H4saa;YeR9nNnl2z;199o2lB#pbGW~
z5BRw-Ia;b=yNC~;8$gb{nNojB7D)#ZF2$w9RF1h3^IvS4xD#=$5^^O!Nu9#nPSrM;
zo2A+E-%6xoy=ztQwa`Lgqr&r~kI48-rcs%eW_q0|KGTDYo6^q+4};6@Yw&OGkB)+B
zjJ#Utb`9%^aaBL1&G1+AmG?IH^iN%$k~t+qN~@F$DZ^8zdX9RZ`Z8*N!Wle(EOHSz
zomWe>$&I#wb@^v3d;xKu$p5g^9FFahFk7F3RlEoluZ`9g>lTRe*RbynHp`NoJ_E+7
zD)sX|)CcxSImO>(x))>luZUq(?az`$RbVduU0p=BW)67rO`zdpL2WLe7v(0k^da&}
zc^A7yA+qG>YQKSo3uMgF)1^9Zs_m5&Hm~WQ1pXW{Lx76&QDu;up
zN(RBc3>M2oxBQO}64d=&wEJeC~wQ4mm>ko9!Xl=t9*H>j~L7W>E`
z?x(KcmUSUyFG!Z*@;)lO9crDgSyYrz!e^}C-?@P?4#2S
z4J#YoF+4hKAU(orSle_>L&HLf2GS9EB$HDu=<-di~@QFEwjI!@1nPjHwmobe9tjYUhgJvgm02Bqu=9K^-yx8)ve#r
zwcPS-JofU=I%I@x_lcl(;PXwoj3YzBLt=wZ2QLZk5Nrqa3ED&cprNauv$EqINbNep
zWB&ykhyluVy??zg+B@7E(7
zL3OMz@@pct3QKgPX7Zi<&U(2dmH9Ga1~tAXmeVS2b~63~Blk#Ks+H8f`4h=1&(cnS
zH+2%x=QOq&na#CkXROXsYFA~*@cM}lbE<3A3g8~g(-AY5j;2`d#pUuklbs)(2e7=^
zi7uBrA{|4)*fvsjb2lxymrGRiN>P=O%?ZYHy(ZV(qAk!GX$k(V{)PVO{x1Ha{xE+w
ze@TBee_Q`5e~`9Fll6&uA&xTnZ0@x>Q$czstrbP&b#kyWMtP)!tJSep@2QZqr@P}m
zi1A9!pUD1Qa%^y`GG9uzd?Dt|BJ>$%+GMd<-`C4rU}w
z<5|u6<}h$Sm(1)~%~Y!{70^RysV9K%0eMx3il)Zt4ezO_UgxvA+L2_xXM-J!gp+)i
z*#%T^CfLfGnf=Z2;DDc-rOD3vtrlr0)%u+3P?+iz7!y&0ip*_z@~eO&-wk*FaaiwD
zl?xA*~cR(^6(_0-#H
zEAsV4oD-P6+w`;VV()zEXh(-rJ$gY3Dyzt=+`>X;7e_#M)?JW`nam4d
zlBXJrj18d3zZyUC+$l2^n=yz==yB_VC3DI{YdpTC^l`uC+?Yb}v=<`EaD!=kO0}~k
zes+{~%390LKZe!696jD`-Nf6jZ4YFh-eA9g+y6Id0{itzcI>KPK_`plVi!Fg7udU=
ziuc4ekLg2+7B2RtszmEuL4D1lmu(rA_c*;he^ad+BlnT(%a%9*k9sI*jHc3c@P0d~
z9k0d%UTfqwcIoZGTR+wIYs6#ECLLO9jzQ57Ir4jJMb;nW33fO}9|*KFqlTfp=Br1eHVmJ7r~9%|7MbbVy9QJLOX7CrnDBsQ8)x$y5tpqRBHkqsGm-AfxTv
ztfy7<75!rQ%p>M%R?|*;S2nPYwt>oZgYv8nWBoc%wmH#)ziBLO&qTDHt
zFoz$6m&X@ym1o2W*2r?^xH>*gBKztduGR+RKs0wV1-xWQunDo&b!!`|YZG4032?xN
zt*h)AL10HRfVOOG53#3GMPCS#C7Sw*$>}E*I16PoIK*N2J_o@}TtgB^z*=6E4zbJ6
z1;yM~s)N0?z{19$ai=-8;}EjDNnPMSaFT~W96Vvaddq)xSj`F|fq_)2H{p3*1LJ3N
zKk1O%CqDZK&rV}@B016L4R`zww9pw4vZwj^)_%Zg8vlVO`kU(E5d4frXIC=
zIF`N&D5~akYt01_b%^s+PJw6r##tyinEBFNwF`GKhtp;@OFMXMLq2Cf9o@qRImK8G
zf`OmOTo>c~7YU^FT{L2`J%RN%2qbC`yNlfk>}grnWF@;6k1A<(ys@BuHiBikXn)~K
z>G5iEbBDQz-W+tDxzc_b*ugB|>VF0C(h|H(JI>7M4IX0@zR);neA?+g^QGCW)8E11
zk4}5GKN?zvUB3XbP&i@Y3FEnqR4-ydpQD!_kj@t*8kHuYgJ|m+q_hX?x(@mO$KCJ7
z58IBA#@n6DW^GQN%E77$W?VkdEAf0zEHibV8u}SN`5@Qd1crSsD`_U$G?uyQ
z3m&Mu-3`m%l~Ifa-LnDdUPddvr)gO@Cn^cd;W%KgWx`46vq
z8@jOyefu3`Ljzt{oS7=lyjB41Q#0a!y2|L0Lq;m?COF)0tg2Y~h^p^4cZ~p$9-)6BwhVZUpj3^Fl^dZ()Pfld0
zi6tZ71gg9Ox>O6z?GA!^43Ba44D8SxEcowq1TF?sv=lq1vzeKj2pOvRiKI|
zq5)megu2-50<0n**`o*4oBv@hM{@#F8!(9h{CffWMsRO#-gmu{~O*%ce_5?I-8EEmX$nP-v
zca-nDneD}lc?v6ZK$;AHW?d_s*Y*aTIf3l{LW&D`1W-;ZLA7m48^=*(d7iVyF7opT
z(%p=f%|?!68S6}BIvLq6;PuPX-n*Jrdx%fF##|e)02V;ko3Qr{LxMAy!4>G(dgQf-
zF`Z7cTsv5;0jbSH%DwP2TA{DCupU)ds}0eQ+ENYnx?)(S5U?tbu@39du2D#_9XR(!
z=wd^Dc0qPCShe#Q$qq((i7}ty>na}cReqjkW-npYAEGm#IStOonPD6OKs`MlYquD)
zT^4Pu!HhNF`t6DEMl;JxnbDnS;gK{6?m(goxR1WbyD}P_3yT@BBFXF>8h0ykGM_u`
znZQ|h54m38suww<>=<&}gjB~f$F1;he!&V?va7RWbz|KI*34w?X)!WfoF>zSXv%R$
z{+dtD&O8rh=C&d)(Ej)zAK7{KvJ=cj2NxsD6Lcp$0EPO3pZgi%Xpp(XS+zaTiPm@u
zm9bAI7WWowVlOs!6?40W*&2lPsKcW^Up0_oTcjA+DY_%uzSx`zNb^tZ;RWpX7rGP-
z*2VwbtuO6&2RnqotMPF68l662$TEPq>x=%+M0%4M;c8ZMV0PDXirX2i@hL{Qll8rp
zy=*j^+BeM}mgb$Di^ch1PqD}kEQdd(R7
zugJ3r{~3&zu#oZo%e+7M|5;YB^`)_TebD+9=-p;k!EvPbNP2}7KY_pt_&TqN;mKh@Be4`|*A_R8t@Fl5w{YZeEYm&g8rM0vYXCfl7m<52GoT
z=3P|B=MC)nb?Me=&Z=n)VyP#OZtSUJu+x*Uk1JTa2YDPvdKnp8U>=tsi^+_yDVA_N
zV>^%BlBj^aM`};$77Coib|6i5CmG{9?x8iOCDy~HcjaBZ(84B2@FQ#P6>DHV5;?}I
zSdOlh=1~>RYmTf|V(IpBD^_+J#yB27vJ*Po3F$0l1!m*E
zwKU(PCVxlahvlH3@--4H$C^Bj9-ITwet|(5jbs;zMZc!9DhE)B
zFHJAS5mwS97#QM|^_=9?PY$&rjVij~@8wU?+~8P0u{Xsqzv0$+x~4Z;Z|pd!xSUZr
zDVG(^Y{PtOWHh=OAz-ziiRp3`dgObGk7O!-03Y9j4iWmAi0%f1g9>5Rmt*Z-Ne$%_
z%I`{P@rZM+I)Nr_ZSH_&`Y88%-Kx$>i1&$}--G(qL<_kG=+u7FHgv2mS@h9p(+xCz
z6WvAWh($|aU3xOZ4(=nFQv(YSNljsHKaeTDM_20z)>mHkv%9SCyv&{?dV`CN!#WiL
zJ-ZS9pi9VL1Quj1{lRV+Hu9rEIavqi@G-YDiz*(8h6M=0e|RKS6ivwQgoxF68n^MZ
z8)0jzqtkuS%7^U25_TsC9rCB>t652B<_E0yLPpq()q4)V_6pjz0DC*^vh^u2Nj57CxcM
zS|3jb)=;8`)p!O0pLrVJ13O%2*84~-p9k;Y1M7A#UTrM0w6VA6(&TjlIqgO&>(O%`
z(PLk{hr{e<$9VQCc=}jI-xb?82J1JHwRHt~ujZYbx!X64-G@#7nI5xR=*vJxR!!=k
zW?TNoDqqIxZBFyLuA|!@Si@V;@Zs#WIT>3s-cuGYqYk@d;LOh>NMt#l$QeAYwIJ%d
zV?SE6gVn(5E@dT6l-t`=-_dab044fSLguhj0vH>Xv1GAo9L!?l5Qbm}`#ZT*DQd*XE`_
ztv$JmmsWcAmXCHFX6F!batpD69D_xt*=e$1S?J-aP2Mw1@xfQ~8cvbXx1X%@uVJ+Z!6>DaGI;C&Wh*J{BkoQi$z$o|k6
zJ93X)(GSFdPFCP1bS(joJ&s-Z1Z!qI`$rqr?g2cAiM;j_`mhgu+lFrJV6J0WA2pcK
zN<=e5>CWs!Ja&+|3B;=3n7gaY-G6iiUM4O(!<}7cS02d5tD-dHQ@`}%_ZIxF)
zWL1c?%*}NqwjT{$iEX*h9)1@qbeYkg!2j9B{RPg24&qwHSd;PGhnK#9ENFFO*6|2D
z|5}W>C2O=gBOQ|#lgwlVcVf+FVg@S^@pi^8x4{PGW>@%t4@VOLHY<@h>;uvZ^f6uF
z=Vey-9j^B{EuK2g-*@rE-lk>hk1@8>Nb?DIVsO7Eaqo5he!$*w3_JRXRU5^9erL3S
z^QK++(gCj~;31d7+iAckIvU|k5r)RzV%xC+sY}b&=8}u-e&-)R1KVdXSdG;R9zUTjM
zxtn{$F@ckbPx6e1Sx!i^iGkQ=70)FFx+ajyuUOz8kVS4}RE^oLDTHy)b*BieDOx
zgd3y9KcU$Lc+F2}buJ`d7hPqG=iUMm&Bp7C^WS3pPmo34hn?Py+>Y>d6RFs02e
zFw&@n^hRO>Iv~g9{I3EduZC5w$}7q;s$d=x+8D*md_^{o(5MsW+A-|Ra(0zJ7||vk
z8(9B)SrKb_Y(bKNNbVBzdJ{R{V~(Gq%gL-PH;;^5xhhxdkrw?lL=p{I1EcX_=i$pP
zU>_ODT5g48t0UWA7)=di7|=YEJ9D6Y0bBPO9e=`o2R{D_yVMD0>ogh|SPQ$5)qjj3
zAeZ}G{UUNa%ri%@Pp5eH5wc2P_EY#CI6*uInwA%7{Dj0xA%(Ws^nm|fGtD9ea=1m2
zOiiR&hBc6xIdk(paP~H>%V?gP{nCp@1kNx|!pfww2EMTBqWP&YA3h$GT3MSBj9y2d
z?&3cMM)r(Xea9ld;%*Yq{ZKy9LQWDk=sTYyqx~j71KG+ato=89?su+bBL|c5d-?Qa
z-sxmqzaXb7NbT3Oxy{dNuEH!_XSRZFTz&_tm6$NbBR%C_=7f|
zV{wI>_z<@lWgMR#IMqIt^>CG04fIvLWF|kO3xW7Ku-_cxUS9Bfe43x19_#I7%qnqb
z0qk-|Y+EO!*%aAEq(xVOPP6R%uP`zXW;8aV57>)<&N`?&By!cjnd+aAS!P~afHhHy
zxygc*CH$}i{-40Oq_m1pASZZ@v0X*OVv&3dYxg};dVp--^S8`<)U>O3n3*6}MP6nu
zj8&GGyANjWvhm3>a)@ROpZTug?c`>3rC2dJ8Ih6ZpH*d51R}z~y3WM=b8s*2w0Gy=
zQxcKSW2Eqnck7Hh4(o9XJNJos3S2#&IZI@=bY@aZo8`b5f|%_v?)VF@cOa8Yd}0cB
z{*b%5hK|1C4jy6WkFcV)GE3)}r<;s19plT!D$2__Z!O3+Qh?o#CS1D{325E8xG>-eIlM}%-=TtLm)%A
zo>zBf?_SL34?+4T@sm4|Ygxh-x=mtLmX)zfJ4d92wuvQ{JUV7d_Vl(Jp+OL_-yTW{%L{A}gM%a5TG
zJCK3uL9Qf3^a4@voch*Qe7}}C@#5zW0%!B+UOLd|WJTJA}zAUnYMuA~F8A5r8`
zYM94CT6H8-F(1FEAK8d@#8zX;<;){a=z{;02hXnwKHnfBu=n`6tB4tI;^|fst2kS_
z0X4JD@(TRg9-v=ag89fVd&D7lNGgc$_}hiqd5_?+j3Gj=MfSs@_IwG%$s_LL1FRzt
zVY)a+Wo8q-slCCx4uyMW8uL4rF5B@iJ^X7uwsJ5xd(hb5sKbQAr!j+SUO9QTd{Iu4
zBjD21sgwRo{eLJ-AO-2!YAQYv*VmNZG2&KaG+b1l7E?w0f$Gy;^FLStlg!LkajP_}
z9*@ngRAftU?}!e0y}MtIHrZW9^hsbHphC>Jm**Et`d|7(y)=wEjTmblP;qOlH&$g{DrOocPp^uzbM&X{5>FivrbRc138=FVS?yhdANClSao{&$dCdoS|>
zkf(xZ(S6o~VQ-?JZ7SVeg{Y>cfWugVWHhBTHO@nvjkiSY0?S?jHMc6$)36vUUkLcg
zzlcmC+5hd}q{k)MXo~mDR%Z#@zvT)*ZQB0qqq=cliLAk&Ym@M55*TtfOPpDi!*M
zmr#Q(Wkwlmjdrlg6*dMN=ZwonePgixAE<=yT6?{%{zR*wCHjBV7HW^RIPJCeNvo=V
z)=wCt;Fc)~LL(2B`Ga(ce9sKJ58{*r^|YfnmGw!^_s%Y^`>w3+9PSv`QO>?P2|w3-
zS1#9lr{vt|=t-yO1vS0e9GuG#xtJX2^!F35{Y_Mw-8yKd7#f_I&A@f9TCVs+B&U^p53@*42DzXl(oq21`6S!7zEOCtI|{b
z!EpiB7LaAcfEkFM!Gb%%aTo2hlxYHL4h!7#q>_HXdt^*7X_v;q1bdTsrX)&gbiDK5CE9A-m)B0&x0v9+`$40e{nuo5C2Fe!M1OZ)IWTJoEph{iOnrYrO
ziWq(L#oAea4*vq5<{jdV@{ITV;E7L7rhjO%=bXp%H1=Ni_J=w8m4B&LOHb7=a+c6g
zxK5thQ$QY+kvl2})kMcx=LA<(_kEbHj|627?#8
zKh4Q0-?WkN#J1D#!Xdkd6W9{+ar%g
za>do@YEGE%E;_5gxLpg*?f>Y=>dpyl@vhPInB_s|UchCx(GlU;1+Q*t^)q;#P4agT
zafR5`w&Ix-wgYI)JH}5&e|>}Yz+cAyk1xWv&0Ei_c}{y4fqnWHuI(z`1>SgXU*A{X
zK)=^N9S+xBdJ{tdJ5-Pr+nG7tC3O)6;lOLBJOy#GPYrf72P1UP;dB;s7I0>U_d6RM
zKKY&boT1LAoD7%G@tE%L&RCUsiUhhW9F%A+aUW0bIeS+;xsd|kPv+9UXBpd#E?5wc
z{!%}VT-xc?^oCfEGqCH18o@?(qnaMK
zm<}x7HDeq5!#HC!Og+mP-wM9}W$ea+n8r_LO|z{z6?W2E?Ca_AtPa^_$qG*pk@Q$a
z$=N_2cLlRFN0|&mY7XTzJ7$Qy3vRM)ctIDi`whwJAI8IL3|^!HeH10FYIt7zEEAN}
zPQ2FY_=C7@
z2^s85FgU#g&-KO_ZuM3r;G?0F5`SW%%$*^zrC
zvBcd0e$U55MBCBPE_lgHu?-)H77G*A9Ft0cHIvb{K6n`!lu7tsDT>LLq2$G{s7Gh4
zPq~3?H{+uXP%0@NI8A%Q%Xa|QyihTVj9Xj#p7kr-YAHqqPT{?%)zI$3%>UIl)mPG|
z`W(KjzB0Zxpj9vWiu=#_+iTICH|`?40k9Nq;ufHP6cz!E$F#N%}Q+oQ?~r7k@Tci1n!$RDw_6R@h?h(!+YRZhNuPnm<0
zG6uuJ5N@}lTeL20(-B5zytC@sSN}$TOS;@1`5yWte|d1FL;Vx*L^5h?wSxL;Jz4K;
zM4_w0z!)DQ(%LWe;*35Uj*Rr`MAfG*cKqnvz{#>fu41m9uD@O9UAtU^T?Jk5;WTaU
z{0g(?d1ibE7GM`01FgjlY|mVxpDA<+HNeMcj4wM1YyMOlstwX+z<<3KPW)rYHcYRt
z_aGja4P_%)n++PQEg22ts0WuCWe4-?TReg(pA)5nFeD@dF8dd6u#{`
zFdtsUYgNT5;=jt+n?ZJ7IvRqpeWl6dmnSM(PW)I3`A#Ql{TJ@WwlD%85I4ct`9*Fx
z@#@2OTa}+>i5nW>r}u)jp%t}@meeXE-s)%RUnFQ(;$#jo?#!q<&hvgT2JV@d5
z+8F#^Nqg&m=8xxGiMp^E?}7WJHr{$+V+VfvC9@M;80+m?tgdnFoRg95B-YCBuwgfc
zb^jYUUAn@Z3y|_!=WJ&u_W#e0HI7R7NiFc*x++f?-#;Q)j3i@P37&|&Xlzb%qmkb@
z%Fb9um-UBWG?(CMF4cChW4iPj#8|tr3U%1AUV-Fo3*V#-ZutP{(-u@Ef~n}n!?*QT
zw1rKwCuq%i@GL~pkG)Zy3yQrVCvHpwW!oEWw58bEgX~uev4U%?=2jNCasMHPY|Y8H
zbHMptf#EmC%ml039FXc?t$g4B4v=qYO@-z&80qmuByULB{lHyxgA4y(^!FBV+#7bt
zBsoUD##-ql=Yu_Lqi6^d{&+Gp>#4t-B_fP5=bL%WOYB}%j4
zhxqZN{#`Ev-Zsjpj&BtVVm3L=-i{$B(SbSs1-|W^XwDw&d}WZRC)K-jqDOIl!E5y@
zc4HWkQ9PEapOR5I%ACitRwLP+OHgU4PwYAd&c}UbD`Lf4cuWKE>hl>H;KNIo=0}xe
z-x|iA^@s{pXFM3oYz+$2uv!ztCZKbJ!A!i9a^s(@B#sZ48*}$3SiyoGkusbf(hTf=
zMa7|VQN*INjiJUE?smIz#famik}2jB
zcxcCvSIA7(;v{**_F#tMVC?#n-R6HRodtLk+1JKrCZ2jrf#UA&?(VYa;x5JAy=ZZF
zx8m-)xG&ztT}v&=BxBz%{eMrMg>Gq+nLGC$dCz;^Q-X-#jO3Dv(;FU57xY3_VUKAk
z_PQPCCydU#m$0Z8vd=S0AJFjZ3zl*Nlqdp@eG|Cp^N2@2pg7r(IOwjr0dL(*&7%HP
zE)jn(R%WpK$1Br#1{Zk-`RHXkPR`O?JFMk_-5N~xufUK`5(bK|u+t~-6GMp09Oh!?
zjznhrm`QTOe3|EQz`Vdb&|J>!h4($cl#NWZA5}xYhmRDG65VR7Wdrm=H?td@`bC()
zb>UclX9W(i4tx3YH1Whs^|xw=Yu-Uyh(e~Wl|;*8H4%hD9MBFQwiQ(TBkPkF)T}=>
zz@wzAAwZBOfP{8F2)ac
zr*2pryeyn5Vk9=FB-qtp{^bA&K)%-EZBM`zxJQlek6`Ama-j-Qktf}l
zPydU!x(N8yJ8HM9urcMyFkXO-Eue1Q%qYUUOQ=bx*pDA5@?3%kz7an73}VE#Xk%9d
z(4d$V#tWZ=if&CQj43snsMr>xWAP*yVi^?1juOjN=X$R2IW<92Ucu5{LNwl%
z9a5K96L6JiZ3b$kS5UDDC8{6E^?smY+YA=XUg8OnSgHf-xd>L^b+{HPv3pL?vu4Bv
z(ZsEzKzin4ombO?wv=dXJ=d^~exwQfdn00?C~(bx+2JEVS4)7YJ_DubU=+eGy#w!C
z4VKmyes~*R9f{)y@%fX<6V~fTK~Vot*)7UW>Isi*3x4-0Ucm_>*^H;O5bX37tnX4{
zb56lnu3-yt^mQu7|A=R?UPp)>H}c(nL`_+V2ye41`trL8*txM(`YpzNkjCNk>XgtU
zbV*MIFEYbEiPE#8FJF6)MQV9Kc{0?jl?6pW4cf_?yj2HnkxAO
zI;NIl31Y?heBw;v)@bzP>d>Q-2SsBQg!c-cK8{LG0nWx_YJH2zqS{l#&WrDN&=(`&
z|B}h*641d32h%Kp7ihDMtT!rYNpSqb&(B5zb+Jn1RV0U<6
z7#$N5(TuK&`s{xA^r$F`k3f$<($|=dt0*cJ!t+<+RSvye2TJAt!nxnT?wbIUC!6SI
z$L-_vv|`0xvWu5-@+-k`OAk7qgYL-?c1eked_-_VQo2^HK~>}4mN__0(@bkP4f#3MP-qJ0hf;xl<~CRVo%wSZ;V
zn+Mc({1v2M)Er#YmHc%bg`DslyM7nznwxXCo;b59XYL>Lh|g)SwE()nwi1=^CwGn)
zX4AvJ9Sx5JJmDOWiq&FyVVj--{n*JqlXr}#sC$uXvg?AYuv>PwmCO11satfOhbh()
zXB+Bx@3aKI4{RD#C}<6HL#zQMoKcR8wl>!MmfGearr}a6v7s={c*_}Y!c6E5il_|p
z-Sd|8{wrUWzcFj9vGCAFUeR59
zN~@z@_m%V>^DJh5>||F%S8tcyU6uLalY9-;>Uv*cowUMS8l-KNeYzvS8S32O2zB(e
zueZ&yR#i3d;%--le*1vm^`zw
zWZCG~9>KnDNOpOM-jzi?czVZ13slAzOgYrbG=l8
z314~WhnB3TO0B>RsPIa&FIISEkhY$>fj}JQ?`XFRS@Snha&QA7O)(FcG(;#swQv@AaQ?;W~
z)K|;_zG!c>C>1FpH!E>)!v5ds680yQ|I;~fbn+0F
zD$i4|8NsGU)@Wy*pt2#hutnhuBQ8X!;s1u|p;<#)2Hy`D>ey^+Xk{*$6eB#+Uus!3
zFLU8Cdwa?C<*af?PmH@astBVzHhG6U$$QqG~&t)!S39)vuDwbZB
zpCEm@v_#A&+~oYkGcTm6qWA*n47{P#p$gPqGwCxy-7G}FW$?|Hh;>?%xAdT+^n%{U
zs6myW4Jdatb0bS%Ye7^ZPN9a;-JBw26esC*l@yQNbvgNV;_8G6zlQz1^;1E=qDA8J
z1$-V<6iaf#VuD;ba!6cIW%Y+iV;h&vILA_Bs*hKH6*51lXTUN?dwX$PJIf8J
zhj35d!!B;)D<&UtN4XPS(XRceGg8aBmbjBWCA|H8HVmNQX>o&5fCRv%^x<
za+X^I;q3MC-Yk)=LZ1@~DDR!xxUVFyyVCv+g5IXddag>WP+bd!H(*
z_Sl$@vgS@pYuf1W_4o
zox+UB)|Niz*{FN=5Er0hI2C<`l1w8pgGS9J*BnAdRRxXN=hTA=VmB5OU3VkT>`c{O
zpsw*wye2u!JvcvYY+um^%x-g7W6ZOpyh5-R;mzo>Bv1eI%|tmwes+Wysg*-?#nj
z@jEPWM(R!P5&fAo)!Nk=8yp^^yi6}hheptNA{vYDlJ_s~N;!hA@mS_Mj&oOV*QGKXm93YxhUJB6i!@wRsTRg+H`U?H8gVPP?W>M;q-CfnSy+mmNe^#R*XG2;U(bK!|DO8A^C{+2
zjxX81H~kg)XHfD$&qOV~X}qmcK*o^Bhz{xhWT=vHWyVTTnWN@qXdih#Vodne&_ThY
z0uI