diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2307376aa..5f40243b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: ci: runs-on: ubuntu-latest - timeout-minutes: 20 + timeout-minutes: 30 env: LANGFUSE_HOST: "http://localhost:3000" LANGFUSE_PUBLIC_KEY: "pk-lf-1234567890" @@ -71,11 +71,11 @@ jobs: cd ./langfuse-server echo "::group::Run langfuse server" - TELEMETRY_ENABLED=false docker compose up -d db + TELEMETRY_ENABLED=false docker compose -f docker-compose.v3preview.yml up -d postgres echo "::endgroup::" echo "::group::Logs from langfuse server" - TELEMETRY_ENABLED=false docker compose logs + TELEMETRY_ENABLED=false docker compose -f docker-compose.v3preview.yml logs echo "::endgroup::" echo "::group::Install dependencies (necessary to run seeder)" @@ -89,7 +89,7 @@ jobs: echo "::endgroup::" echo "::group::Run server" - TELEMETRY_ENABLED=false docker compose up -d langfuse-server + TELEMETRY_ENABLED=false CLICKHOUSE_MIGRATION_URL=clickhouse://clickhouse:9000 LANGFUSE_ASYNC_INGESTION_PROCESSING=false LANGFUSE_ASYNC_CLICKHOUSE_INGESTION_PROCESSING=false docker compose -f docker-compose.v3preview.yml up -d echo "::endgroup::" # Add this step to check the health of the container @@ -97,14 +97,14 @@ jobs: run: | echo "Checking if the langfuse server is up..." retry_count=0 - max_retries=5 + max_retries=10 until curl --output /dev/null --silent --head --fail http://localhost:3000/api/public/health do retry_count=`expr $retry_count + 1` echo "Attempt $retry_count of $max_retries..." if [ $retry_count -ge $max_retries ]; then echo "Langfuse server did not respond in time. Printing logs..." - docker logs langfuse-server_langfuse-server_1 + docker logs langfuse-server-langfuse-web-1 echo "Failing the step..." 
exit 1 fi diff --git a/langfuse/task_manager.py b/langfuse/_task_manager/ingestion_consumer.py similarity index 50% rename from langfuse/task_manager.py rename to langfuse/_task_manager/ingestion_consumer.py index a507785fe..a29ba0c28 100644 --- a/langfuse/task_manager.py +++ b/langfuse/_task_manager/ingestion_consumer.py @@ -1,54 +1,41 @@ -"""@private""" - -import atexit import json import logging -import queue import threading -from queue import Empty, Queue import time -from typing import List, Any, Optional -import typing -from langfuse.Sampler import Sampler -from langfuse.parse_error import handle_exception -from langfuse.request import APIError -from langfuse.utils import _get_timestamp -from langfuse.types import MaskFunction +from queue import Empty, Queue +from typing import Any, List, Optional + +import backoff try: - import pydantic.v1 as pydantic # type: ignore + import pydantic.v1 as pydantic except ImportError: - import pydantic # type: ignore - - -import backoff + import pydantic -from langfuse.request import LangfuseClient +from langfuse.parse_error import handle_exception +from langfuse.request import APIError, LangfuseClient +from langfuse.Sampler import Sampler from langfuse.serializer import EventSerializer +from langfuse.types import MaskFunction -# largest message size in db is 331_000 bytes right now -MAX_MSG_SIZE = 1_000_000 - -# https://vercel.com/docs/functions/serverless-functions/runtimes#request-body-size -# The maximum payload size for the request body or the response body of a Serverless Function is 4.5 MB -# 4_500_000 Bytes = 4.5 MB -# configured to be 3 MB to be safe +from .media_manager import MediaManager -BATCH_SIZE_LIMIT = 2_500_000 +MAX_EVENT_SIZE_BYTES = 1_000_000 +MAX_BATCH_SIZE_BYTES = 2_500_000 -class LangfuseMetadata(pydantic.BaseModel): +class IngestionMetadata(pydantic.BaseModel): batch_size: int - sdk_integration: typing.Optional[str] = None - sdk_name: str = None - sdk_version: str = None - public_key: str = None + sdk_integration: str + sdk_name: str + sdk_version: str + public_key: str -class Consumer(threading.Thread): +class IngestionConsumer(threading.Thread): _log = logging.getLogger("langfuse") - _queue: Queue + _ingestion_queue: Queue _identifier: int _client: LangfuseClient _flush_at: int @@ -60,16 +47,19 @@ class Consumer(threading.Thread): _sdk_integration: str _mask: Optional[MaskFunction] _sampler: Sampler + _media_manager: MediaManager def __init__( self, - queue: Queue, + *, + ingestion_queue: Queue, identifier: int, client: LangfuseClient, flush_at: int, flush_interval: float, max_retries: int, public_key: str, + media_manager: MediaManager, sdk_name: str, sdk_version: str, sdk_integration: str, @@ -77,15 +67,15 @@ def __init__( mask: Optional[MaskFunction] = None, ): """Create a consumer thread.""" - threading.Thread.__init__(self) - # Make consumer a daemon thread so that it doesn't block program exit - self.daemon = True - self._queue = queue + super().__init__() # It's important to set running in the constructor: if we are asked to # pause immediately after construction, we might set running to True in # run() *after* we set it to False in pause... and keep running # forever. 
self.running = True + # Make consumer a daemon thread so that it doesn't block program exit + self.daemon = True + self._ingestion_queue = ingestion_queue self._identifier = identifier self._client = client self._flush_at = flush_at @@ -97,73 +87,88 @@ def __init__( self._sdk_integration = sdk_integration self._mask = mask self._sampler = Sampler(sample_rate) + self._media_manager = media_manager def _next(self): """Return the next batch of items to upload.""" - queue = self._queue - items = [] + events = [] start_time = time.monotonic() total_size = 0 - while len(items) < self._flush_at: + while len(events) < self._flush_at: elapsed = time.monotonic() - start_time if elapsed >= self._flush_interval: break try: - item = queue.get(block=True, timeout=self._flush_interval - elapsed) + event = self._ingestion_queue.get( + block=True, timeout=self._flush_interval - elapsed + ) # convert pydantic models to dicts - if "body" in item and isinstance(item["body"], pydantic.BaseModel): - item["body"] = item["body"].dict(exclude_none=True) + if "body" in event and isinstance(event["body"], pydantic.BaseModel): + event["body"] = event["body"].dict(exclude_none=True) # sample event - if not self._sampler.sample_event(item): - queue.task_done() + if not self._sampler.sample_event(event): + self._ingestion_queue.task_done() continue + # handle multimodal data + self._media_manager.process_media_in_event(event) + # truncate item if it exceeds size limit item_size = self._truncate_item_in_place( - item=item, - max_size=MAX_MSG_SIZE, + event=event, + max_size=MAX_EVENT_SIZE_BYTES, log_message="", ) # apply mask - self._apply_mask_in_place(item) + self._apply_mask_in_place(event) # check for serialization errors try: - json.dumps(item, cls=EventSerializer) + json.dumps(event, cls=EventSerializer) except Exception as e: self._log.error(f"Error serializing item, skipping: {e}") - queue.task_done() + self._ingestion_queue.task_done() continue - items.append(item) + events.append(event) total_size += item_size - if total_size >= BATCH_SIZE_LIMIT: + if total_size >= MAX_BATCH_SIZE_BYTES: self._log.debug("hit batch size limit (size: %d)", total_size) break except Empty: break - self._log.debug("~%d items in the Langfuse queue", self._queue.qsize()) - return items + except Exception as e: + self._log.warning( + "Failed to process event in IngestionConsumer, skipping", + exc_info=e, + ) + self._ingestion_queue.task_done() + + self._log.debug( + "~%d items in the Langfuse queue", self._ingestion_queue.qsize() + ) + + return events def _truncate_item_in_place( self, *, - item: typing.Any, + event: Any, max_size: int, - log_message: typing.Optional[str] = None, + log_message: Optional[str] = None, ) -> int: """Truncate the item in place to fit within the size limit.""" - item_size = self._get_item_size(item) + item_size = self._get_item_size(event) self._log.debug(f"item size {item_size}") if item_size > max_size: @@ -172,14 +177,14 @@ def _truncate_item_in_place( item_size, ) - if "body" in item: + if "body" in event: drop_candidates = ["input", "output", "metadata"] sorted_field_sizes = sorted( [ ( field, - self._get_item_size((item["body"][field])) - if field in item["body"] + self._get_item_size((event["body"][field])) + if field in event["body"] else 0, ) for field in drop_candidates @@ -191,10 +196,10 @@ def _truncate_item_in_place( for _ in range(len(sorted_field_sizes)): field_to_drop, size_to_drop = sorted_field_sizes.pop() - if field_to_drop not in item["body"]: + if field_to_drop not in event["body"]: 
continue - item["body"][field_to_drop] = log_message + event["body"][field_to_drop] = log_message item_size -= size_to_drop self._log.debug( @@ -205,18 +210,18 @@ def _truncate_item_in_place( break # if item does not have body or input/output fields, drop the event - if "body" not in item or ( - "input" not in item["body"] and "output" not in item["body"] + if "body" not in event or ( + "input" not in event["body"] and "output" not in event["body"] ): self._log.warning( "Item does not have body or input/output fields, dropping item." ) - self._queue.task_done() + self._ingestion_queue.task_done() return 0 - return self._get_item_size(item) + return self._get_item_size(event) - def _get_item_size(self, item: typing.Any) -> int: + def _get_item_size(self, item: Any) -> int: """Return the size of the item in bytes.""" return len(json.dumps(item, cls=EventSerializer).encode()) @@ -235,7 +240,7 @@ def _apply_mask_in_place(self, event: dict): body[key] = "" def run(self): - """Runs the consumer.""" + """Run the consumer.""" self._log.debug("consumer is running...") while self.running: self.upload() @@ -253,7 +258,7 @@ def upload(self): finally: # mark items as acknowledged from queue for _ in batch: - self._queue.task_done() + self._ingestion_queue.task_done() def pause(self): """Pause the consumer.""" @@ -262,7 +267,7 @@ def pause(self): def _upload_batch(self, batch: List[Any]): self._log.debug("uploading batch of %d items", len(batch)) - metadata = LangfuseMetadata( + metadata = IngestionMetadata( batch_size=len(batch), sdk_integration=self._sdk_integration, sdk_name=self._sdk_name, @@ -287,134 +292,4 @@ def execute_task_with_backoff(batch: List[Any]): raise e execute_task_with_backoff(batch) - self._log.debug("successfully uploaded batch of %d items", len(batch)) - - -class TaskManager(object): - _log = logging.getLogger("langfuse") - _consumers: List[Consumer] - _enabled: bool - _threads: int - _max_task_queue_size: int - _queue: Queue - _client: LangfuseClient - _flush_at: int - _flush_interval: float - _max_retries: int - _public_key: str - _sdk_name: str - _sdk_version: str - _sdk_integration: str - _sample_rate: float - _mask: Optional[MaskFunction] - - def __init__( - self, - client: LangfuseClient, - flush_at: int, - flush_interval: float, - max_retries: int, - threads: int, - public_key: str, - sdk_name: str, - sdk_version: str, - sdk_integration: str, - enabled: bool = True, - max_task_queue_size: int = 100_000, - sample_rate: float = 1, - mask: Optional[MaskFunction] = None, - ): - self._max_task_queue_size = max_task_queue_size - self._threads = threads - self._queue = queue.Queue(self._max_task_queue_size) - self._consumers = [] - self._client = client - self._flush_at = flush_at - self._flush_interval = flush_interval - self._max_retries = max_retries - self._public_key = public_key - self._sdk_name = sdk_name - self._sdk_version = sdk_version - self._sdk_integration = sdk_integration - self._enabled = enabled - self._sample_rate = sample_rate - self._mask = mask - - self.init_resources() - - # cleans up when the python interpreter closes - atexit.register(self.join) - - def init_resources(self): - for i in range(self._threads): - consumer = Consumer( - queue=self._queue, - identifier=i, - client=self._client, - flush_at=self._flush_at, - flush_interval=self._flush_interval, - max_retries=self._max_retries, - public_key=self._public_key, - sdk_name=self._sdk_name, - sdk_version=self._sdk_version, - sdk_integration=self._sdk_integration, - sample_rate=self._sample_rate, - 
mask=self._mask, - ) - consumer.start() - self._consumers.append(consumer) - - def add_task(self, event: dict): - if not self._enabled: - return - - try: - event["timestamp"] = _get_timestamp() - - self._queue.put(event, block=False) - except queue.Full: - self._log.warning("analytics-python queue is full") - return False - except Exception as e: - self._log.exception(f"Exception in adding task {e}") - - return False - - def flush(self): - """Force a flush from the internal queue to the server.""" - self._log.debug("flushing queue") - queue = self._queue - size = queue.qsize() - queue.join() - # Note that this message may not be precise, because of threading. - self._log.debug("successfully flushed about %s items.", size) - - def join(self): - """End the consumer threads once the queue is empty. - - Blocks execution until finished - """ - self._log.debug(f"joining {len(self._consumers)} consumer threads") - - # pause all consumers before joining them so we don't have to wait for multiple - # flush intervals to join them all. - for consumer in self._consumers: - consumer.pause() - - for consumer in self._consumers: - try: - consumer.join() - except RuntimeError: - # consumer thread has not started - pass - - self._log.debug(f"consumer thread {consumer._identifier} joined") - - def shutdown(self): - """Flush all messages and cleanly shutdown the client.""" - self._log.debug("shutdown initiated") - - self.flush() - self.join() - - self._log.debug("shutdown completed") + self._log.debug("successfully uploaded batch of %d events", len(batch)) diff --git a/langfuse/_task_manager/media_manager.py b/langfuse/_task_manager/media_manager.py new file mode 100644 index 000000000..61e23ac60 --- /dev/null +++ b/langfuse/_task_manager/media_manager.py @@ -0,0 +1,244 @@ +import logging +import time +from queue import Empty, Queue +from typing import Any, Callable, Optional, TypeVar + +import backoff +import requests +from typing_extensions import ParamSpec + +from langfuse.api import GetMediaUploadUrlRequest, PatchMediaBody +from langfuse.api.client import FernLangfuse +from langfuse.api.core import ApiError +from langfuse.media import LangfuseMedia +from langfuse.utils import _get_timestamp + +from .media_upload_queue import UploadMediaJob + +T = TypeVar("T") +P = ParamSpec("P") + + +class MediaManager: + _log = logging.getLogger(__name__) + + def __init__( + self, + *, + api_client: FernLangfuse, + media_upload_queue: Queue, + max_retries: Optional[int] = 3, + ): + self._api_client = api_client + self._queue = media_upload_queue + self._max_retries = max_retries + + def process_next_media_upload(self): + try: + upload_job = self._queue.get(block=True, timeout=1) + self._log.debug(f"Processing upload for {upload_job['media_id']}") + self._process_upload_media_job(data=upload_job) + + self._queue.task_done() + except Empty: + self._log.debug("Media upload queue is empty") + pass + except Exception as e: + self._log.error(f"Error uploading media: {e}") + self._queue.task_done() + + def process_media_in_event(self, event: dict): + try: + if "body" not in event: + return + + body = event["body"] + trace_id = body.get("traceId", None) or ( + body.get("id", None) + if "type" in event and "trace" in event["type"] + else None + ) + + if trace_id is None: + raise ValueError("trace_id is required for media upload") + + observation_id = ( + body.get("id", None) + if "type" in event + and ("generation" in event["type"] or "span" in event["type"]) + else None + ) + + multimodal_fields = ["input", "output", 
"metadata"] + + for field in multimodal_fields: + if field in body: + processed_data = self._find_and_process_media( + data=body[field], + trace_id=trace_id, + observation_id=observation_id, + field=field, + ) + + body[field] = processed_data + + except Exception as e: + self._log.error(f"Error processing multimodal event: {e}") + + def _find_and_process_media( + self, + *, + data: Any, + trace_id: str, + observation_id: Optional[str], + field: str, + ): + seen = set() + max_levels = 10 + + def _process_data_recursively(data: Any, level: int): + if id(data) in seen or level > max_levels: + return data + + seen.add(id(data)) + + if isinstance(data, LangfuseMedia): + self._process_media( + media=data, + trace_id=trace_id, + observation_id=observation_id, + field=field, + ) + + return data + + if isinstance(data, str) and data.startswith("data:"): + media = LangfuseMedia( + obj=data, + base64_data_uri=data, + ) + + self._process_media( + media=media, + trace_id=trace_id, + observation_id=observation_id, + field=field, + ) + + return media + + if isinstance(data, list): + return [_process_data_recursively(item, level + 1) for item in data] + + if isinstance(data, dict): + return { + key: _process_data_recursively(value, level + 1) + for key, value in data.items() + } + + return data + + return _process_data_recursively(data, 1) + + def _process_media( + self, + *, + media: LangfuseMedia, + trace_id: str, + observation_id: Optional[str], + field: str, + ): + if ( + media._content_length is None + or media._content_type is None + or media._content_sha256_hash is None + or media._content_bytes is None + ): + return + + upload_url_response = self._request_with_backoff( + self._api_client.media.get_upload_url, + request=GetMediaUploadUrlRequest( + contentLength=media._content_length, + contentType=media._content_type, + sha256Hash=media._content_sha256_hash, + field=field, + traceId=trace_id, + observationId=observation_id, + ), + ) + + upload_url = upload_url_response.upload_url + media._media_id = upload_url_response.media_id # Important as this is will be used in the media reference string in serializer + + if upload_url is not None: + self._log.debug(f"Scheduling upload for {media._media_id}") + self._queue.put( + item={ + "upload_url": upload_url, + "media_id": media._media_id, + "content_bytes": media._content_bytes, + "content_type": media._content_type, + "content_sha256_hash": media._content_sha256_hash, + }, + block=True, + timeout=1, + ) + + else: + self._log.debug(f"Media {media._media_id} already uploaded") + + def _process_upload_media_job( + self, + *, + data: UploadMediaJob, + ): + upload_start_time = time.time() + upload_response = self._request_with_backoff( + requests.put, + data["upload_url"], + headers={ + "Content-Type": data["content_type"], + "x-amz-checksum-sha256": data["content_sha256_hash"], + }, + data=data["content_bytes"], + ) + upload_time_ms = int((time.time() - upload_start_time) * 1000) + + self._request_with_backoff( + self._api_client.media.patch, + media_id=data["media_id"], + request=PatchMediaBody( + uploadedAt=_get_timestamp(), + uploadHttpStatus=upload_response.status_code, + uploadHttpError=upload_response.text, + uploadTimeMs=upload_time_ms, + ), + ) + + self._log.debug( + f"Media upload completed for {data['media_id']} in {upload_time_ms}ms" + ) + + def _request_with_backoff( + self, func: Callable[P, T], *args: P.args, **kwargs: P.kwargs + ) -> T: + @backoff.on_exception( + backoff.expo, Exception, max_tries=self._max_retries, logger=None + ) + def 
execute_task_with_backoff() -> T: + try: + return func(*args, **kwargs) + except ApiError as e: + if ( + e.status_code is not None + and 400 <= e.status_code < 500 + and (e.status_code) != 429 + ): + raise e + except Exception as e: + raise e + + raise Exception("Failed to execute task") + + return execute_task_with_backoff() diff --git a/langfuse/_task_manager/media_upload_consumer.py b/langfuse/_task_manager/media_upload_consumer.py new file mode 100644 index 000000000..544a0a3c6 --- /dev/null +++ b/langfuse/_task_manager/media_upload_consumer.py @@ -0,0 +1,39 @@ +import logging +import threading + +from .media_manager import MediaManager + + +class MediaUploadConsumer(threading.Thread): + _log = logging.getLogger(__name__) + _identifier: int + _max_retries: int + _media_manager: MediaManager + + def __init__( + self, + *, + identifier: int, + media_manager: MediaManager, + ): + """Create a consumer thread.""" + super().__init__() + # Make consumer a daemon thread so that it doesn't block program exit + self.daemon = True + # It's important to set running in the constructor: if we are asked to + # pause immediately after construction, we might set running to True in + # run() *after* we set it to False in pause... and keep running + # forever. + self.running = True + self._identifier = identifier + self._media_manager = media_manager + + def run(self): + """Run the media upload consumer.""" + self._log.debug("consumer is running...") + while self.running: + self._media_manager.process_next_media_upload() + + def pause(self): + """Pause the media upload consumer.""" + self.running = False diff --git a/langfuse/_task_manager/media_upload_queue.py b/langfuse/_task_manager/media_upload_queue.py new file mode 100644 index 000000000..912af2546 --- /dev/null +++ b/langfuse/_task_manager/media_upload_queue.py @@ -0,0 +1,9 @@ +from typing import TypedDict + + +class UploadMediaJob(TypedDict): + upload_url: str + media_id: str + content_type: str + content_bytes: bytes + content_sha256_hash: str diff --git a/langfuse/_task_manager/task_manager.py b/langfuse/_task_manager/task_manager.py new file mode 100644 index 000000000..5b5a7c91a --- /dev/null +++ b/langfuse/_task_manager/task_manager.py @@ -0,0 +1,197 @@ +"""@private""" + +import atexit +import logging +import queue +from queue import Queue +from typing import List, Optional + +from langfuse.api.client import FernLangfuse +from langfuse.request import LangfuseClient +from langfuse.types import MaskFunction +from langfuse.utils import _get_timestamp + +from .ingestion_consumer import IngestionConsumer +from .media_manager import MediaManager +from .media_upload_consumer import MediaUploadConsumer + + +class TaskManager(object): + _log = logging.getLogger(__name__) + _ingestion_consumers: List[IngestionConsumer] + _enabled: bool + _threads: int + _max_task_queue_size: int + _ingestion_queue: Queue + _media_upload_queue: Queue + _client: LangfuseClient + _api_client: FernLangfuse + _flush_at: int + _flush_interval: float + _max_retries: int + _public_key: str + _sdk_name: str + _sdk_version: str + _sdk_integration: str + _sample_rate: float + _mask: Optional[MaskFunction] + + def __init__( + self, + *, + client: LangfuseClient, + api_client: FernLangfuse, + flush_at: int, + flush_interval: float, + max_retries: int, + threads: int, + public_key: str, + sdk_name: str, + sdk_version: str, + sdk_integration: str, + enabled: bool = True, + max_task_queue_size: int = 100_000, + sample_rate: float = 1, + mask: Optional[MaskFunction] = None, + ): + 
self._max_task_queue_size = max_task_queue_size + self._threads = threads + self._ingestion_queue = queue.Queue(self._max_task_queue_size) + self._media_upload_queue = Queue(self._max_task_queue_size) + self._media_manager = MediaManager( + api_client=api_client, + media_upload_queue=self._media_upload_queue, + max_retries=max_retries, + ) + self._ingestion_consumers = [] + self._media_upload_consumers = [] + self._client = client + self._api_client = api_client + self._flush_at = flush_at + self._flush_interval = flush_interval + self._max_retries = max_retries + self._public_key = public_key + self._sdk_name = sdk_name + self._sdk_version = sdk_version + self._sdk_integration = sdk_integration + self._enabled = enabled + self._sample_rate = sample_rate + self._mask = mask + + self.init_resources() + + # cleans up when the python interpreter closes + atexit.register(self.join) + + def init_resources(self): + for i in range(self._threads): + ingestion_consumer = IngestionConsumer( + ingestion_queue=self._ingestion_queue, + identifier=i, + client=self._client, + media_manager=self._media_manager, + flush_at=self._flush_at, + flush_interval=self._flush_interval, + max_retries=self._max_retries, + public_key=self._public_key, + sdk_name=self._sdk_name, + sdk_version=self._sdk_version, + sdk_integration=self._sdk_integration, + sample_rate=self._sample_rate, + mask=self._mask, + ) + ingestion_consumer.start() + self._ingestion_consumers.append(ingestion_consumer) + + for i in range(self._threads): + media_upload_consumer = MediaUploadConsumer( + identifier=i, + media_manager=self._media_manager, + ) + media_upload_consumer.start() + self._media_upload_consumers.append(media_upload_consumer) + + def add_task(self, event: dict): + if not self._enabled: + return + + try: + event["timestamp"] = _get_timestamp() + + self._ingestion_queue.put(event, block=False) + except queue.Full: + self._log.warning("Langfuse ingestion queue is full") + return False + except Exception as e: + self._log.exception(f"Exception in adding task {e}") + + return False + + def flush(self): + """Force a flush from the internal queue to the server.""" + self._log.debug("flushing ingestion and media upload queues") + + # Ingestion queue + ingestion_queue_size = self._ingestion_queue.qsize() + self._ingestion_queue.join() + self._log.debug( + f"Successfully flushed ~{ingestion_queue_size} items from ingestion queue" + ) + + # Media upload queue + media_upload_queue_size = self._media_upload_queue.qsize() + self._media_upload_queue.join() + self._log.debug( + f"Successfully flushed ~{media_upload_queue_size} items from media upload queue" + ) + + def join(self): + """End the consumer threads once the queue is empty. + + Blocks execution until finished. + """ + self._log.debug( + f"joining {len(self._ingestion_consumers)} ingestion consumer threads" + ) + + # pause all consumers before joining them so we don't have to wait for multiple + # flush intervals to join them all.
+ for ingestion_consumer in self._ingestion_consumers: + ingestion_consumer.pause() + + for ingestion_consumer in self._ingestion_consumers: + try: + ingestion_consumer.join() + except RuntimeError: + # consumer thread has not started + pass + + self._log.debug( + f"IngestionConsumer thread {ingestion_consumer._identifier} joined" + ) + + self._log.debug( + f"joining {len(self._media_upload_consumers)} media upload consumer threads" + ) + for media_upload_consumer in self._media_upload_consumers: + media_upload_consumer.pause() + + for media_upload_consumer in self._media_upload_consumers: + try: + media_upload_consumer.join() + except RuntimeError: + # consumer thread has not started + pass + + self._log.debug( + f"MediaUploadConsumer thread {media_upload_consumer._identifier} joined" + ) + + def shutdown(self): + """Flush all messages and cleanly shutdown the client.""" + self._log.debug("shutdown initiated") + + self.flush() + self.join() + + self._log.debug("shutdown completed") diff --git a/langfuse/api/README.md b/langfuse/api/README.md index 5a483dc3e..4087db553 100644 --- a/langfuse/api/README.md +++ b/langfuse/api/README.md @@ -16,7 +16,7 @@ pip install finto Instantiate and use the client with the following: ```python -from finto import CreateDatasetItemRequest, DatasetStatus +from finto import CreateCommentRequest from finto.client import FernLangfuse client = FernLangfuse( @@ -27,16 +27,13 @@ client = FernLangfuse( password="YOUR_PASSWORD", base_url="https://yourhost.com/path/to/api", ) -client.dataset_items.create( - request=CreateDatasetItemRequest( - dataset_name="string", - input={"key": "value"}, - expected_output={"key": "value"}, - metadata={"key": "value"}, - source_trace_id="string", - source_observation_id="string", - id="string", - status=DatasetStatus.ACTIVE, +client.comments.create( + request=CreateCommentRequest( + project_id="string", + object_type="string", + object_id="string", + content="string", + author_user_id="string", ), ) ``` @@ -48,7 +45,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t ```python import asyncio -from finto import CreateDatasetItemRequest, DatasetStatus +from finto import CreateCommentRequest from finto.client import AsyncFernLangfuse client = AsyncFernLangfuse( @@ -62,16 +59,13 @@ client = AsyncFernLangfuse( async def main() -> None: - await client.dataset_items.create( - request=CreateDatasetItemRequest( - dataset_name="string", - input={"key": "value"}, - expected_output={"key": "value"}, - metadata={"key": "value"}, - source_trace_id="string", - source_observation_id="string", - id="string", - status=DatasetStatus.ACTIVE, + await client.comments.create( + request=CreateCommentRequest( + project_id="string", + object_type="string", + object_id="string", + content="string", + author_user_id="string", ), ) @@ -88,7 +82,7 @@ will be thrown. from .api_error import ApiError try: - client.dataset_items.create(...) + client.comments.create(...) except ApiError as e: print(e.status_code) print(e.body) @@ -111,7 +105,7 @@ A request is deemed retriable when any of the following HTTP status codes is ret Use the `max_retries` request option to configure this behavior. 
```python -client.dataset_items.create(...,{ +client.comments.create(...,{ max_retries=1 }) ``` @@ -128,7 +122,7 @@ client = FernLangfuse(..., { timeout=20.0 }, ) # Override timeout for a specific method -client.dataset_items.create(...,{ +client.comments.create(...,{ timeout_in_seconds=1 }) ``` diff --git a/langfuse/api/__init__.py b/langfuse/api/__init__.py index 0530b2c76..df6daeace 100644 --- a/langfuse/api/__init__.py +++ b/langfuse/api/__init__.py @@ -9,8 +9,12 @@ CategoricalScore, ChatMessage, ChatPrompt, + Comment, + CommentObjectType, ConfigCategory, CreateChatPromptRequest, + CreateCommentRequest, + CreateCommentResponse, CreateDatasetItemRequest, CreateDatasetRequest, CreateDatasetRunItemRequest, @@ -39,6 +43,19 @@ DatasetRunWithItems, DatasetStatus, Error, + GetCommentsResponse, + GetMediaResponse, + GetMediaUploadUrlRequest, + GetMediaUploadUrlResponse, + GetScoresResponse, + GetScoresResponseData, + GetScoresResponseDataBoolean, + GetScoresResponseDataCategorical, + GetScoresResponseDataNumeric, + GetScoresResponseData_Boolean, + GetScoresResponseData_Categorical, + GetScoresResponseData_Numeric, + GetScoresResponseTraceData, HealthResponse, IngestionError, IngestionEvent, @@ -56,6 +73,7 @@ IngestionSuccess, IngestionUsage, MapValue, + MediaContentType, MethodNotAllowedError, Model, ModelUsageUnit, @@ -75,6 +93,7 @@ PaginatedDatasets, PaginatedModels, PaginatedSessions, + PatchMediaBody, Project, Projects, Prompt, @@ -92,7 +111,6 @@ Score_Boolean, Score_Categorical, Score_Numeric, - Scores, SdkLogBody, SdkLogEvent, ServiceUnavailableError, @@ -115,12 +133,14 @@ UpdateSpanEvent, Usage, UsageByModel, + comments, commons, dataset_items, dataset_run_items, datasets, health, ingestion, + media, metrics, models, observations, @@ -142,8 +162,12 @@ "CategoricalScore", "ChatMessage", "ChatPrompt", + "Comment", + "CommentObjectType", "ConfigCategory", "CreateChatPromptRequest", + "CreateCommentRequest", + "CreateCommentResponse", "CreateDatasetItemRequest", "CreateDatasetRequest", "CreateDatasetRunItemRequest", @@ -172,6 +196,19 @@ "DatasetRunWithItems", "DatasetStatus", "Error", + "GetCommentsResponse", + "GetMediaResponse", + "GetMediaUploadUrlRequest", + "GetMediaUploadUrlResponse", + "GetScoresResponse", + "GetScoresResponseData", + "GetScoresResponseDataBoolean", + "GetScoresResponseDataCategorical", + "GetScoresResponseDataNumeric", + "GetScoresResponseData_Boolean", + "GetScoresResponseData_Categorical", + "GetScoresResponseData_Numeric", + "GetScoresResponseTraceData", "HealthResponse", "IngestionError", "IngestionEvent", @@ -189,6 +226,7 @@ "IngestionSuccess", "IngestionUsage", "MapValue", + "MediaContentType", "MethodNotAllowedError", "Model", "ModelUsageUnit", @@ -208,6 +246,7 @@ "PaginatedDatasets", "PaginatedModels", "PaginatedSessions", + "PatchMediaBody", "Project", "Projects", "Prompt", @@ -225,7 +264,6 @@ "Score_Boolean", "Score_Categorical", "Score_Numeric", - "Scores", "SdkLogBody", "SdkLogEvent", "ServiceUnavailableError", @@ -248,12 +286,14 @@ "UpdateSpanEvent", "Usage", "UsageByModel", + "comments", "commons", "dataset_items", "dataset_run_items", "datasets", "health", "ingestion", + "media", "metrics", "models", "observations", diff --git a/langfuse/api/client.py b/langfuse/api/client.py index 4df0f19e0..da24da20f 100644 --- a/langfuse/api/client.py +++ b/langfuse/api/client.py @@ -5,11 +5,16 @@ import httpx from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .resources.comments.client import AsyncCommentsClient, CommentsClient from 
.resources.dataset_items.client import AsyncDatasetItemsClient, DatasetItemsClient -from .resources.dataset_run_items.client import AsyncDatasetRunItemsClient, DatasetRunItemsClient +from .resources.dataset_run_items.client import ( + AsyncDatasetRunItemsClient, + DatasetRunItemsClient, +) from .resources.datasets.client import AsyncDatasetsClient, DatasetsClient from .resources.health.client import AsyncHealthClient, HealthClient from .resources.ingestion.client import AsyncIngestionClient, IngestionClient +from .resources.media.client import AsyncMediaClient, MediaClient from .resources.metrics.client import AsyncMetricsClient, MetricsClient from .resources.models.client import AsyncModelsClient, ModelsClient from .resources.observations.client import AsyncObservationsClient, ObservationsClient @@ -69,9 +74,11 @@ def __init__( password: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None, timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.Client] = None + httpx_client: typing.Optional[httpx.Client] = None, ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) self._client_wrapper = SyncClientWrapper( base_url=base_url, x_langfuse_sdk_name=x_langfuse_sdk_name, @@ -81,16 +88,22 @@ def __init__( password=password, httpx_client=httpx_client if httpx_client is not None - else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + else httpx.Client( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) if follow_redirects is not None else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) + self.comments = CommentsClient(client_wrapper=self._client_wrapper) self.dataset_items = DatasetItemsClient(client_wrapper=self._client_wrapper) - self.dataset_run_items = DatasetRunItemsClient(client_wrapper=self._client_wrapper) + self.dataset_run_items = DatasetRunItemsClient( + client_wrapper=self._client_wrapper + ) self.datasets = DatasetsClient(client_wrapper=self._client_wrapper) self.health = HealthClient(client_wrapper=self._client_wrapper) self.ingestion = IngestionClient(client_wrapper=self._client_wrapper) + self.media = MediaClient(client_wrapper=self._client_wrapper) self.metrics = MetricsClient(client_wrapper=self._client_wrapper) self.models = ModelsClient(client_wrapper=self._client_wrapper) self.observations = ObservationsClient(client_wrapper=self._client_wrapper) @@ -150,9 +163,11 @@ def __init__( password: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None, timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None + httpx_client: typing.Optional[httpx.AsyncClient] = None, ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) self._client_wrapper = AsyncClientWrapper( base_url=base_url, x_langfuse_sdk_name=x_langfuse_sdk_name, @@ -162,22 +177,32 @@ def __init__( password=password, httpx_client=httpx_client if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + else httpx.AsyncClient( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) if follow_redirects is not None 
else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.dataset_items = AsyncDatasetItemsClient(client_wrapper=self._client_wrapper) - self.dataset_run_items = AsyncDatasetRunItemsClient(client_wrapper=self._client_wrapper) + self.comments = AsyncCommentsClient(client_wrapper=self._client_wrapper) + self.dataset_items = AsyncDatasetItemsClient( + client_wrapper=self._client_wrapper + ) + self.dataset_run_items = AsyncDatasetRunItemsClient( + client_wrapper=self._client_wrapper + ) self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) self.health = AsyncHealthClient(client_wrapper=self._client_wrapper) self.ingestion = AsyncIngestionClient(client_wrapper=self._client_wrapper) + self.media = AsyncMediaClient(client_wrapper=self._client_wrapper) self.metrics = AsyncMetricsClient(client_wrapper=self._client_wrapper) self.models = AsyncModelsClient(client_wrapper=self._client_wrapper) self.observations = AsyncObservationsClient(client_wrapper=self._client_wrapper) self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper) self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) - self.score_configs = AsyncScoreConfigsClient(client_wrapper=self._client_wrapper) + self.score_configs = AsyncScoreConfigsClient( + client_wrapper=self._client_wrapper + ) self.score = AsyncScoreClient(client_wrapper=self._client_wrapper) self.sessions = AsyncSessionsClient(client_wrapper=self._client_wrapper) self.trace = AsyncTraceClient(client_wrapper=self._client_wrapper) diff --git a/langfuse/api/reference.md b/langfuse/api/reference.md index 05a51edf5..068e8bce5 100644 --- a/langfuse/api/reference.md +++ b/langfuse/api/reference.md @@ -1,4 +1,273 @@ # Reference +## Comments +
### client.comments.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a comment. Comments may be attached to different object types (trace, observation, session, prompt). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from finto import CreateCommentRequest +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.comments.create( + request=CreateCommentRequest( + project_id="string", + object_type="string", + object_id="string", + content="string", + author_user_id="string", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateCommentRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
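For a more concrete variant of the generated snippet above, the sketch below attaches a comment to a trace. The `"TRACE"` object-type spelling and the `my-*` identifiers are illustrative assumptions; the description above only states that trace, observation, session, and prompt objects are supported.

```python
from finto import CreateCommentRequest
from finto.client import FernLangfuse

client = FernLangfuse(
    x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
    x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
    x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
    username="YOUR_USERNAME",
    password="YOUR_PASSWORD",
    base_url="https://yourhost.com/path/to/api",
)

# Attach a note to an existing trace. "TRACE" is an assumed object-type
# spelling; "my-project-id" and "my-trace-id" are hypothetical identifiers.
client.comments.create(
    request=CreateCommentRequest(
        project_id="my-project-id",
        object_type="TRACE",
        object_id="my-trace-id",
        content="Output looks truncated; check the max_tokens setting.",
        author_user_id="user-123",
    ),
)
```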
### client.comments.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all comments +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.comments.get( + page=1, + limit=1, + object_type="string", + object_id="string", + author_user_id="string", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1. + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter API issues due to too large page sizes, try reducing the limit. + +
+
+ +
+
+ +**object_type:** `typing.Optional[str]` — Filter comments by object type (trace, observation, session, prompt). + +
+
+ +
+
+ +**object_id:** `typing.Optional[str]` — Filter comments by object id. If objectType is not provided, an error will be thrown. + +
+
+ +
+
+ +**author_user_id:** `typing.Optional[str]` — Filter comments by author user id. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
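One constraint from the parameters above is easy to miss: `object_id` is only valid together with `object_type`. A minimal sketch that lists the comments on a single trace, assuming the same `"TRACE"` object-type spelling as above:

```python
from finto.client import FernLangfuse

client = FernLangfuse(
    x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
    x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
    x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
    username="YOUR_USERNAME",
    password="YOUR_PASSWORD",
    base_url="https://yourhost.com/path/to/api",
)

# object_id must always be paired with object_type: passing object_id on
# its own makes the endpoint return an error (see the parameter notes above).
response = client.comments.get(
    object_type="TRACE",  # assumed object-type spelling
    object_id="my-trace-id",
    page=1,
    limit=50,
)
```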
### client.comments.get_by_id(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a comment by id +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.comments.get_by_id( + comment_id="string", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**comment_id:** `str` — The unique langfuse identifier of a comment + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
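To close out the Comments section, a short round trip across the endpoints. It assumes `CreateCommentResponse` exposes the created comment's identifier as `.id` and that the returned `Comment` carries a `.content` field; both names are assumptions, not confirmed by the snippets above.

```python
from finto import CreateCommentRequest
from finto.client import FernLangfuse

client = FernLangfuse(
    x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
    x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
    x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
    username="YOUR_USERNAME",
    password="YOUR_PASSWORD",
    base_url="https://yourhost.com/path/to/api",
)

created = client.comments.create(
    request=CreateCommentRequest(
        project_id="my-project-id",
        object_type="PROMPT",  # assumed object-type spelling
        object_id="my-prompt-id",
        content="Consider tightening the system prompt.",
        author_user_id="user-123",
    ),
)

# Fetch the comment back by the id returned from create (assumed field name).
comment = client.comments.get_by_id(comment_id=created.id)
print(comment.content)
```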
+ ## DatasetItems
### client.dataset_items.create(...)
@@ -949,6 +1218,258 @@ client.ingestion.batch(
+ + +
+ +## Media +
### client.media.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.media.get( + media_id="string", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**media_id:** `str` — The unique langfuse identifier of a media record + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
### client.media.patch(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Patch a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +import datetime + +from finto import PatchMediaBody +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.media.patch( + media_id="string", + request=PatchMediaBody( + uploaded_at=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + upload_http_status=1, + upload_http_error="string", + upload_time_ms=1, + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**media_id:** `str` — The unique langfuse identifier of a media record + +
+
+ +
+
+ +**request:** `PatchMediaBody` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
### client.media.get_upload_url(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a presigned upload URL for a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from finto import GetMediaUploadUrlRequest +from finto.client import FernLangfuse + +client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", +) +client.media.get_upload_url( + request=GetMediaUploadUrlRequest( + trace_id="string", + observation_id="string", + content_length=1, + sha_256_hash="string", + field="string", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `GetMediaUploadUrlRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
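The three media endpoints above are designed to be chained, mirroring what `MediaManager._process_upload_media_job` does earlier in this diff: request a presigned URL, PUT the raw bytes with an `x-amz-checksum-sha256` header, then patch the record with the upload outcome. The sketch below makes a few assumptions: that `GetMediaUploadUrlRequest` also accepts `content_type` (the generated example above omits it, but the `MediaManager` code passes `contentType`), that the checksum is the base64-encoded SHA-256 of the payload, and that `upload_url` comes back as `None` when content with the same hash was already uploaded.

```python
import base64
import datetime
import hashlib
import time

import requests

from finto import GetMediaUploadUrlRequest, PatchMediaBody
from finto.client import FernLangfuse

client = FernLangfuse(
    x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
    x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
    x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
    username="YOUR_USERNAME",
    password="YOUR_PASSWORD",
    base_url="https://yourhost.com/path/to/api",
)

content = open("chart.png", "rb").read()  # hypothetical local file
sha256 = base64.b64encode(hashlib.sha256(content).digest()).decode()

# Step 1: request a presigned upload URL; records are deduplicated by hash.
upload = client.media.get_upload_url(
    request=GetMediaUploadUrlRequest(
        trace_id="my-trace-id",
        content_length=len(content),
        content_type="image/png",  # assumed parameter, see note above
        sha_256_hash=sha256,
        field="input",
    ),
)

if upload.upload_url is not None:
    # Step 2: PUT the raw bytes to the presigned URL with the checksum header.
    start = time.time()
    res = requests.put(
        upload.upload_url,
        headers={
            "Content-Type": "image/png",
            "x-amz-checksum-sha256": sha256,
        },
        data=content,
    )

    # Step 3: report the upload outcome so the media record can be finalized.
    client.media.patch(
        media_id=upload.media_id,
        request=PatchMediaBody(
            uploaded_at=datetime.datetime.now(datetime.timezone.utc),
            upload_http_status=res.status_code,
            upload_http_error=res.text,
            upload_time_ms=int((time.time() - start) * 1000),
        ),
    )
```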
@@ -2419,7 +2940,9 @@ client.score.get( value=1.1, score_ids="string", config_id="string", + queue_id="string", data_type=ScoreDataType.NUMERIC, + trace_tags=["string"], ) ``` @@ -2524,6 +3047,14 @@ client.score.get(
+**queue_id:** `typing.Optional[str]` — Retrieve only scores with a specific annotation queueId. + +
+
+ +
+
+ +**data_type:** `typing.Optional[ScoreDataType]` — Retrieve only scores with a specific dataType.
@@ -2532,6 +3063,16 @@ client.score.get(
+**trace_tags:** `typing.Optional[ + typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]] +]` — Only scores linked to traces that include all of these tags will be returned. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +
diff --git a/langfuse/api/resources/__init__.py b/langfuse/api/resources/__init__.py index 330d54aaa..4a6cc0319 100644 --- a/langfuse/api/resources/__init__.py +++ b/langfuse/api/resources/__init__.py @@ -1,12 +1,14 @@ # This file was auto-generated by Fern from our API Definition. from . import ( + comments, commons, dataset_items, dataset_run_items, datasets, health, ingestion, + media, metrics, models, observations, @@ -18,11 +20,14 @@ trace, utils, ) +from .comments import CreateCommentRequest, CreateCommentResponse, GetCommentsResponse from .commons import ( AccessDeniedError, BaseScore, BooleanScore, CategoricalScore, + Comment, + CommentObjectType, ConfigCategory, CreateScoreValue, Dataset, @@ -101,6 +106,13 @@ UpdateSpanBody, UpdateSpanEvent, ) +from .media import ( + GetMediaResponse, + GetMediaUploadUrlRequest, + GetMediaUploadUrlResponse, + MediaContentType, + PatchMediaBody, +) from .metrics import DailyMetrics, DailyMetricsDetails, UsageByModel from .models import CreateModelRequest, PaginatedModels from .observations import Observations, ObservationsViews @@ -121,7 +133,19 @@ Prompt_Text, TextPrompt, ) -from .score import CreateScoreRequest, CreateScoreResponse, Scores +from .score import ( + CreateScoreRequest, + CreateScoreResponse, + GetScoresResponse, + GetScoresResponseData, + GetScoresResponseDataBoolean, + GetScoresResponseDataCategorical, + GetScoresResponseDataNumeric, + GetScoresResponseData_Boolean, + GetScoresResponseData_Categorical, + GetScoresResponseData_Numeric, + GetScoresResponseTraceData, +) from .score_configs import CreateScoreConfigRequest, ScoreConfigs from .sessions import PaginatedSessions from .trace import Sort, Traces @@ -135,8 +159,12 @@ "CategoricalScore", "ChatMessage", "ChatPrompt", + "Comment", + "CommentObjectType", "ConfigCategory", "CreateChatPromptRequest", + "CreateCommentRequest", + "CreateCommentResponse", "CreateDatasetItemRequest", "CreateDatasetRequest", "CreateDatasetRunItemRequest", @@ -165,6 +193,19 @@ "DatasetRunWithItems", "DatasetStatus", "Error", + "GetCommentsResponse", + "GetMediaResponse", + "GetMediaUploadUrlRequest", + "GetMediaUploadUrlResponse", + "GetScoresResponse", + "GetScoresResponseData", + "GetScoresResponseDataBoolean", + "GetScoresResponseDataCategorical", + "GetScoresResponseDataNumeric", + "GetScoresResponseData_Boolean", + "GetScoresResponseData_Categorical", + "GetScoresResponseData_Numeric", + "GetScoresResponseTraceData", "HealthResponse", "IngestionError", "IngestionEvent", @@ -182,6 +223,7 @@ "IngestionSuccess", "IngestionUsage", "MapValue", + "MediaContentType", "MethodNotAllowedError", "Model", "ModelUsageUnit", @@ -201,6 +243,7 @@ "PaginatedDatasets", "PaginatedModels", "PaginatedSessions", + "PatchMediaBody", "Project", "Projects", "Prompt", @@ -218,7 +261,6 @@ "Score_Boolean", "Score_Categorical", "Score_Numeric", - "Scores", "SdkLogBody", "SdkLogEvent", "ServiceUnavailableError", @@ -241,12 +283,14 @@ "UpdateSpanEvent", "Usage", "UsageByModel", + "comments", "commons", "dataset_items", "dataset_run_items", "datasets", "health", "ingestion", + "media", "metrics", "models", "observations", diff --git a/langfuse/api/resources/comments/__init__.py b/langfuse/api/resources/comments/__init__.py new file mode 100644 index 000000000..e40c8546f --- /dev/null +++ b/langfuse/api/resources/comments/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import CreateCommentRequest, CreateCommentResponse, GetCommentsResponse + +__all__ = ["CreateCommentRequest", "CreateCommentResponse", "GetCommentsResponse"] diff --git a/langfuse/api/resources/comments/client.py b/langfuse/api/resources/comments/client.py new file mode 100644 index 000000000..5c17f1a7c --- /dev/null +++ b/langfuse/api/resources/comments/client.py @@ -0,0 +1,534 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.jsonable_encoder import jsonable_encoder +from ...core.pydantic_utilities import pydantic_v1 +from ...core.request_options import RequestOptions +from ..commons.errors.access_denied_error import AccessDeniedError +from ..commons.errors.error import Error +from ..commons.errors.method_not_allowed_error import MethodNotAllowedError +from ..commons.errors.not_found_error import NotFoundError +from ..commons.errors.unauthorized_error import UnauthorizedError +from ..commons.types.comment import Comment +from .types.create_comment_request import CreateCommentRequest +from .types.create_comment_response import CreateCommentResponse +from .types.get_comments_response import GetCommentsResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class CommentsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def create( + self, + *, + request: CreateCommentRequest, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateCommentResponse: + """ + Create a comment. Comments may be attached to different object types (trace, observation, session, prompt). + + Parameters + ---------- + request : CreateCommentRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateCommentResponse + + Examples + -------- + from finto import CreateCommentRequest + from finto.client import FernLangfuse + + client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + client.comments.create( + request=CreateCommentRequest( + project_id="string", + object_type="string", + object_id="string", + content="string", + author_user_id="string", + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "api/public/comments", + method="POST", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(CreateCommentResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + *, + page: typing.Optional[int] = None, + limit: typing.Optional[int] = None, + object_type: typing.Optional[str] = None, + object_id: typing.Optional[str] = None, + author_user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetCommentsResponse: + """ + Get all comments + + Parameters + ---------- + page : typing.Optional[int] + Page number, starts at 1. + + limit : typing.Optional[int] + Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit + + object_type : typing.Optional[str] + Filter comments by object type (trace, observation, session, prompt). + + object_id : typing.Optional[str] + Filter comments by object id. If objectType is not provided, an error will be thrown. + + author_user_id : typing.Optional[str] + Filter comments by author user id. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetCommentsResponse + + Examples + -------- + from finto.client import FernLangfuse + + client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + client.comments.get( + page=1, + limit=1, + object_type="string", + object_id="string", + author_user_id="string", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "api/public/comments", + method="GET", + params={ + "page": page, + "limit": limit, + "objectType": object_type, + "objectId": object_id, + "authorUserId": author_user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(GetCommentsResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_by_id( + self, + comment_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> Comment: + """ + Get a comment by id + + Parameters + ---------- + comment_id : str + The unique langfuse identifier of a comment + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Comment + + Examples + -------- + from finto.client import FernLangfuse + + client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + client.comments.get_by_id( + comment_id="string", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/public/comments/{jsonable_encoder(comment_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(Comment, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncCommentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def create( + self, + *, + request: CreateCommentRequest, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateCommentResponse: + """ + Create a comment. Comments may be attached to different object types (trace, observation, session, prompt). + + Parameters + ---------- + request : CreateCommentRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateCommentResponse + + Examples + -------- + import asyncio + + from finto import CreateCommentRequest + from finto.client import AsyncFernLangfuse + + client = AsyncFernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.comments.create( + request=CreateCommentRequest( + project_id="string", + object_type="string", + object_id="string", + content="string", + author_user_id="string", + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "api/public/comments", + method="POST", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(CreateCommentResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + *, + page: typing.Optional[int] = None, + limit: typing.Optional[int] = None, + object_type: typing.Optional[str] = None, + object_id: typing.Optional[str] = None, + author_user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetCommentsResponse: + """ + Get all comments + + Parameters + ---------- + page : typing.Optional[int] + Page number, starts at 1. + + limit : typing.Optional[int] + Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit + + object_type : typing.Optional[str] + Filter comments by object type (trace, observation, session, prompt). + + object_id : typing.Optional[str] + Filter comments by object id. If objectType is not provided, an error will be thrown. + + author_user_id : typing.Optional[str] + Filter comments by author user id. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
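The async client mirrors the sync surface one-to-one; its payoff is concurrent I/O. A small sketch, assuming `client` is an `AsyncFernLangfuse` instance and the comment ids are placeholders:

```python
import asyncio

async def fetch_all(comment_ids: list[str]):
    # Issue the lookups concurrently instead of awaiting them one by one.
    return await asyncio.gather(
        *(client.comments.get_by_id(comment_id=cid) for cid in comment_ids)
    )

comments = asyncio.run(fetch_all(["comment-1", "comment-2", "comment-3"]))
```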
+ + Returns + ------- + GetCommentsResponse + + Examples + -------- + import asyncio + + from finto.client import AsyncFernLangfuse + + client = AsyncFernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.comments.get( + page=1, + limit=1, + object_type="string", + object_id="string", + author_user_id="string", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "api/public/comments", + method="GET", + params={ + "page": page, + "limit": limit, + "objectType": object_type, + "objectId": object_id, + "authorUserId": author_user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(GetCommentsResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_by_id( + self, + comment_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> Comment: + """ + Get a comment by id + + Parameters + ---------- + comment_id : str + The unique langfuse identifier of a comment + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
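The `CommentObjectType` enum introduced further down in this diff ships Fern's `visit` helper, which takes one handler per variant. A sketch of exhaustive dispatch; the root re-export is an assumption, since the diff adds the type to the commons package:

```python
from finto import CommentObjectType  # re-export assumed; defined in commons below

def location(object_type: CommentObjectType) -> str:
    # visit() requires one callable per enum member, so a newly added member
    # shows up as a missing argument instead of a silently skipped branch.
    return object_type.visit(
        trace=lambda: "on a trace",
        observation=lambda: "on an observation",
        session=lambda: "on a session",
        prompt=lambda: "on a prompt",
    )

assert location(CommentObjectType.PROMPT) == "on a prompt"
```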
+ + Returns + ------- + Comment + + Examples + -------- + import asyncio + + from finto.client import AsyncFernLangfuse + + client = AsyncFernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.comments.get_by_id( + comment_id="string", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/public/comments/{jsonable_encoder(comment_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(Comment, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/langfuse/api/resources/comments/types/__init__.py b/langfuse/api/resources/comments/types/__init__.py new file mode 100644 index 000000000..13dc1d8d9 --- /dev/null +++ b/langfuse/api/resources/comments/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +from .create_comment_request import CreateCommentRequest +from .create_comment_response import CreateCommentResponse +from .get_comments_response import GetCommentsResponse + +__all__ = ["CreateCommentRequest", "CreateCommentResponse", "GetCommentsResponse"] diff --git a/langfuse/api/resources/comments/types/create_comment_request.py b/langfuse/api/resources/comments/types/create_comment_request.py new file mode 100644 index 000000000..98e25e2e1 --- /dev/null +++ b/langfuse/api/resources/comments/types/create_comment_request.py @@ -0,0 +1,69 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class CreateCommentRequest(pydantic_v1.BaseModel): + project_id: str = pydantic_v1.Field(alias="projectId") + """ + The id of the project to attach the comment to. + """ + + object_type: str = pydantic_v1.Field(alias="objectType") + """ + The type of the object to attach the comment to (trace, observation, session, prompt). + """ + + object_id: str = pydantic_v1.Field(alias="objectId") + """ + The id of the object to attach the comment to. If this does not reference a valid existing object, an error will be thrown. + """ + + content: str = pydantic_v1.Field() + """ + The content of the comment. May include markdown. Currently limited to 500 characters. 
+ """ + + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) + """ + The id of the user who created the comment. + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/comments/types/create_comment_response.py b/langfuse/api/resources/comments/types/create_comment_response.py new file mode 100644 index 000000000..d7708f798 --- /dev/null +++ b/langfuse/api/resources/comments/types/create_comment_response.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class CreateCommentResponse(pydantic_v1.BaseModel): + id: str = pydantic_v1.Field() + """ + The id of the created object in Langfuse + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/types/scores.py b/langfuse/api/resources/comments/types/get_comments_response.py similarity index 50% rename from langfuse/api/resources/score/types/scores.py rename to langfuse/api/resources/comments/types/get_comments_response.py index 998ae12e9..66a8b9527 100644 --- a/langfuse/api/resources/score/types/scores.py +++ b/langfuse/api/resources/comments/types/get_comments_response.py @@ -5,24 +5,37 @@ from ....core.datetime_utils import serialize_datetime from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 -from ...commons.types.score import Score +from ...commons.types.comment import Comment from ...utils.resources.pagination.types.meta_response import MetaResponse -class Scores(pydantic_v1.BaseModel): - data: typing.List[Score] +class GetCommentsResponse(pydantic_v1.BaseModel): + data: typing.List[Comment] meta: MetaResponse def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: 
typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), ) class Config: diff --git a/langfuse/api/resources/commons/__init__.py b/langfuse/api/resources/commons/__init__.py index d27b95745..e3e919f41 100644 --- a/langfuse/api/resources/commons/__init__.py +++ b/langfuse/api/resources/commons/__init__.py @@ -4,6 +4,8 @@ BaseScore, BooleanScore, CategoricalScore, + Comment, + CommentObjectType, ConfigCategory, CreateScoreValue, Dataset, @@ -33,13 +35,21 @@ TraceWithFullDetails, Usage, ) -from .errors import AccessDeniedError, Error, MethodNotAllowedError, NotFoundError, UnauthorizedError +from .errors import ( + AccessDeniedError, + Error, + MethodNotAllowedError, + NotFoundError, + UnauthorizedError, +) __all__ = [ "AccessDeniedError", "BaseScore", "BooleanScore", "CategoricalScore", + "Comment", + "CommentObjectType", "ConfigCategory", "CreateScoreValue", "Dataset", diff --git a/langfuse/api/resources/commons/types/__init__.py b/langfuse/api/resources/commons/types/__init__.py index b380e4863..fcec85214 100644 --- a/langfuse/api/resources/commons/types/__init__.py +++ b/langfuse/api/resources/commons/types/__init__.py @@ -3,6 +3,8 @@ from .base_score import BaseScore from .boolean_score import BooleanScore from .categorical_score import CategoricalScore +from .comment import Comment +from .comment_object_type import CommentObjectType from .config_category import ConfigCategory from .create_score_value import CreateScoreValue from .dataset import Dataset @@ -33,6 +35,8 @@ "BaseScore", "BooleanScore", "CategoricalScore", + "Comment", + "CommentObjectType", "ConfigCategory", "CreateScoreValue", "Dataset", diff --git a/langfuse/api/resources/commons/types/base_score.py b/langfuse/api/resources/commons/types/base_score.py index 0f2ce7018..71bed6ef4 100644 --- a/langfuse/api/resources/commons/types/base_score.py +++ b/langfuse/api/resources/commons/types/base_score.py @@ -13,27 +13,49 @@ class BaseScore(pydantic_v1.BaseModel): trace_id: str = pydantic_v1.Field(alias="traceId") name: str source: ScoreSource - observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None) + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) timestamp: dt.datetime created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") - author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None) + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) comment: typing.Optional[str] = None config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) """ Reference a score config on a score. 
When set, config and score name must be equal and value must comply to optionally defined numerical range """ + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + """ + Reference an annotation queue on a score. Populated if the score was initially created in an annotation queue. + """ + def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), ) class Config: diff --git a/langfuse/api/resources/commons/types/comment.py b/langfuse/api/resources/commons/types/comment.py new file mode 100644 index 000000000..4d8b1916a --- /dev/null +++ b/langfuse/api/resources/commons/types/comment.py @@ -0,0 +1,54 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from .comment_object_type import CommentObjectType + + +class Comment(pydantic_v1.BaseModel): + id: str + project_id: str = pydantic_v1.Field(alias="projectId") + created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") + updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") + object_type: CommentObjectType = pydantic_v1.Field(alias="objectType") + object_id: str = pydantic_v1.Field(alias="objectId") + content: str + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/commons/types/comment_object_type.py b/langfuse/api/resources/commons/types/comment_object_type.py new file mode 100644 index 000000000..9c6c134c6 --- /dev/null +++ b/langfuse/api/resources/commons/types/comment_object_type.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern 
from our API Definition. + +import enum +import typing + +T_Result = typing.TypeVar("T_Result") + + +class CommentObjectType(str, enum.Enum): + TRACE = "TRACE" + OBSERVATION = "OBSERVATION" + SESSION = "SESSION" + PROMPT = "PROMPT" + + def visit( + self, + trace: typing.Callable[[], T_Result], + observation: typing.Callable[[], T_Result], + session: typing.Callable[[], T_Result], + prompt: typing.Callable[[], T_Result], + ) -> T_Result: + if self is CommentObjectType.TRACE: + return trace() + if self is CommentObjectType.OBSERVATION: + return observation() + if self is CommentObjectType.SESSION: + return session() + if self is CommentObjectType.PROMPT: + return prompt() diff --git a/langfuse/api/resources/commons/types/model.py b/langfuse/api/resources/commons/types/model.py index 8e9449272..18b26c478 100644 --- a/langfuse/api/resources/commons/types/model.py +++ b/langfuse/api/resources/commons/types/model.py @@ -24,7 +24,7 @@ class Model(pydantic_v1.BaseModel): Regex pattern which matches this model definition to generation.model. Useful in case of fine-tuned models. If you want to exact match, use `(?i)^modelname$` """ - start_date: typing.Optional[dt.datetime] = pydantic_v1.Field( + start_date: typing.Optional[dt.date] = pydantic_v1.Field( alias="startDate", default=None ) """ diff --git a/langfuse/api/resources/commons/types/score.py b/langfuse/api/resources/commons/types/score.py index 946de3092..e39221084 100644 --- a/langfuse/api/resources/commons/types/score.py +++ b/langfuse/api/resources/commons/types/score.py @@ -16,25 +16,45 @@ class Score_Numeric(pydantic_v1.BaseModel): trace_id: str = pydantic_v1.Field(alias="traceId") name: str source: ScoreSource - observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None) + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) timestamp: dt.datetime created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") - author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None) + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) comment: typing.Optional[str] = None config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) - data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field(alias="dataType", default="NUMERIC") + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field( + alias="dataType", default="NUMERIC" + ) def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) 
+ super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), ) class Config: @@ -53,25 +73,45 @@ class Score_Categorical(pydantic_v1.BaseModel): trace_id: str = pydantic_v1.Field(alias="traceId") name: str source: ScoreSource - observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None) + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) timestamp: dt.datetime created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") - author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None) + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) comment: typing.Optional[str] = None config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) - data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field(alias="dataType", default="CATEGORICAL") + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field( + alias="dataType", default="CATEGORICAL" + ) def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), ) class Config: @@ -90,25 +130,45 @@ class Score_Boolean(pydantic_v1.BaseModel): trace_id: str = pydantic_v1.Field(alias="traceId") name: str source: ScoreSource - observation_id: typing.Optional[str] = pydantic_v1.Field(alias="observationId", default=None) + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) timestamp: dt.datetime created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") - author_user_id: typing.Optional[str] = pydantic_v1.Field(alias="authorUserId", default=None) + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) comment: typing.Optional[str] = None config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) - data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field(alias="dataType", default="BOOLEAN") + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field( + alias="dataType", default="BOOLEAN" + ) def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: typing.Any = { + "by_alias": 
True, + "exclude_unset": True, + **kwargs, + } return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), ) class Config: diff --git a/langfuse/api/resources/media/__init__.py b/langfuse/api/resources/media/__init__.py new file mode 100644 index 000000000..f337d7a04 --- /dev/null +++ b/langfuse/api/resources/media/__init__.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + GetMediaResponse, + GetMediaUploadUrlRequest, + GetMediaUploadUrlResponse, + MediaContentType, + PatchMediaBody, +) + +__all__ = [ + "GetMediaResponse", + "GetMediaUploadUrlRequest", + "GetMediaUploadUrlResponse", + "MediaContentType", + "PatchMediaBody", +] diff --git a/langfuse/api/resources/media/client.py b/langfuse/api/resources/media/client.py new file mode 100644 index 000000000..4541966e9 --- /dev/null +++ b/langfuse/api/resources/media/client.py @@ -0,0 +1,509 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.jsonable_encoder import jsonable_encoder +from ...core.pydantic_utilities import pydantic_v1 +from ...core.request_options import RequestOptions +from ..commons.errors.access_denied_error import AccessDeniedError +from ..commons.errors.error import Error +from ..commons.errors.method_not_allowed_error import MethodNotAllowedError +from ..commons.errors.not_found_error import NotFoundError +from ..commons.errors.unauthorized_error import UnauthorizedError +from .types.get_media_response import GetMediaResponse +from .types.get_media_upload_url_request import GetMediaUploadUrlRequest +from .types.get_media_upload_url_response import GetMediaUploadUrlResponse +from .types.patch_media_body import PatchMediaBody + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class MediaClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get( + self, media_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetMediaResponse: + """ + Get a media record + + Parameters + ---------- + media_id : str + The unique langfuse identifier of a media record + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
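`GetMediaResponse` (defined later in this diff) carries a time-limited download URL plus metadata rather than the media bytes themselves. A sketch of fetching the content with a configured sync client, assuming `httpx` (which the generated client already uses) and a placeholder media id:

```python
import httpx

media = client.media.get(media_id="media-123")  # placeholder id

# `url` is a presigned download URL; `url_expiry` states when it stops working.
response = httpx.get(media.url)
response.raise_for_status()

assert len(response.content) == media.content_length
print(media.content_type, media.uploaded_at, media.url_expiry)
```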
+ + Returns + ------- + GetMediaResponse + + Examples + -------- + from finto.client import FernLangfuse + + client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + client.media.get( + media_id="string", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/public/media/{jsonable_encoder(media_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(GetMediaResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def patch( + self, + media_id: str, + *, + request: PatchMediaBody, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: + """ + Patch a media record + + Parameters + ---------- + media_id : str + The unique langfuse identifier of a media record + + request : PatchMediaBody + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + import datetime + + from finto import PatchMediaBody + from finto.client import FernLangfuse + + client = FernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + client.media.patch( + media_id="string", + request=PatchMediaBody( + uploaded_at=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + upload_http_status=1, + upload_http_error="string", + upload_time_ms=1, + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/public/media/{jsonable_encoder(media_id)}", + method="PATCH", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_upload_url( + self, + *, + request: GetMediaUploadUrlRequest, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetMediaUploadUrlResponse: + """ + Get a presigned upload URL for a media record + + Parameters + ---------- + request : GetMediaUploadUrlRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        GetMediaUploadUrlResponse
+
+        Examples
+        --------
+        from finto import GetMediaUploadUrlRequest
+        from finto.client import FernLangfuse
+
+        client = FernLangfuse(
+            x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+            x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+            x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+            username="YOUR_USERNAME",
+            password="YOUR_PASSWORD",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        client.media.get_upload_url(
+            request=GetMediaUploadUrlRequest(
+                trace_id="string",
+                observation_id="string",
+                content_type="image/png",
+                content_length=1,
+                sha_256_hash="string",
+                field="string",
+            ),
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "api/public/media",
+            method="POST",
+            json=request,
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(
+                    GetMediaUploadUrlResponse, _response.json()
+                )  # type: ignore
+            if _response.status_code == 400:
+                raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 403:
+                raise AccessDeniedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 405:
+                raise MethodNotAllowedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 404:
+                raise NotFoundError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncMediaClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get(
+        self, media_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> GetMediaResponse:
+        """
+        Get a media record
+
+        Parameters
+        ----------
+        media_id : str
+            The unique langfuse identifier of a media record
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
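The three media endpoints compose into an upload protocol: request a presigned URL, PUT the bytes to it, then PATCH the outcome back. Note that `contentType` is required by the `GetMediaUploadUrlRequest` model defined later in this diff, so the example above includes it. A sketch of the full flow with the synchronous client, under stated assumptions: the encoding of `sha256Hash` (base64 below) and the headers the presigned PUT expects are not specified in this diff, and the file path and trace id are placeholders:

```python
import base64
import hashlib
import time
from datetime import datetime, timezone
from pathlib import Path

import httpx

from finto import GetMediaUploadUrlRequest, PatchMediaBody

content = Path("screenshot.png").read_bytes()  # placeholder file
sha256 = base64.b64encode(hashlib.sha256(content).digest()).decode()  # encoding assumed

res = client.media.get_upload_url(
    request=GetMediaUploadUrlRequest(
        trace_id="trace-123",  # placeholder id
        content_type="image/png",
        content_length=len(content),
        sha_256_hash=sha256,
        field="input",  # input, output, or metadata per the request model
    ),
)

# uploadUrl is None when an identical asset was already uploaded (dedup by hash).
if res.upload_url is not None:
    start = time.monotonic()
    upload = httpx.put(
        res.upload_url,
        content=content,
        headers={"Content-Type": "image/png"},  # assumed; backend-dependent
    )
    # Report the result so the server can mark the record as uploaded.
    client.media.patch(
        media_id=res.media_id,
        request=PatchMediaBody(
            uploaded_at=datetime.now(timezone.utc),
            upload_http_status=upload.status_code,
            upload_http_error=None if upload.is_success else upload.text,
            upload_time_ms=int((time.monotonic() - start) * 1000),
        ),
    )
```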
+ + Returns + ------- + GetMediaResponse + + Examples + -------- + import asyncio + + from finto.client import AsyncFernLangfuse + + client = AsyncFernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.media.get( + media_id="string", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/public/media/{jsonable_encoder(media_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return pydantic_v1.parse_obj_as(GetMediaResponse, _response.json()) # type: ignore + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def patch( + self, + media_id: str, + *, + request: PatchMediaBody, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: + """ + Patch a media record + + Parameters + ---------- + media_id : str + The unique langfuse identifier of a media record + + request : PatchMediaBody + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + import asyncio + import datetime + + from finto import PatchMediaBody + from finto.client import AsyncFernLangfuse + + client = AsyncFernLangfuse( + x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME", + x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION", + x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY", + username="YOUR_USERNAME", + password="YOUR_PASSWORD", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.media.patch( + media_id="string", + request=PatchMediaBody( + uploaded_at=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + upload_http_status=1, + upload_http_error="string", + upload_time_ms=1, + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/public/media/{jsonable_encoder(media_id)}", + method="PATCH", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 400: + raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + if _response.status_code == 401: + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 403: + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 405: + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + if _response.status_code == 404: + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_upload_url( + self, + *, + request: GetMediaUploadUrlRequest, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetMediaUploadUrlResponse: + """ + Get a presigned upload URL for a media record + + Parameters + ---------- + request : GetMediaUploadUrlRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        GetMediaUploadUrlResponse
+
+        Examples
+        --------
+        import asyncio
+
+        from finto import GetMediaUploadUrlRequest
+        from finto.client import AsyncFernLangfuse
+
+        client = AsyncFernLangfuse(
+            x_langfuse_sdk_name="YOUR_X_LANGFUSE_SDK_NAME",
+            x_langfuse_sdk_version="YOUR_X_LANGFUSE_SDK_VERSION",
+            x_langfuse_public_key="YOUR_X_LANGFUSE_PUBLIC_KEY",
+            username="YOUR_USERNAME",
+            password="YOUR_PASSWORD",
+            base_url="https://yourhost.com/path/to/api",
+        )
+
+
+        async def main() -> None:
+            await client.media.get_upload_url(
+                request=GetMediaUploadUrlRequest(
+                    trace_id="string",
+                    observation_id="string",
+                    content_type="image/png",
+                    content_length=1,
+                    sha_256_hash="string",
+                    field="string",
+                ),
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "api/public/media",
+            method="POST",
+            json=request,
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return pydantic_v1.parse_obj_as(
+                    GetMediaUploadUrlResponse, _response.json()
+                )  # type: ignore
+            if _response.status_code == 400:
+                raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 403:
+                raise AccessDeniedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 405:
+                raise MethodNotAllowedError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            if _response.status_code == 404:
+                raise NotFoundError(
+                    pydantic_v1.parse_obj_as(typing.Any, _response.json())
+                )  # type: ignore
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/langfuse/api/resources/media/types/__init__.py b/langfuse/api/resources/media/types/__init__.py
new file mode 100644
index 000000000..20af676d8
--- /dev/null
+++ b/langfuse/api/resources/media/types/__init__.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .get_media_response import GetMediaResponse
+from .get_media_upload_url_request import GetMediaUploadUrlRequest
+from .get_media_upload_url_response import GetMediaUploadUrlResponse
+from .media_content_type import MediaContentType
+from .patch_media_body import PatchMediaBody
+
+__all__ = [
+    "GetMediaResponse",
+    "GetMediaUploadUrlRequest",
+    "GetMediaUploadUrlResponse",
+    "MediaContentType",
+    "PatchMediaBody",
+]
diff --git a/langfuse/api/resources/media/types/get_media_response.py b/langfuse/api/resources/media/types/get_media_response.py
new file mode 100644
index 000000000..fa5368872
--- /dev/null
+++ b/langfuse/api/resources/media/types/get_media_response.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class GetMediaResponse(pydantic_v1.BaseModel): + media_id: str = pydantic_v1.Field(alias="mediaId") + """ + The unique langfuse identifier of a media record + """ + + content_type: str = pydantic_v1.Field(alias="contentType") + """ + The MIME type of the media record + """ + + content_length: int = pydantic_v1.Field(alias="contentLength") + """ + The size of the media record in bytes + """ + + uploaded_at: dt.datetime = pydantic_v1.Field(alias="uploadedAt") + """ + The date and time when the media record was uploaded + """ + + url: str = pydantic_v1.Field() + """ + The download URL of the media record + """ + + url_expiry: str = pydantic_v1.Field(alias="urlExpiry") + """ + The expiry date and time of the media record download URL + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/media/types/get_media_upload_url_request.py b/langfuse/api/resources/media/types/get_media_upload_url_request.py new file mode 100644 index 000000000..d0cde59fe --- /dev/null +++ b/langfuse/api/resources/media/types/get_media_upload_url_request.py @@ -0,0 +1,71 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from .media_content_type import MediaContentType + + +class GetMediaUploadUrlRequest(pydantic_v1.BaseModel): + trace_id: str = pydantic_v1.Field(alias="traceId") + """ + The trace ID associated with the media record + """ + + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) + """ + The observation ID associated with the media record. If the media record is associated directly with a trace, this will be null. + """ + + content_type: MediaContentType = pydantic_v1.Field(alias="contentType") + content_length: int = pydantic_v1.Field(alias="contentLength") + """ + The size of the media record in bytes + """ + + sha_256_hash: str = pydantic_v1.Field(alias="sha256Hash") + """ + The SHA-256 hash of the media record + """ + + field: str = pydantic_v1.Field() + """ + The trace / observation field the media record is associated with. 
This can be one of `input`, `output`, `metadata` + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/media/types/get_media_upload_url_response.py b/langfuse/api/resources/media/types/get_media_upload_url_response.py new file mode 100644 index 000000000..fadc76c01 --- /dev/null +++ b/langfuse/api/resources/media/types/get_media_upload_url_response.py @@ -0,0 +1,54 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class GetMediaUploadUrlResponse(pydantic_v1.BaseModel): + upload_url: typing.Optional[str] = pydantic_v1.Field( + alias="uploadUrl", default=None + ) + """ + The presigned upload URL. If the asset is already uploaded, this will be null + """ + + media_id: str = pydantic_v1.Field(alias="mediaId") + """ + The unique langfuse identifier of a media record + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/media/types/media_content_type.py b/langfuse/api/resources/media/types/media_content_type.py new file mode 100644 index 000000000..bf9368fb3 --- /dev/null +++ b/langfuse/api/resources/media/types/media_content_type.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +MediaContentType = typing.Literal[ + "image/png", + "image/jpeg", + "image/jpg", + "image/webp", + "audio/mpeg", + "audio/mp3", + "audio/wav", + "text/plain", + "application/pdf", +] diff --git a/langfuse/api/resources/media/types/patch_media_body.py b/langfuse/api/resources/media/types/patch_media_body.py new file mode 100644 index 000000000..49f0c3432 --- /dev/null +++ b/langfuse/api/resources/media/types/patch_media_body.py @@ -0,0 +1,66 @@ +# This file was auto-generated by Fern from our API Definition. 
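`MediaContentType` above is a `typing.Literal`, so the allowed MIME types are recoverable at runtime. A small sketch of pre-validating a content type before requesting an upload URL; the root re-export is an assumption, mirroring the media package's `__init__`:

```python
import typing

from finto import MediaContentType  # re-export assumed

def is_supported(content_type: str) -> bool:
    # typing.get_args() on a Literal returns its permitted values.
    return content_type in typing.get_args(MediaContentType)

assert is_supported("image/png")
assert not is_supported("video/mp4")
```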
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class PatchMediaBody(pydantic_v1.BaseModel): + uploaded_at: dt.datetime = pydantic_v1.Field(alias="uploadedAt") + """ + The date and time when the media record was uploaded + """ + + upload_http_status: int = pydantic_v1.Field(alias="uploadHttpStatus") + """ + The HTTP status code of the upload + """ + + upload_http_error: typing.Optional[str] = pydantic_v1.Field( + alias="uploadHttpError", default=None + ) + """ + The HTTP error message of the upload + """ + + upload_time_ms: typing.Optional[int] = pydantic_v1.Field( + alias="uploadTimeMs", default=None + ) + """ + The time in milliseconds it took to upload the media record + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/__init__.py b/langfuse/api/resources/score/__init__.py index 8a9975510..97fd51ffa 100644 --- a/langfuse/api/resources/score/__init__.py +++ b/langfuse/api/resources/score/__init__.py @@ -1,5 +1,29 @@ # This file was auto-generated by Fern from our API Definition. -from .types import CreateScoreRequest, CreateScoreResponse, Scores +from .types import ( + CreateScoreRequest, + CreateScoreResponse, + GetScoresResponse, + GetScoresResponseData, + GetScoresResponseDataBoolean, + GetScoresResponseDataCategorical, + GetScoresResponseDataNumeric, + GetScoresResponseData_Boolean, + GetScoresResponseData_Categorical, + GetScoresResponseData_Numeric, + GetScoresResponseTraceData, +) -__all__ = ["CreateScoreRequest", "CreateScoreResponse", "Scores"] +__all__ = [ + "CreateScoreRequest", + "CreateScoreResponse", + "GetScoresResponse", + "GetScoresResponseData", + "GetScoresResponseDataBoolean", + "GetScoresResponseDataCategorical", + "GetScoresResponseDataNumeric", + "GetScoresResponseData_Boolean", + "GetScoresResponseData_Categorical", + "GetScoresResponseData_Numeric", + "GetScoresResponseTraceData", +] diff --git a/langfuse/api/resources/score/client.py b/langfuse/api/resources/score/client.py index 29054bf0d..4408ae195 100644 --- a/langfuse/api/resources/score/client.py +++ b/langfuse/api/resources/score/client.py @@ -20,7 +20,7 @@ from ..commons.types.score_source import ScoreSource from .types.create_score_request import CreateScoreRequest from .types.create_score_response import CreateScoreResponse -from .types.scores import Scores +from .types.get_scores_response import GetScoresResponse # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
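All of the pydantic models in this diff override `json()`/`dict()` the same way: serialize by alias, and merge an `exclude_unset` view with an `exclude_none` view via `deep_union_pydantic_dicts`. In practice you construct them with snake_case field names and get camelCase wire payloads with unset optionals dropped. A quick sketch:

```python
from finto import CreateCommentRequest

req = CreateCommentRequest(
    project_id="my-project",
    object_type="trace",
    object_id="trace-123",
    content="hello",
)

# by_alias defaults to True in the overridden json(), so keys come out
# camelCase; the unset optional authorUserId is omitted entirely.
print(req.json())
# {"projectId": "my-project", "objectType": "trace", "objectId": "trace-123", "content": "hello"}
```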
@@ -31,7 +31,10 @@ def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper def create( - self, *, request: CreateScoreRequest, request_options: typing.Optional[RequestOptions] = None + self, + *, + request: CreateScoreRequest, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateScoreResponse: """ Create a score @@ -69,7 +72,11 @@ def create( ) """ _response = self._client_wrapper.httpx_client.request( - "api/public/scores", method="POST", json=request, request_options=request_options, omit=OMIT + "api/public/scores", + method="POST", + json=request, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: @@ -77,13 +84,21 @@ def create( if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -103,9 +118,13 @@ def get( value: typing.Optional[float] = None, score_ids: typing.Optional[str] = None, config_id: typing.Optional[str] = None, + queue_id: typing.Optional[str] = None, data_type: typing.Optional[ScoreDataType] = None, + trace_tags: typing.Optional[ + typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]] + ] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> Scores: + ) -> GetScoresResponse: """ Get a list of scores @@ -144,15 +163,21 @@ def get( config_id : typing.Optional[str] Retrieve only scores with a specific configId. + queue_id : typing.Optional[str] + Retrieve only scores with a specific annotation queueId. + data_type : typing.Optional[ScoreDataType] Retrieve only scores with a specific dataType. + trace_tags : typing.Optional[typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]] + Only scores linked to traces that include all of these tags will be returned. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
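The score list endpoint gains two filters here: `queue_id` restricts results to scores created in a given annotation queue, and `trace_tags` keeps only scores whose trace carries every listed tag. A sketch, assuming the wrapper exposes the resource as `client.score` (as the generated examples do), that `GetScoresResponse` keeps the `data`/`meta` shape of the old `Scores` model, and that the ids and tags are placeholders:

```python
from finto import ScoreDataType  # re-export assumed, as in the generated examples

scores = client.score.get(
    queue_id="queue-123",               # only scores from this annotation queue
    trace_tags=["production", "chat"],  # trace must carry ALL of these tags
    data_type=ScoreDataType.NUMERIC,
    limit=100,
)
for score in scores.data:
    # queue_id is newly populated for scores created in an annotation queue.
    print(score.name, score.queue_id)
```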
Returns ------- - Scores + GetScoresResponse Examples -------- @@ -185,7 +210,9 @@ def get( value=1.1, score_ids="string", config_id="string", + queue_id="string", data_type=ScoreDataType.NUMERIC, + trace_tags=["string"], ) """ _response = self._client_wrapper.httpx_client.request( @@ -196,36 +223,52 @@ def get( "limit": limit, "userId": user_id, "name": name, - "fromTimestamp": serialize_datetime(from_timestamp) if from_timestamp is not None else None, - "toTimestamp": serialize_datetime(to_timestamp) if to_timestamp is not None else None, + "fromTimestamp": serialize_datetime(from_timestamp) + if from_timestamp is not None + else None, + "toTimestamp": serialize_datetime(to_timestamp) + if to_timestamp is not None + else None, "source": source, "operator": operator, "value": value, "scoreIds": score_ids, "configId": config_id, + "queueId": queue_id, "dataType": data_type, + "traceTags": jsonable_encoder(trace_tags), }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return pydantic_v1.parse_obj_as(Scores, _response.json()) # type: ignore + return pydantic_v1.parse_obj_as(GetScoresResponse, _response.json()) # type: ignore if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Score: + def get_by_id( + self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> Score: """ Get a score @@ -258,7 +301,9 @@ def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOp ) """ _response = self._client_wrapper.httpx_client.request( - f"api/public/scores/{jsonable_encoder(score_id)}", method="GET", request_options=request_options + f"api/public/scores/{jsonable_encoder(score_id)}", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: @@ -266,19 +311,29 @@ def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOp if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: 
- raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + def delete( + self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ Delete a score @@ -311,7 +366,9 @@ def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptio ) """ _response = self._client_wrapper.httpx_client.request( - f"api/public/scores/{jsonable_encoder(score_id)}", method="DELETE", request_options=request_options + f"api/public/scores/{jsonable_encoder(score_id)}", + method="DELETE", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: @@ -319,13 +376,21 @@ def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptio if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -337,7 +402,10 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper async def create( - self, *, request: CreateScoreRequest, request_options: typing.Optional[RequestOptions] = None + self, + *, + request: CreateScoreRequest, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateScoreResponse: """ Create a score @@ -383,7 +451,11 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "api/public/scores", method="POST", json=request, request_options=request_options, omit=OMIT + "api/public/scores", + method="POST", + json=request, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: @@ 
-391,13 +463,21 @@ async def main() -> None: if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -417,9 +497,13 @@ async def get( value: typing.Optional[float] = None, score_ids: typing.Optional[str] = None, config_id: typing.Optional[str] = None, + queue_id: typing.Optional[str] = None, data_type: typing.Optional[ScoreDataType] = None, + trace_tags: typing.Optional[ + typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]] + ] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> Scores: + ) -> GetScoresResponse: """ Get a list of scores @@ -458,15 +542,21 @@ async def get( config_id : typing.Optional[str] Retrieve only scores with a specific configId. + queue_id : typing.Optional[str] + Retrieve only scores with a specific annotation queueId. + data_type : typing.Optional[ScoreDataType] Retrieve only scores with a specific dataType. + trace_tags : typing.Optional[typing.Union[typing.Sequence[str], typing.Sequence[typing.Sequence[str]]]] + Only scores linked to traces that include all of these tags will be returned. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - Scores + GetScoresResponse Examples -------- @@ -503,7 +593,9 @@ async def main() -> None: value=1.1, score_ids="string", config_id="string", + queue_id="string", data_type=ScoreDataType.NUMERIC, + trace_tags=["string"], ) @@ -517,36 +609,52 @@ async def main() -> None: "limit": limit, "userId": user_id, "name": name, - "fromTimestamp": serialize_datetime(from_timestamp) if from_timestamp is not None else None, - "toTimestamp": serialize_datetime(to_timestamp) if to_timestamp is not None else None, + "fromTimestamp": serialize_datetime(from_timestamp) + if from_timestamp is not None + else None, + "toTimestamp": serialize_datetime(to_timestamp) + if to_timestamp is not None + else None, "source": source, "operator": operator, "value": value, "scoreIds": score_ids, "configId": config_id, + "queueId": queue_id, "dataType": data_type, + "traceTags": jsonable_encoder(trace_tags), }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return pydantic_v1.parse_obj_as(Scores, _response.json()) # type: ignore + return pydantic_v1.parse_obj_as(GetScoresResponse, _response.json()) # type: ignore if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_by_id(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Score: + async def get_by_id( + self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> Score: """ Get a score @@ -587,7 +695,9 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"api/public/scores/{jsonable_encoder(score_id)}", method="GET", request_options=request_options + f"api/public/scores/{jsonable_encoder(score_id)}", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: @@ -595,19 +705,29 @@ async def main() -> None: if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + 
raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete(self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + async def delete( + self, score_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ Delete a score @@ -648,7 +768,9 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"api/public/scores/{jsonable_encoder(score_id)}", method="DELETE", request_options=request_options + f"api/public/scores/{jsonable_encoder(score_id)}", + method="DELETE", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: @@ -656,13 +778,21 @@ async def main() -> None: if _response.status_code == 400: raise Error(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore if _response.status_code == 401: - raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise UnauthorizedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 403: - raise AccessDeniedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise AccessDeniedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 405: - raise MethodNotAllowedError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise MethodNotAllowedError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore if _response.status_code == 404: - raise NotFoundError(pydantic_v1.parse_obj_as(typing.Any, _response.json())) # type: ignore + raise NotFoundError( + pydantic_v1.parse_obj_as(typing.Any, _response.json()) + ) # type: ignore _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/langfuse/api/resources/score/types/__init__.py b/langfuse/api/resources/score/types/__init__.py index 2917472ee..b627bad8f 100644 --- a/langfuse/api/resources/score/types/__init__.py +++ b/langfuse/api/resources/score/types/__init__.py @@ -2,6 +2,28 @@ from .create_score_request import CreateScoreRequest from .create_score_response import CreateScoreResponse -from .scores import Scores +from .get_scores_response import GetScoresResponse +from .get_scores_response_data import ( + GetScoresResponseData, + GetScoresResponseData_Boolean, + GetScoresResponseData_Categorical, + GetScoresResponseData_Numeric, +) +from .get_scores_response_data_boolean import GetScoresResponseDataBoolean +from .get_scores_response_data_categorical import GetScoresResponseDataCategorical +from .get_scores_response_data_numeric import GetScoresResponseDataNumeric +from .get_scores_response_trace_data import GetScoresResponseTraceData 
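Usage sketch (editorial aside, not part of the patch): the new queue_id and trace_tags filters added to scores.get above are plain query parameters. The client construction below is an assumption based on the generated FernLangfuse docstring examples; the keyword arguments match the signature introduced in this diff. Per the parameter docs, trace_tags is an AND filter: only scores whose trace carries all listed tags are returned.

import datetime as dt

from langfuse.api.client import FernLangfuse

# Assumed constructor arguments; see the generated client docstrings.
client = FernLangfuse(
    base_url="http://localhost:3000",
    username="pk-lf-...",  # Langfuse public key (basic auth user)
    password="sk-lf-...",  # Langfuse secret key (basic auth password)
)

response = client.score.get(
    page=1,
    limit=50,
    queue_id="my-annotation-queue",          # new: restrict to one annotation queue
    trace_tags=["production", "evaluated"],  # new: trace must carry all of these tags
    from_timestamp=dt.datetime(2024, 1, 1, tzinfo=dt.timezone.utc),
)

for score in response.data:
    print(score.name, score.data_type)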
-__all__ = ["CreateScoreRequest", "CreateScoreResponse", "Scores"] +__all__ = [ + "CreateScoreRequest", + "CreateScoreResponse", + "GetScoresResponse", + "GetScoresResponseData", + "GetScoresResponseDataBoolean", + "GetScoresResponseDataCategorical", + "GetScoresResponseDataNumeric", + "GetScoresResponseData_Boolean", + "GetScoresResponseData_Categorical", + "GetScoresResponseData_Numeric", + "GetScoresResponseTraceData", +] diff --git a/langfuse/api/resources/score/types/get_scores_response.py b/langfuse/api/resources/score/types/get_scores_response.py new file mode 100644 index 000000000..777bb799b --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from ...utils.resources.pagination.types.meta_response import MetaResponse +from .get_scores_response_data import GetScoresResponseData + + +class GetScoresResponse(pydantic_v1.BaseModel): + data: typing.List[GetScoresResponseData] + meta: MetaResponse + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/types/get_scores_response_data.py b/langfuse/api/resources/score/types/get_scores_response_data.py new file mode 100644 index 000000000..e1b317975 --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response_data.py @@ -0,0 +1,191 @@ +# This file was auto-generated by Fern from our API Definition. 
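Since GetScoresResponse pairs data with a MetaResponse, callers can page through large result sets. A minimal sketch, reusing client from the previous example and assuming meta exposes total_pages as on other paginated Langfuse endpoints:

all_scores = []
page = 1

while True:
    response = client.score.get(page=page, limit=100)
    all_scores.extend(response.data)

    # Assumption: MetaResponse carries page/limit/total_items/total_pages.
    if page >= response.meta.total_pages:
        break

    page += 1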
+ +from __future__ import annotations + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from ...commons.types.score_source import ScoreSource +from .get_scores_response_trace_data import GetScoresResponseTraceData + + +class GetScoresResponseData_Numeric(pydantic_v1.BaseModel): + trace: GetScoresResponseTraceData + value: float + id: str + trace_id: str = pydantic_v1.Field(alias="traceId") + name: str + source: ScoreSource + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) + timestamp: dt.datetime + created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") + updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) + comment: typing.Optional[str] = None + config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["NUMERIC"] = pydantic_v1.Field( + alias="dataType", default="NUMERIC" + ) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +class GetScoresResponseData_Categorical(pydantic_v1.BaseModel): + trace: GetScoresResponseTraceData + value: typing.Optional[float] = None + string_value: str = pydantic_v1.Field(alias="stringValue") + id: str + trace_id: str = pydantic_v1.Field(alias="traceId") + name: str + source: ScoreSource + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) + timestamp: dt.datetime + created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") + updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) + comment: typing.Optional[str] = None + config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["CATEGORICAL"] = pydantic_v1.Field( + alias="dataType", default="CATEGORICAL" + ) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + 
return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +class GetScoresResponseData_Boolean(pydantic_v1.BaseModel): + trace: GetScoresResponseTraceData + value: float + string_value: str = pydantic_v1.Field(alias="stringValue") + id: str + trace_id: str = pydantic_v1.Field(alias="traceId") + name: str + source: ScoreSource + observation_id: typing.Optional[str] = pydantic_v1.Field( + alias="observationId", default=None + ) + timestamp: dt.datetime + created_at: dt.datetime = pydantic_v1.Field(alias="createdAt") + updated_at: dt.datetime = pydantic_v1.Field(alias="updatedAt") + author_user_id: typing.Optional[str] = pydantic_v1.Field( + alias="authorUserId", default=None + ) + comment: typing.Optional[str] = None + config_id: typing.Optional[str] = pydantic_v1.Field(alias="configId", default=None) + queue_id: typing.Optional[str] = pydantic_v1.Field(alias="queueId", default=None) + data_type: typing.Literal["BOOLEAN"] = pydantic_v1.Field( + alias="dataType", default="BOOLEAN" + ) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} + + +GetScoresResponseData = typing.Union[ + GetScoresResponseData_Numeric, + GetScoresResponseData_Categorical, + GetScoresResponseData_Boolean, +] diff --git a/langfuse/api/resources/score/types/get_scores_response_data_boolean.py b/langfuse/api/resources/score/types/get_scores_response_data_boolean.py new file mode 100644 index 000000000..4dbf85af2 --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response_data_boolean.py @@ -0,0 +1,46 @@ +# This file was auto-generated by Fern from our API Definition. 
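A note on consuming the GetScoresResponseData union defined above: each variant pins data_type to a typing.Literal, so a plain string comparison is enough to tell which fields are present. Sketch, assuming this diff is applied so the new types are importable:

from langfuse.api.resources.score.types import GetScoresResponseData


def describe(score: GetScoresResponseData) -> str:
    if score.data_type == "NUMERIC":
        return f"{score.name}: {score.value}"

    if score.data_type == "CATEGORICAL":
        # Categorical scores carry their label in string_value.
        return f"{score.name}: {score.string_value}"

    # BOOLEAN scores expose both the 0/1 value and its string representation.
    return f"{score.name}: {score.string_value} ({score.value})"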
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from ...commons.types.boolean_score import BooleanScore +from .get_scores_response_trace_data import GetScoresResponseTraceData + + +class GetScoresResponseDataBoolean(BooleanScore): + trace: GetScoresResponseTraceData + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/types/get_scores_response_data_categorical.py b/langfuse/api/resources/score/types/get_scores_response_data_categorical.py new file mode 100644 index 000000000..3c619779f --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response_data_categorical.py @@ -0,0 +1,46 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from ...commons.types.categorical_score import CategoricalScore +from .get_scores_response_trace_data import GetScoresResponseTraceData + + +class GetScoresResponseDataCategorical(CategoricalScore): + trace: GetScoresResponseTraceData + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/types/get_scores_response_data_numeric.py b/langfuse/api/resources/score/types/get_scores_response_data_numeric.py new file mode 100644 index 000000000..127d8f028 --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response_data_numeric.py @@ -0,0 +1,46 @@ +# This file was auto-generated by Fern from our API Definition. 
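All of the generated models in this diff share the same dict()/json() override: the object is serialized once with exclude_unset and once with exclude_none, and the two results are deep-merged. The effect is that a field the caller explicitly set to None survives serialization, while optional fields that were never set are dropped. A small illustration with a hypothetical model (Example is not part of the SDK; only the serialization flags are the point here):

import typing

from langfuse.api.core.pydantic_utilities import pydantic_v1


class Example(pydantic_v1.BaseModel):
    id: str
    comment: typing.Optional[str] = None
    queue_id: typing.Optional[str] = None


score = Example(id="s-1", comment=None)  # comment set explicitly, queue_id unset

print(score.dict(exclude_unset=True))  # {'id': 's-1', 'comment': None}
print(score.dict(exclude_none=True))   # {'id': 's-1'}
# deep_union_pydantic_dicts of the two keeps 'comment': None and drops queue_id.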
+ +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 +from ...commons.types.numeric_score import NumericScore +from .get_scores_response_trace_data import GetScoresResponseTraceData + + +class GetScoresResponseDataNumeric(NumericScore): + trace: GetScoresResponseTraceData + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/resources/score/types/get_scores_response_trace_data.py b/langfuse/api/resources/score/types/get_scores_response_trace_data.py new file mode 100644 index 000000000..efbafadf4 --- /dev/null +++ b/langfuse/api/resources/score/types/get_scores_response_trace_data.py @@ -0,0 +1,52 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ....core.datetime_utils import serialize_datetime +from ....core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1 + + +class GetScoresResponseTraceData(pydantic_v1.BaseModel): + user_id: typing.Optional[str] = pydantic_v1.Field(alias="userId", default=None) + """ + The user ID associated with the trace referenced by score + """ + + tags: typing.Optional[typing.List[str]] = pydantic_v1.Field(default=None) + """ + A list of tags associated with the trace referenced by score + """ + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + kwargs_with_defaults_exclude_none: typing.Any = { + "by_alias": True, + "exclude_none": True, + **kwargs, + } + + return deep_union_pydantic_dicts( + super().dict(**kwargs_with_defaults_exclude_unset), + super().dict(**kwargs_with_defaults_exclude_none), + ) + + class Config: + frozen = True + smart_union = True + allow_population_by_field_name = True + populate_by_name = True + extra = pydantic_v1.Extra.allow + json_encoders = {dt.datetime: serialize_datetime} diff --git a/langfuse/api/tests/utils/test_http_client.py b/langfuse/api/tests/utils/test_http_client.py index 4a37a5236..950fcdeb1 100644 --- a/langfuse/api/tests/utils/test_http_client.py +++ b/langfuse/api/tests/utils/test_http_client.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
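Because GetScoresResponseTraceData is embedded in every returned score, trace context such as the user ID and tags is available without a second API call. Sketch, where response is a GetScoresResponse fetched as in the earlier example:

from collections import defaultdict

scores_by_user = defaultdict(list)

for score in response.data:
    # user_id is optional on the embedded trace data.
    scores_by_user[score.trace.user_id or "unknown"].append(score)

for user_id, user_scores in scores_by_user.items():
    print(user_id, len(user_scores))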
- from langfuse.api.core.http_client import get_request_body from langfuse.api.core.request_options import RequestOptions diff --git a/langfuse/api/tests/utils/test_query_encoding.py b/langfuse/api/tests/utils/test_query_encoding.py index fdbcf6b76..9afa0ea78 100644 --- a/langfuse/api/tests/utils/test_query_encoding.py +++ b/langfuse/api/tests/utils/test_query_encoding.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. - from langfuse.api.core.query_encoder import encode_query diff --git a/langfuse/client.py b/langfuse/client.py index f9e6a2380..6137c2c02 100644 --- a/langfuse/client.py +++ b/langfuse/client.py @@ -1,28 +1,19 @@ -from contextlib import contextmanager import datetime as dt import logging import os -import typing -import uuid -import backoff -import httpx -from enum import Enum import time import tracemalloc -from typing import ( - Any, - Dict, - Optional, - Literal, - Union, - List, - Sequence, - overload, -) +import typing import urllib.parse +import uuid import warnings +from contextlib import contextmanager from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Literal, Optional, Sequence, Union, overload +import backoff +import httpx from langfuse.api.resources.commons.types.dataset_run_with_items import ( DatasetRunWithItems, @@ -39,8 +30,8 @@ ) from langfuse.api.resources.ingestion.types.create_span_body import CreateSpanBody from langfuse.api.resources.ingestion.types.score_body import ScoreBody -from langfuse.api.resources.ingestion.types.trace_body import TraceBody from langfuse.api.resources.ingestion.types.sdk_log_body import SdkLogBody +from langfuse.api.resources.ingestion.types.trace_body import TraceBody from langfuse.api.resources.ingestion.types.update_generation_body import ( UpdateGenerationBody, ) @@ -51,28 +42,26 @@ from langfuse.api.resources.prompts.types import ( CreatePromptRequest_Chat, CreatePromptRequest_Text, - Prompt_Text, Prompt_Chat, + Prompt_Text, ) from langfuse.api.resources.trace.types.traces import Traces from langfuse.api.resources.utils.resources.pagination.types.meta_response import ( MetaResponse, ) from langfuse.model import ( + ChatMessageDict, + ChatPromptClient, CreateDatasetItemRequest, CreateDatasetRequest, CreateDatasetRunItemRequest, - ChatMessageDict, DatasetItem, DatasetStatus, ModelUsage, PromptClient, - ChatPromptClient, TextPromptClient, ) -from langfuse.parse_error import ( - handle_fern_exception, -) +from langfuse.parse_error import handle_fern_exception from langfuse.prompt_cache import PromptCache try: @@ -80,13 +69,13 @@ except ImportError: import pydantic # type: ignore +from langfuse._task_manager.task_manager import TaskManager from langfuse.api.client import FernLangfuse from langfuse.environment import get_common_release_envs from langfuse.logging import clean_logger from langfuse.model import Dataset, MapValue, Observation, TraceWithFullDetails from langfuse.request import LangfuseClient -from langfuse.task_manager import TaskManager -from langfuse.types import SpanLevel, ScoreDataType, MaskFunction +from langfuse.types import MaskFunction, ScoreDataType, SpanLevel from langfuse.utils import _convert_usage_input, _create_prompt_context, _get_timestamp from .version import __version__ as version @@ -268,11 +257,12 @@ def __init__( # Otherwise, defaults to WARNING level. 
# See https://docs.python.org/3/howto/logging.html#what-happens-if-no-configuration-is-provided
             logging.basicConfig()
 
-            self.log.setLevel(logging.DEBUG)
+            # Set level for all loggers under langfuse package
+            logging.getLogger("langfuse").setLevel(logging.DEBUG)
 
             clean_logger()
         else:
-            self.log.setLevel(logging.WARNING)
+            logging.getLogger("langfuse").setLevel(logging.WARNING)
 
             clean_logger()
 
         self.base_url = (
@@ -308,6 +298,7 @@ def __init__(
             "flush_interval": flush_interval,
             "max_retries": max_retries,
             "client": langfuse_client,
+            "api_client": self.client,
             "public_key": public_key,
             "sdk_name": "python",
             "sdk_version": version,
@@ -1383,7 +1374,7 @@ def _log_memory_usage(self):
                 :top_k_items
             ],
             "total_usage": f"{total_memory_usage:.2f} MB",
-            "langfuse_queue_length": self.task_manager._queue.qsize(),
+            "langfuse_queue_length": self.task_manager._ingestion_queue.qsize(),
         }
 
         self.log.debug("Memory usage: ", logged_memory_usage)
diff --git a/langfuse/media.py b/langfuse/media.py
new file mode 100644
index 000000000..02ccfa81f
--- /dev/null
+++ b/langfuse/media.py
@@ -0,0 +1,203 @@
+"""This module contains the LangfuseMedia class, which is used to wrap media objects for upload to Langfuse."""
+
+import base64
+import hashlib
+import logging
+import os
+from typing import Optional, cast, Tuple
+
+from langfuse.api import MediaContentType
+from langfuse.types import ParsedMediaReference
+
+
+class LangfuseMedia:
+    """A class for wrapping media objects for upload to Langfuse.
+
+    This class handles the preparation and formatting of media content for Langfuse,
+    supporting base64 data URIs, raw content bytes, and file paths.
+
+    Args:
+        obj (Optional[object]): The source object to be wrapped. Can be accessed via the `obj` attribute.
+        base64_data_uri (Optional[str]): A base64-encoded data URI containing the media content
+            and content type (e.g., "data:image/jpeg;base64,/9j/4AAQ...").
+        content_type (Optional[str]): The MIME type of the media content when providing raw bytes.
+        content_bytes (Optional[bytes]): Raw bytes of the media content.
+        file_path (Optional[str]): The path to the file containing the media content. For relative paths,
+            the current working directory is used.
+
+    Note:
+        If neither base64_data_uri, nor the combination of content_bytes and
+        content_type, nor a readable file_path is provided, an error is logged
+        and the wrapper is initialized without content; no exception is raised.
+    """
+
+    obj: object
+
+    _log = logging.getLogger(__name__)
+    _content_bytes: Optional[bytes]
+    _content_type: Optional[MediaContentType]
+    _source: Optional[str]
+    _media_id: Optional[str]
+
+    def __init__(
+        self,
+        *,
+        obj: Optional[object] = None,
+        base64_data_uri: Optional[str] = None,
+        content_type: Optional[MediaContentType] = None,
+        content_bytes: Optional[bytes] = None,
+        file_path: Optional[str] = None,
+    ):
+        """Initialize a LangfuseMedia object.
+
+        Args:
+            obj: The object to wrap.
+
+            base64_data_uri: A base64-encoded data URI containing the media content
+                and content type (e.g., "data:image/jpeg;base64,/9j/4AAQ...").
+            content_type: The MIME type of the media content when providing raw bytes or reading from a file.
+            content_bytes: Raw bytes of the media content.
+            file_path: The path to the file containing the media content. For relative paths,
+                the current working directory is used.
+ """ + self.obj = obj + self._media_id = None + + if base64_data_uri is not None: + parsed_data = self._parse_base64_data_uri(base64_data_uri) + self._content_bytes, self._content_type = parsed_data + self._source = "base64_data_uri" + + elif content_bytes is not None and content_type is not None: + self._content_type = content_type + self._content_bytes = content_bytes + self._source = "bytes" + elif ( + file_path is not None + and content_type is not None + and os.path.exists(file_path) + ): + self._content_bytes = self._read_file(file_path) + self._content_type = content_type if self._content_bytes else None + self._source = "file" if self._content_bytes else None + else: + self._log.error( + "base64_data_uri, or content_bytes and content_type, or file_path must be provided to LangfuseMedia" + ) + + self._content_bytes = None + self._content_type = None + self._source = None + + def _read_file(self, file_path: str) -> Optional[bytes]: + try: + with open(file_path, "rb") as file: + return file.read() + except Exception as e: + self._log.error(f"Error reading file at path {file_path}", exc_info=e) + + return None + + @property + def _content_length(self) -> Optional[int]: + return len(self._content_bytes) if self._content_bytes else None + + @property + def _content_sha256_hash(self) -> Optional[str]: + if self._content_bytes is None: + return None + + sha256_hash_bytes = hashlib.sha256(self._content_bytes).digest() + + return base64.b64encode(sha256_hash_bytes).decode("utf-8") + + @property + def _reference_string(self) -> Optional[str]: + if self._content_type is None or self._source is None or self._media_id is None: + return None + + return f"@@@langfuseMedia:type={self._content_type}|id={self._media_id}|source={self._source}@@@" + + @staticmethod + def parse_reference_string(reference_string: str) -> ParsedMediaReference: + """Parse a media reference string into a ParsedMediaReference. + + Example reference string: + "@@@langfuseMedia:type=image/jpeg|id=some-uuid|source=base64_data_uri@@@" + + Args: + reference_string: The reference string to parse. + + Returns: + A TypedDict with the media_id, source, and content_type. + + Raises: + ValueError: If the reference string is empty or not a string. + ValueError: If the reference string does not start with "@@@langfuseMedia:type=". + ValueError: If the reference string does not end with "@@@". + ValueError: If the reference string is missing required fields. 
+ """ + if not reference_string: + raise ValueError("Reference string is empty") + + if not isinstance(reference_string, str): + raise ValueError("Reference string is not a string") + + if not reference_string.startswith("@@@langfuseMedia:type="): + raise ValueError( + "Reference string does not start with '@@@langfuseMedia:type='" + ) + + if not reference_string.endswith("@@@"): + raise ValueError("Reference string does not end with '@@@'") + + content = reference_string[len("@@@langfuseMedia:") :].rstrip("@@@") + + # Split into key-value pairs + pairs = content.split("|") + parsed_data = {} + + for pair in pairs: + key, value = pair.split("=", 1) + parsed_data[key] = value + + # Verify all required fields are present + if not all(key in parsed_data for key in ["type", "id", "source"]): + raise ValueError("Missing required fields in reference string") + + return ParsedMediaReference( + media_id=parsed_data["id"], + source=parsed_data["source"], + content_type=parsed_data["type"], + ) + + def _parse_base64_data_uri( + self, data: str + ) -> Tuple[Optional[bytes], Optional[MediaContentType]]: + # Example data URI: data:image/jpeg;base64,/9j/4AAQ... + try: + if not data or not isinstance(data, str): + raise ValueError("Data URI is not a string") + + if not data.startswith("data:"): + raise ValueError("Data URI does not start with 'data:'") + + header, actual_data = data[5:].split(",", 1) + if not header or not actual_data: + raise ValueError("Invalid URI") + + # Split header into parts and check for base64 + header_parts = header.split(";") + if "base64" not in header_parts: + raise ValueError("Data is not base64 encoded") + + # Content type is the first part + content_type = header_parts[0] + if not content_type: + raise ValueError("Content type is empty") + + return base64.b64decode(actual_data), cast(MediaContentType, content_type) + + except Exception as e: + self._log.error("Error parsing base64 data URI", exc_info=e) + + return None, None diff --git a/langfuse/openai.py b/langfuse/openai.py index 1cb919300..da449dbb2 100644 --- a/langfuse/openai.py +++ b/langfuse/openai.py @@ -17,13 +17,13 @@ See docs for more details: https://langfuse.com/docs/integrations/openai """ -import copy import logging import types from collections import defaultdict from dataclasses import dataclass from inspect import isclass -from typing import List, Optional +from typing import Optional + import openai.resources from openai._types import NotGiven @@ -34,6 +34,7 @@ from langfuse import Langfuse from langfuse.client import StatefulGenerationClient from langfuse.decorators import langfuse_context +from langfuse.media import LangfuseMedia from langfuse.utils import _get_timestamp from langfuse.utils.langfuse_singleton import LangfuseSingleton @@ -199,13 +200,52 @@ def _extract_chat_prompt(kwargs: any): # uf user provided functions, we need to send these together with messages to langfuse prompt.update( { - "messages": _filter_image_data(kwargs.get("messages", [])), + "messages": [ + _process_message(message) for message in kwargs.get("messages", []) + ], } ) return prompt else: # vanilla case, only send messages in openai format to langfuse - return _filter_image_data(kwargs.get("messages", [])) + return [_process_message(message) for message in kwargs.get("messages", [])] + + +def _process_message(message): + if not isinstance(message, dict): + return message + + processed_message = {**message} + + content = processed_message.get("content", None) + if not isinstance(content, list): + return processed_message 
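Before the OpenAI integration changes continue below, a usage sketch for the media.py module added above. The file path and media ID are illustrative; the sample data URI is the one used by tests/test_media.py later in this diff:

from langfuse.media import LangfuseMedia

# From a file on disk (static/puton.jpg also ships with the tests in this diff).
media_from_file = LangfuseMedia(file_path="static/puton.jpg", content_type="image/jpeg")

# From raw bytes.
with open("static/puton.jpg", "rb") as f:
    media_from_bytes = LangfuseMedia(content_bytes=f.read(), content_type="image/jpeg")

# From a base64 data URI; the content type is parsed out of the URI header.
media_from_uri = LangfuseMedia(
    base64_data_uri="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4QBARXhpZgAA"
)

# Reference strings are produced only after the backend assigns a media ID
# during upload; parse_reference_string is the inverse operation.
parsed = LangfuseMedia.parse_reference_string(
    "@@@langfuseMedia:type=image/jpeg|id=some-uuid|source=bytes@@@"
)
assert parsed["media_id"] == "some-uuid"
assert parsed["source"] == "bytes"
assert parsed["content_type"] == "image/jpeg"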
+
+    processed_content = []
+
+    for content_part in content:
+        if content_part.get("type") == "input_audio":
+            audio_base64 = content_part.get("input_audio", {}).get("data", None)
+            format = content_part.get("input_audio", {}).get("format", "wav")
+
+            if audio_base64 is not None:
+                base64_data_uri = f"data:audio/{format};base64,{audio_base64}"
+
+                processed_content.append(
+                    {
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": LangfuseMedia(base64_data_uri=base64_data_uri),
+                            "format": format,
+                        },
+                    }
+                )
+        else:
+            processed_content.append(content_part)
+
+    processed_message["content"] = processed_content
+
+    return processed_message
 
 
 def _extract_chat_response(kwargs: any):
@@ -214,17 +254,30 @@ def _extract_chat_response(kwargs: any):
         "role": kwargs.get("role", None),
     }
 
+    audio = None
+
     if kwargs.get("function_call") is not None:
         response.update({"function_call": kwargs["function_call"]})
 
     if kwargs.get("tool_calls") is not None:
         response.update({"tool_calls": kwargs["tool_calls"]})
 
+    if kwargs.get("audio") is not None:
+        audio = kwargs["audio"].__dict__
+
+        if "data" in audio and audio["data"] is not None:
+            base64_data_uri = f"data:audio/{audio.get('format', 'wav')};base64,{audio.get('data', None)}"
+            audio["data"] = LangfuseMedia(base64_data_uri=base64_data_uri)
+
     response.update(
         {
             "content": kwargs.get("content", None),
         }
     )
+
+    if audio is not None:
+        response.update({"audio": audio})
+
     return response
@@ -740,32 +793,6 @@ def auth_check():
     return modifier._langfuse.auth_check()
 
 
-def _filter_image_data(messages: List[dict]):
-    """https://platform.openai.com/docs/guides/vision?lang=python
-
-    The messages array remains the same, but the 'image_url' is removed from the 'content' array.
-    It should only be removed if the value starts with 'data:image/jpeg;base64,'
-
-    """
-    output_messages = copy.deepcopy(messages)
-
-    for message in output_messages:
-        content = (
-            message.get("content", None)
-            if isinstance(message, dict)
-            else getattr(message, "content", None)
-        )
-
-        if content is not None:
-            for index, item in enumerate(content):
-                if isinstance(item, dict) and item.get("image_url", None) is not None:
-                    url = item["image_url"]["url"]
-                    if url.startswith("data:image/"):
-                        del content[index]["image_url"]
-
-    return output_messages
-
-
 class LangfuseResponseGeneratorSync:
     def __init__(
         self,
diff --git a/langfuse/serializer.py b/langfuse/serializer.py
index 4536ff97a..bca9013ed 100644
--- a/langfuse/serializer.py
+++ b/langfuse/serializer.py
@@ -1,18 +1,21 @@
 """@private"""
 
+import enum
 from asyncio import Queue
+from collections.abc import Sequence
+from dataclasses import asdict, is_dataclass
 from datetime import date, datetime
-from dataclasses import is_dataclass, asdict
-import enum
 from json import JSONEncoder
+from logging import getLogger
+from pathlib import Path
 from typing import Any
 from uuid import UUID
-from collections.abc import Sequence
 
-from langfuse.api.core import serialize_datetime, pydantic_utilities
-from pathlib import Path
-from logging import getLogger
+
 from pydantic import BaseModel
 
+from langfuse.api.core import pydantic_utilities, serialize_datetime
+from langfuse.media import LangfuseMedia
+
 # Attempt to import Serializable
 try:
     from langchain.load.serializable import Serializable
@@ -40,6 +43,12 @@ def default(self, obj: Any):
             # Timezone-awareness check
             return serialize_datetime(obj)
 
+        if isinstance(obj, LangfuseMedia):
+            return (
+                obj._reference_string
+                or f"<LangfuseMedia of type {obj._content_type} with unresolved media reference>"
+            )
+
         # Check if numpy is available and if the object is a numpy scalar
         # If so,
convert it to a Python scalar using the item() method if np is not None and isinstance(obj, np.generic): diff --git a/langfuse/types/__init__.py b/langfuse/types/__init__.py index 896557489..1cef199f7 100644 --- a/langfuse/types/__init__.py +++ b/langfuse/types/__init__.py @@ -1,9 +1,11 @@ """@private""" from datetime import datetime -from langfuse.client import PromptClient, ModelUsage, MapValue -from typing import Any, List, Optional, TypedDict, Literal, Dict, Union, Protocol +from typing import Any, Dict, List, Literal, Optional, Protocol, TypedDict, Union + from pydantic import BaseModel +from langfuse.api import MediaContentType +from langfuse.model import MapValue, ModelUsage, PromptClient SpanLevel = Literal["DEBUG", "DEFAULT", "WARNING", "ERROR"] @@ -46,3 +48,17 @@ class MaskFunction(Protocol): """ def __call__(self, *, data: Any) -> Any: ... + + +class ParsedMediaReference(TypedDict): + """A parsed media reference. + + Attributes: + media_id: The media ID. + source: The original source of the media, e.g. a file path, bytes, base64 data URI, etc. + content_type: The content type of the media. + """ + + media_id: str + source: str + content_type: MediaContentType diff --git a/poetry.lock b/poetry.lock index 5b11c6aa1..76de20c89 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -3058,13 +3058,13 @@ sympy = "*" [[package]] name = "openai" -version = "1.45.0" +version = "1.54.3" description = "The official Python library for the openai API" optional = false -python-versions = ">=3.7.1" +python-versions = ">=3.8" files = [ - {file = "openai-1.45.0-py3-none-any.whl", hash = "sha256:2f1f7b7cf90f038a9f1c24f0d26c0f1790c102ec5acd07ffd70a9b7feac1ff4e"}, - {file = "openai-1.45.0.tar.gz", hash = "sha256:731207d10637335413aa3c0955f8f8df30d7636a4a0f9c381f2209d32cf8de97"}, + {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"}, + {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"}, ] [package.dependencies] diff --git a/static/bitcoin.pdf b/static/bitcoin.pdf new file mode 100644 index 000000000..1e19b739f Binary files /dev/null and b/static/bitcoin.pdf differ diff --git a/static/joke_prompt.wav b/static/joke_prompt.wav new file mode 100644 index 000000000..3923b1b9e Binary files /dev/null and b/static/joke_prompt.wav differ diff --git a/static/puton.jpg b/static/puton.jpg new file mode 100644 index 000000000..bf15cbef9 Binary files /dev/null and b/static/puton.jpg differ diff --git a/tests/test_core_sdk.py b/tests/test_core_sdk.py index 1d17e5e25..46ecee01f 100644 --- a/tests/test_core_sdk.py +++ b/tests/test_core_sdk.py @@ -3,6 +3,9 @@ from asyncio import gather from datetime import datetime, timedelta, timezone +import pytest + +from langfuse import Langfuse from langfuse.client import ( FetchObservationResponse, FetchObservationsResponse, @@ -11,11 +14,6 @@ FetchTracesResponse, ) from langfuse.utils import _get_timestamp - - -import pytest - -from langfuse import Langfuse from tests.api_wrapper import LangfuseAPI from tests.utils import ( CompletionUsage, @@ -61,7 +59,7 @@ def test_flush(): langfuse.flush() # Make sure that the client queue is empty after flushing - assert langfuse.task_manager._queue.empty() + assert 
langfuse.task_manager._ingestion_queue.empty() def test_shutdown(): @@ -76,7 +74,7 @@ def test_shutdown(): # we expect two things after shutdown: # 1. client queue is empty # 2. consumer thread has stopped - assert langfuse.task_manager._queue.empty() + assert langfuse.task_manager._ingestion_queue.empty() def test_invalid_score_data_does_not_raise_exception(): @@ -89,7 +87,7 @@ def test_invalid_score_data_does_not_raise_exception(): ) langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 score_id = create_uuid() @@ -102,7 +100,7 @@ def test_invalid_score_data_does_not_raise_exception(): ) langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 def test_create_numeric_score(): @@ -116,7 +114,7 @@ def test_create_numeric_score(): ) langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 score_id = create_uuid() @@ -131,7 +129,7 @@ def test_create_numeric_score(): langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 trace = api_wrapper.get_trace(trace.id) @@ -152,7 +150,7 @@ def test_create_boolean_score(): ) langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 score_id = create_uuid() @@ -168,7 +166,7 @@ def test_create_boolean_score(): langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 trace = api_wrapper.get_trace(trace.id) @@ -189,7 +187,7 @@ def test_create_categorical_score(): ) langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 score_id = create_uuid() @@ -204,7 +202,7 @@ def test_create_categorical_score(): langfuse.flush() - assert langfuse.task_manager._queue.qsize() == 0 + assert langfuse.task_manager._ingestion_queue.qsize() == 0 trace = api_wrapper.get_trace(trace.id) diff --git a/tests/test_decorators.py b/tests/test_decorators.py index 4714fd178..428d3f50e 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -1,15 +1,17 @@ import asyncio -from contextvars import ContextVar from collections import defaultdict from concurrent.futures import ThreadPoolExecutor -import pytest +from contextvars import ContextVar +from typing import Optional -from langchain_community.chat_models import ChatOpenAI +import pytest from langchain.prompts import ChatPromptTemplate -from langfuse.openai import AsyncOpenAI +from langchain_community.chat_models import ChatOpenAI + from langfuse.decorators import langfuse_context, observe +from langfuse.media import LangfuseMedia +from langfuse.openai import AsyncOpenAI from tests.utils import create_uuid, get_api, get_llama_index_index -from typing import Optional mock_metadata = "mock_metadata" mock_deep_metadata = "mock_deep_metadata" @@ -1404,6 +1406,7 @@ def test_threadpool_executor(): mock_parent_observation_id = create_uuid() from concurrent.futures import ThreadPoolExecutor, as_completed + from langfuse.decorators import langfuse_context, observe @observe() @@ -1457,3 +1460,39 @@ def main(): if o.parent_observation_id == mock_parent_observation_id ] assert len(child_observations) == 2 + + +def test_media(): + mock_trace_id = create_uuid() + + with open("static/bitcoin.pdf", "rb") as pdf_file: + pdf_bytes = 
pdf_file.read() + + media = LangfuseMedia(content_bytes=pdf_bytes, content_type="application/pdf") + + @observe() + def main(): + langfuse_context.update_current_trace( + metadata={ + "context": { + "nested": media, + }, + }, + ) + + main(langfuse_observation_id=mock_trace_id) + + langfuse_context.flush() + + trace_data = get_api().trace.get(mock_trace_id) + + assert ( + "@@@langfuseMedia:type=application/pdf|id=" + in trace_data.metadata["context"]["nested"] + ) + parsed_reference_string = LangfuseMedia.parse_reference_string( + trace_data.metadata["context"]["nested"] + ) + assert parsed_reference_string["content_type"] == "application/pdf" + assert parsed_reference_string["media_id"] is not None + assert parsed_reference_string["source"] == "bytes" diff --git a/tests/test_langchain.py b/tests/test_langchain.py index 9f4f9093a..3ed864fe6 100644 --- a/tests/test_langchain.py +++ b/tests/test_langchain.py @@ -2,42 +2,41 @@ import random import string import time - -from typing import Any, List, Mapping, Optional, Dict +from typing import Any, Dict, List, Mapping, Optional import pytest -from langchain_community.llms.anthropic import Anthropic -from langchain_community.llms.huggingface_hub import HuggingFaceHub from langchain.agents import AgentType, initialize_agent -from langchain_community.agent_toolkits.load_tools import load_tools from langchain.chains import ( ConversationalRetrievalChain, + ConversationChain, LLMChain, RetrievalQA, SimpleSequentialChain, - ConversationChain, ) -from langchain_core.tools import StructuredTool -from langchain_core.runnables.base import RunnableLambda from langchain.chains.openai_functions import create_openai_fn_chain from langchain.chains.summarize import load_summarize_chain -from langchain_community.document_loaders import TextLoader -from langchain_community.embeddings import OpenAIEmbeddings -from langchain_openai import OpenAI, AzureChatOpenAI, ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.prompts import ChatPromptTemplate, PromptTemplate -from langchain.schema import Document +from langchain.schema import Document, HumanMessage, SystemMessage from langchain.text_splitter import CharacterTextSplitter +from langchain_community.agent_toolkits.load_tools import load_tools +from langchain_community.document_loaders import TextLoader +from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.llms.anthropic import Anthropic +from langchain_community.llms.huggingface_hub import HuggingFaceHub from langchain_community.vectorstores import Chroma +from langchain_core.callbacks.manager import CallbackManagerForLLMRun +from langchain_core.language_models.llms import LLM +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables.base import RunnableLambda +from langchain_core.tools import StructuredTool +from langchain_openai import AzureChatOpenAI, ChatOpenAI, OpenAI from pydantic.v1 import BaseModel, Field -from langchain.schema import HumanMessage, SystemMessage + from langfuse.callback import CallbackHandler from langfuse.client import Langfuse from tests.api_wrapper import LangfuseAPI -from tests.utils import create_uuid, get_api -from langchain_core.callbacks.manager import CallbackManagerForLLMRun -from langchain_core.language_models.llms import LLM -from langchain_core.output_parsers import StrOutputParser +from tests.utils import create_uuid, encode_file_to_base64, get_api def test_callback_init(): @@ -225,6 +224,7 @@ def 
test_callback_generated_from_lcel_chain():
     )
 
     langfuse.flush()
+    handler.flush()
 
     trace_id = handler.get_trace_id()
     trace = api.trace.get(trace_id)
@@ -1760,7 +1760,7 @@ def test_disabled_langfuse():
         },
     )
 
-    assert handler.langfuse.task_manager._queue.empty()
+    assert handler.langfuse.task_manager._ingestion_queue.empty()
 
     handler.flush()
@@ -2211,3 +2211,39 @@ def _generate_random_dict(n: int, key_length: int = 8) -> Dict[str, Any]:
     print(f"Full execution took {duration_full}ms")
 
     assert duration_full > 1000, "Full execution should take longer than 1 second"
+
+
+def test_multimodal():
+    api = get_api()
+    handler = CallbackHandler()
+    model = ChatOpenAI(model="gpt-4o-mini")
+
+    image_data = encode_file_to_base64("static/puton.jpg")
+
+    message = HumanMessage(
+        content=[
+            {"type": "text", "text": "What's in this image?"},
+            {
+                "type": "image_url",
+                "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
+            },
+        ],
+    )
+
+    response = model.invoke([message], config={"callbacks": [handler]})
+
+    print(response.content)
+
+    handler.flush()
+
+    trace = api.trace.get(handler.get_trace_id())
+
+    assert len(trace.observations) == 1
+    assert trace.observations[0].type == "GENERATION"
+
+    print(trace.observations[0].input)
+
+    assert (
+        "@@@langfuseMedia:type=image/jpeg|id="
+        in trace.observations[0].input[0]["content"][1]["image_url"]["url"]
+    )
diff --git a/tests/test_llama_index.py b/tests/test_llama_index.py
index 1fc5547cf..f3ccadc37 100644
--- a/tests/test_llama_index.py
+++ b/tests/test_llama_index.py
@@ -1,16 +1,12 @@
 import pytest
-from llama_index.core import (
-    Settings,
-    PromptTemplate,
-)
+from llama_index.core import PromptTemplate, Settings
 from llama_index.core.callbacks import CallbackManager
-from llama_index.llms.openai import OpenAI
-from llama_index.llms.anthropic import Anthropic
 from llama_index.core.query_pipeline import QueryPipeline
+from llama_index.llms.anthropic import Anthropic
+from llama_index.llms.openai import OpenAI
 
-from langfuse.llama_index import LlamaIndexCallbackHandler
 from langfuse.client import Langfuse
-
+from langfuse.llama_index import LlamaIndexCallbackHandler
 from tests.utils import create_uuid, get_api, get_llama_index_index
 
 
@@ -540,7 +536,7 @@ def test_disabled_langfuse():
     trace_id = callback.trace.id
     assert trace_id is not None
 
-    assert callback.langfuse.task_manager._queue.empty()
+    assert callback.langfuse.task_manager._ingestion_queue.empty()
 
     callback.flush()
diff --git a/tests/test_logger.py b/tests/test_logger.py
index 6d7700818..0c5d78b24 100644
--- a/tests/test_logger.py
+++ b/tests/test_logger.py
@@ -18,7 +18,6 @@ def test_via_env():
     langfuse = Langfuse()
 
     assert langfuse.log.level == 10
-    assert langfuse.task_manager._log.level == 10
 
     os.environ.pop("LANGFUSE_DEBUG")
 
@@ -30,20 +29,17 @@ def test_via_env_callback():
 
     assert callback.log.level == 10
     assert callback.langfuse.log.level == 10
-    assert callback.langfuse.task_manager._log.level == 10
 
     os.environ.pop("LANGFUSE_DEBUG")
 
 
 def test_debug_langfuse():
     langfuse = Langfuse(debug=True)
     assert langfuse.log.level == 10
-    assert langfuse.task_manager._log.level == 10
 
 
 def test_default_langfuse():
     langfuse = Langfuse()
     assert langfuse.log.level == 30
-    assert langfuse.task_manager._log.level == 30
 
 
 def test_default_langfuse_callback():
@@ -68,7 +64,6 @@ def test_default_langfuse_trace_callback():
     assert callback.log.level == 30
     assert callback.log.level == 30
     assert callback.trace.log.level == 30
-    assert callback.trace.task_manager._log.level == 30
 
 
 def test_debug_langfuse_trace_callback():
@@ -79,4 +74,3 @@ def test_debug_langfuse_trace_callback():
     assert callback.log.level == 10
     assert callback.log.level == 10
     assert callback.trace.log.level == 10
-    assert callback.trace.task_manager._log.level == 10
diff --git a/tests/test_media.py b/tests/test_media.py
new file mode 100644
index 000000000..181ec4775
--- /dev/null
+++ b/tests/test_media.py
@@ -0,0 +1,106 @@
+import base64
+
+import pytest
+
+from langfuse.media import LangfuseMedia
+
+# Test data
+SAMPLE_JPEG_BYTES = b"\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x01\x00H\x00H\x00\x00"
+SAMPLE_BASE64_DATA_URI = (
+    "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4QBARXhpZgAA"
+)
+
+
+def test_init_with_base64_data_uri():
+    media = LangfuseMedia(base64_data_uri=SAMPLE_BASE64_DATA_URI)
+    assert media._source == "base64_data_uri"
+    assert media._content_type == "image/jpeg"
+    assert media._content_bytes is not None
+
+
+def test_init_with_content_bytes():
+    media = LangfuseMedia(content_bytes=SAMPLE_JPEG_BYTES, content_type="image/jpeg")
+    assert media._source == "bytes"
+    assert media._content_type == "image/jpeg"
+    assert media._content_bytes == SAMPLE_JPEG_BYTES
+
+
+def test_init_with_invalid_input():
+    # LangfuseMedia logs error but doesn't raise ValueError when initialized without required params
+    media = LangfuseMedia()
+    assert media._source is None
+    assert media._content_type is None
+    assert media._content_bytes is None
+
+    media = LangfuseMedia(content_bytes=SAMPLE_JPEG_BYTES)  # Missing content_type
+    assert media._source is None
+    assert media._content_type is None
+    assert media._content_bytes is None
+
+    media = LangfuseMedia(content_type="image/jpeg")  # Missing content_bytes
+    assert media._source is None
+    assert media._content_type is None
+    assert media._content_bytes is None
+
+
+def test_content_length():
+    media = LangfuseMedia(content_bytes=SAMPLE_JPEG_BYTES, content_type="image/jpeg")
+    assert media._content_length == len(SAMPLE_JPEG_BYTES)
+
+
+def test_content_sha256_hash():
+    media = LangfuseMedia(content_bytes=SAMPLE_JPEG_BYTES, content_type="image/jpeg")
+    assert media._content_sha256_hash is not None
+    # Hash should be base64 encoded
+    assert base64.b64decode(media._content_sha256_hash)
+
+
+def test_reference_string():
+    media = LangfuseMedia(content_bytes=SAMPLE_JPEG_BYTES, content_type="image/jpeg")
+    # Reference string should be None initially as media_id is not set
+    assert media._reference_string is None
+
+    # Set media_id
+    media._media_id = "test-id"
+    reference = media._reference_string
+    assert reference is not None
+    assert "test-id" in reference
+    assert "image/jpeg" in reference
+    assert "bytes" in reference
+
+
+def test_parse_reference_string():
+    valid_ref = "@@@langfuseMedia:type=image/jpeg|id=test-id|source=base64_data_uri@@@"
+    result = LangfuseMedia.parse_reference_string(valid_ref)
+
+    assert result["media_id"] == "test-id"
+    assert result["content_type"] == "image/jpeg"
+    assert result["source"] == "base64_data_uri"
+
+
+def test_parse_invalid_reference_string():
+    with pytest.raises(ValueError):
+        LangfuseMedia.parse_reference_string("")
+
+    with pytest.raises(ValueError):
+        LangfuseMedia.parse_reference_string("invalid")
+
+    with pytest.raises(ValueError):
+        LangfuseMedia.parse_reference_string(
+            "@@@langfuseMedia:type=image/jpeg@@@"
+        )  # Missing fields
+
+
+def test_file_handling():
+    file_path = "static/puton.jpg"
+
+    media = LangfuseMedia(file_path=file_path, content_type="image/jpeg")
+    assert media._source == "file"
+    assert media._content_bytes is not None
+    assert media._content_type == "image/jpeg"
+
+
+def test_nonexistent_file():
+    media = LangfuseMedia(file_path="nonexistent.jpg")
+
+    assert media._source is None
+    assert media._content_bytes is None
+    assert media._content_type is None
diff --git a/tests/test_openai.py b/tests/test_openai.py
index 62de030d8..29752be94 100644
--- a/tests/test_openai.py
+++ b/tests/test_openai.py
@@ -1,8 +1,9 @@
 import os
 
 import pytest
-from pydantic import BaseModel
 from openai import APIConnectionError
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from pydantic import BaseModel
 
 from langfuse.client import Langfuse
 from langfuse.openai import (
@@ -10,13 +11,9 @@
     AsyncOpenAI,
     AzureOpenAI,
     _is_openai_v1,
-    _filter_image_data,
     openai,
 )
-from openai.types.chat.chat_completion import ChatCompletionMessage
-
-from tests.utils import create_uuid, get_api
-
+from tests.utils import create_uuid, encode_file_to_base64, get_api
 
 chat_func = (
     openai.chat.completions.create if _is_openai_v1() else openai.ChatCompletion.create
@@ -63,6 +60,7 @@ def test_openai_chat_completion():
     assert generation.data[0].input == [
         {
             "content": "You are an expert mathematician",
+            "audio": None,
             "function_call": None,
             "refusal": None,
             "role": "assistant",
@@ -92,6 +90,7 @@ def test_openai_chat_completion():
     assert trace.input == [
         {
             "content": "You are an expert mathematician",
+            "audio": None,
             "function_call": None,
             "refusal": None,
             "role": "assistant",
@@ -1171,127 +1170,6 @@ async def test_async_azure():
     assert generation.data[0].level == "ERROR"
 
 
-def test_image_data_filtered():
-    api = get_api()
-    generation_name = create_uuid()
-
-    openai.chat.completions.create(
-        name=generation_name,
-        model="gpt-4-vision-preview",
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": "What is in this image?"},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/wAALCAABAAEBAREA/8QAFAABAAAAAAAAAAAAAAAAAAAACf/EABQQAQAAAAAAAAAAAAAAAAAAAAD/2gAIAQEAAD8AKp//2Q=="
-                        },
-                    },
-                ],
-            }
-        ],
-        max_tokens=300,
-    )
-
-    generation = api.observations.get_many(name=generation_name, type="GENERATION")
-
-    assert len(generation.data) == 1
-    assert "data:image/jpeg;base64" not in generation.data[0].input
-
-
-def test_image_data_filtered_png():
-    api = get_api()
-    generation_name = create_uuid()
-
-    openai.chat.completions.create(
-        name=generation_name,
-        model="gpt-4-vision-preview",
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": "What is in this image?"},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": "data:image/png;base64,/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/wAALCAABAAEBAREA/8QAFAABAAAAAAAAAAAAAAAAAAAACf/EABQQAQAAAAAAAAAAAAAAAAAAAAD/2gAIAQEAAD8AKp//2Q=="
-                        },
-                    },
-                ],
-            }
-        ],
-        max_tokens=300,
-    )
-
-    generation = api.observations.get_many(name=generation_name, type="GENERATION")
-
-    assert len(generation.data) == 1
-    assert "data:image/jpeg;base64" not in generation.data[0].input
-
-
-def test_image_filter_base64():
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {"type": "text", "text": "What’s in this image?"},
-                {
-                    "type": "image_url",
-                    "image_url": {"url": "data:image/jpeg;base64,base64_image"},
-                },
-            ],
-        }
-    ]
-    result = _filter_image_data(messages)
-
-    print(result)
-
-    assert result == [
-        {
-            "role": "user",
-            "content": [
-                {"type": "text", "text": "What’s in this image?"},
-                {"type": "image_url"},
-            ],
-        }
-    ]
-
-
-def test_image_filter_url():
-    result = _filter_image_data(
-        [
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": "What’s in this image?"},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
-                        },
-                    },
-                ],
-            }
-        ]
-    )
-    assert result == [
-        {
-            "role": "user",
-            "content": [
-                {"type": "text", "text": "What’s in this image?"},
-                {
-                    "type": "image_url",
-                    "image_url": {
-                        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
-                    },
-                },
-            ],
-        }
-    ]
-
-
 def test_openai_with_existing_trace_id():
     langfuse = Langfuse()
     trace = langfuse.trace(
@@ -1380,6 +1258,7 @@ def test_disabled_langfuse():
     openai.langfuse_enabled = True
 
     import importlib
+
     from langfuse.openai import openai
 
     importlib.reload(openai)
@@ -1483,6 +1362,7 @@ def test_structured_output_response_format_kwarg():
 def test_structured_output_beta_completions_parse():
     from typing import List
+
     from packaging.version import Version
 
     class CalendarEvent(BaseModel):
@@ -1593,3 +1473,111 @@ async def test_close_async_stream():
     assert generation.data[0].completion_start_time is not None
     assert generation.data[0].completion_start_time >= generation.data[0].start_time
     assert generation.data[0].completion_start_time <= generation.data[0].end_time
+
+
+def test_base_64_image_input():
+    api = get_api()
+    client = openai.OpenAI()
+    generation_name = "test_base_64_image_input" + create_uuid()[:8]
+
+    content_path = "static/puton.jpg"
+    content_type = "image/jpeg"
+
+    base64_image = encode_file_to_base64(content_path)
+
+    client.chat.completions.create(
+        name=generation_name,
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "What’s in this image?"},
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:{content_type};base64,{base64_image}"
+                        },
+                    },
+                ],
+            }
+        ],
+        max_tokens=300,
+    )
+
+    openai.flush_langfuse()
+
+    generation = api.observations.get_many(name=generation_name, type="GENERATION")
+
+    assert len(generation.data) != 0
+    assert generation.data[0].name == generation_name
+    assert generation.data[0].input[0]["content"][0]["text"] == "What’s in this image?"
+    assert (
+        f"@@@langfuseMedia:type={content_type}|id="
+        in generation.data[0].input[0]["content"][1]["image_url"]["url"]
+    )
+    assert generation.data[0].type == "GENERATION"
+    assert "gpt-4o-mini" in generation.data[0].model
+    assert generation.data[0].start_time is not None
+    assert generation.data[0].end_time is not None
+    assert generation.data[0].start_time < generation.data[0].end_time
+    assert generation.data[0].usage.input is not None
+    assert generation.data[0].usage.output is not None
+    assert generation.data[0].usage.total is not None
+    assert "dog" in generation.data[0].output["content"]
+
+
+def test_audio_input_and_output():
+    api = get_api()
+    client = openai.OpenAI()
+    generation_name = "test_audio_input_and_output" + create_uuid()[:8]
+
+    content_path = "static/joke_prompt.wav"
+    base64_string = encode_file_to_base64(content_path)
+
+    client.chat.completions.create(
+        name=generation_name,
+        model="gpt-4o-audio-preview",
+        modalities=["text", "audio"],
+        audio={"voice": "alloy", "format": "wav"},
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Do what this recording says."},
+                    {
+                        "type": "input_audio",
+                        "input_audio": {"data": base64_string, "format": "wav"},
+                    },
+                ],
+            },
+        ],
+    )
+
+    openai.flush_langfuse()
+
+    generation = api.observations.get_many(name=generation_name, type="GENERATION")
+
+    assert len(generation.data) != 0
+    assert generation.data[0].name == generation_name
+    assert (
+        generation.data[0].input[0]["content"][0]["text"]
+        == "Do what this recording says."
+    )
+    assert (
+        "@@@langfuseMedia:type=audio/wav|id="
+        in generation.data[0].input[0]["content"][1]["input_audio"]["data"]
+    )
+    assert generation.data[0].type == "GENERATION"
+    assert "gpt-4o-audio-preview" in generation.data[0].model
+    assert generation.data[0].start_time is not None
+    assert generation.data[0].end_time is not None
+    assert generation.data[0].start_time < generation.data[0].end_time
+    assert generation.data[0].usage.input is not None
+    assert generation.data[0].usage.output is not None
+    assert generation.data[0].usage.total is not None
+    print(generation.data[0].output)
+    assert (
+        "@@@langfuseMedia:type=audio/wav|id="
+        in generation.data[0].output["audio"]["data"]
+    )
diff --git a/tests/test_task_manager.py b/tests/test_task_manager.py
index 7c281f0b3..373493670 100644
--- a/tests/test_task_manager.py
+++ b/tests/test_task_manager.py
@@ -2,14 +2,14 @@
 import subprocess
 import threading
 from urllib.parse import urlparse, urlunparse
-import httpx
 
+import httpx
 import pytest
 from pytest_httpserver import HTTPServer
 from werkzeug.wrappers import Request, Response
 
+from langfuse._task_manager.task_manager import TaskManager
 from langfuse.request import LangfuseClient
-from langfuse.task_manager import TaskManager
 
 logging.basicConfig()
 log = logging.getLogger("langfuse")
@@ -58,7 +58,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 10, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=10,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     tm.add_task({"foo": "bar"})
@@ -93,15 +103,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client,
-        10,
-        0.1,
-        3,
-        1,
-        10_000,
-        "test-sdk",
-        "1.0.0",
-        "default",
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=10,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
         enabled=False,
     )
 
@@ -109,7 +121,7 @@ def handler(request: Request):
     tm.add_task({"foo": "bar"})
     tm.add_task({"foo": "bar"})
 
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
     tm.flush()
     assert not request_fired
@@ -133,7 +145,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 10, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=10,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     tm.add_task({"type": "bar", "body": {"trace_id": "trace_123"}})
@@ -166,7 +188,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 10, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=10,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
    )
 
     tm.add_task({"foo": "bar"})
@@ -198,7 +230,17 @@ def add_task_concurrently(tm, func):
     )
 
     tm = TaskManager(
-        langfuse_client, 1, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=1,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
     threads = [
         threading.Thread(
@@ -222,7 +264,7 @@ def test_atexit():
     python_code = """
 import time
 import logging
-from langfuse.task_manager import TaskManager  # assuming task_manager is the module name
+from langfuse._task_manager.task_manager import TaskManager
 from langfuse.request import LangfuseClient
 import httpx
@@ -236,7 +278,7 @@ def test_atexit():
     ]
 )
 print("Adding task manager", TaskManager)
-manager = TaskManager(langfuse_client, 10, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default")
+manager = TaskManager(client=langfuse_client, api_client=None, public_key='pk', flush_at=10, flush_interval=0.1, max_retries=3, threads=1, max_task_queue_size=10_000, sdk_name="test-sdk", sdk_version="1.0.0", sdk_integration="default")
 
 """
@@ -260,7 +302,8 @@ def test_atexit():
 
     print(process.stderr)
 
-    assert "consumer thread 0 joined" in logs
+    assert "MediaUploadConsumer thread 0 joined" in logs
+    assert "IngestionConsumer thread 0 joined" in logs
 
 
 def test_flush(httpserver: HTTPServer):
@@ -289,7 +332,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 1, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=1,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     for _ in range(100):
@@ -298,7 +351,7 @@ def handler(request: Request):
 
     # a race condition. We do our best to load it up though.
     tm.flush()
     # Make sure that the client queue is empty after flushing
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
     assert not failed
@@ -328,7 +381,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 1, 0.1, 3, 5, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=1,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=5,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     for _ in range(100):
@@ -338,12 +401,12 @@ def handler(request: Request):
     # we expect two things after shutdown:
     # 1. client queue is empty
     # 2. consumer thread has stopped
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
-    assert len(tm._consumers) == 5
-    for c in tm._consumers:
+    assert len(tm._ingestion_consumers) == 5
+    for c in tm._ingestion_consumers:
         assert not c.is_alive()
 
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
     assert not failed
@@ -370,7 +433,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 1, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=1,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     tm.add_task({"foo": "bar"})
@@ -382,7 +455,7 @@ def handler(request: Request):
     # a race condition. We do our best to load it up though.
     tm.flush()
     # Make sure that the client queue is empty after flushing
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
     assert not failed
@@ -411,7 +484,17 @@ def handler(request: Request):
     )
 
     tm = TaskManager(
-        langfuse_client, 1, 0.1, 3, 1, 10_000, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=1,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=10_000,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
 
     tm.add_task({"type": "bar", "body": {"trace_id": "trace_123"}})
@@ -428,7 +511,7 @@ def handler(request: Request):
 
     # a race condition. We do our best to load it up though.
     tm.flush()
     # Make sure that the client queue is empty after flushing
-    assert tm._queue.empty()
+    assert tm._ingestion_queue.empty()
 
     assert not failed
     assert count == 2
@@ -439,17 +522,27 @@ def test_truncate_item_in_place(httpserver):
     )
 
     tm = TaskManager(
-        langfuse_client, 10, 0.1, 3, 1, 100, "test-sdk", "1.0.0", "default"
+        client=langfuse_client,
+        api_client=None,
+        public_key="pk",
+        flush_at=10,
+        flush_interval=0.1,
+        max_retries=3,
+        threads=1,
+        max_task_queue_size=100,
+        sdk_name="test-sdk",
+        sdk_version="1.0.0",
+        sdk_integration="default",
     )
-    consumer = tm._consumers[0]
+    consumer = tm._ingestion_consumers[0]
 
     # Item size within limit
     MAX_MSG_SIZE = 100
     small_item = {"body": {"input": "small"}}
     assert (
-        consumer._truncate_item_in_place(item=small_item, max_size=MAX_MSG_SIZE)
+        consumer._truncate_item_in_place(event=small_item, max_size=MAX_MSG_SIZE)
         <= MAX_MSG_SIZE
     )
     assert small_item["body"]["input"] == "small"  # unchanged
@@ -457,7 +550,7 @@ def test_truncate_item_in_place(httpserver):
     # Item size exceeding limit
     large_item = {"body": {"input": "a" * (MAX_MSG_SIZE + 10)}}
     truncated_size = consumer._truncate_item_in_place(
-        item=large_item, max_size=MAX_MSG_SIZE
+        event=large_item, max_size=MAX_MSG_SIZE
     )
 
     assert truncated_size <= MAX_MSG_SIZE
@@ -466,7 +559,7 @@ def test_truncate_item_in_place(httpserver):
     # Logs message if item is truncated
     large_item = {"body": {"input": "a" * (MAX_MSG_SIZE + 10)}}
     truncated_size = consumer._truncate_item_in_place(
-        item=large_item, max_size=MAX_MSG_SIZE, log_message="truncated"
+        event=large_item, max_size=MAX_MSG_SIZE, log_message="truncated"
    )
 
     assert truncated_size <= MAX_MSG_SIZE
@@ -481,7 +574,7 @@ def test_truncate_item_in_place(httpserver):
         }
     }
     truncated_size = consumer._truncate_item_in_place(
-        item=full_item, max_size=MAX_MSG_SIZE
+        event=full_item, max_size=MAX_MSG_SIZE
     )
 
     assert truncated_size <= MAX_MSG_SIZE
@@ -497,7 +590,7 @@ def test_truncate_item_in_place(httpserver):
             "metadata": "c" * 10,
         }
     }
-    consumer._truncate_item_in_place(item=input_largest, max_size=MAX_MSG_SIZE)
+    consumer._truncate_item_in_place(event=input_largest, max_size=MAX_MSG_SIZE)
     assert input_largest["body"]["input"] is None
     assert input_largest["body"]["output"] is not None
     assert input_largest["body"]["metadata"] is not None
@@ -510,7 +603,7 @@ def test_truncate_item_in_place(httpserver):
             "metadata": "c" * 20,
         }
     }
-    consumer._truncate_item_in_place(item=mixed_size, max_size=MAX_MSG_SIZE)
+    consumer._truncate_item_in_place(event=mixed_size, max_size=MAX_MSG_SIZE)
     assert mixed_size["body"]["input"] is not None
     assert mixed_size["body"]["output"] is None
     assert mixed_size["body"]["metadata"] is not None
@@ -523,14 +616,14 @@ def test_truncate_item_in_place(httpserver):
             "metadata": "c" * 50,
         }
     }
-    consumer._truncate_item_in_place(item=very_large, max_size=MAX_MSG_SIZE)
+    consumer._truncate_item_in_place(event=very_large, max_size=MAX_MSG_SIZE)
     assert very_large["body"]["input"] is None
     assert very_large["body"]["output"] is None
     assert very_large["body"]["metadata"] is not None
 
     # Return value
     assert isinstance(
-        consumer._truncate_item_in_place(item=small_item, max_size=MAX_MSG_SIZE), int
+        consumer._truncate_item_in_place(event=small_item, max_size=MAX_MSG_SIZE), int
     )
 
     # JSON serialization
@@ -540,7 +633,7 @@ def test_truncate_item_in_place(httpserver):
         }
     }
     assert (
-        consumer._truncate_item_in_place(item=complex_item, max_size=MAX_MSG_SIZE)
+        consumer._truncate_item_in_place(event=complex_item, max_size=MAX_MSG_SIZE)
         <= MAX_MSG_SIZE
     )
     assert complex_item["body"]["input"] is None
diff --git a/tests/utils.py b/tests/utils.py
index 6c16dd962..583770d3c 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,3 +1,4 @@
+import base64
 import os
 import typing
 from uuid import uuid4
@@ -7,18 +8,17 @@
 except ImportError:
     import pydantic  # type: ignore
 
-from langfuse.api.client import FernLangfuse
-
 from llama_index.core import (
     Settings,
-    VectorStoreIndex,
     SimpleDirectoryReader,
-    load_index_from_storage,
     StorageContext,
+    VectorStoreIndex,
+    load_index_from_storage,
 )
-
 from llama_index.core.callbacks import CallbackManager
 
+from langfuse.api.client import FernLangfuse
+
 
 def create_uuid():
     return str(uuid4())
@@ -106,3 +106,8 @@ def get_llama_index_index(callback, force_rebuild: bool = False):
         index = load_index_from_storage(storage_context)
 
     return index
+
+
+def encode_file_to_base64(image_path) -> str:
+    with open(image_path, "rb") as file:
+        return base64.b64encode(file.read()).decode("utf-8")