From 8f1c1b91853f318276a0014a6424c680ebe58091 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fede=20Fern=C3=A1ndez?= <720923+fedefernandez@users.noreply.github.com> Date: Thu, 9 Nov 2023 13:14:00 +0100 Subject: [PATCH] OpenAI gen client (#529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Raúl Raja Martínez --- openai-client/.gitignore | 1 + openai-client/.openapi-generator-ignore | 41 + openai-client/.openapi-generator/FILES | 167 +++ openai-client/.openapi-generator/VERSION | 1 + openai-client/build.gradle.kts | 161 +++ openai-client/generator/README.md | 25 + .../libraries/multiplatform/api.mustache | 135 ++ openai-client/generator/openai-config.json | 41 + .../src/commonMain/kotlin/FormBuilderOps.kt | 28 + .../functional/openai/apis/AssistantApi.kt | 70 ++ .../functional/openai/apis/AssistantsApi.kt | 1102 +++++++++++++++++ .../xebia/functional/openai/apis/AudioApi.kt | 195 +++ .../xebia/functional/openai/apis/ChatApi.kt | 68 + .../functional/openai/apis/CompletionsApi.kt | 68 + .../xebia/functional/openai/apis/EditsApi.kt | 68 + .../functional/openai/apis/EmbeddingsApi.kt | 68 + .../xebia/functional/openai/apis/FilesApi.kt | 204 +++ .../functional/openai/apis/FineTunesApi.kt | 194 +++ .../functional/openai/apis/FineTuningApi.kt | 205 +++ .../xebia/functional/openai/apis/ImagesApi.kt | 219 ++++ .../xebia/functional/openai/apis/ModelsApi.kt | 124 ++ .../functional/openai/apis/ModerationsApi.kt | 68 + .../functional/openai/auth/ApiKeyAuth.kt | 16 + .../functional/openai/auth/Authentication.kt | 12 + .../functional/openai/auth/HttpBasicAuth.kt | 17 + .../functional/openai/auth/HttpBearerAuth.kt | 14 + .../com/xebia/functional/openai/auth/OAuth.kt | 10 + .../openai/infrastructure/ApiAbstractions.kt | 32 + .../openai/infrastructure/ApiClient.kt | 214 ++++ .../openai/infrastructure/Base64ByteArray.kt | 34 + .../functional/openai/infrastructure/Bytes.kt | 115 ++ .../openai/infrastructure/HttpResponse.kt | 67 + 
.../openai/infrastructure/OctetByteArray.kt | 33 + .../openai/infrastructure/PartConfig.kt | 10 + .../openai/infrastructure/RequestConfig.kt | 15 + .../openai/infrastructure/RequestMethod.kt | 12 + .../openai/models/AssistantFileObject.kt | 46 + .../openai/models/AssistantObject.kt | 82 ++ .../models/AssistantObjectToolsInner.kt | 36 + .../openai/models/AssistantToolsCode.kt | 30 + .../openai/models/AssistantToolsFunction.kt | 34 + .../models/AssistantToolsFunctionFunction.kt | 31 + .../openai/models/AssistantToolsRetrieval.kt | 30 + .../ChatCompletionFunctionCallOption.kt | 24 + .../openai/models/ChatCompletionFunctions.kt | 30 + .../models/ChatCompletionMessageToolCall.kt | 38 + .../ChatCompletionMessageToolCallChunk.kt | 40 + ...tCompletionMessageToolCallChunkFunction.kt | 28 + .../ChatCompletionMessageToolCallFunction.kt | 30 + .../models/ChatCompletionNamedToolChoice.kt | 36 + .../ChatCompletionNamedToolChoiceFunction.kt | 19 + .../ChatCompletionRequestAssistantMessage.kt | 45 + ...tionRequestAssistantMessageFunctionCall.kt | 32 + .../ChatCompletionRequestFunctionMessage.kt | 41 + .../models/ChatCompletionRequestMessage.kt | 53 + .../ChatCompletionRequestSystemMessage.kt | 36 + .../ChatCompletionRequestToolMessage.kt | 40 + .../ChatCompletionRequestUserMessage.kt | 36 + .../models/ChatCompletionResponseMessage.kt | 47 + .../openai/models/ChatCompletionRole.kt | 46 + .../ChatCompletionStreamResponseDelta.kt | 48 + ...mpletionStreamResponseDeltaFunctionCall.kt | 32 + .../openai/models/ChatCompletionTool.kt | 34 + .../models/ChatCompletionToolChoiceOption.kt | 40 + .../models/ChatCompletionToolFunction.kt | 29 + .../openai/models/CompletionUsage.kt | 31 + .../models/CreateAssistantFileRequest.kt | 23 + .../openai/models/CreateAssistantRequest.kt | 51 + .../models/CreateChatCompletionRequest.kt | 121 ++ ...CreateChatCompletionRequestFunctionCall.kt | 28 + ...eateChatCompletionRequestResponseFormat.kt | 42 + .../models/CreateChatCompletionResponse.kt | 60 + 
...reateChatCompletionResponseChoicesInner.kt | 52 + .../CreateChatCompletionStreamResponse.kt | 61 + ...hatCompletionStreamResponseChoicesInner.kt | 52 + .../openai/models/CreateCompletionRequest.kt | 126 ++ .../openai/models/CreateCompletionResponse.kt | 61 + .../CreateCompletionResponseChoicesInner.kt | 50 + ...eCompletionResponseChoicesInnerLogprobs.kt | 28 + .../openai/models/CreateEditRequest.kt | 46 + .../openai/models/CreateEditResponse.kt | 45 + .../models/CreateEditResponseChoicesInner.kt | 49 + .../openai/models/CreateEmbeddingRequest.kt | 49 + .../openai/models/CreateEmbeddingResponse.kt | 42 + .../models/CreateEmbeddingResponseUsage.kt | 27 + .../openai/models/CreateFineTuneRequest.kt | 104 ++ .../CreateFineTuneRequestHyperparameters.kt | 24 + .../models/CreateFineTuningJobRequest.kt | 47 + ...eateFineTuningJobRequestHyperparameters.kt | 34 + .../openai/models/CreateImageRequest.kt | 109 ++ .../openai/models/CreateMessageRequest.kt | 50 + .../openai/models/CreateModerationRequest.kt | 26 + .../openai/models/CreateModerationResponse.kt | 33 + .../CreateModerationResponseResultsInner.kt | 29 + ...oderationResponseResultsInnerCategories.kt | 75 ++ ...ationResponseResultsInnerCategoryScores.kt | 63 + .../openai/models/CreateRunRequest.kt | 45 + .../openai/models/CreateSpeechRequest.kt | 70 ++ .../models/CreateThreadAndRunRequest.kt | 47 + .../CreateThreadAndRunRequestToolsInner.kt | 36 + .../openai/models/CreateThreadRequest.kt | 28 + .../models/CreateTranscriptionResponse.kt | 17 + .../models/CreateTranslationResponse.kt | 15 + .../models/DeleteAssistantFileResponse.kt | 34 + .../openai/models/DeleteAssistantResponse.kt | 30 + .../openai/models/DeleteFileResponse.kt | 30 + .../openai/models/DeleteMessageResponse.kt | 31 + .../openai/models/DeleteModelResponse.kt | 23 + .../openai/models/DeleteThreadResponse.kt | 30 + .../functional/openai/models/Embedding.kt | 43 + .../xebia/functional/openai/models/Error.kt | 25 + 
.../functional/openai/models/ErrorResponse.kt | 14 + .../functional/openai/models/FineTune.kt | 88 ++ .../functional/openai/models/FineTuneEvent.kt | 35 + .../openai/models/FineTuneHyperparams.kt | 56 + .../functional/openai/models/FineTuningJob.kt | 108 ++ .../openai/models/FineTuningJobError.kt | 33 + .../openai/models/FineTuningJobEvent.kt | 44 + .../models/FineTuningJobHyperparameters.kt | 25 + .../openai/models/FunctionObject.kt | 29 + .../xebia/functional/openai/models/Image.kt | 33 + .../openai/models/ImagesResponse.kt | 21 + .../models/ListAssistantFilesResponse.kt | 27 + .../openai/models/ListAssistantsResponse.kt | 27 + .../openai/models/ListFilesResponse.kt | 28 + .../models/ListFineTuneEventsResponse.kt | 28 + .../openai/models/ListFineTunesResponse.kt | 28 + .../models/ListFineTuningJobEventsResponse.kt | 28 + .../openai/models/ListMessageFilesResponse.kt | 27 + .../openai/models/ListMessagesResponse.kt | 27 + .../openai/models/ListModelsResponse.kt | 28 + .../ListPaginatedFineTuningJobsResponse.kt | 30 + .../openai/models/ListRunStepsResponse.kt | 27 + .../openai/models/ListRunsResponse.kt | 27 + .../openai/models/ListThreadsResponse.kt | 27 + .../models/MessageContentImageFileObject.kt | 36 + .../MessageContentImageFileObjectImageFile.kt | 19 + ...ontentTextAnnotationsFileCitationObject.kt | 50 + ...notationsFileCitationObjectFileCitation.kt | 25 + ...ageContentTextAnnotationsFilePathObject.kt | 47 + ...ntTextAnnotationsFilePathObjectFilePath.kt | 19 + .../openai/models/MessageContentTextObject.kt | 36 + .../models/MessageContentTextObjectText.kt | 25 + ...geContentTextObjectTextAnnotationsInner.kt | 49 + .../openai/models/MessageFileObject.kt | 47 + .../functional/openai/models/MessageObject.kt | 90 ++ .../models/MessageObjectContentInner.kt | 37 + .../xebia/functional/openai/models/Model.kt | 46 + .../openai/models/ModifyAssistantRequest.kt | 52 + .../openai/models/ModifyMessageRequest.kt | 23 + .../openai/models/ModifyRunRequest.kt | 23 + 
.../openai/models/ModifyThreadRequest.kt | 23 + .../functional/openai/models/OpenAIFile.kt | 95 ++ .../functional/openai/models/RunObject.kt | 127 ++ .../openai/models/RunObjectLastError.kt | 39 + .../openai/models/RunObjectRequiredAction.kt | 38 + ...unObjectRequiredActionSubmitToolOutputs.kt | 25 + .../RunStepDetailsMessageCreationObject.kt | 38 + ...ilsMessageCreationObjectMessageCreation.kt | 19 + .../RunStepDetailsToolCallsCodeObject.kt | 44 + ...tailsToolCallsCodeObjectCodeInterpreter.kt | 31 + ...lsCodeObjectCodeInterpreterOutputsInner.kt | 43 + ...epDetailsToolCallsCodeOutputImageObject.kt | 36 + ...ailsToolCallsCodeOutputImageObjectImage.kt | 19 + ...tepDetailsToolCallsCodeOutputLogsObject.kt | 38 + .../RunStepDetailsToolCallsFunctionObject.kt | 41 + ...pDetailsToolCallsFunctionObjectFunction.kt | 32 + .../models/RunStepDetailsToolCallsObject.kt | 41 + ...tepDetailsToolCallsObjectToolCallsInner.kt | 52 + .../RunStepDetailsToolCallsRetrievalObject.kt | 41 + .../functional/openai/models/RunStepObject.kt | 115 ++ .../openai/models/RunStepObjectLastError.kt | 39 + .../openai/models/RunStepObjectStepDetails.kt | 46 + .../openai/models/RunToolCallObject.kt | 41 + .../models/RunToolCallObjectFunction.kt | 27 + .../models/SubmitToolOutputsRunRequest.kt | 21 + ...itToolOutputsRunRequestToolOutputsInner.kt | 26 + .../functional/openai/models/ThreadObject.kt | 48 + ...ChatCompletionRequestUserMessageContent.kt | 23 + ...ompletionRequestUserMessageContentImage.kt | 18 + ...letionRequestUserMessageContentImageUrl.kt | 27 + ...CompletionRequestUserMessageContentText.kt | 16 + .../CreateChatCompletionRequestModel.kt | 19 + .../create/CreateChatCompletionRequestStop.kt | 13 + .../create/CreateCompletionRequestModel.kt | 18 + .../create/CreateCompletionRequestPrompt.kt | 22 + .../create/CreateCompletionRequestStop.kt | 16 + .../ext/edit/create/CreateEditRequestModel.kt | 10 + .../create/CreateEmbeddingRequestInput.kt | 22 + .../create/CreateEmbeddingRequestModel.kt | 9 + 
...teFineTuneRequestHyperparametersNEpochs.kt | 25 + .../create/CreateFineTuneRequestModel.kt | 12 + .../FineTuningJobHyperparametersNEpochs.kt | 23 + ...eTuningJobRequestHyperparametersNEpochs.kt | 24 + ...uningJobRequestHyperparametersBatchSize.kt | 24 + ...stHyperparametersLearningRateMultiplier.kt | 25 + ...eTuningJobRequestHyperparametersNEpochs.kt | 24 + .../create/CreateFineTuningJobRequestModel.kt | 11 + .../image/create/CreateImageRequestModel.kt | 10 + .../create/CreateImageEditRequestModel.kt | 9 + .../create/CreateModerationRequestInput.kt | 14 + .../create/CreateModerationRequestModel.kt | 10 + .../speech/create/CreateSpeechRequestModel.kt | 10 + .../create/CreateTranscriptionRequestModel.kt | 13 + settings.gradle.kts | 3 + 205 files changed, 10193 insertions(+) create mode 100644 openai-client/.gitignore create mode 100644 openai-client/.openapi-generator-ignore create mode 100644 openai-client/.openapi-generator/FILES create mode 100644 openai-client/.openapi-generator/VERSION create mode 100644 openai-client/build.gradle.kts create mode 100644 openai-client/generator/README.md create mode 100644 openai-client/generator/libraries/multiplatform/api.mustache create mode 100644 openai-client/generator/openai-config.json create mode 100644 openai-client/src/commonMain/kotlin/FormBuilderOps.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModerationsApi.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/ApiKeyAuth.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/Authentication.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBasicAuth.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBearerAuth.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/OAuth.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiAbstractions.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiClient.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Base64ByteArray.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Bytes.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/HttpResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/OctetByteArray.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/PartConfig.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestConfig.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestMethod.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantFileObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObjectToolsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsCode.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunctionFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsRetrieval.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctionCallOption.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCall.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunk.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunkFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallFunction.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoiceFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessageFunctionCall.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestFunctionMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestSystemMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestToolMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestUserMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseMessage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolChoiceOption.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolFunction.kt 
create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt create mode 
100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteFileResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteMessageResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteModelResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobHyperparameters.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Image.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListModelsResponse.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListPaginatedFineTuningJobsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunStepsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListThreadsResponse.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObjectImageFile.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObjectFileCitation.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObjectFilePath.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectText.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectTextAnnotationsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageFileObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObjectContentInner.kt create 
mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Model.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObject.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObjectImage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputLogsObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObjectFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObjectToolCallsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsRetrievalObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectLastError.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectStepDetails.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObjectFunction.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequest.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequestToolOutputsInner.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ThreadObject.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContent.kt 
create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImage.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImageUrl.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentText.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestStop.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestPrompt.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestStop.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/edit/create/CreateEditRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestInput.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestHyperparametersNEpochs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestModel.kt create mode 100644 
openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobHyperparametersNEpochs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobRequestHyperparametersNEpochs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersBatchSize.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersLearningRateMultiplier.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersNEpochs.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/create/CreateImageRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/edit/create/CreateImageEditRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestInput.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/speech/create/CreateSpeechRequestModel.kt create mode 100644 openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/transcription/create/CreateTranscriptionRequestModel.kt diff --git a/openai-client/.gitignore b/openai-client/.gitignore new file mode 100644 index 000000000..a3b1444d9 --- /dev/null +++ b/openai-client/.gitignore @@ -0,0 +1 @@ +generator/openai-api.yml \ No 
newline at end of file diff --git a/openai-client/.openapi-generator-ignore b/openai-client/.openapi-generator-ignore new file mode 100644 index 000000000..e91d7dcb0 --- /dev/null +++ b/openai-client/.openapi-generator-ignore @@ -0,0 +1,41 @@ +# OpenAPI Generator Ignore +# Generated by openapi-generator https://github.com/openapitools/openapi-generator + +# Use this file to prevent files from being overwritten by the generator. +# The patterns follow closely to .gitignore or .dockerignore. + +# As an example, the C# client generator defines ApiClient.cs. +# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: +#ApiClient.cs + +# You can match any string of characters against a directory, file or extension with a single asterisk (*): +#foo/*/qux +# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux + +# You can recursively match patterns against a directory, file or extension with a double asterisk (**): +#foo/**/qux +# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux + +# You can also negate patterns with an exclamation (!). 
+# For example, you can ignore all files in a docs folder with the file extension .md: +#docs/*.md +# Then explicitly reverse the ignore rule for a single file: +#!docs/README.md + +.gitignore +build.gradle.kts +settings.gradle.kts +gradle/** +gradlew +gradlew.bat +README.md +docs/** +src/*Test/** + +# Unused models +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionFunctionResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionFunctionResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessageContentPart.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessageContentPartText.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessageContentPartImage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessageContentPartImageImageUrl.kt \ No newline at end of file diff --git a/openai-client/.openapi-generator/FILES b/openai-client/.openapi-generator/FILES new file mode 100644 index 000000000..10b9f2eb7 --- /dev/null +++ b/openai-client/.openapi-generator/FILES @@ -0,0 +1,167 @@ +src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/apis/ModerationsApi.kt +src/commonMain/kotlin/com/xebia/functional/openai/auth/ApiKeyAuth.kt +src/commonMain/kotlin/com/xebia/functional/openai/auth/Authentication.kt +src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBasicAuth.kt +src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBearerAuth.kt +src/commonMain/kotlin/com/xebia/functional/openai/auth/OAuth.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiAbstractions.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiClient.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Base64ByteArray.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Bytes.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/HttpResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/OctetByteArray.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/PartConfig.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestConfig.kt +src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestMethod.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantFileObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObjectToolsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsCode.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsRetrieval.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctionCallOption.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCall.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunk.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunkFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoiceFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessageFunctionCall.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestFunctionMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestSystemMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestToolMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestUserMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseMessage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolChoiceOption.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteFileResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteMessageResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteModelResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobHyperparameters.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/Image.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListModelsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListPaginatedFineTuningJobsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunStepsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ListThreadsResponse.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObject.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObjectImageFile.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObjectFileCitation.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObjectFilePath.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectText.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectTextAnnotationsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageFileObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObjectContentInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/Model.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObject.kt 
+src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObjectImage.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputLogsObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObjectFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObjectToolCallsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsRetrievalObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectLastError.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectStepDetails.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObject.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObjectFunction.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequest.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequestToolOutputsInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ThreadObject.kt diff --git a/openai-client/.openapi-generator/VERSION 
b/openai-client/.openapi-generator/VERSION new file mode 100644 index 000000000..73a86b197 --- /dev/null +++ b/openai-client/.openapi-generator/VERSION @@ -0,0 +1 @@ +7.0.1 \ No newline at end of file diff --git a/openai-client/build.gradle.kts b/openai-client/build.gradle.kts new file mode 100644 index 000000000..b154a551d --- /dev/null +++ b/openai-client/build.gradle.kts @@ -0,0 +1,161 @@ +@file:Suppress("DSL_SCOPE_VIOLATION") + +import org.jetbrains.dokka.gradle.DokkaTask + +repositories { mavenCentral() } + +plugins { + base + alias(libs.plugins.kotlin.multiplatform) + alias(libs.plugins.kotest.multiplatform) + alias(libs.plugins.kotlinx.serialization) + alias(libs.plugins.spotless) + alias(libs.plugins.dokka) + alias(libs.plugins.arrow.gradle.publish) + alias(libs.plugins.semver.gradle) + alias(libs.plugins.detekt) +} + +dependencies { detektPlugins(project(":detekt-rules")) } + +java { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 + toolchain { languageVersion = JavaLanguageVersion.of(11) } +} + +detekt { + toolVersion = "1.23.1" + source = files("src/commonMain/kotlin", "src/jvmMain/kotlin") + config.setFrom("../config/detekt/detekt.yml") + autoCorrect = true +} + +kotlin { + jvm { + compilations { + val integrationTest by compilations.creating { + // Create a test task to run the tests produced by this compilation: + tasks.register("integrationTest") { + description = "Run the integration tests" + group = "verification" + classpath = compileDependencyFiles + runtimeDependencyFiles + output.allOutputs + testClassesDirs = output.classesDirs + testLogging { events("passed") } + } + } + val test by compilations.getting + integrationTest.associateWith(test) + } + } + // JavaScript not supported due to reserved words +// js(IR) { +// browser() +// nodejs() +// } + linuxX64() + macosX64() + macosArm64() + mingwX64() + sourceSets { + all { + languageSettings.optIn("kotlin.ExperimentalStdlibApi") + } + val commonMain 
by getting { + dependencies { + implementation(libs.ktor.client.content.negotiation) + implementation(libs.ktor.client.serialization) + implementation(libs.klogging) + } + } + val commonTest by getting { + dependencies { + implementation(libs.kotest.property) + implementation(libs.kotest.framework) + implementation(libs.kotest.assertions) + } + } + val jvmMain by getting { + dependencies { + implementation(libs.logback) + api(libs.ktor.client.cio) + } + } + val jvmTest by getting { dependencies { implementation(libs.kotest.junit5) } } +// val jsMain by getting { dependencies { api(libs.ktor.client.js) } } + val linuxX64Main by getting { dependencies { api(libs.ktor.client.cio) } } + val macosX64Main by getting { dependencies { api(libs.ktor.client.cio) } } + val macosArm64Main by getting { dependencies { api(libs.ktor.client.cio) } } + val mingwX64Main by getting { dependencies { api(libs.ktor.client.winhttp) } } + val linuxX64Test by getting + val macosX64Test by getting + val macosArm64Test by getting + val mingwX64Test by getting + create("nativeMain") { + dependsOn(commonMain) + linuxX64Main.dependsOn(this) + macosX64Main.dependsOn(this) + macosArm64Main.dependsOn(this) + mingwX64Main.dependsOn(this) + } + create("nativeTest") { + dependsOn(commonTest) + linuxX64Test.dependsOn(this) + macosX64Test.dependsOn(this) + macosArm64Test.dependsOn(this) + mingwX64Test.dependsOn(this) + } + } +} + +spotless { + kotlin { + target("**/*.kt") + ktfmt().googleStyle() + } +} + +tasks { + withType().configureEach { + dependsOn(":detekt-rules:assemble") + autoCorrect = true + } + named("detektJvmMain") { + dependsOn(":detekt-rules:assemble") + getByName("build").dependsOn(this) + } + named("detekt") { + dependsOn(":detekt-rules:assemble") + getByName("build").dependsOn(this) + } + withType().configureEach { + maxParallelForks = Runtime.getRuntime().availableProcessors() + useJUnitPlatform() + testLogging { + setExceptionFormat("full") + setEvents(listOf("passed", "skipped", 
"failed", "standardOut", "standardError")) + } + } + withType().configureEach { + kotlin.sourceSets.forEach { kotlinSourceSet -> + dokkaSourceSets.named(kotlinSourceSet.name) { + perPackageOption { + matchingRegex.set(".*\\.internal.*") + suppress.set(true) + } + skipDeprecated.set(true) + reportUndocumented.set(false) + val baseUrl = checkNotNull(project.properties["pom.smc.url"]?.toString()) + kotlinSourceSet.kotlin.srcDirs.filter { it.exists() }.forEach { srcDir -> + sourceLink { + localDirectory.set(srcDir) + remoteUrl.set(uri("$baseUrl/blob/main/${srcDir.relativeTo(rootProject.rootDir)}").toURL()) + remoteLineSuffix.set("#L") + } + } + } + } + } +} + +tasks.withType { dependsOn(tasks.withType()) } diff --git a/openai-client/generator/README.md b/openai-client/generator/README.md new file mode 100644 index 000000000..9cfc316a3 --- /dev/null +++ b/openai-client/generator/README.md @@ -0,0 +1,25 @@ +# OpenAI Client + +Autogenerated client for OpenAI using the OpenAPI specification and [OpenAPI Generator](https://openapi-generator.tech/) + +## How to run + +1. Enter into module's root path + +2. Download openai-api specification + +```shell +curl -o generator/openai-api.yaml https://raw.githubusercontent.com/openai/openai-openapi/main/openapi.yaml +``` + +3. Run the openapi generator CLI from module's root with the following parameters: + +```shell +openapi-generator generate -i generator/openai-api.yaml -g kotlin -o . --skip-validate-spec -c generator/openai-config.json +``` + +4. 
Run the spotLess task from project's root + +```shell +./gradlew spotlessApply +``` \ No newline at end of file diff --git a/openai-client/generator/libraries/multiplatform/api.mustache b/openai-client/generator/libraries/multiplatform/api.mustache new file mode 100644 index 000000000..d49d92041 --- /dev/null +++ b/openai-client/generator/libraries/multiplatform/api.mustache @@ -0,0 +1,135 @@ +{{>licenseInfo}} +package {{apiPackage}} + +{{#imports}}import {{import}} +{{/imports}} + +import {{packageName}}.infrastructure.* +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.request.forms.formData +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.json.Json +import io.ktor.http.ParametersBuilder +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +{{#operations}} +{{#nonPublicApi}}internal {{/nonPublicApi}}open class {{classname}} : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? 
= null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super(baseUrl = baseUrl, httpClientEngine = httpClientEngine, httpClientConfig = httpClientConfig, jsonBlock = jsonSerializer) + + constructor( + baseUrl: String, + httpClient: HttpClient + ): super(baseUrl = baseUrl, httpClient = httpClient) + + {{#operation}} + {{#allParams}} + {{#isEnum}} + + /** + * enum for parameter {{paramName}} + */ + @Serializable + {{#nonPublicApi}}internal {{/nonPublicApi}}enum class {{enumName}}{{operationIdCamelCase}}(val value: {{^isContainer}}{{dataType}}{{/isContainer}}{{#isContainer}}kotlin.String{{/isContainer}}) { + {{^enumUnknownDefaultCase}} + {{#allowableValues}}{{#enumVars}} + @SerialName(value = {{^isString}}"{{/isString}}{{{value}}}{{^isString}}"{{/isString}}) + {{&name}}({{{value}}}){{^-last}},{{/-last}} + {{/enumVars}}{{/allowableValues}} + {{/enumUnknownDefaultCase}} + {{#enumUnknownDefaultCase}} + {{#allowableValues}}{{#enumVars}}{{^-last}} + @SerialName(value = {{^isString}}"{{/isString}}{{{value}}}{{^isString}}"{{/isString}}) + {{&name}}({{{value}}}), + {{/-last}}{{/enumVars}}{{/allowableValues}} + {{/enumUnknownDefaultCase}} + } + + {{/isEnum}} + {{/allParams}} + /** + * {{summary}} + * {{notes}} + {{#allParams}} * @param {{{paramName}}} {{description}}{{^required}} (optional{{#defaultValue}}, default to {{{.}}}{{/defaultValue}}){{/required}}{{#required}}{{#defaultValue}} (default to {{{.}}}){{/defaultValue}}{{/required}} + {{/allParams}} * @return {{{returnType}}}{{^returnType}}void{{/returnType}} + */ + {{#returnType}} + @Suppress("UNCHECKED_CAST") + {{/returnType}} + open suspend fun {{operationId}}({{#allParams}}{{{paramName}}}: {{#isEnum}}{{#isContainer}}kotlin.collections.List<{{enumName}}{{operationIdCamelCase}}>{{/isContainer}}{{^isContainer}}{{enumName}}{{operationIdCamelCase}}{{/isContainer}}{{/isEnum}}{{^isEnum}}{{{dataType}}}{{/isEnum}}{{#required}}{{#defaultValue}} = 
{{^isNumber}}{{#isEnum}}{{enumName}}{{operationIdCamelCase}}.{{enumDefaultValue}}{{/isEnum}}{{^isEnum}}{{{defaultValue}}}{{/isEnum}}{{/isNumber}}{{#isNumber}}{{{defaultValue}}}.toDouble(){{/isNumber}}{{/defaultValue}}{{/required}}{{^required}}?{{#defaultValue}} = {{^isNumber}}{{#isEnum}}{{enumName}}{{operationIdCamelCase}}.{{enumDefaultValue}}{{/isEnum}}{{^isEnum}}{{{defaultValue}}}{{/isEnum}}{{/isNumber}}{{#isNumber}}{{{defaultValue}}}.toDouble(){{/isNumber}}{{/defaultValue}}{{^defaultValue}} = null{{/defaultValue}}{{/required}}{{^-last}}, {{/-last}}{{/allParams}}): HttpResponse<{{{returnType}}}{{^returnType}}Unit{{/returnType}}> { + + val localVariableAuthNames = listOf({{#authMethods}}"{{name}}"{{^-last}}, {{/-last}}{{/authMethods}}) + + val localVariableBody = {{#hasBodyParam}}{{#bodyParam}}{{#isArray}}{{operationIdCamelCase}}Request({{{paramName}}}{{^isList}}.asList(){{/isList}}){{/isArray}}{{^isArray}}{{#isMap}}{{operationIdCamelCase}}Request({{{paramName}}}){{/isMap}}{{^isMap}}{{{paramName}}}{{/isMap}}{{/isArray}}{{/bodyParam}}{{/hasBodyParam}} + {{^hasBodyParam}} + {{#hasFormParams}} + {{#isMultipart}} + formData { + {{#formParams}} + {{#isArray}} + {{{paramName}}}?.onEach { + appendGen("{{{baseName}}}[]", it) + } + {{/isArray}} + {{^isArray}} + {{{paramName}}}?.apply { appendGen("{{{baseName}}}", {{{paramName}}}) } + {{/isArray}} + {{/formParams}} + } + {{/isMultipart}} + {{^isMultipart}} + ParametersBuilder().also { + {{#formParams}} + {{{paramName}}}?.apply { it.appendGen("{{{baseName}}}", {{{paramName}}}.toString()) } + {{/formParams}} + }.build() + {{/isMultipart}} + {{/hasFormParams}} + {{^hasFormParams}} + io.ktor.client.utils.EmptyContent + {{/hasFormParams}} + {{/hasBodyParam}} + + val localVariableQuery = mutableMapOf>(){{#queryParams}} + {{{paramName}}}?.apply { localVariableQuery["{{baseName}}"] = {{#isContainer}}toMultiValue(this, "{{collectionFormat}}"){{/isContainer}}{{^isContainer}}listOf("${{{paramName}}}"){{/isContainer}} }{{/queryParams}} 
+ val localVariableHeaders = mutableMapOf(){{#headerParams}} + {{{paramName}}}?.apply { localVariableHeaders["{{baseName}}"] = {{#isContainer}}this.joinToString(separator = collectionDelimiter("{{collectionFormat}}")){{/isContainer}}{{^isContainer}}this.toString(){{/isContainer}} }{{/headerParams}} + + val localVariableConfig = RequestConfig( + RequestMethod.{{httpMethod}}, + "{{path}}"{{#pathParams}}.replace("{" + "{{baseName}}" + "}", {{#isContainer}}{{paramName}}.joinToString(","){{/isContainer}}{{^isContainer}}"${{{paramName}}}"{{/isContainer}}){{/pathParams}}, + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = {{#hasAuthMethods}}true{{/hasAuthMethods}}{{^hasAuthMethods}}false{{/hasAuthMethods}}, + ) + + return {{#hasBodyParam}}jsonRequest{{/hasBodyParam}}{{^hasBodyParam}}{{#hasFormParams}}{{#isMultipart}}multipartFormRequest{{/isMultipart}}{{^isMultipart}}urlEncodedFormRequest{{/isMultipart}}{{/hasFormParams}}{{^hasFormParams}}request{{/hasFormParams}}{{/hasBodyParam}}( + localVariableConfig, + localVariableBody, + localVariableAuthNames + ).{{#isArray}}wrap<{{operationIdCamelCase}}Response>().map { value{{^isList}}.toTypedArray(){{/isList}} }{{/isArray}}{{^isArray}}{{#isMap}}wrap<{{operationIdCamelCase}}Response>().map { value }{{/isMap}}{{^isMap}}wrap(){{/isMap}}{{/isArray}} + } + +{{#hasBodyParam}} +{{#bodyParam}} +{{#isArray}}{{>serial_wrapper_request_list}}{{/isArray}}{{#isMap}}{{>serial_wrapper_request_map}}{{/isMap}} +{{/bodyParam}} +{{/hasBodyParam}} +{{#isArray}} +{{>serial_wrapper_response_list}} +{{/isArray}} +{{#isMap}} +{{>serial_wrapper_response_map}} +{{/isMap}} + + {{/operation}} +} +{{/operations}} diff --git a/openai-client/generator/openai-config.json b/openai-client/generator/openai-config.json new file mode 100644 index 000000000..78e593da7 --- /dev/null +++ b/openai-client/generator/openai-config.json @@ -0,0 +1,41 @@ +{ + "additionalProperties": { + "library": "multiplatform", + "groupId": 
"com.xebia.functional", + "artifactId": "openai-client", + "packageName": "com.xebia.functional.openai" + }, + "schemaMappings": { + "FunctionParameters": "kotlinx.serialization.json.JsonObject", + "ChatCompletionRequestUserMessage_content": "com.xebia.functional.openai.models.ext.chat.ChatCompletionRequestUserMessageContent", + "CreateChatCompletionRequest_model": "com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestModel", + "CreateChatCompletionRequest_stop": "com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestStop", + "CreateCompletionRequest_model": "com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestModel", + "CreateCompletionRequest_prompt": "com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestPrompt", + "CreateCompletionRequest_stop": "com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestStop", + "CreateEditRequest_model": "com.xebia.functional.openai.models.ext.edit.create.CreateEditRequestModel", + "CreateEmbeddingRequest_input": "com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestInput", + "CreateEmbeddingRequest_model": "com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestModel", + "CreateFineTuneRequest_model": "com.xebia.functional.openai.models.ext.finetune.create.CreateFineTuneRequestModel", + "CreateFineTuneRequest_hyperparameters_n_epochs": "com.xebia.functional.openai.models.ext.finetune.create.CreateFineTuneRequestHyperparametersNEpochs", + "CreateFineTuningJobRequest_hyperparameters_batch_size": "com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersBatchSize", + "CreateFineTuningJobRequest_hyperparameters_learning_rate_multiplier": "com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier", + "CreateFineTuningJobRequest_hyperparameters_n_epochs": 
"com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersNEpochs", + "CreateFineTuningJobRequest_model": "com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestModel", + "CreateImageEditRequest_model": "com.xebia.functional.openai.models.ext.image.edit.create.CreateImageEditRequestModel", + "CreateImageRequest_model": "com.xebia.functional.openai.models.ext.image.create.CreateImageRequestModel", + "CreateModerationRequest_input": "com.xebia.functional.openai.models.ext.moderation.create.CreateModerationRequestInput", + "CreateModerationRequest_model": "com.xebia.functional.openai.models.ext.moderation.create.CreateModerationRequestModel", + "CreateSpeechRequest_model": "com.xebia.functional.openai.models.ext.speech.create.CreateSpeechRequestModel", + "CreateTranscriptionRequest_model": "com.xebia.functional.openai.models.ext.transcription.create.CreateTranscriptionRequestModel", + "FineTuningJobRequest_hyperparameters_n_epochs": "com.xebia.functional.openai.models.ext.finetune.job.FineTuningJobRequestHyperparametersNEpochs", + "FineTuningJob_hyperparameters_n_epochs": "com.xebia.functional.openai.models.ext.finetune.job.FineTuningJobHyperparametersNEpochs" + }, + "templateDir": "generator", + "files": { + "api.mustache": { + "templateType": "API", + "destinationFilename": ".kt" + } + } +} diff --git a/openai-client/src/commonMain/kotlin/FormBuilderOps.kt b/openai-client/src/commonMain/kotlin/FormBuilderOps.kt new file mode 100644 index 000000000..e34abb989 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/FormBuilderOps.kt @@ -0,0 +1,28 @@ +package com.xebia.functional.openai.apis + +import io.ktor.client.request.forms.* +import io.ktor.http.* +import io.ktor.util.* +import io.ktor.utils.io.core.* +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.InternalSerializationApi +import kotlinx.serialization.serializerOrNull + +@OptIn(InternalAPI::class) 
+fun FormBuilder.appendGen(key: String, value: T, headers: Headers = Headers.Empty): Unit { + when (value) { + is String -> append(key, value, headers) + is Number -> append(key, value, headers) + is Boolean -> append(key, value, headers) + is ByteArray -> append(key, value, headers) + is ByteReadPacket -> append(key, value, headers) + is InputProvider -> append(key, value, headers) + is ChannelProvider -> append(key, value, headers) + is Enum<*> -> append(key, serialNameOrEnumValue(value), headers) + else -> append(key, value, headers) + } +} + +@OptIn(InternalSerializationApi::class, ExperimentalSerializationApi::class) +fun > serialNameOrEnumValue(v: Enum): String = + v::class.serializerOrNull()?.descriptor?.getElementName(v.ordinal) ?: v.name diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt new file mode 100644 index 000000000..5392b3435 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt @@ -0,0 +1,70 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.AssistantObject +import com.xebia.functional.openai.models.ModifyAssistantRequest +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class AssistantApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? 
= null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Modifies an assistant. + * + * @param assistantId The ID of the assistant to modify. + * @param modifyAssistantRequest + * @return AssistantObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun modifyAssistant( + assistantId: kotlin.String, + modifyAssistantRequest: ModifyAssistantRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = modifyAssistantRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/assistants/{assistant_id}".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt new file mode 100644 index 000000000..2026e272a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt @@ -0,0 +1,1102 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.AssistantFileObject +import com.xebia.functional.openai.models.AssistantObject +import com.xebia.functional.openai.models.CreateAssistantFileRequest +import com.xebia.functional.openai.models.CreateAssistantRequest +import com.xebia.functional.openai.models.CreateMessageRequest +import com.xebia.functional.openai.models.CreateRunRequest +import com.xebia.functional.openai.models.CreateThreadAndRunRequest +import com.xebia.functional.openai.models.CreateThreadRequest +import com.xebia.functional.openai.models.DeleteAssistantFileResponse +import com.xebia.functional.openai.models.DeleteAssistantResponse +import com.xebia.functional.openai.models.DeleteThreadResponse +import com.xebia.functional.openai.models.ListAssistantFilesResponse +import com.xebia.functional.openai.models.ListAssistantsResponse +import com.xebia.functional.openai.models.ListMessageFilesResponse +import com.xebia.functional.openai.models.ListMessagesResponse +import com.xebia.functional.openai.models.ListRunStepsResponse +import com.xebia.functional.openai.models.ListRunsResponse +import com.xebia.functional.openai.models.MessageFileObject +import com.xebia.functional.openai.models.MessageObject +import com.xebia.functional.openai.models.ModifyMessageRequest +import com.xebia.functional.openai.models.ModifyRunRequest +import com.xebia.functional.openai.models.ModifyThreadRequest +import com.xebia.functional.openai.models.RunObject +import com.xebia.functional.openai.models.RunStepObject +import com.xebia.functional.openai.models.SubmitToolOutputsRunRequest +import com.xebia.functional.openai.models.ThreadObject +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import 
kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class AssistantsApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Cancels a run that is `in_progress`. + * + * @param threadId The ID of the thread to which this run belongs. + * @param runId The ID of the run to cancel. + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun cancelRun( + threadId: kotlin.String, + runId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/runs/{run_id}/cancel" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create an assistant with a model and instructions. 
+ * + * @param createAssistantRequest + * @return AssistantObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createAssistant( + createAssistantRequest: CreateAssistantRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createAssistantRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/assistants", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create an assistant file by attaching a [File](/docs/api-reference/files) to an + * [assistant](/docs/api-reference/assistants). + * + * @param assistantId The ID of the assistant for which to create a File. + * @param createAssistantFileRequest + * @return AssistantFileObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createAssistantFile( + assistantId: kotlin.String, + createAssistantFileRequest: CreateAssistantFileRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createAssistantFileRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/assistants/{assistant_id}/files".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create a message. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) to create a message for. 
+ * @param createMessageRequest + * @return MessageObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createMessage( + threadId: kotlin.String, + createMessageRequest: CreateMessageRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createMessageRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/messages".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create a run. + * + * @param threadId The ID of the thread to run. + * @param createRunRequest + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createRun( + threadId: kotlin.String, + createRunRequest: CreateRunRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createRunRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/runs".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create a thread. + * + * @param createThreadRequest (optional) + * @return ThreadObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createThread( + createThreadRequest: CreateThreadRequest? 
= null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createThreadRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Create a thread and run it in one request. + * + * @param createThreadAndRunRequest + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createThreadAndRun( + createThreadAndRunRequest: CreateThreadAndRunRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createThreadAndRunRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/runs", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Delete an assistant. + * + * @param assistantId The ID of the assistant to delete. 
+ * @return DeleteAssistantResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun deleteAssistant( + assistantId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.DELETE, + "/assistants/{assistant_id}".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Delete an assistant file. + * + * @param assistantId The ID of the assistant that the file belongs to. + * @param fileId The ID of the file to delete. + * @return DeleteAssistantFileResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun deleteAssistantFile( + assistantId: kotlin.String, + fileId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.DELETE, + "/assistants/{assistant_id}/files/{file_id}" + .replace("{" + "assistant_id" + "}", "$assistantId") + .replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Delete a thread. + * + * @param threadId The ID of the thread to delete. 
+ * @return DeleteThreadResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun deleteThread(threadId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.DELETE, + "/threads/{thread_id}".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves an assistant. + * + * @param assistantId The ID of the assistant to retrieve. + * @return AssistantObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getAssistant(assistantId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/assistants/{assistant_id}".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves an AssistantFile. + * + * @param assistantId The ID of the assistant who the file belongs to. + * @param fileId The ID of the file we're getting. 
+ * @return AssistantFileObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getAssistantFile( + assistantId: kotlin.String, + fileId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/assistants/{assistant_id}/files/{file_id}" + .replace("{" + "assistant_id" + "}", "$assistantId") + .replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieve a message. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) to which this message + * belongs. + * @param messageId The ID of the message to retrieve. + * @return MessageObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getMessage( + threadId: kotlin.String, + messageId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/messages/{message_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "message_id" + "}", "$messageId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves a message file. + * + * @param threadId The ID of the thread to which the message and File belong. + * @param messageId The ID of the message the file belongs to. 
+ * @param fileId The ID of the file being retrieved. + * @return MessageFileObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getMessageFile( + threadId: kotlin.String, + messageId: kotlin.String, + fileId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/messages/{message_id}/files/{file_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "message_id" + "}", "$messageId") + .replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves a run. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) that was run. + * @param runId The ID of the run to retrieve. + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getRun(threadId: kotlin.String, runId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/runs/{run_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves a run step. + * + * @param threadId The ID of the thread to which the run and run step belongs. 
+ * @param runId The ID of the run to which the run step belongs. + * @param stepId The ID of the run step to retrieve. + * @return RunStepObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getRunStep( + threadId: kotlin.String, + runId: kotlin.String, + stepId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/runs/{run_id}/steps/{step_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId") + .replace("{" + "step_id" + "}", "$stepId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves a thread. + * + * @param threadId The ID of the thread to retrieve. + * @return ThreadObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun getThread(threadId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListAssistantFiles(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") desc("desc") + } + + /** + * Returns a list of assistant files. 
+ * + * @param assistantId The ID of the assistant the file belongs to. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. `asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. `after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListAssistantFilesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listAssistantFiles( + assistantId: kotlin.String, + limit: kotlin.Int? = 20, + order: OrderListAssistantFiles? = OrderListAssistantFiles.desc, + after: kotlin.String? = null, + before: kotlin.String? 
= null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/assistants/{assistant_id}/files".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListAssistants(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") desc("desc") + } + + /** + * Returns a list of assistants. + * + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. `asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. `after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. 
For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListAssistantsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listAssistants( + limit: kotlin.Int? = 20, + order: OrderListAssistants? = OrderListAssistants.desc, + after: kotlin.String? = null, + before: kotlin.String? = null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/assistants", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListMessageFiles(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") desc("desc") + } + + /** + * Returns a list of message files. + * + * @param threadId The ID of the thread that the message and files belong to. + * @param messageId The ID of the message that the files belongs to. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. `asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. 
`after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListMessageFilesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listMessageFiles( + threadId: kotlin.String, + messageId: kotlin.String, + limit: kotlin.Int? = 20, + order: OrderListMessageFiles? = OrderListMessageFiles.desc, + after: kotlin.String? = null, + before: kotlin.String? = null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/messages/{message_id}/files" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "message_id" + "}", "$messageId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListMessages(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") 
desc("desc") + } + + /** + * Returns a list of messages for a given thread. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) the messages belong to. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. `asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. `after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListMessagesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listMessages( + threadId: kotlin.String, + limit: kotlin.Int? = 20, + order: OrderListMessages? = OrderListMessages.desc, + after: kotlin.String? = null, + before: kotlin.String? 
= null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/messages".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListRunSteps(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") desc("desc") + } + + /** + * Returns a list of run steps belonging to a run. + * + * @param threadId The ID of the thread the run and run steps belong to. + * @param runId The ID of the run the run steps belong to. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. `asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. `after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. 
For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListRunStepsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listRunSteps( + threadId: kotlin.String, + runId: kotlin.String, + limit: kotlin.Int? = 20, + order: OrderListRunSteps? = OrderListRunSteps.desc, + after: kotlin.String? = null, + before: kotlin.String? = null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/runs/{run_id}/steps" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter order */ + @Serializable + enum class OrderListRuns(val value: kotlin.String) { + + @SerialName(value = "asc") asc("asc"), + @SerialName(value = "desc") desc("desc") + } + + /** + * Returns a list of runs belonging to a thread. + * + * @param threadId The ID of the thread the run belongs to. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. (optional, default to 20) + * @param order Sort order by the `created_at` timestamp of the objects. 
`asc` + * for ascending order and `desc` for descending order. (optional, default to desc) + * @param after A cursor for use in pagination. `after` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch + * the next page of the list. (optional) + * @param before A cursor for use in pagination. `before` is an object ID that defines + * your place in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch + * the previous page of the list. (optional) + * @return ListRunsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listRuns( + threadId: kotlin.String, + limit: kotlin.Int? = 20, + order: OrderListRuns? = OrderListRuns.desc, + after: kotlin.String? = null, + before: kotlin.String? = null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + order?.apply { localVariableQuery["order"] = listOf("$order") } + after?.apply { localVariableQuery["after"] = listOf("$after") } + before?.apply { localVariableQuery["before"] = listOf("$before") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/threads/{thread_id}/runs".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Modifies a message. + * + * @param threadId The ID of the thread to which this message belongs. + * @param messageId The ID of the message to modify. 
+ * @param modifyMessageRequest + * @return MessageObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun modifyMessage( + threadId: kotlin.String, + messageId: kotlin.String, + modifyMessageRequest: ModifyMessageRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = modifyMessageRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/messages/{message_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "message_id" + "}", "$messageId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Modifies a run. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) that was run. + * @param runId The ID of the run to modify. + * @param modifyRunRequest + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun modifyRun( + threadId: kotlin.String, + runId: kotlin.String, + modifyRunRequest: ModifyRunRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = modifyRunRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/runs/{run_id}" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Modifies a thread. + * + * @param threadId The ID of the thread to modify. Only the `metadata` can be modified. 
+ * @param modifyThreadRequest + * @return ThreadObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun modifyThread( + threadId: kotlin.String, + modifyThreadRequest: ModifyThreadRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = modifyThreadRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}".replace("{" + "thread_id" + "}", "$threadId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * When a run has the `status: \"requires_action\"` and + * `required_action.type` is `submit_tool_outputs`, this endpoint can be used + * to submit the outputs from the tool calls once they're all completed. All outputs must be + * submitted in a single request. + * + * @param threadId The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + * @param runId The ID of the run that requires the tool output submission. 
+ * @param submitToolOutputsRunRequest + * @return RunObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun submitToolOuputsToRun( + threadId: kotlin.String, + runId: kotlin.String, + submitToolOutputsRunRequest: SubmitToolOutputsRunRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = submitToolOutputsRunRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/threads/{thread_id}/runs/{run_id}/submit_tool_outputs" + .replace("{" + "thread_id" + "}", "$threadId") + .replace("{" + "run_id" + "}", "$runId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt new file mode 100644 index 000000000..613c49304 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt @@ -0,0 +1,195 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateSpeechRequest +import com.xebia.functional.openai.models.CreateTranscriptionResponse +import com.xebia.functional.openai.models.CreateTranslationResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import io.ktor.client.request.forms.formData +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class AudioApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Generates audio from the input text. 
+ * + * @param createSpeechRequest + * @return com.xebia.functional.openai.infrastructure.OctetByteArray + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createSpeech( + createSpeechRequest: CreateSpeechRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createSpeechRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/audio/speech", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter responseFormat */ + @Serializable + enum class ResponseFormatCreateTranscription(val value: kotlin.String) { + + @SerialName(value = "json") json("json"), + @SerialName(value = "text") text("text"), + @SerialName(value = "srt") srt("srt"), + @SerialName(value = "verbose_json") verboseJson("verbose_json"), + @SerialName(value = "vtt") vtt("vtt") + } + + /** + * Transcribes audio into the input language. + * + * @param file The audio file object (not file name) to transcribe, in one of these formats: flac, + * mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + * @param model + * @param language The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve + * accuracy and latency. (optional) + * @param prompt An optional text to guide the model's style or continue a previous audio + * segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + * (optional) + * @param responseFormat The format of the transcript output, in one of these options: + * `json`, `text`, `srt`, `verbose_json`, or + * `vtt`. (optional, default to json) + * @param temperature The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. If set to 0, the model will use + * [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase + * the temperature until certain thresholds are hit. (optional, default to 0) + * @return CreateTranscriptionResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createTranscription( + file: io.ktor.client.request.forms.InputProvider, + model: + com.xebia.functional.openai.models.ext.transcription.create.CreateTranscriptionRequestModel, + language: kotlin.String? = null, + prompt: kotlin.String? = null, + responseFormat: ResponseFormatCreateTranscription? = ResponseFormatCreateTranscription.json, + temperature: kotlin.Double? = 0.toDouble() + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = formData { + file?.apply { appendGen("file", file) } + model?.apply { appendGen("model", model) } + language?.apply { appendGen("language", language) } + prompt?.apply { appendGen("prompt", prompt) } + responseFormat?.apply { appendGen("response_format", responseFormat) } + temperature?.apply { appendGen("temperature", temperature) } + } + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/audio/transcriptions", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return multipartFormRequest(localVariableConfig, localVariableBody, localVariableAuthNames) + .wrap() + } + + /** + * Translates audio into English. + * + * @param file The audio file object (not file name) translate, in one of these formats: flac, + * mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + * @param model + * @param prompt An optional text to guide the model's style or continue a previous audio + * segment. 
The [prompt](/docs/guides/speech-to-text/prompting) should be in English. (optional) + * @param responseFormat The format of the transcript output, in one of these options: + * `json`, `text`, `srt`, `verbose_json`, or + * `vtt`. (optional, default to "json") + * @param temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. If set to 0, the model will use + * [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase + * the temperature until certain thresholds are hit. (optional, default to 0) + * @return CreateTranslationResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createTranslation( + file: io.ktor.client.request.forms.InputProvider, + model: + com.xebia.functional.openai.models.ext.transcription.create.CreateTranscriptionRequestModel, + prompt: kotlin.String? = null, + responseFormat: kotlin.String? = "json", + temperature: kotlin.Double? 
= 0.toDouble() + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = formData { + file?.apply { appendGen("file", file) } + model?.apply { appendGen("model", model) } + prompt?.apply { appendGen("prompt", prompt) } + responseFormat?.apply { appendGen("response_format", responseFormat) } + temperature?.apply { appendGen("temperature", temperature) } + } + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/audio/translations", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return multipartFormRequest(localVariableConfig, localVariableBody, localVariableAuthNames) + .wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt new file mode 100644 index 000000000..8dc2b6cf5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt @@ -0,0 +1,68 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateChatCompletionRequest +import com.xebia.functional.openai.models.CreateChatCompletionResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class ChatApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Creates a model response for the given chat conversation. 
+ * + * @param createChatCompletionRequest + * @return CreateChatCompletionResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createChatCompletion( + createChatCompletionRequest: CreateChatCompletionRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createChatCompletionRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/chat/completions", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt new file mode 100644 index 000000000..804c8d3b1 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt @@ -0,0 +1,68 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateCompletionRequest +import com.xebia.functional.openai.models.CreateCompletionResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class CompletionsApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? 
= null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Creates a completion for the provided prompt and parameters. + * + * @param createCompletionRequest + * @return CreateCompletionResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createCompletion( + createCompletionRequest: CreateCompletionRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createCompletionRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/completions", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt new file mode 100644 index 000000000..36a03bef3 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt @@ -0,0 +1,68 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateEditRequest +import com.xebia.functional.openai.models.CreateEditResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class EditsApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Creates a new edit for the provided input, instruction, and parameters. 
+ * + * @param createEditRequest + * @return CreateEditResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createEdit( + createEditRequest: CreateEditRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createEditRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/edits", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt new file mode 100644 index 000000000..452a62443 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt @@ -0,0 +1,68 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateEmbeddingRequest +import com.xebia.functional.openai.models.CreateEmbeddingResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class EmbeddingsApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? 
= null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Creates an embedding vector representing the input text. + * + * @param createEmbeddingRequest + * @return CreateEmbeddingResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createEmbedding( + createEmbeddingRequest: CreateEmbeddingRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createEmbeddingRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/embeddings", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt new file mode 100644 index 000000000..c99c6d2b6 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt @@ -0,0 +1,204 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.DeleteFileResponse +import com.xebia.functional.openai.models.ListFilesResponse +import com.xebia.functional.openai.models.OpenAIFile +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import io.ktor.client.request.forms.formData +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class FilesApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** enum for parameter purpose */ + @Serializable + enum class PurposeCreateFile(val value: kotlin.String) { + + @SerialName(value = "fine-tune") fineMinusTune("fine-tune"), + @SerialName(value = "assistants") assistants("assistants") + } + + /** + * Upload a file that can be used across various endpoints/features. The size of all the files + * uploaded by one organization can be up to 100 GB. The size of individual files for can be a + * maximum of 512MB. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about + * the types of files supported. The Fine-tuning API only supports `.jsonl` files. + * Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
+ * + * @param file The File object (not file name) to be uploaded. + * @param purpose The intended purpose of the uploaded file. Use \\\"fine-tune\\\" for + * [Fine-tuning](/docs/api-reference/fine-tuning) and \\\"assistants\\\" for + * [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). + * This allows us to validate the format of the uploaded file is correct for fine-tuning. + * @return OpenAIFile + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createFile( + file: io.ktor.client.request.forms.InputProvider, + purpose: PurposeCreateFile + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = formData { + file?.apply { appendGen("file", file) } + purpose?.apply { appendGen("purpose", purpose) } + } + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/files", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return multipartFormRequest(localVariableConfig, localVariableBody, localVariableAuthNames) + .wrap() + } + + /** + * Delete a file. + * + * @param fileId The ID of the file to use for this request. + * @return DeleteFileResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun deleteFile(fileId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.DELETE, + "/files/{file_id}".replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Returns the contents of the specified file. 
+ * + * @param fileId The ID of the file to use for this request. + * @return kotlin.String + */ + @Suppress("UNCHECKED_CAST") + open suspend fun downloadFile(fileId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/files/{file_id}/content".replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Returns a list of files that belong to the user's organization. + * + * @param purpose Only return files with the given purpose. (optional) + * @return ListFilesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listFiles(purpose: kotlin.String? = null): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + purpose?.apply { localVariableQuery["purpose"] = listOf("$purpose") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/files", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Returns information about a specific file. + * + * @param fileId The ID of the file to use for this request. 
+ * @return OpenAIFile + */ + @Suppress("UNCHECKED_CAST") + open suspend fun retrieveFile(fileId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/files/{file_id}".replace("{" + "file_id" + "}", "$fileId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt new file mode 100644 index 000000000..82c762ca2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt @@ -0,0 +1,194 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateFineTuneRequest +import com.xebia.functional.openai.models.FineTune +import com.xebia.functional.openai.models.ListFineTuneEventsResponse +import com.xebia.functional.openai.models.ListFineTunesResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class FineTunesApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Immediately cancel a fine-tune job. 
+ * + * @param fineTuneId The ID of the fine-tune job to cancel + * @return FineTune + */ + @Suppress("UNCHECKED_CAST") + open suspend fun cancelFineTune(fineTuneId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/fine-tunes/{fine_tune_id}/cancel".replace("{" + "fine_tune_id" + "}", "$fineTuneId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Creates a job that fine-tunes a specified model from a given dataset. Response includes details + * of the enqueued job including job status and the name of the fine-tuned models once complete. + * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + * + * @param createFineTuneRequest + * @return FineTune + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createFineTune( + createFineTuneRequest: CreateFineTuneRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createFineTuneRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/fine-tunes", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Get fine-grained status updates for a fine-tune job. + * + * @param fineTuneId The ID of the fine-tune job to get events for. + * @param stream Whether to stream events for the fine-tune job. 
If set to true, events will be + * sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available. The stream will terminate with a `data: [DONE]` message + * when the job is finished (succeeded, cancelled, or failed). If set to false, only events + * generated so far will be returned. (optional, default to false) + * @return ListFineTuneEventsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listFineTuneEvents( + fineTuneId: kotlin.String, + stream: kotlin.Boolean? = false + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + stream?.apply { localVariableQuery["stream"] = listOf("$stream") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine-tunes/{fine_tune_id}/events".replace("{" + "fine_tune_id" + "}", "$fineTuneId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * List your organization's fine-tuning jobs + * + * @return ListFineTunesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listFineTunes(): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine-tunes", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Gets info about the fine-tune job. 
+ * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + * + * @param fineTuneId The ID of the fine-tune job + * @return FineTune + */ + @Suppress("UNCHECKED_CAST") + open suspend fun retrieveFineTune(fineTuneId: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine-tunes/{fine_tune_id}".replace("{" + "fine_tune_id" + "}", "$fineTuneId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt new file mode 100644 index 000000000..65fe14a1b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt @@ -0,0 +1,205 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateFineTuningJobRequest +import com.xebia.functional.openai.models.FineTuningJob +import com.xebia.functional.openai.models.ListFineTuningJobEventsResponse +import com.xebia.functional.openai.models.ListPaginatedFineTuningJobsResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class FineTuningApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Immediately cancel a fine-tune job. + * + * @param fineTuningJobId The ID of the fine-tuning job to cancel. 
+ * @return FineTuningJob + */ + @Suppress("UNCHECKED_CAST") + open suspend fun cancelFineTuningJob( + fineTuningJobId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/fine_tuning/jobs/{fine_tuning_job_id}/cancel" + .replace("{" + "fine_tuning_job_id" + "}", "$fineTuningJobId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Creates a job that fine-tunes a specified model from a given dataset. Response includes details + * of the enqueued job including job status and the name of the fine-tuned models once complete. + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + * @param createFineTuningJobRequest + * @return FineTuningJob + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createFineTuningJob( + createFineTuningJobRequest: CreateFineTuningJobRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createFineTuningJobRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/fine_tuning/jobs", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Get status updates for a fine-tuning job. + * + * @param fineTuningJobId The ID of the fine-tuning job to get events for. + * @param after Identifier for the last event from the previous pagination request. (optional) + * @param limit Number of events to retrieve. 
(optional, default to 20) + * @return ListFineTuningJobEventsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listFineTuningEvents( + fineTuningJobId: kotlin.String, + after: kotlin.String? = null, + limit: kotlin.Int? = 20 + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + after?.apply { localVariableQuery["after"] = listOf("$after") } + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine_tuning/jobs/{fine_tuning_job_id}/events" + .replace("{" + "fine_tuning_job_id" + "}", "$fineTuningJobId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * List your organization's fine-tuning jobs + * + * @param after Identifier for the last job from the previous pagination request. (optional) + * @param limit Number of fine-tuning jobs to retrieve. (optional, default to 20) + * @return ListPaginatedFineTuningJobsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listPaginatedFineTuningJobs( + after: kotlin.String? = null, + limit: kotlin.Int? 
= 20 + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + after?.apply { localVariableQuery["after"] = listOf("$after") } + limit?.apply { localVariableQuery["limit"] = listOf("$limit") } + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine_tuning/jobs", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Get info about a fine-tuning job. [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + * @param fineTuningJobId The ID of the fine-tuning job. + * @return FineTuningJob + */ + @Suppress("UNCHECKED_CAST") + open suspend fun retrieveFineTuningJob( + fineTuningJobId: kotlin.String + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/fine_tuning/jobs/{fine_tuning_job_id}" + .replace("{" + "fine_tuning_job_id" + "}", "$fineTuningJobId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt new file mode 100644 index 000000000..1714f11cd --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt @@ -0,0 +1,219 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator 
(https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.CreateImageRequest +import com.xebia.functional.openai.models.ImagesResponse +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import io.ktor.client.request.forms.formData +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class ImagesApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Creates an image given a prompt. 
+ * + * @param createImageRequest + * @return ImagesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createImage( + createImageRequest: CreateImageRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createImageRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/images/generations", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** enum for parameter size */ + @Serializable + enum class PropertySizeCreateImageEdit(val value: kotlin.String) { + + @SerialName(value = "256x256") _256x256("256x256"), + @SerialName(value = "512x512") _512x512("512x512"), + @SerialName(value = "1024x1024") _1024x1024("1024x1024") + } + + /** enum for parameter responseFormat */ + @Serializable + enum class ResponseFormatCreateImageEdit(val value: kotlin.String) { + + @SerialName(value = "url") url("url"), + @SerialName(value = "b64_json") b64Json("b64_json") + } + + /** + * Creates an edited or extended image given an original image and a prompt. + * + * @param image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is + * not provided, image must have transparency, which will be used as the mask. + * @param prompt A text description of the desired image(s). The maximum length is 1000 + * characters. + * @param mask An additional image whose fully transparent areas (e.g. where alpha is zero) + * indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, + * and have the same dimensions as `image`. (optional) + * @param model (optional) + * @param n The number of images to generate. Must be between 1 and 10. (optional, default to 1) + * @param size The size of the generated images. 
Must be one of `256x256`, + * `512x512`, or `1024x1024`. (optional, default to 1024x1024) + * @param responseFormat The format in which the generated images are returned. Must be one of + * `url` or `b64_json`. (optional, default to url) + * @param user A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). (optional) + * @return ImagesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createImageEdit( + image: io.ktor.client.request.forms.InputProvider, + prompt: kotlin.String, + mask: io.ktor.client.request.forms.InputProvider? = null, + model: com.xebia.functional.openai.models.ext.image.edit.create.CreateImageEditRequestModel? = + null, + n: kotlin.Int? = 1, + size: PropertySizeCreateImageEdit? = PropertySizeCreateImageEdit._1024x1024, + responseFormat: ResponseFormatCreateImageEdit? = ResponseFormatCreateImageEdit.url, + user: kotlin.String? = null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = formData { + image?.apply { appendGen("image", image) } + prompt?.apply { appendGen("prompt", prompt) } + mask?.apply { appendGen("mask", mask) } + model?.apply { appendGen("model", model) } + n?.apply { appendGen("n", n) } + size?.apply { appendGen("size", size) } + responseFormat?.apply { appendGen("response_format", responseFormat) } + user?.apply { appendGen("user", user) } + } + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/images/edits", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return multipartFormRequest(localVariableConfig, localVariableBody, localVariableAuthNames) + .wrap() + } + + /** enum for parameter responseFormat */ + @Serializable + enum class ResponseFormatCreateImageVariation(val value: kotlin.String) 
{ + + @SerialName(value = "url") url("url"), + @SerialName(value = "b64_json") b64Json("b64_json") + } + + /** enum for parameter size */ + @Serializable + enum class PropertySizeCreateImageVariation(val value: kotlin.String) { + + @SerialName(value = "256x256") _256x256("256x256"), + @SerialName(value = "512x512") _512x512("512x512"), + @SerialName(value = "1024x1024") _1024x1024("1024x1024") + } + + /** + * Creates a variation of a given image. + * + * @param image The image to use as the basis for the variation(s). Must be a valid PNG file, less + * than 4MB, and square. + * @param model (optional) + * @param n The number of images to generate. Must be between 1 and 10. For `dall-e-3`, + * only `n=1` is supported. (optional, default to 1) + * @param responseFormat The format in which the generated images are returned. Must be one of + * `url` or `b64_json`. (optional, default to url) + * @param size The size of the generated images. Must be one of `256x256`, + * `512x512`, or `1024x1024`. (optional, default to 1024x1024) + * @param user A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). (optional) + * @return ImagesResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createImageVariation( + image: io.ktor.client.request.forms.InputProvider, + model: com.xebia.functional.openai.models.ext.image.edit.create.CreateImageEditRequestModel? = + null, + n: kotlin.Int? = 1, + responseFormat: ResponseFormatCreateImageVariation? = ResponseFormatCreateImageVariation.url, + size: PropertySizeCreateImageVariation? = PropertySizeCreateImageVariation._1024x1024, + user: kotlin.String? 
= null + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = formData { + image?.apply { appendGen("image", image) } + model?.apply { appendGen("model", model) } + n?.apply { appendGen("n", n) } + responseFormat?.apply { appendGen("response_format", responseFormat) } + size?.apply { appendGen("size", size) } + user?.apply { appendGen("user", user) } + } + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/images/variations", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return multipartFormRequest(localVariableConfig, localVariableBody, localVariableAuthNames) + .wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt new file mode 100644 index 000000000..949388585 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt @@ -0,0 +1,124 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.apis + +import com.xebia.functional.openai.infrastructure.* +import com.xebia.functional.openai.models.DeleteModelResponse +import com.xebia.functional.openai.models.ListModelsResponse +import com.xebia.functional.openai.models.Model +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* +import kotlinx.serialization.json.Json + +open class ModelsApi : ApiClient { + + constructor( + baseUrl: String = ApiClient.BASE_URL, + httpClientEngine: HttpClientEngine? = null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
+ * + * @param model The model to delete + * @return DeleteModelResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun deleteModel(model: kotlin.String): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.DELETE, + "/models/{model}".replace("{" + "model" + "}", "$model"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Lists the currently available models, and provides basic information about each one such as the + * owner and availability. + * + * @return ListModelsResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun listModels(): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = io.ktor.client.utils.EmptyContent + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.GET, + "/models", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + + /** + * Retrieves a model instance, providing basic information about the model such as the owner and + * permissioning. 
+   *
+   * @param model The ID of the model to use for this request
+   * @return Model
+   */
+  @Suppress("UNCHECKED_CAST")
+  open suspend fun retrieveModel(model: kotlin.String): HttpResponse<Model> {
+
+    val localVariableAuthNames = listOf("ApiKeyAuth")
+
+    val localVariableBody = io.ktor.client.utils.EmptyContent
+
+    val localVariableQuery = mutableMapOf<String, List<String>>()
+    val localVariableHeaders = mutableMapOf<String, String>()
+
+    val localVariableConfig =
+      RequestConfig<kotlin.Any?>(
+        RequestMethod.GET,
+        "/models/{model}".replace("{" + "model" + "}", "$model"),
+        query = localVariableQuery,
+        headers = localVariableHeaders,
+        requiresAuthentication = true,
+      )
+
+    return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap()
+  }
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModerationsApi.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModerationsApi.kt
new file mode 100644
index 000000000..59d7e26d0
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/apis/ModerationsApi.kt
@@ -0,0 +1,68 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.apis
+
+import com.xebia.functional.openai.infrastructure.*
+import com.xebia.functional.openai.models.CreateModerationRequest
+import com.xebia.functional.openai.models.CreateModerationResponse
+import io.ktor.client.HttpClient
+import io.ktor.client.HttpClientConfig
+import io.ktor.client.engine.HttpClientEngine
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+import kotlinx.serialization.json.Json
+
+open class ModerationsApi : ApiClient {
+
+  constructor(
+    baseUrl: String = ApiClient.BASE_URL,
+    httpClientEngine: HttpClientEngine?
= null, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonSerializer: Json = ApiClient.JSON_DEFAULT + ) : super( + baseUrl = baseUrl, + httpClientEngine = httpClientEngine, + httpClientConfig = httpClientConfig, + jsonBlock = jsonSerializer + ) + + constructor( + baseUrl: String, + httpClient: HttpClient + ) : super(baseUrl = baseUrl, httpClient = httpClient) + + /** + * Classifies if text violates OpenAI's Content Policy + * + * @param createModerationRequest + * @return CreateModerationResponse + */ + @Suppress("UNCHECKED_CAST") + open suspend fun createModeration( + createModerationRequest: CreateModerationRequest + ): HttpResponse { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = createModerationRequest + + val localVariableQuery = mutableMapOf>() + val localVariableHeaders = mutableMapOf() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/moderations", + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/ApiKeyAuth.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/ApiKeyAuth.kt new file mode 100644 index 000000000..eab86e512 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/ApiKeyAuth.kt @@ -0,0 +1,16 @@ +package com.xebia.functional.openai.auth + +class ApiKeyAuth(private val location: String, val paramName: String) : Authentication { + var apiKey: String? = null + var apiKeyPrefix: String? = null + + override fun apply(query: MutableMap>, headers: MutableMap) { + val key: String = apiKey ?: return + val prefix: String? 
= apiKeyPrefix
+    val value: String = if (prefix != null) "$prefix $key" else key
+    when (location) {
+      "query" -> query[paramName] = listOf(value)
+      "header" -> headers[paramName] = value
+    }
+  }
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/Authentication.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/Authentication.kt
new file mode 100644
index 000000000..91c8561c5
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/Authentication.kt
@@ -0,0 +1,12 @@
+package com.xebia.functional.openai.auth
+
+interface Authentication {
+
+  /**
+   * Apply authentication settings to header and query params.
+   *
+   * @param query Query parameters.
+   * @param headers Header parameters.
+   */
+  fun apply(query: MutableMap<String, List<String>>, headers: MutableMap<String, String>)
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBasicAuth.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBasicAuth.kt
new file mode 100644
index 000000000..bfe4cbb1b
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBasicAuth.kt
@@ -0,0 +1,17 @@
+package com.xebia.functional.openai.auth
+
+import io.ktor.util.InternalAPI
+import io.ktor.util.encodeBase64
+
+class HttpBasicAuth : Authentication {
+  var username: String? = null
+  var password: String?
= null
+
+  @OptIn(InternalAPI::class)
+  override fun apply(query: MutableMap<String, List<String>>, headers: MutableMap<String, String>) {
+    if (username == null && password == null) return
+    val str = (username ?: "") + ":" + (password ?: "")
+    val auth = str.encodeBase64()
+    headers["Authorization"] = "Basic $auth"
+  }
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBearerAuth.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBearerAuth.kt
new file mode 100644
index 000000000..ee10ccda6
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/HttpBearerAuth.kt
@@ -0,0 +1,14 @@
+package com.xebia.functional.openai.auth
+
+class HttpBearerAuth(private val scheme: String?) : Authentication {
+  var bearerToken: String? = null
+
+  override fun apply(query: MutableMap<String, List<String>>, headers: MutableMap<String, String>) {
+    val token: String = bearerToken ?: return
+    headers["Authorization"] = (if (scheme != null) upperCaseBearer(scheme)!! + " " else "") + token
+  }
+
+  private fun upperCaseBearer(scheme: String): String? {
+    return if ("bearer".equals(scheme, ignoreCase = true)) "Bearer" else scheme
+  }
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/OAuth.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/OAuth.kt
new file mode 100644
index 000000000..71f0fcced
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/auth/OAuth.kt
@@ -0,0 +1,10 @@
+package com.xebia.functional.openai.auth
+
+class OAuth : Authentication {
+  var accessToken: String?
= null + + override fun apply(query: MutableMap>, headers: MutableMap) { + val token: String = accessToken ?: return + headers["Authorization"] = "Bearer $token" + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiAbstractions.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiAbstractions.kt new file mode 100644 index 000000000..526f7364b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiAbstractions.kt @@ -0,0 +1,32 @@ +package com.xebia.functional.openai.infrastructure + +typealias MultiValueMap = MutableMap> + +fun collectionDelimiter(collectionFormat: String) = + when (collectionFormat) { + "csv" -> "," + "tsv" -> "\t" + "pipe" -> "|" + "space" -> " " + else -> "" + } + +val defaultMultiValueConverter: (item: Any?) -> String = { item -> "$item" } + +fun toMultiValue( + items: Array, + collectionFormat: String, + map: (item: T) -> String = defaultMultiValueConverter +) = toMultiValue(items.asIterable(), collectionFormat, map) + +fun toMultiValue( + items: Iterable, + collectionFormat: String, + map: (item: T) -> String = defaultMultiValueConverter +): List { + return when (collectionFormat) { + "multi" -> items.map(map) + else -> + listOf(items.joinToString(separator = collectionDelimiter(collectionFormat), transform = map)) + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiClient.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiClient.kt new file mode 100644 index 000000000..f0457a8a5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/ApiClient.kt @@ -0,0 +1,214 @@ +package com.xebia.functional.openai.infrastructure + +import com.xebia.functional.openai.auth.* +import io.ktor.client.HttpClient +import io.ktor.client.HttpClientConfig +import io.ktor.client.engine.HttpClientEngine +import 
io.ktor.client.plugins.contentnegotiation.ContentNegotiation +import io.ktor.client.request.* +import io.ktor.client.request.forms.FormDataContent +import io.ktor.client.request.forms.MultiPartFormDataContent +import io.ktor.client.request.header +import io.ktor.client.request.parameter +import io.ktor.client.statement.HttpResponse +import io.ktor.http.* +import io.ktor.http.content.PartData +import io.ktor.serialization.kotlinx.json.json +import kotlin.Unit +import kotlinx.serialization.json.Json + +open class ApiClient(private val baseUrl: String) { + + private lateinit var client: HttpClient + + constructor( + baseUrl: String, + httpClientEngine: HttpClientEngine?, + httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null, + jsonBlock: Json, + ) : this(baseUrl = baseUrl) { + val clientConfig: (HttpClientConfig<*>) -> Unit by lazy { + { + it.install(ContentNegotiation) { json(jsonBlock) } + httpClientConfig?.invoke(it) + } + } + + client = httpClientEngine?.let { HttpClient(it, clientConfig) } ?: HttpClient(clientConfig) + } + + constructor(baseUrl: String, httpClient: HttpClient) : this(baseUrl = baseUrl) { + this.client = httpClient + } + + private val authentications: kotlin.collections.Map by lazy { + mapOf("ApiKeyAuth" to HttpBearerAuth("bearer")) + } + + companion object { + const val BASE_URL = "https://api.openai.com/v1" + val JSON_DEFAULT = Json { + ignoreUnknownKeys = true + prettyPrint = true + isLenient = true + } + protected val UNSAFE_HEADERS = listOf(HttpHeaders.ContentType) + } + + /** + * Set the username for the first HTTP basic authentication. + * + * @param username Username + */ + fun setUsername(username: String) { + val auth = + authentications?.values?.firstOrNull { it is HttpBasicAuth } as HttpBasicAuth? + ?: throw Exception("No HTTP basic authentication configured") + auth.username = username + } + + /** + * Set the password for the first HTTP basic authentication. 
+ * + * @param password Password + */ + fun setPassword(password: String) { + val auth = + authentications?.values?.firstOrNull { it is HttpBasicAuth } as HttpBasicAuth? + ?: throw Exception("No HTTP basic authentication configured") + auth.password = password + } + + /** + * Set the API key value for the first API key authentication. + * + * @param apiKey API key + * @param paramName The name of the API key parameter, or null to set the first key. + */ + fun setApiKey(apiKey: String, paramName: String? = null) { + val auth = + authentications?.values?.firstOrNull { + it is ApiKeyAuth && (paramName == null || paramName == it.paramName) + } as ApiKeyAuth? + ?: throw Exception("No API key authentication configured") + auth.apiKey = apiKey + } + + /** + * Set the API key prefix for the first API key authentication. + * + * @param apiKeyPrefix API key prefix + * @param paramName The name of the API key parameter, or null to set the first key. + */ + fun setApiKeyPrefix(apiKeyPrefix: String, paramName: String? = null) { + val auth = + authentications?.values?.firstOrNull { + it is ApiKeyAuth && (paramName == null || paramName == it.paramName) + } as ApiKeyAuth? + ?: throw Exception("No API key authentication configured") + auth.apiKeyPrefix = apiKeyPrefix + } + + /** + * Set the access token for the first OAuth2 authentication. + * + * @param accessToken Access token + */ + fun setAccessToken(accessToken: String) { + val auth = + authentications?.values?.firstOrNull { it is OAuth } as OAuth? + ?: throw Exception("No OAuth2 authentication configured") + auth.accessToken = accessToken + } + + /** + * Set the access token for the first Bearer authentication. + * + * @param bearerToken The bearer token. + */ + fun setBearerToken(bearerToken: String) { + val auth = + authentications?.values?.firstOrNull { it is HttpBearerAuth } as HttpBearerAuth?
+ ?: throw Exception("No Bearer authentication configured") + auth.bearerToken = bearerToken + } + + protected suspend fun multipartFormRequest( + requestConfig: RequestConfig, + body: kotlin.collections.List?, + authNames: kotlin.collections.List + ): HttpResponse { + return request(requestConfig, MultiPartFormDataContent(body ?: listOf()), authNames) + } + + protected suspend fun urlEncodedFormRequest( + requestConfig: RequestConfig, + body: Parameters?, + authNames: kotlin.collections.List + ): HttpResponse { + return request(requestConfig, FormDataContent(body ?: Parameters.Empty), authNames) + } + + protected suspend fun jsonRequest( + requestConfig: RequestConfig, + body: Any? = null, + authNames: kotlin.collections.List + ): HttpResponse = request(requestConfig, body, authNames) + + protected suspend fun request( + requestConfig: RequestConfig, + body: Any? = null, + authNames: kotlin.collections.List + ): HttpResponse { + requestConfig.updateForAuth(authNames) + val headers = requestConfig.headers + + return client.request { + this.url { + this.takeFrom(URLBuilder(baseUrl)) + appendPath(requestConfig.path.trimStart('/').split('/')) + requestConfig.query.forEach { query -> + query.value.forEach { value -> parameter(query.key, value) } + } + } + this.method = requestConfig.method.httpMethod + headers + .filter { header -> !UNSAFE_HEADERS.contains(header.key) } + .forEach { header -> this.header(header.key, header.value) } + if ( + requestConfig.method in listOf(RequestMethod.PUT, RequestMethod.POST, RequestMethod.PATCH) + ) { + this.setBody(body) + } + } + } + + private fun RequestConfig.updateForAuth( + authNames: kotlin.collections.List + ) { + for (authName in authNames) { + val auth = + authentications?.get(authName) ?: throw Exception("Authentication undefined: $authName") + auth.apply(query, headers) + } + } + + private fun URLBuilder.appendPath(components: kotlin.collections.List): URLBuilder = + apply { + encodedPath = + encodedPath.trimEnd('/') + + 
components.joinToString("/", prefix = "/") { it.encodeURLQueryComponent() } + } + + private val RequestMethod.httpMethod: HttpMethod + get() = + when (this) { + RequestMethod.DELETE -> HttpMethod.Delete + RequestMethod.GET -> HttpMethod.Get + RequestMethod.HEAD -> HttpMethod.Head + RequestMethod.PATCH -> HttpMethod.Patch + RequestMethod.PUT -> HttpMethod.Put + RequestMethod.POST -> HttpMethod.Post + RequestMethod.OPTIONS -> HttpMethod.Options + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Base64ByteArray.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Base64ByteArray.kt new file mode 100644 index 000000000..01f047524 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Base64ByteArray.kt @@ -0,0 +1,34 @@ +package com.xebia.functional.openai.infrastructure + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +@Serializable +class Base64ByteArray(val value: ByteArray) { + @Serializer(Base64ByteArray::class) + companion object : KSerializer { + override val descriptor = PrimitiveSerialDescriptor("Base64ByteArray", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, obj: Base64ByteArray) = + encoder.encodeString(obj.value.encodeBase64()) + + override fun deserialize(decoder: Decoder) = + Base64ByteArray(decoder.decodeString().decodeBase64Bytes()) + } + + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (other == null || this::class != other::class) return false + other as Base64ByteArray + return value.contentEquals(other.value) + } + + override fun hashCode(): Int { + return value.contentHashCode() + } + + override fun toString(): String { + return "Base64ByteArray(${hex(value)})" + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Bytes.kt 
b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Bytes.kt new file mode 100644 index 000000000..57f25a73c --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/Bytes.kt @@ -0,0 +1,115 @@ +package com.xebia.functional.openai.infrastructure + +import io.ktor.utils.io.core.* +import kotlin.experimental.and + +private val digits = "0123456789abcdef".toCharArray() +private const val BASE64_ALPHABET = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" +private const val BASE64_MASK: Byte = 0x3f +private const val BASE64_PAD = '=' +private val BASE64_INVERSE_ALPHABET = IntArray(256) { BASE64_ALPHABET.indexOf(it.toChar()) } + +private fun String.toCharArray(): CharArray = CharArray(length) { get(it) } + +private fun ByteArray.clearFrom(from: Int) = (from until size).forEach { this[it] = 0 } + +private fun Int.toBase64(): Char = BASE64_ALPHABET[this] + +private fun Byte.fromBase64(): Byte = + BASE64_INVERSE_ALPHABET[toInt() and 0xff].toByte() and BASE64_MASK + +internal fun ByteArray.encodeBase64(): String = + buildPacket { writeFully(this@encodeBase64) }.encodeBase64() + +internal fun String.decodeBase64Bytes(): ByteArray = + buildPacket { writeText(dropLastWhile { it == BASE64_PAD }) }.decodeBase64Bytes().readBytes() + +/** + * Encode [bytes] as a HEX string with no spaces, newlines and `0x` prefixes. + * + * Taken from + * https://github.com/ktorio/ktor/blob/master/ktor-utils/common/src/io/ktor/util/Crypto.kt + */ +internal fun hex(bytes: ByteArray): String { + val result = CharArray(bytes.size * 2) + var resultIndex = 0 + val digits = digits + + for (element in bytes) { + val b = element.toInt() and 0xff + result[resultIndex++] = digits[b shr 4] + result[resultIndex++] = digits[b and 0x0f] + } + + return result.concatToString() +} + +/** + * Decode bytes from HEX string. It should be no spaces and `0x` prefixes. 
+ * + * Taken from + * https://github.com/ktorio/ktor/blob/master/ktor-utils/common/src/io/ktor/util/Crypto.kt + */ +internal fun hex(s: String): ByteArray { + val result = ByteArray(s.length / 2) + for (idx in result.indices) { + val srcIdx = idx * 2 + val high = s[srcIdx].toString().toInt(16) shl 4 + val low = s[srcIdx + 1].toString().toInt(16) + result[idx] = (high or low).toByte() + } + + return result +} + +/** + * Encode [ByteReadPacket] in base64 format. + * + * Taken from + * https://github.com/ktorio/ktor/blob/424d1d2cfaa3281302c60af9500f738c8c2fc846/ktor-utils/common/src/io/ktor/util/Base64.kt + */ +private fun ByteReadPacket.encodeBase64(): String = buildString { + val data = ByteArray(3) + while (remaining > 0) { + val read = readAvailable(data) + data.clearFrom(read) + + val padSize = (data.size - read) * 8 / 6 + val chunk = + ((data[0].toInt() and 0xFF) shl 16) or + ((data[1].toInt() and 0xFF) shl 8) or + (data[2].toInt() and 0xFF) + + for (index in data.size downTo padSize) { + val char = (chunk shr (6 * index)) and BASE64_MASK.toInt() + append(char.toBase64()) + } + + repeat(padSize) { append(BASE64_PAD) } + } +} + +/** + * Decode [ByteReadPacket] from base64 format + * + * Taken from + * https://github.com/ktorio/ktor/blob/424d1d2cfaa3281302c60af9500f738c8c2fc846/ktor-utils/common/src/io/ktor/util/Base64.kt + */ +private fun ByteReadPacket.decodeBase64Bytes(): Input = buildPacket { + val data = ByteArray(4) + + while (remaining > 0) { + val read = readAvailable(data) + + val chunk = + data.foldIndexed(0) { index, result, current -> + result or (current.fromBase64().toInt() shl ((3 - index) * 6)) + } + + for (index in data.size - 2 downTo (data.size - read)) { + val origin = (chunk shr (8 * index)) and 0xff + writeByte(origin.toByte()) + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/HttpResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/HttpResponse.kt 
new file mode 100644 index 000000000..44e9b4d11 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/HttpResponse.kt @@ -0,0 +1,67 @@ +package com.xebia.functional.openai.infrastructure + +import io.ktor.http.Headers +import io.ktor.http.isSuccess +import io.ktor.util.reflect.TypeInfo +import io.ktor.util.reflect.typeInfo + +open class HttpResponse( + val response: io.ktor.client.statement.HttpResponse, + val provider: BodyProvider +) { + val status: Int = response.status.value + val success: Boolean = response.status.isSuccess() + val headers: Map> = response.headers.mapEntries() + + suspend fun body(): T = provider.body(response) + + suspend fun typedBody(type: TypeInfo): V = provider.typedBody(response, type) + + companion object { + private fun Headers.mapEntries(): Map> { + val result = mutableMapOf>() + entries().forEach { result[it.key] = it.value } + return result + } + } +} + +interface BodyProvider { + suspend fun body(response: io.ktor.client.statement.HttpResponse): T + + suspend fun typedBody( + response: io.ktor.client.statement.HttpResponse, + type: TypeInfo + ): V +} + +class TypedBodyProvider(private val type: TypeInfo) : BodyProvider { + @Suppress("UNCHECKED_CAST") + override suspend fun body(response: io.ktor.client.statement.HttpResponse): T = + response.call.body(type) as T + + @Suppress("UNCHECKED_CAST") + override suspend fun typedBody( + response: io.ktor.client.statement.HttpResponse, + type: TypeInfo + ): V = response.call.body(type) as V +} + +class MappedBodyProvider( + private val provider: BodyProvider, + private val block: S.() -> T +) : BodyProvider { + override suspend fun body(response: io.ktor.client.statement.HttpResponse): T = + block(provider.body(response)) + + override suspend fun typedBody( + response: io.ktor.client.statement.HttpResponse, + type: TypeInfo + ): V = provider.typedBody(response, type) +} + +inline fun io.ktor.client.statement.HttpResponse.wrap(): HttpResponse = + 
HttpResponse(this, TypedBodyProvider(typeInfo())) + +fun HttpResponse.map(block: T.() -> V): HttpResponse = + HttpResponse(response, MappedBodyProvider(provider, block)) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/OctetByteArray.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/OctetByteArray.kt new file mode 100644 index 000000000..885321e48 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/OctetByteArray.kt @@ -0,0 +1,33 @@ +package com.xebia.functional.openai.infrastructure + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +@Serializable +class OctetByteArray(val value: ByteArray) { + @Serializer(OctetByteArray::class) + companion object : KSerializer { + override val descriptor = PrimitiveSerialDescriptor("OctetByteArray", PrimitiveKind.STRING) + + override fun serialize(encoder: Encoder, obj: OctetByteArray) = + encoder.encodeString(hex(obj.value)) + + override fun deserialize(decoder: Decoder) = OctetByteArray(hex(decoder.decodeString())) + } + + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (other == null || this::class != other::class) return false + other as OctetByteArray + return value.contentEquals(other.value) + } + + override fun hashCode(): Int { + return value.contentHashCode() + } + + override fun toString(): String { + return "OctetByteArray(${hex(value)})" + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/PartConfig.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/PartConfig.kt new file mode 100644 index 000000000..53de25bee --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/PartConfig.kt @@ -0,0 +1,10 @@ +package com.xebia.functional.openai.infrastructure + +/** + * Defines a 
config object for a given part of a multi-part request. NOTE: Headers is a + * Map because rfc2616 defines multi-valued headers as csv-only. + */ +data class PartConfig( + val headers: MutableMap = mutableMapOf(), + val body: T? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestConfig.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestConfig.kt new file mode 100644 index 000000000..36884adac --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestConfig.kt @@ -0,0 +1,15 @@ +package com.xebia.functional.openai.infrastructure + +/** + * Defines a config object for a given request. NOTE: This object doesn't include 'body' because it + * allows for caching of the constructed object for many request definitions. NOTE: Headers is a + * Map because rfc2616 defines multi-valued headers as csv-only. + */ +data class RequestConfig( + val method: RequestMethod, + val path: String, + val headers: MutableMap = mutableMapOf(), + val query: MutableMap> = mutableMapOf(), + val requiresAuthentication: Boolean, + val body: T? 
= null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestMethod.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestMethod.kt new file mode 100644 index 000000000..72e694eb5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/RequestMethod.kt @@ -0,0 +1,12 @@ +package com.xebia.functional.openai.infrastructure + +/** Provides enumerated HTTP verbs */ +enum class RequestMethod { + GET, + DELETE, + HEAD, + OPTIONS, + PATCH, + POST, + PUT +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantFileObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantFileObject.kt new file mode 100644 index 000000000..9abaa4796 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantFileObject.kt @@ -0,0 +1,46 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A list of [Files](/docs/api-reference/files) attached to an `assistant`. + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `assistant.file`. + * @param createdAt The Unix timestamp (in seconds) for when the assistant file was created. + * @param assistantId The assistant ID that the file is attached to. + */ +@Serializable +data class AssistantFileObject( + + /* The identifier, which can be referenced in API endpoints. 
*/ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `assistant.file`. */ + @SerialName(value = "object") @Required val `object`: AssistantFileObject.`Object`, + + /* The Unix timestamp (in seconds) for when the assistant file was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The assistant ID that the file is attached to. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String +) { + + /** + * The object type, which is always `assistant.file`. + * + * Values: assistantPeriodFile + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "assistant.file") assistantPeriodFile("assistant.file") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObject.kt new file mode 100644 index 000000000..494a74b71 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObject.kt @@ -0,0 +1,82 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents an `assistant` that can call the model and use tools. + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `assistant`. + * @param createdAt The Unix timestamp (in seconds) for when the assistant was created. + * @param name The name of the assistant. The maximum length is 256 characters. + * @param description The description of the assistant. 
The maximum length is 512 characters. + * @param model ID of the model to use. You can use the + * [List models](/docs/api-reference/models/list) API to see all of your available models, or see + * our [Model overview](/docs/models/overview) for descriptions of them. + * @param instructions The system instructions that the assistant uses. The maximum length is 32768 + * characters. + * @param tools A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * @param fileIds A list of [file](/docs/api-reference/files) IDs attached to this assistant. There + * can be a maximum of 20 files attached to the assistant. Files are ordered by their creation + * date in ascending order. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class AssistantObject( + + /* The identifier, which can be referenced in API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `assistant`. */ + @SerialName(value = "object") @Required val `object`: AssistantObject.`Object`, + + /* The Unix timestamp (in seconds) for when the assistant was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The name of the assistant. The maximum length is 256 characters. */ + @SerialName(value = "name") @Required val name: kotlin.String?, + + /* The description of the assistant. The maximum length is 512 characters. */ + @SerialName(value = "description") @Required val description: kotlin.String?, + + /* ID of the model to use.
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The system instructions that the assistant uses. The maximum length is 32768 characters. */ + @SerialName(value = "instructions") @Required val instructions: kotlin.String?, + + /* A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ + @SerialName(value = "tools") + @Required + val tools: kotlin.collections.List = arrayListOf(), + + /* A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. */ + @SerialName(value = "file_ids") + @Required + val fileIds: kotlin.collections.List = arrayListOf(), + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") @Required val metadata: kotlin.String? +) { + + /** + * The object type, which is always `assistant`.
+ * + * Values: assistant + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "assistant") assistant("assistant") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObjectToolsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObjectToolsInner.kt new file mode 100644 index 000000000..8ea45b6df --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantObjectToolsInner.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type The type of tool being defined: `code_interpreter` + * @param function + */ +@Serializable +data class AssistantObjectToolsInner( + + /* The type of tool being defined: `code_interpreter` */ + @SerialName(value = "type") @Required val type: AssistantObjectToolsInner.Type, + @SerialName(value = "function") @Required val function: FunctionObject +) { + + /** + * The type of tool being defined: `code_interpreter` + * + * Values: codeInterpreter,retrieval,function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "code_interpreter") codeInterpreter("code_interpreter"), + @SerialName(value = "retrieval") retrieval("retrieval"), + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsCode.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsCode.kt new file mode 100644 index 000000000..8fc6ccc6f --- /dev/null +++ 
b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsCode.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param type The type of tool being defined: `code_interpreter` */ +@Serializable +data class AssistantToolsCode( + + /* The type of tool being defined: `code_interpreter` */ + @SerialName(value = "type") @Required val type: AssistantToolsCode.Type +) { + + /** + * The type of tool being defined: `code_interpreter` + * + * Values: codeInterpreter + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "code_interpreter") codeInterpreter("code_interpreter") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunction.kt new file mode 100644 index 000000000..a6700ca48 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunction.kt @@ -0,0 +1,34 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type The type of tool being defined: `function` + * @param function + */ +@Serializable +data class AssistantToolsFunction( + + /* The type of tool being defined: `function` */ + @SerialName(value = "type") @Required val type: AssistantToolsFunction.Type, + @SerialName(value = "function") @Required val function: FunctionObject +) { + + /** + * The type of tool being defined: `function` + * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunctionFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunctionFunction.kt new file mode 100644 index 000000000..b8092dc16 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsFunctionFunction.kt @@ -0,0 +1,31 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The function definition. + * + * @param description A description of what the function does, used by the model to choose when and + * how to call the function. + * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. 
+ * @param parameters + */ +@Serializable +data class AssistantToolsFunctionFunction( + + /* A description of what the function does, used by the model to choose when and how to call the function. */ + @SerialName(value = "description") @Required val description: kotlin.String, + + /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ + @SerialName(value = "name") @Required val name: kotlin.String, + @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsRetrieval.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsRetrieval.kt new file mode 100644 index 000000000..e4fbd038a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/AssistantToolsRetrieval.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param type The type of tool being defined: `retrieval` */ +@Serializable +data class AssistantToolsRetrieval( + + /* The type of tool being defined: `retrieval` */ + @SerialName(value = "type") @Required val type: AssistantToolsRetrieval.Type +) { + + /** + * The type of tool being defined: `retrieval` + * + * Values: retrieval + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "retrieval") retrieval("retrieval") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctionCallOption.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctionCallOption.kt new file mode 100644 index 000000000..304036d35 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctionCallOption.kt @@ -0,0 +1,24 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that + * function. + * + * @param name The name of the function to call. + */ +@Serializable +data class ChatCompletionFunctionCallOption( + + /* The name of the function to call. 
*/ + @SerialName(value = "name") @Required val name: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt new file mode 100644 index 000000000..a2d3ea524 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + * @param parameters + * @param description A description of what the function does, used by the model to choose when and + * how to call the function. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class ChatCompletionFunctions( + + /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ + @SerialName(value = "name") @Required val name: kotlin.String, + @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject, + + /* A description of what the function does, used by the model to choose when and how to call the function. */ + @SerialName(value = "description") val description: kotlin.String? 
= null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCall.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCall.kt new file mode 100644 index 000000000..8babcc379 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCall.kt @@ -0,0 +1,38 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id The ID of the tool call. + * @param type The type of the tool. Currently, only `function` is supported. + * @param function + */ +@Serializable +data class ChatCompletionMessageToolCall( + + /* The ID of the tool call. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of the tool. Currently, only `function` is supported. */ + @SerialName(value = "type") @Required val type: ChatCompletionMessageToolCall.Type, + @SerialName(value = "function") @Required val function: ChatCompletionMessageToolCallFunction +) { + + /** + * The type of the tool. Currently, only `function` is supported. 
+ * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunk.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunk.kt new file mode 100644 index 000000000..74a5dc60b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunk.kt @@ -0,0 +1,40 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param index + * @param id The ID of the tool call. + * @param type The type of the tool. Currently, only `function` is supported. + * @param function + */ +@Serializable +data class ChatCompletionMessageToolCallChunk( + @SerialName(value = "index") @Required val index: kotlin.Int, + + /* The ID of the tool call. */ + @SerialName(value = "id") val id: kotlin.String? = null, + + /* The type of the tool. Currently, only `function` is supported. */ + @SerialName(value = "type") val type: ChatCompletionMessageToolCallChunk.Type? = null, + @SerialName(value = "function") val function: ChatCompletionMessageToolCallChunkFunction? = null +) { + + /** + * The type of the tool. Currently, only `function` is supported. 
+ * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunkFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunkFunction.kt new file mode 100644 index 000000000..bfb721852 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallChunkFunction.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param name The name of the function to call. + * @param arguments The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may hallucinate parameters + * not defined by your function schema. Validate the arguments in your code before calling your + * function. + */ +@Serializable +data class ChatCompletionMessageToolCallChunkFunction( + + /* The name of the function to call. */ + @SerialName(value = "name") val name: kotlin.String? = null, + + /* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. */ + @SerialName(value = "arguments") val arguments: kotlin.String? 
= null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallFunction.kt new file mode 100644 index 000000000..902ee642f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionMessageToolCallFunction.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The function that the model called. + * + * @param name The name of the function to call. + * @param arguments The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may hallucinate parameters + * not defined by your function schema. Validate the arguments in your code before calling your + * function. + */ +@Serializable +data class ChatCompletionMessageToolCallFunction( + + /* The name of the function to call. */ + @SerialName(value = "name") @Required val name: kotlin.String, + + /* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
*/ + @SerialName(value = "arguments") @Required val arguments: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt new file mode 100644 index 000000000..5f4cce7ea --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Specifies a tool the model should use. Use to force the model to call a specific function. + * + * @param type The type of the tool. Currently, only `function` is supported. + * @param function + */ +@Serializable +data class ChatCompletionNamedToolChoice( + + /* The type of the tool. Currently, only `function` is supported. */ + @SerialName(value = "type") val type: ChatCompletionNamedToolChoice.Type? = null, + @SerialName(value = "function") val function: ChatCompletionNamedToolChoiceFunction? = null +) { + + /** + * The type of the tool. Currently, only `function` is supported. 
+ * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoiceFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoiceFunction.kt new file mode 100644 index 000000000..3a8af7a02 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoiceFunction.kt @@ -0,0 +1,19 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param name The name of the function to call. */ +@Serializable +data class ChatCompletionNamedToolChoiceFunction( + + /* The name of the function to call. */ + @SerialName(value = "name") @Required val name: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessage.kt new file mode 100644 index 000000000..a4ca7b62b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessage.kt @@ -0,0 +1,45 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param content The contents of the assistant message. + * @param role The role of the messages author, in this case `assistant`. + * @param toolCalls The tool calls generated by the model, such as function calls. + * @param functionCall + */ +@Serializable +data class ChatCompletionRequestAssistantMessage( + + /* The contents of the assistant message. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* The role of the messages author, in this case `assistant`. */ + @SerialName(value = "role") @Required val role: ChatCompletionRequestAssistantMessage.Role, + + /* The tool calls generated by the model, such as function calls. */ + @SerialName(value = "tool_calls") + val toolCalls: kotlin.collections.List? = null, + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "function_call") + val functionCall: ChatCompletionRequestAssistantMessageFunctionCall? = null +) { + + /** + * The role of the messages author, in this case `assistant`. 
+ * + * Values: assistant + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "assistant") assistant("assistant") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessageFunctionCall.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessageFunctionCall.kt new file mode 100644 index 000000000..edb12c84c --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestAssistantMessageFunctionCall.kt @@ -0,0 +1,32 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + * called, as generated by the model. + * + * @param arguments The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may hallucinate parameters + * not defined by your function schema. Validate the arguments in your code before calling your + * function. + * @param name The name of the function to call. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class ChatCompletionRequestAssistantMessageFunctionCall( + + /* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
*/ + @SerialName(value = "arguments") @Required val arguments: kotlin.String, + + /* The name of the function to call. */ + @SerialName(value = "name") @Required val name: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestFunctionMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestFunctionMessage.kt new file mode 100644 index 000000000..a061c0209 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestFunctionMessage.kt @@ -0,0 +1,41 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param role The role of the messages author, in this case `function`. + * @param content The return value from the function call, to return to the model. + * @param name The name of the function to call. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class ChatCompletionRequestFunctionMessage( + + /* The role of the messages author, in this case `function`. */ + @SerialName(value = "role") @Required val role: ChatCompletionRequestFunctionMessage.Role, + + /* The return value from the function call, to return to the model. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* The name of the function to call. */ + @SerialName(value = "name") @Required val name: kotlin.String +) { + + /** + * The role of the messages author, in this case `function`. 
+ * + * Values: function + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessage.kt new file mode 100644 index 000000000..726102db7 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestMessage.kt @@ -0,0 +1,53 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param content The return value from the function call, to return to the model. + * @param role The role of the messages author, in this case `function`. + * @param toolCallId Tool call that this message is responding to. + * @param name The name of the function to call. + * @param toolCalls The tool calls generated by the model, such as function calls. + * @param functionCall + */ +@Serializable +data class ChatCompletionRequestMessage( + + /* The return value from the function call, to return to the model. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* The role of the messages author, in this case `function`. */ + @SerialName(value = "role") @Required val role: ChatCompletionRequestMessage.Role, + + /* Tool call that this message is responding to. */ + @SerialName(value = "tool_call_id") @Required val toolCallId: kotlin.String, + + /* The name of the function to call. 
*/ + @SerialName(value = "name") @Required val name: kotlin.String, + + /* The tool calls generated by the model, such as function calls. */ + @SerialName(value = "tool_calls") + val toolCalls: kotlin.collections.List? = null, + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "function_call") + val functionCall: ChatCompletionRequestAssistantMessageFunctionCall? = null +) { + + /** + * The role of the messages author, in this case `function`. + * + * Values: function + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestSystemMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestSystemMessage.kt new file mode 100644 index 000000000..deb4408ba --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestSystemMessage.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param content The contents of the system message. + * @param role The role of the messages author, in this case `system`. + */ +@Serializable +data class ChatCompletionRequestSystemMessage( + + /* The contents of the system message. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* The role of the messages author, in this case `system`. 
*/ + @SerialName(value = "role") @Required val role: ChatCompletionRequestSystemMessage.Role +) { + + /** + * The role of the messages author, in this case `system`. + * + * Values: system + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "system") system("system") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestToolMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestToolMessage.kt new file mode 100644 index 000000000..c9ab86cfc --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestToolMessage.kt @@ -0,0 +1,40 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param role The role of the messages author, in this case `tool`. + * @param content The contents of the tool message. + * @param toolCallId Tool call that this message is responding to. + */ +@Serializable +data class ChatCompletionRequestToolMessage( + + /* The role of the messages author, in this case `tool`. */ + @SerialName(value = "role") @Required val role: ChatCompletionRequestToolMessage.Role, + + /* The contents of the tool message. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* Tool call that this message is responding to. */ + @SerialName(value = "tool_call_id") @Required val toolCallId: kotlin.String +) { + + /** + * The role of the messages author, in this case `tool`. 
+ * + * Values: tool + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "tool") tool("tool") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestUserMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestUserMessage.kt new file mode 100644 index 000000000..fe017805f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRequestUserMessage.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param content + * @param role The role of the messages author, in this case `user`. + */ +@Serializable +data class ChatCompletionRequestUserMessage( + @SerialName(value = "content") + @Required + val content: com.xebia.functional.openai.models.ext.chat.ChatCompletionRequestUserMessageContent?, + + /* The role of the messages author, in this case `user`. */ + @SerialName(value = "role") @Required val role: ChatCompletionRequestUserMessage.Role +) { + + /** + * The role of the messages author, in this case `user`. 
+ * + * Values: user + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "user") user("user") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseMessage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseMessage.kt new file mode 100644 index 000000000..54fa3518f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseMessage.kt @@ -0,0 +1,47 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A chat completion message generated by the model. + * + * @param content The contents of the message. + * @param role The role of the author of this message. + * @param toolCalls The tool calls generated by the model, such as function calls. + * @param functionCall + */ +@Serializable +data class ChatCompletionResponseMessage( + + /* The contents of the message. */ + @SerialName(value = "content") @Required val content: kotlin.String?, + + /* The role of the author of this message. */ + @SerialName(value = "role") @Required val role: ChatCompletionResponseMessage.Role, + + /* The tool calls generated by the model, such as function calls. */ + @SerialName(value = "tool_calls") + val toolCalls: kotlin.collections.List? = null, + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "function_call") + val functionCall: ChatCompletionRequestAssistantMessageFunctionCall? = null +) { + + /** + * The role of the author of this message. 
+ * + * Values: assistant + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "assistant") assistant("assistant") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt new file mode 100644 index 000000000..80fa41d9d --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt @@ -0,0 +1,46 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* + +/** + * The role of the author of a message + * + * Values: system,user,assistant,tool,function + */ +@Serializable +enum class ChatCompletionRole(val value: kotlin.String) { + + @SerialName(value = "system") system("system"), + @SerialName(value = "user") user("user"), + @SerialName(value = "assistant") assistant("assistant"), + @SerialName(value = "tool") tool("tool"), + @SerialName(value = "function") function("function"); + + /** + * Override [toString()] to avoid using the enum variable name as the value, and instead use the + * actual value defined in the API spec file. + * + * This solves a problem when the variable name and its value are different, and ensures that the + * client sends the correct enum values to the server always. + */ + override fun toString(): kotlin.String = value + + companion object { + /** Converts the provided [data] to a [String] on success, null otherwise. */ + fun encode(data: kotlin.Any?): kotlin.String? = + if (data is ChatCompletionRole) "$data" else null + + /** Returns a valid [ChatCompletionRole] for [data], null otherwise. 
*/ + fun decode(data: kotlin.Any?): ChatCompletionRole? = + data?.let { + val normalizedData = "$it".lowercase() + values().firstOrNull { value -> it == value || normalizedData == "$value".lowercase() } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt new file mode 100644 index 000000000..881d68746 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt @@ -0,0 +1,48 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A chat completion delta generated by streamed model responses. + * + * @param content The contents of the chunk message. + * @param functionCall + * @param toolCalls + * @param role The role of the author of this message. + */ +@Serializable +data class ChatCompletionStreamResponseDelta( + + /* The contents of the chunk message. */ + @SerialName(value = "content") val content: kotlin.String? = null, + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "function_call") + val functionCall: ChatCompletionStreamResponseDeltaFunctionCall? = null, + @SerialName(value = "tool_calls") + val toolCalls: kotlin.collections.List? = null, + + /* The role of the author of this message. */ + @SerialName(value = "role") val role: ChatCompletionStreamResponseDelta.Role? = null +) { + + /** + * The role of the author of this message. 
+ * + * Values: system,user,assistant,tool + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "system") system("system"), + @SerialName(value = "user") user("user"), + @SerialName(value = "assistant") assistant("assistant"), + @SerialName(value = "tool") tool("tool") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt new file mode 100644 index 000000000..33efd9169 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt @@ -0,0 +1,32 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + * called, as generated by the model. + * + * @param arguments The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may hallucinate parameters + * not defined by your function schema. Validate the arguments in your code before calling your + * function. + * @param name The name of the function to call. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class ChatCompletionStreamResponseDeltaFunctionCall( + + /* The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. */ + @SerialName(value = "arguments") val arguments: kotlin.String? = null, + + /* The name of the function to call. */ + @SerialName(value = "name") val name: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt new file mode 100644 index 000000000..e35afebe4 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt @@ -0,0 +1,34 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type The type of the tool. Currently, only `function` is supported. + * @param function + */ +@Serializable +data class ChatCompletionTool( + + /* The type of the tool. Currently, only `function` is supported. */ + @SerialName(value = "type") @Required val type: ChatCompletionTool.Type, + @SerialName(value = "function") @Required val function: FunctionObject +) { + + /** + * The type of the tool. Currently, only `function` is supported. 
+ * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolChoiceOption.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolChoiceOption.kt new file mode 100644 index 000000000..77cd1a292 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolChoiceOption.kt @@ -0,0 +1,40 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Controls which (if any) function is called by the model. `none` means the model will not call a + * function and instead generates a message. `auto` means the model can pick between generating a + * message or calling a function. Specifying a particular function via `{\"type: \"function\", + * \"function\": {\"name\": \"my_function\"}}` forces the model to call that function. `none` is the + * default when no functions are present. `auto` is the default if functions are present. + * + * @param type The type of the tool. Currently, only `function` is supported. + * @param function + */ +@Serializable +data class ChatCompletionToolChoiceOption( + + /* The type of the tool. Currently, only `function` is supported. */ + @SerialName(value = "type") val type: ChatCompletionToolChoiceOption.Type? = null, + @SerialName(value = "function") val function: ChatCompletionNamedToolChoiceFunction? = null +) { + + /** + * The type of the tool. Currently, only `function` is supported. 
+ * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolFunction.kt new file mode 100644 index 000000000..130703559 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionToolFunction.kt @@ -0,0 +1,29 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + * @param parameters + * @param description A description of what the function does, used by the model to choose when and + * how to call the function. + */ +@Serializable +data class ChatCompletionToolFunction( + + /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ + @SerialName(value = "name") @Required val name: kotlin.String, + @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject, + + /* A description of what the function does, used by the model to choose when and how to call the function. */ + @SerialName(value = "description") val description: kotlin.String? 
= null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt new file mode 100644 index 000000000..c95db3a81 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt @@ -0,0 +1,31 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Usage statistics for the completion request. + * + * @param completionTokens Number of tokens in the generated completion. + * @param promptTokens Number of tokens in the prompt. + * @param totalTokens Total number of tokens used in the request (prompt + completion). + */ +@Serializable +data class CompletionUsage( + + /* Number of tokens in the generated completion. */ + @SerialName(value = "completion_tokens") @Required val completionTokens: kotlin.Int, + + /* Number of tokens in the prompt. */ + @SerialName(value = "prompt_tokens") @Required val promptTokens: kotlin.Int, + + /* Total number of tokens used in the request (prompt + completion). 
*/ + @SerialName(value = "total_tokens") @Required val totalTokens: kotlin.Int +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt new file mode 100644 index 000000000..de1dc012a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt @@ -0,0 +1,23 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param fileId A [File](/docs/api-reference/files) ID (with `purpose=\"assistants\"`) that the + * assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access + * files. + */ +@Serializable +data class CreateAssistantFileRequest( + + /* A [File](/docs/api-reference/files) ID (with `purpose=\"assistants\"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. */ + @SerialName(value = "file_id") @Required val fileId: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt new file mode 100644 index 000000000..6e5b2dfe9 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt @@ -0,0 +1,51 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param model + * @param name The name of the assistant. The maximum length is 256 characters. + * @param description The description of the assistant. The maximum length is 512 characters. + * @param instructions The system instructions that the assistant uses. The maximum length is 32768 + * characters. + * @param tools A list of tools enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * @param fileIds A list of [file](/docs/api-reference/files) IDs attached to this assistant. There + * can be a maximum of 20 files attached to the assistant. Files are ordered by their creation + * date in ascending order. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class CreateAssistantRequest( + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The name of the assistant. The maximum length is 256 characters. */ + @SerialName(value = "name") val name: kotlin.String? = null, + + /* The description of the assistant. The maximum length is 512 characters. */ + @SerialName(value = "description") val description: kotlin.String? = null, + + /* The system instructions that the assistant uses. The maximum length is 32768 characters. */ + @SerialName(value = "instructions") val instructions: kotlin.String? = null, + + /* A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ + @SerialName(value = "tools") + val tools: kotlin.collections.List? = arrayListOf(), + + /* A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. */ + @SerialName(value = "file_ids") + val fileIds: kotlin.collections.List? = arrayListOf(), + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") val metadata: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt new file mode 100644 index 000000000..c655795ed --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt @@ -0,0 +1,121 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param messages A list of messages comprising the conversation so far. + * [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + * @param model + * @param frequencyPenalty Number between -2.0 and 2.0. 
Positive values penalize new tokens based on + * their existing frequency in the text so far, decreasing the model's likelihood to repeat the + * same line verbatim. + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a + * JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated + * bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the + * model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + * should decrease or increase likelihood of selection; values like -100 or 100 should result in a + * ban or exclusive selection of the relevant token. + * @param maxTokens The maximum number of [tokens](/tokenizer) to generate in the chat completion. + * The total length of input tokens and generated tokens is limited by the model's context length. + * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + * for counting tokens. + * @param n How many chat completion choices to generate for each input message. + * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * @param responseFormat + * @param seed This feature is in Beta. If specified, our system will make a best effort to sample + * deterministically, such that repeated requests with the same `seed` and parameters should + * return the same result. Determinism is not guaranteed, and you should refer to the + * `system_fingerprint` response parameter to monitor changes in the backend. 
+ * @param stop + * @param stream If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent + * as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. We generally recommend altering this or + * `temperature` but not both. + * @param tools A list of tools the model may call. Currently, only functions are supported as a + * tool. Use this to provide a list of functions the model may generate JSON inputs for. + * @param toolChoice + * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * @param functionCall + * @param functions Deprecated in favor of `tools`. A list of functions the model may generate JSON + * inputs for. + */ +@Serializable +data class CreateChatCompletionRequest( + + /* A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
*/ + @SerialName(value = "messages") + @Required + val messages: kotlin.collections.List, + @SerialName(value = "model") + @Required + val model: com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestModel, + + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + @SerialName(value = "frequency_penalty") val frequencyPenalty: kotlin.Double? = (0).toDouble(), + + /* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. */ + @SerialName(value = "logit_bias") + val logitBias: kotlin.collections.Map? = null, + + /* The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. */ + @SerialName(value = "max_tokens") val maxTokens: kotlin.Int? = null, + + /* How many chat completion choices to generate for each input message. */ + @SerialName(value = "n") val n: kotlin.Int? = 1, + + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + @SerialName(value = "presence_penalty") val presencePenalty: kotlin.Double? = (0).toDouble(), + @SerialName(value = "response_format") + val responseFormat: CreateChatCompletionRequestResponseFormat? = null, + + /* This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. */ + @SerialName(value = "seed") val seed: kotlin.Int? = null, + @SerialName(value = "stop") + val stop: com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestStop? = + null, + + /* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). */ + @SerialName(value = "stream") val stream: kotlin.Boolean? = false, + + /* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. */ + @SerialName(value = "temperature") val temperature: kotlin.Double? = (1).toDouble(), + + /* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
*/ + @SerialName(value = "top_p") val topP: kotlin.Double? = (1).toDouble(), + + /* A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. */ + @SerialName(value = "tools") val tools: kotlin.collections.List? = null, + @SerialName(value = "tool_choice") val toolChoice: ChatCompletionToolChoiceOption? = null, + + /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */ + @SerialName(value = "user") val user: kotlin.String? = null, + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "function_call") + val functionCall: CreateChatCompletionRequestFunctionCall? = null, + + /* Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. */ + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "functions") + val functions: kotlin.collections.List? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt new file mode 100644 index 000000000..b96726326 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Deprecated in favor of `tool_choice`. 
Controls which (if any) function is called by the model. + * `none` means the model will not call a function and instead generates a message. `auto` means the + * model can pick between generating a message or calling a function. Specifying a particular + * function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the + * default when no functions are present. `auto` is the default if functions are present. + * + * @param name The name of the function to call. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class CreateChatCompletionRequestFunctionCall( + + /* The name of the function to call. */ + @SerialName(value = "name") @Required val name: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt new file mode 100644 index 000000000..5957c331a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt @@ -0,0 +1,42 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * An object specifying the format that the model must output. Setting to `{ \"type\": + * \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid + * JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. 
Without this, the model may generate an unending stream of + * whitespace until the generation reaches the token limit, resulting in increased latency and + * appearance of a \"stuck\" request. Also note that the message content may be partially cut off if + * `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the + * conversation exceeded the max context length. + * + * @param type Must be one of `text` or `json_object`. + */ +@Serializable +data class CreateChatCompletionRequestResponseFormat( + + /* Must be one of `text` or `json_object`. */ + @SerialName(value = "type") val type: CreateChatCompletionRequestResponseFormat.Type? = Type.text +) { + + /** + * Must be one of `text` or `json_object`. + * + * Values: text,jsonObject + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "text") text("text"), + @SerialName(value = "json_object") jsonObject("json_object") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt new file mode 100644 index 000000000..118677c0e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt @@ -0,0 +1,60 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a chat completion response returned by model, based on the provided input. + * + * @param id A unique identifier for the chat completion. + * @param choices A list of chat completion choices. 
Can be more than one if `n` is greater than 1. + * @param created The Unix timestamp (in seconds) of when the chat completion was created. + * @param model The model used for the chat completion. + * @param `object` The object type, which is always `chat.completion`. + * @param systemFingerprint This fingerprint represents the backend configuration that the model + * runs with. Can be used in conjunction with the `seed` request parameter to understand when + * backend changes have been made that might impact determinism. + * @param usage + */ +@Serializable +data class CreateChatCompletionResponse( + + /* A unique identifier for the chat completion. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + @SerialName(value = "choices") + @Required + val choices: kotlin.collections.List, + + /* The Unix timestamp (in seconds) of when the chat completion was created. */ + @SerialName(value = "created") @Required val created: kotlin.Int, + + /* The model used for the chat completion. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always `chat.completion`. */ + @SerialName(value = "object") @Required val `object`: CreateChatCompletionResponse.`Object`, + + /* This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */ + @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null, + @SerialName(value = "usage") val usage: CompletionUsage? = null +) { + + /** + * The object type, which is always `chat.completion`. 
+ * + * Values: chatPeriodCompletion + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "chat.completion") chatPeriodCompletion("chat.completion") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt new file mode 100644 index 000000000..a56beb7f0 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt @@ -0,0 +1,52 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param finishReason The reason the model stopped generating tokens. This will be `stop` if the + * model hit a natural stop point or a provided stop sequence, `length` if the maximum number of + * tokens specified in the request was reached, `content_filter` if content was omitted due to a + * flag from our content filters, `tool_calls` if the model called a tool, or `function_call` + * (deprecated) if the model called a function. + * @param index The index of the choice in the list of choices. + * @param message + */ +@Serializable +data class CreateChatCompletionResponseChoicesInner( + + /* The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. */ + @SerialName(value = "finish_reason") + @Required + val finishReason: CreateChatCompletionResponseChoicesInner.FinishReason, + + /* The index of the choice in the list of choices. */ + @SerialName(value = "index") @Required val index: kotlin.Int, + @SerialName(value = "message") @Required val message: ChatCompletionResponseMessage +) { + + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural + * stop point or a provided stop sequence, `length` if the maximum number of tokens specified in + * the request was reached, `content_filter` if content was omitted due to a flag from our content + * filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model + * called a function. 
+ * + * Values: stop,length,toolCalls,contentFilter,functionCall + */ + @Serializable + enum class FinishReason(val value: kotlin.String) { + @SerialName(value = "stop") stop("stop"), + @SerialName(value = "length") length("length"), + @SerialName(value = "tool_calls") toolCalls("tool_calls"), + @SerialName(value = "content_filter") contentFilter("content_filter"), + @SerialName(value = "function_call") functionCall("function_call") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt new file mode 100644 index 000000000..de693f505 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt @@ -0,0 +1,61 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a streamed chunk of a chat completion response returned by model, based on the + * provided input. + * + * @param id A unique identifier for the chat completion. Each chunk has the same ID. + * @param choices A list of chat completion choices. Can be more than one if `n` is greater than 1. + * @param created The Unix timestamp (in seconds) of when the chat completion was created. Each + * chunk has the same timestamp. + * @param model The model to generate the completion. + * @param `object` The object type, which is always `chat.completion.chunk`. + * @param systemFingerprint This fingerprint represents the backend configuration that the model + * runs with. 
Can be used in conjunction with the `seed` request parameter to understand when + * backend changes have been made that might impact determinism. + */ +@Serializable +data class CreateChatCompletionStreamResponse( + + /* A unique identifier for the chat completion. Each chunk has the same ID. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + @SerialName(value = "choices") + @Required + val choices: kotlin.collections.List, + + /* The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. */ + @SerialName(value = "created") @Required val created: kotlin.Int, + + /* The model to generate the completion. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always `chat.completion.chunk`. */ + @SerialName(value = "object") @Required val `object`: CreateChatCompletionStreamResponse.`Object`, + + /* This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */ + @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null +) { + + /** + * The object type, which is always `chat.completion.chunk`. 
+ * + * Values: chatPeriodCompletionPeriodChunk + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "chat.completion.chunk") + chatPeriodCompletionPeriodChunk("chat.completion.chunk") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt new file mode 100644 index 000000000..6b2e441f2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt @@ -0,0 +1,52 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param delta + * @param finishReason The reason the model stopped generating tokens. This will be `stop` if the + * model hit a natural stop point or a provided stop sequence, `length` if the maximum number of + * tokens specified in the request was reached, `content_filter` if content was omitted due to a + * flag from our content filters, `tool_calls` if the model called a tool, or `function_call` + * (deprecated) if the model called a function. + * @param index The index of the choice in the list of choices. + */ +@Serializable +data class CreateChatCompletionStreamResponseChoicesInner( + @SerialName(value = "delta") @Required val delta: ChatCompletionStreamResponseDelta, + + /* The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. */ + @SerialName(value = "finish_reason") + @Required + val finishReason: CreateChatCompletionStreamResponseChoicesInner.FinishReason?, + + /* The index of the choice in the list of choices. */ + @SerialName(value = "index") @Required val index: kotlin.Int +) { + + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural + * stop point or a provided stop sequence, `length` if the maximum number of tokens specified in + * the request was reached, `content_filter` if content was omitted due to a flag from our content + * filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model + * called a function. + * + * Values: stop,length,toolCalls,contentFilter,functionCall + */ + @Serializable + enum class FinishReason(val value: kotlin.String) { + @SerialName(value = "stop") stop("stop"), + @SerialName(value = "length") length("length"), + @SerialName(value = "tool_calls") toolCalls("tool_calls"), + @SerialName(value = "content_filter") contentFilter("content_filter"), + @SerialName(value = "function_call") functionCall("function_call") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt new file mode 100644 index 000000000..b05f6d7bc --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt @@ -0,0 +1,126 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). 
+ * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param model + * @param prompt + * @param bestOf Generates `best_of` completions server-side and returns the \"best\" (the one with + * the highest log probability per token). Results cannot be streamed. When used with `n`, + * `best_of` controls the number of candidate completions and `n` specifies how many to return – + * `best_of` must be greater than `n`. **Note:** Because this parameter generates many + * completions, it can quickly consume your token quota. Use carefully and ensure that you have + * reasonable settings for `max_tokens` and `stop`. + * @param echo Echo back the prompt in addition to the completion + * @param frequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on + * their existing frequency in the text so far, decreasing the model's likelihood to repeat the + * same line verbatim. + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a + * JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + * associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + * (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias + * is added to the logits generated by the model prior to sampling. The exact effect will vary per + * model, but values between -1 and 1 should decrease or increase likelihood of selection; values + * like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
As an + * example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being + * generated. + * @param logprobs Include the log probabilities on the `logprobs` most likely tokens, as well the + * chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely + * tokens. The API will always return the `logprob` of the sampled token, so there may be up to + * `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. + * @param maxTokens The maximum number of [tokens](/tokenizer) to generate in the completion. The + * token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + * for counting tokens. + * @param n How many completions to generate for each prompt. **Note:** Because this parameter + * generates many completions, it can quickly consume your token quota. Use carefully and ensure + * that you have reasonable settings for `max_tokens` and `stop`. + * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * @param seed If specified, our system will make a best effort to sample deterministically, such + * that repeated requests with the same `seed` and parameters should return the same result. + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. + * @param stop + * @param stream Whether to stream back partial progress. 
If set, tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * @param suffix The suffix that comes after a completion of inserted text. + * @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. We generally recommend altering this or + * `temperature` but not both. + * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ +@Serializable +data class CreateCompletionRequest( + @SerialName(value = "model") + @Required + val model: com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestModel, + @SerialName(value = "prompt") + @Required + val prompt: + com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestPrompt?, + + /* Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ + @SerialName(value = "best_of") val bestOf: kotlin.Int? = 1, + + /* Echo back the prompt in addition to the completion */ + @SerialName(value = "echo") val echo: kotlin.Boolean? = false, + + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + @SerialName(value = "frequency_penalty") val frequencyPenalty: kotlin.Double? = (0).toDouble(), + + /* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. */ + @SerialName(value = "logit_bias") + val logitBias: kotlin.collections.Map? = null, + + /* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. */ + @SerialName(value = "logprobs") val logprobs: kotlin.Int? 
= null, + + /* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. */ + @SerialName(value = "max_tokens") val maxTokens: kotlin.Int? = 16, + + /* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ + @SerialName(value = "n") val n: kotlin.Int? = 1, + + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + @SerialName(value = "presence_penalty") val presencePenalty: kotlin.Double? = (0).toDouble(), + + /* If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. */ + @SerialName(value = "seed") val seed: kotlin.Int? = null, + @SerialName(value = "stop") + val stop: com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestStop? = + null, + + /* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
*/ + @SerialName(value = "stream") val stream: kotlin.Boolean? = false, + + /* The suffix that comes after a completion of inserted text. */ + @SerialName(value = "suffix") val suffix: kotlin.String? = null, + + /* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. */ + @SerialName(value = "temperature") val temperature: kotlin.Double? = (1).toDouble(), + + /* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ + @SerialName(value = "top_p") val topP: kotlin.Double? = (1).toDouble(), + + /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */ + @SerialName(value = "user") val user: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt new file mode 100644 index 000000000..bdc8e4253 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt @@ -0,0 +1,61 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a completion response from the API. Note: both the streamed and non-streamed response + * objects share the same shape (unlike the chat endpoint). + * + * @param id A unique identifier for the completion. + * @param choices The list of completion choices the model generated for the input prompt. + * @param created The Unix timestamp (in seconds) of when the completion was created. + * @param model The model used for completion. + * @param `object` The object type, which is always \"text_completion\" + * @param systemFingerprint This fingerprint represents the backend configuration that the model + * runs with. Can be used in conjunction with the `seed` request parameter to understand when + * backend changes have been made that might impact determinism. + * @param usage + */ +@Serializable +data class CreateCompletionResponse( + + /* A unique identifier for the completion. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The list of completion choices the model generated for the input prompt. */ + @SerialName(value = "choices") + @Required + val choices: kotlin.collections.List, + + /* The Unix timestamp (in seconds) of when the completion was created. */ + @SerialName(value = "created") @Required val created: kotlin.Int, + + /* The model used for completion. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always \"text_completion\" */ + @SerialName(value = "object") @Required val `object`: CreateCompletionResponse.`Object`, + + /* This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */ + @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null, + @SerialName(value = "usage") val usage: CompletionUsage? = null +) { + + /** + * The object type, which is always \"text_completion\" + * + * Values: textCompletion + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "text_completion") textCompletion("text_completion") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt new file mode 100644 index 000000000..318f23b4d --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt @@ -0,0 +1,50 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param finishReason The reason the model stopped generating tokens. This will be `stop` if the + * model hit a natural stop point or a provided stop sequence, `length` if the maximum number of + * tokens specified in the request was reached, or `content_filter` if content was omitted due to + * a flag from our content filters. + * @param index + * @param logprobs + * @param text + */ +@Serializable +data class CreateCompletionResponseChoicesInner( + + /* The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. */ + @SerialName(value = "finish_reason") + @Required + val finishReason: CreateCompletionResponseChoicesInner.FinishReason, + @SerialName(value = "index") @Required val index: kotlin.Int, + @SerialName(value = "logprobs") + @Required + val logprobs: CreateCompletionResponseChoicesInnerLogprobs?, + @SerialName(value = "text") @Required val text: kotlin.String +) { + + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural + * stop point or a provided stop sequence, `length` if the maximum number of tokens specified in + * the request was reached, or `content_filter` if content was omitted due to a flag from our + * content filters. + * + * Values: stop,length,contentFilter + */ + @Serializable + enum class FinishReason(val value: kotlin.String) { + @SerialName(value = "stop") stop("stop"), + @SerialName(value = "length") length("length"), + @SerialName(value = "content_filter") contentFilter("content_filter") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt new file mode 100644 index 000000000..5852dc3fc --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param textOffset + * @param tokenLogprobs + * @param tokens + * @param topLogprobs + */ +@Serializable +data class CreateCompletionResponseChoicesInnerLogprobs( + @SerialName(value = "text_offset") val textOffset: kotlin.collections.List? = null, + @SerialName(value = "token_logprobs") + val tokenLogprobs: kotlin.collections.List? = null, + @SerialName(value = "tokens") val tokens: kotlin.collections.List? = null, + @SerialName(value = "top_logprobs") + val topLogprobs: kotlin.collections.List>? = + null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt new file mode 100644 index 000000000..1b881360f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt @@ -0,0 +1,46 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param instruction The instruction that tells the model how to edit the prompt. + * @param model + * @param input The input text to use as a starting point for the edit. + * @param n How many edits to generate for the input and instruction. + * @param temperature What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. We generally recommend altering this or + * `temperature` but not both. + */ +@Serializable +data class CreateEditRequest( + + /* The instruction that tells the model how to edit the prompt. */ + @SerialName(value = "instruction") @Required val instruction: kotlin.String, + @SerialName(value = "model") + @Required + val model: com.xebia.functional.openai.models.ext.edit.create.CreateEditRequestModel, + + /* The input text to use as a starting point for the edit. */ + @SerialName(value = "input") val input: kotlin.String? = "", + + /* How many edits to generate for the input and instruction. */ + @SerialName(value = "n") val n: kotlin.Int? = 1, + + /* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. */ + @SerialName(value = "temperature") val temperature: kotlin.Double? = (1).toDouble(), + + /* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ + @SerialName(value = "top_p") val topP: kotlin.Double? 
= (1).toDouble() +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt new file mode 100644 index 000000000..274099fc7 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt @@ -0,0 +1,45 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param choices A list of edit choices. Can be more than one if `n` is greater than 1. + * @param `object` The object type, which is always `edit`. + * @param created The Unix timestamp (in seconds) of when the edit was created. + * @param usage + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class CreateEditResponse( + + /* A list of edit choices. Can be more than one if `n` is greater than 1. */ + @SerialName(value = "choices") + @Required + val choices: kotlin.collections.List, + + /* The object type, which is always `edit`. */ + @SerialName(value = "object") @Required val `object`: CreateEditResponse.`Object`, + + /* The Unix timestamp (in seconds) of when the edit was created. */ + @SerialName(value = "created") @Required val created: kotlin.Int, + @SerialName(value = "usage") @Required val usage: CompletionUsage +) { + + /** + * The object type, which is always `edit`. 
+ * + * Values: edit + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "edit") edit("edit") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt new file mode 100644 index 000000000..babf34d15 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt @@ -0,0 +1,49 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param finishReason The reason the model stopped generating tokens. This will be `stop` if the + * model hit a natural stop point or a provided stop sequence, `length` if the maximum number of + * tokens specified in the request was reached, or `content_filter` if content was omitted due to + * a flag from our content filters. + * @param index The index of the choice in the list of choices. + * @param text The edited result. + */ +@Serializable +data class CreateEditResponseChoicesInner( + + /* The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. */ + @SerialName(value = "finish_reason") + @Required + val finishReason: CreateEditResponseChoicesInner.FinishReason, + + /* The index of the choice in the list of choices. 
*/ + @SerialName(value = "index") @Required val index: kotlin.Int, + + /* The edited result. */ + @SerialName(value = "text") @Required val text: kotlin.String +) { + + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural + * stop point or a provided stop sequence, `length` if the maximum number of tokens specified in + * the request was reached, or `content_filter` if content was omitted due to a flag from our + * content filters. + * + * Values: stop,length + */ + @Serializable + enum class FinishReason(val value: kotlin.String) { + @SerialName(value = "stop") stop("stop"), + @SerialName(value = "length") length("length") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt new file mode 100644 index 000000000..e790cadf3 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt @@ -0,0 +1,49 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param input + * @param model + * @param encodingFormat The format to return the embeddings in. Can be either `float` or + * [`base64`](https://pypi.org/project/pybase64/). + * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ */ +@Serializable +data class CreateEmbeddingRequest( + @SerialName(value = "input") + @Required + val input: com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestInput, + @SerialName(value = "model") + @Required + val model: com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestModel, + + /* The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). */ + @SerialName(value = "encoding_format") + val encodingFormat: CreateEmbeddingRequest.EncodingFormat? = EncodingFormat.float, + + /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */ + @SerialName(value = "user") val user: kotlin.String? = null +) { + + /** + * The format to return the embeddings in. Can be either `float` or + * [`base64`](https://pypi.org/project/pybase64/). + * + * Values: float,base64 + */ + @Serializable + enum class EncodingFormat(val value: kotlin.String) { + @SerialName(value = "float") float("float"), + @SerialName(value = "base64") base64("base64") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt new file mode 100644 index 000000000..1bcb61805 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt @@ -0,0 +1,42 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` The list of embeddings generated by the model. + * @param model The name of the model used to generate the embedding. + * @param `object` The object type, which is always \"list\". + * @param usage + */ +@Serializable +data class CreateEmbeddingResponse( + + /* The list of embeddings generated by the model. */ + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + + /* The name of the model used to generate the embedding. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always \"list\". */ + @SerialName(value = "object") @Required val `object`: CreateEmbeddingResponse.`Object`, + @SerialName(value = "usage") @Required val usage: CreateEmbeddingResponseUsage +) { + + /** + * The object type, which is always \"list\". + * + * Values: list + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt new file mode 100644 index 000000000..a3ad21dc7 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The usage information for the request. + * + * @param promptTokens The number of tokens used by the prompt. + * @param totalTokens The total number of tokens used by the request. + */ +@Serializable +data class CreateEmbeddingResponseUsage( + + /* The number of tokens used by the prompt. */ + @SerialName(value = "prompt_tokens") @Required val promptTokens: kotlin.Int, + + /* The total number of tokens used by the request. */ + @SerialName(value = "total_tokens") @Required val totalTokens: kotlin.Int +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt new file mode 100644 index 000000000..df864cf48 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt @@ -0,0 +1,104 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param trainingFile The ID of an uploaded file that contains training data. See + * [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be + * formatted as a JSONL file, where each training example is a JSON object with the keys + * \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose + * `fine-tune`. 
See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + * @param batchSize The batch size to use for training. The batch size is the number of training + * examples used to train a single forward and backward pass. By default, the batch size will be + * dynamically configured to be ~0.2% of the number of examples in the training set, capped at + * 256 - in general, we've found that larger batch sizes tend to work better for larger datasets. + * @param classificationBetas If this is provided, we calculate F-beta scores at the specified beta + * values. The F-beta score is a generalization of F-1 score. This is only used for binary + * classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same + * weight. A larger beta score puts more weight on recall and less on precision. A smaller beta + * score puts more weight on precision and less on recall. + * @param classificationNClasses The number of classes in a classification task. This parameter is + * required for multiclass classification. + * @param classificationPositiveClass The positive class in binary classification. This parameter is + * needed to generate precision, recall, and F1 metrics when doing binary classification. + * @param computeClassificationMetrics If set, we calculate classification-specific metrics such as + * accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be + * viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * In order to compute classification metrics, you must provide a `validation_file`. Additionally, + * you must specify `classification_n_classes` for multiclass classification or + * `classification_positive_class` for binary classification. + * @param hyperparameters + * @param learningRateMultiplier The learning rate multiplier to use for training. 
The fine-tuning + * learning rate is the original learning rate used for pretraining multiplied by this value. By + * default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` + * (larger learning rates tend to perform better with larger batch sizes). We recommend + * experimenting with values in the range 0.02 to 0.2 to see what produces the best results. + * @param model + * @param promptLossWeight The weight to use for loss on the prompt tokens. This controls how much + * the model tries to learn to generate the prompt (as compared to the completion which always has + * a weight of 1.0), and can add a stabilizing effect to training when completions are short. If + * prompts are extremely long (relative to completions), it may make sense to reduce this weight + * so as to avoid over-prioritizing learning the prompt. + * @param suffix A string of up to 40 characters that will be added to your fine-tuned model name. + * For example, a `suffix` of \"custom-model-name\" would produce a model name like + * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + * @param validationFile The ID of an uploaded file that contains validation data. If you provide + * this file, the data is used to generate validation metrics periodically during fine-tuning. + * These metrics can be viewed in the + * [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * Your train and validation data should be mutually exclusive. Your dataset must be formatted as + * a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and + * \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + */ +@Serializable +data class CreateFineTuneRequest( + + /* The ID of an uploaded file that contains training data. 
See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. */ + @SerialName(value = "training_file") @Required val trainingFile: kotlin.String, + + /* The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we've found that larger batch sizes tend to work better for larger datasets. */ + @SerialName(value = "batch_size") val batchSize: kotlin.Int? = null, + + /* If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. */ + @SerialName(value = "classification_betas") + val classificationBetas: kotlin.collections.List? = null, + + /* The number of classes in a classification task. This parameter is required for multiclass classification. */ + @SerialName(value = "classification_n_classes") val classificationNClasses: kotlin.Int? = null, + + /* The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. */ + @SerialName(value = "classification_positive_class") + val classificationPositiveClass: kotlin.String? 
= null, + + /* If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification. */ + @SerialName(value = "compute_classification_metrics") + val computeClassificationMetrics: kotlin.Boolean? = false, + @SerialName(value = "hyperparameters") + val hyperparameters: CreateFineTuneRequestHyperparameters? = null, + + /* The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. */ + @SerialName(value = "learning_rate_multiplier") val learningRateMultiplier: kotlin.Double? = null, + @SerialName(value = "model") + val model: com.xebia.functional.openai.models.ext.finetune.create.CreateFineTuneRequestModel? = + null, + + /* The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. */ + @SerialName(value = "prompt_loss_weight") + val promptLossWeight: kotlin.Double? 
= (0.01).toDouble(), + + /* A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. */ + @SerialName(value = "suffix") val suffix: kotlin.String? = null, + + /* The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. */ + @SerialName(value = "validation_file") val validationFile: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt new file mode 100644 index 000000000..bec3daadb --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt @@ -0,0 +1,24 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The hyperparameters used for the fine-tuning job. 
+ * + * @param nEpochs + */ +@Serializable +data class CreateFineTuneRequestHyperparameters( + @SerialName(value = "n_epochs") + val nEpochs: + com.xebia.functional.openai.models.ext.finetune.create.CreateFineTuneRequestHyperparametersNEpochs? = + null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt new file mode 100644 index 000000000..61d19d472 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt @@ -0,0 +1,47 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param model + * @param trainingFile The ID of an uploaded file that contains training data. See + * [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be + * formatted as a JSONL file. Additionally, you must upload your file with the purpose + * `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * @param hyperparameters + * @param suffix A string of up to 18 characters that will be added to your fine-tuned model name. + * For example, a `suffix` of \"custom-model-name\" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + * @param validationFile The ID of an uploaded file that contains validation data. If you provide + * this file, the data is used to generate validation metrics periodically during fine-tuning. + * These metrics can be viewed in the fine-tuning results file. 
The same data should not be + * present in both train and validation files. Your dataset must be formatted as a JSONL file. You + * must upload your file with the purpose `fine-tune`. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ +@Serializable +data class CreateFineTuningJobRequest( + @SerialName(value = "model") + @Required + val model: + com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestModel, + + /* The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ + @SerialName(value = "training_file") @Required val trainingFile: kotlin.String, + @SerialName(value = "hyperparameters") + val hyperparameters: CreateFineTuningJobRequestHyperparameters? = null, + + /* A string of up to 18 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. */ + @SerialName(value = "suffix") val suffix: kotlin.String? = null, + + /* The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ + @SerialName(value = "validation_file") val validationFile: kotlin.String? 
= null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt new file mode 100644 index 000000000..d711b1948 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt @@ -0,0 +1,34 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The hyperparameters used for the fine-tuning job. + * + * @param batchSize + * @param learningRateMultiplier + * @param nEpochs + */ +@Serializable +data class CreateFineTuningJobRequestHyperparameters( + @SerialName(value = "batch_size") + val batchSize: + com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersBatchSize? = + null, + @SerialName(value = "learning_rate_multiplier") + val learningRateMultiplier: + com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? = + null, + @SerialName(value = "n_epochs") + val nEpochs: + com.xebia.functional.openai.models.ext.finetune.job.create.CreateFineTuningJobRequestHyperparametersNEpochs? 
= + null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt new file mode 100644 index 000000000..9bcc62e8f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt @@ -0,0 +1,109 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param prompt A text description of the desired image(s). The maximum length is 1000 characters + * for `dall-e-2` and 4000 characters for `dall-e-3`. + * @param model + * @param n The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` + * is supported. + * @param quality The quality of the image that will be generated. `hd` creates images with finer + * details and greater consistency across the image. This param is only supported for `dall-e-3`. + * @param responseFormat The format in which the generated images are returned. Must be one of `url` + * or `b64_json`. + * @param propertySize The size of the generated images. Must be one of `256x256`, `512x512`, or + * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for + * `dall-e-3` models. + * @param style The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes + * the model to lean towards generating hyper-real and dramatic images. Natural causes the model + * to produce more natural, less hyper-real looking images. This param is only supported for + * `dall-e-3`. 
+ * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ +@Serializable +data class CreateImageRequest( + + /* A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. */ + @SerialName(value = "prompt") @Required val prompt: kotlin.String, + @SerialName(value = "model") + val model: com.xebia.functional.openai.models.ext.image.create.CreateImageRequestModel? = null, + + /* The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. */ + @SerialName(value = "n") val n: kotlin.Int? = 1, + + /* The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. */ + @SerialName(value = "quality") val quality: CreateImageRequest.Quality? = Quality.standard, + + /* The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + @SerialName(value = "response_format") + val responseFormat: CreateImageRequest.ResponseFormat? = ResponseFormat.url, + + /* The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. */ + @SerialName(value = "size") + val propertySize: CreateImageRequest.PropertySize? = PropertySize._1024x1024, + + /* The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. */ + @SerialName(value = "style") val style: CreateImageRequest.Style? 
= Style.vivid, + + /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */ + @SerialName(value = "user") val user: kotlin.String? = null +) { + + /** + * The quality of the image that will be generated. `hd` creates images with finer details and + * greater consistency across the image. This param is only supported for `dall-e-3`. + * + * Values: standard,hd + */ + @Serializable + enum class Quality(val value: kotlin.String) { + @SerialName(value = "standard") standard("standard"), + @SerialName(value = "hd") hd("hd") + } + /** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. + * + * Values: url,b64Json + */ + @Serializable + enum class ResponseFormat(val value: kotlin.String) { + @SerialName(value = "url") url("url"), + @SerialName(value = "b64_json") b64Json("b64_json") + } + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + * `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + * + * Values: _256x256,_512x512,_1024x1024,_1792x1024,_1024x1792 + */ + @Serializable + enum class PropertySize(val value: kotlin.String) { + @SerialName(value = "256x256") _256x256("256x256"), + @SerialName(value = "512x512") _512x512("512x512"), + @SerialName(value = "1024x1024") _1024x1024("1024x1024"), + @SerialName(value = "1792x1024") _1792x1024("1792x1024"), + @SerialName(value = "1024x1792") _1024x1792("1024x1792") + } + /** + * The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + * to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + * more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. 
+ * + * Values: vivid,natural + */ + @Serializable + enum class Style(val value: kotlin.String) { + @SerialName(value = "vivid") vivid("vivid"), + @SerialName(value = "natural") natural("natural") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt new file mode 100644 index 000000000..af7637a51 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt @@ -0,0 +1,50 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param role The role of the entity that is creating the message. Currently only `user` is + * supported. + * @param content The content of the message. + * @param fileIds A list of [File](/docs/api-reference/files) IDs that the message should use. There + * can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and + * `code_interpreter` that can access and use files. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maxium of 512 characters long. + */ +@Serializable +data class CreateMessageRequest( + + /* The role of the entity that is creating the message. Currently only `user` is supported. */ + @SerialName(value = "role") @Required val role: CreateMessageRequest.Role, + + /* The content of the message. 
*/ + @SerialName(value = "content") @Required val content: kotlin.String, + + /* A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. */ + @SerialName(value = "file_ids") + val fileIds: kotlin.collections.List? = arrayListOf(), + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ + @SerialName(value = "metadata") val metadata: kotlin.String? = null +) { + + /** + * The role of the entity that is creating the message. Currently only `user` is supported. + * + * Values: user + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "user") user("user") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt new file mode 100644 index 000000000..ec7f1d6a6 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt @@ -0,0 +1,26 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param input + * @param model + */ +@Serializable +data class CreateModerationRequest( + @SerialName(value = "input") + @Required + val input: com.xebia.functional.openai.models.ext.moderation.create.CreateModerationRequestInput, + @SerialName(value = "model") + val model: + com.xebia.functional.openai.models.ext.moderation.create.CreateModerationRequestModel? = + null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt new file mode 100644 index 000000000..a9dd08a88 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt @@ -0,0 +1,33 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents policy compliance report by OpenAI's content moderation model against a given input. + * + * @param id The unique identifier for the moderation request. + * @param model The model used to generate the moderation results. + * @param results A list of moderation objects. + */ +@Serializable +data class CreateModerationResponse( + + /* The unique identifier for the moderation request. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The model used to generate the moderation results. 
*/ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* A list of moderation objects. */ + @SerialName(value = "results") + @Required + val results: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt new file mode 100644 index 000000000..8768e5db9 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt @@ -0,0 +1,29 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param flagged Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + * @param categories + * @param categoryScores + */ +@Serializable +data class CreateModerationResponseResultsInner( + + /* Whether the content violates [OpenAI's usage policies](/policies/usage-policies). 
*/ + @SerialName(value = "flagged") @Required val flagged: kotlin.Boolean, + @SerialName(value = "categories") + @Required + val categories: CreateModerationResponseResultsInnerCategories, + @SerialName(value = "category_scores") + @Required + val categoryScores: CreateModerationResponseResultsInnerCategoryScores +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt new file mode 100644 index 000000000..23794f79e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt @@ -0,0 +1,75 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A list of the categories, and whether they are flagged or not. + * + * @param hate Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + * religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed + * at non-protected groups (e.g., chess players) is harrassment. + * @param hateThreatening Hateful content that also includes violence or serious harm towards the + * targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, + * disability status, or caste. + * @param harassment Content that expresses, incites, or promotes harassing language towards any + * target. + * @param harassmentThreatening Harassment content that also includes violence or serious harm + * towards any target. 
+ * @param selfHarm Content that promotes, encourages, or depicts acts of self-harm, such as suicide, + * cutting, and eating disorders. + * @param selfHarmIntent Content where the speaker expresses that they are engaging or intend to + * engage in acts of self-harm, such as suicide, cutting, and eating disorders. + * @param selfHarmInstructions Content that encourages performing acts of self-harm, such as + * suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit + * such acts. + * @param sexual Content meant to arouse sexual excitement, such as the description of sexual + * activity, or that promotes sexual services (excluding sex education and wellness). + * @param sexualMinors Sexual content that includes an individual who is under 18 years old. + * @param violence Content that depicts death, violence, or physical injury. + * @param violenceGraphic Content that depicts death, violence, or physical injury in graphic + * detail. + */ +@Serializable +data class CreateModerationResponseResultsInnerCategories( + + /* Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. */ + @SerialName(value = "hate") @Required val hate: kotlin.Boolean, + + /* Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. */ + @SerialName(value = "hate/threatening") @Required val hateThreatening: kotlin.Boolean, + + /* Content that expresses, incites, or promotes harassing language towards any target. */ + @SerialName(value = "harassment") @Required val harassment: kotlin.Boolean, + + /* Harassment content that also includes violence or serious harm towards any target. 
*/ + @SerialName(value = "harassment/threatening") @Required val harassmentThreatening: kotlin.Boolean, + + /* Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. */ + @SerialName(value = "self-harm") @Required val selfHarm: kotlin.Boolean, + + /* Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. */ + @SerialName(value = "self-harm/intent") @Required val selfHarmIntent: kotlin.Boolean, + + /* Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. */ + @SerialName(value = "self-harm/instructions") @Required val selfHarmInstructions: kotlin.Boolean, + + /* Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). */ + @SerialName(value = "sexual") @Required val sexual: kotlin.Boolean, + + /* Sexual content that includes an individual who is under 18 years old. */ + @SerialName(value = "sexual/minors") @Required val sexualMinors: kotlin.Boolean, + + /* Content that depicts death, violence, or physical injury. */ + @SerialName(value = "violence") @Required val violence: kotlin.Boolean, + + /* Content that depicts death, violence, or physical injury in graphic detail. 
*/ + @SerialName(value = "violence/graphic") @Required val violenceGraphic: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt new file mode 100644 index 000000000..bbbf2e4b7 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt @@ -0,0 +1,63 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A list of the categories along with their scores as predicted by model. + * + * @param hate The score for the category 'hate'. + * @param hateThreatening The score for the category 'hate/threatening'. + * @param harassment The score for the category 'harassment'. + * @param harassmentThreatening The score for the category 'harassment/threatening'. + * @param selfHarm The score for the category 'self-harm'. + * @param selfHarmIntent The score for the category 'self-harm/intent'. + * @param selfHarmInstructions The score for the category 'self-harm/instructions'. + * @param sexual The score for the category 'sexual'. + * @param sexualMinors The score for the category 'sexual/minors'. + * @param violence The score for the category 'violence'. + * @param violenceGraphic The score for the category 'violence/graphic'. + */ +@Serializable +data class CreateModerationResponseResultsInnerCategoryScores( + + /* The score for the category 'hate'. 
*/ + @SerialName(value = "hate") @Required val hate: kotlin.Double, + + /* The score for the category 'hate/threatening'. */ + @SerialName(value = "hate/threatening") @Required val hateThreatening: kotlin.Double, + + /* The score for the category 'harassment'. */ + @SerialName(value = "harassment") @Required val harassment: kotlin.Double, + + /* The score for the category 'harassment/threatening'. */ + @SerialName(value = "harassment/threatening") @Required val harassmentThreatening: kotlin.Double, + + /* The score for the category 'self-harm'. */ + @SerialName(value = "self-harm") @Required val selfHarm: kotlin.Double, + + /* The score for the category 'self-harm/intent'. */ + @SerialName(value = "self-harm/intent") @Required val selfHarmIntent: kotlin.Double, + + /* The score for the category 'self-harm/instructions'. */ + @SerialName(value = "self-harm/instructions") @Required val selfHarmInstructions: kotlin.Double, + + /* The score for the category 'sexual'. */ + @SerialName(value = "sexual") @Required val sexual: kotlin.Double, + + /* The score for the category 'sexual/minors'. */ + @SerialName(value = "sexual/minors") @Required val sexualMinors: kotlin.Double, + + /* The score for the category 'violence'. */ + @SerialName(value = "violence") @Required val violence: kotlin.Double, + + /* The score for the category 'violence/graphic'. */ + @SerialName(value = "violence/graphic") @Required val violenceGraphic: kotlin.Double +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt new file mode 100644 index 000000000..12717d3d2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt @@ -0,0 +1,45 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) to use to execute + * this run. + * @param model The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If + * a value is provided here, it will override the model associated with the assistant. If not, the + * model associated with the assistant will be used. + * @param instructions Override the default system message of the assistant. This is useful for + * modifying the behavior on a per-run basis. + * @param tools Override the tools the assistant can use for this run. This is useful for modifying + * the behavior on a per-run basis. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class CreateRunRequest( + + /* The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String, + + /* The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. */ + @SerialName(value = "model") val model: kotlin.String? = null, + + /* Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. */ + @SerialName(value = "instructions") val instructions: kotlin.String? 
= null, + + /* Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */ + @SerialName(value = "tools") + val tools: kotlin.collections.List? = null, + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") val metadata: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt new file mode 100644 index 000000000..429ea710f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt @@ -0,0 +1,70 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param model + * @param input The text to generate audio for. The maximum length is 4096 characters. + * @param voice The voice to use when generating the audio. Supported voices are `alloy`, `echo`, + * `fable`, `onyx`, `nova`, and `shimmer`. + * @param responseFormat The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and + * `flac`. + * @param speed The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the + * default. 
+ */ +@Serializable +data class CreateSpeechRequest( + @SerialName(value = "model") + @Required + val model: com.xebia.functional.openai.models.ext.speech.create.CreateSpeechRequestModel, + + /* The text to generate audio for. The maximum length is 4096 characters. */ + @SerialName(value = "input") @Required val input: kotlin.String, + + /* The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. */ + @SerialName(value = "voice") @Required val voice: CreateSpeechRequest.Voice, + + /* The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. */ + @SerialName(value = "response_format") + val responseFormat: CreateSpeechRequest.ResponseFormat? = ResponseFormat.mp3, + + /* The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. */ + @SerialName(value = "speed") val speed: kotlin.Double? = (1.0).toDouble() +) { + + /** + * The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + * `onyx`, `nova`, and `shimmer`. + * + * Values: alloy,echo,fable,onyx,nova,shimmer + */ + @Serializable + enum class Voice(val value: kotlin.String) { + @SerialName(value = "alloy") alloy("alloy"), + @SerialName(value = "echo") echo("echo"), + @SerialName(value = "fable") fable("fable"), + @SerialName(value = "onyx") onyx("onyx"), + @SerialName(value = "nova") nova("nova"), + @SerialName(value = "shimmer") shimmer("shimmer") + } + /** + * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. 
+ * + * Values: mp3,opus,aac,flac + */ + @Serializable + enum class ResponseFormat(val value: kotlin.String) { + @SerialName(value = "mp3") mp3("mp3"), + @SerialName(value = "opus") opus("opus"), + @SerialName(value = "aac") aac("aac"), + @SerialName(value = "flac") flac("flac") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt new file mode 100644 index 000000000..dbf519677 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt @@ -0,0 +1,47 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) to use to execute + * this run. + * @param thread + * @param model The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If + * a value is provided here, it will override the model associated with the assistant. If not, the + * model associated with the assistant will be used. + * @param instructions Override the default system message of the assistant. This is useful for + * modifying the behavior on a per-run basis. + * @param tools Override the tools the assistant can use for this run. This is useful for modifying + * the behavior on a per-run basis. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. 
Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class CreateThreadAndRunRequest( + + /* The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String, + @SerialName(value = "thread") val thread: CreateThreadRequest? = null, + + /* The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. */ + @SerialName(value = "model") val model: kotlin.String? = null, + + /* Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. */ + @SerialName(value = "instructions") val instructions: kotlin.String? = null, + + /* Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */ + @SerialName(value = "tools") + val tools: kotlin.collections.List? = null, + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") val metadata: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt new file mode 100644 index 000000000..84529c0fb --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). 
+ * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type The type of tool being defined: `code_interpreter` + * @param function + */ +@Serializable +data class CreateThreadAndRunRequestToolsInner( + + /* The type of tool being defined: `code_interpreter` */ + @SerialName(value = "type") @Required val type: CreateThreadAndRunRequestToolsInner.Type, + @SerialName(value = "function") @Required val function: FunctionObject +) { + + /** + * The type of tool being defined: `code_interpreter` + * + * Values: codeInterpreter,retrieval,function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "code_interpreter") codeInterpreter("code_interpreter"), + @SerialName(value = "retrieval") retrieval("retrieval"), + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt new file mode 100644 index 000000000..3459ea697 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param messages A list of [messages](/docs/api-reference/messages) to start the thread with. 
+ * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class CreateThreadRequest( + + /* A list of [messages](/docs/api-reference/messages) to start the thread with. */ + @SerialName(value = "messages") + val messages: kotlin.collections.List? = null, + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") val metadata: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt new file mode 100644 index 000000000..57131c18b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt @@ -0,0 +1,17 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param text */ +@Serializable +data class CreateTranscriptionResponse( + @SerialName(value = "text") @Required val text: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt new file mode 100644 index 000000000..9f4224dee --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt @@ -0,0 +1,15 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param text */ +@Serializable +data class CreateTranslationResponse(@SerialName(value = "text") @Required val text: kotlin.String) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt new file mode 100644 index 000000000..3db94fe49 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt @@ -0,0 +1,34 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Deletes the association between the assistant and the file, but does not delete the + * [File](/docs/api-reference/files) object itself. + * + * @param id + * @param deleted + * @param `object` + */ +@Serializable +data class DeleteAssistantFileResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: DeleteAssistantFileResponse.`Object` +) { + + /** Values: assistantPeriodFilePeriodDeleted */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "assistant.file.deleted") + assistantPeriodFilePeriodDeleted("assistant.file.deleted") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantResponse.kt new file mode 100644 index 000000000..5a22b42fd --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantResponse.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id + * @param deleted + * @param `object` + */ +@Serializable +data class DeleteAssistantResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: DeleteAssistantResponse.`Object` +) { + + /** Values: assistantPeriodDeleted */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "assistant.deleted") assistantPeriodDeleted("assistant.deleted") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteFileResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteFileResponse.kt new file mode 100644 index 000000000..97944f4e3 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteFileResponse.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id + * @param `object` + * @param deleted + */ +@Serializable +data class DeleteFileResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "object") @Required val `object`: DeleteFileResponse.`Object`, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean +) { + + /** Values: file */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "file") file("file") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteMessageResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteMessageResponse.kt new file mode 100644 index 000000000..37a994532 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteMessageResponse.kt @@ -0,0 +1,31 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id + * @param deleted + * @param `object` + */ +@Serializable +data class DeleteMessageResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: DeleteMessageResponse.`Object` +) { + + /** Values: threadPeriodMessagePeriodDeleted */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread.message.deleted") + threadPeriodMessagePeriodDeleted("thread.message.deleted") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteModelResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteModelResponse.kt new file mode 100644 index 000000000..bedc7f180 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteModelResponse.kt @@ -0,0 +1,23 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id + * @param deleted + * @param `object` + */ +@Serializable +data class DeleteModelResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt new file mode 100644 index 000000000..c902bb42a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id + * @param deleted + * @param `object` + */ +@Serializable +data class DeleteThreadResponse( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "deleted") @Required val deleted: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: DeleteThreadResponse.`Object` +) { + + /** Values: threadPeriodDeleted */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread.deleted") threadPeriodDeleted("thread.deleted") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt new file mode 100644 index 000000000..d72994240 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt @@ -0,0 +1,43 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents an embedding vector returned by embedding endpoint. + * + * @param index The index of the embedding in the list of embeddings. + * @param embedding The embedding vector, which is a list of floats. The length of vector depends on + * the model as listed in the [embedding guide](/docs/guides/embeddings). + * @param `object` The object type, which is always \"embedding\". 
+ */ +@Serializable +data class Embedding( + + /* The index of the embedding in the list of embeddings. */ + @SerialName(value = "index") @Required val index: kotlin.Int, + + /* The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). */ + @SerialName(value = "embedding") @Required val embedding: kotlin.collections.List, + + /* The object type, which is always \"embedding\". */ + @SerialName(value = "object") @Required val `object`: Embedding.`Object` +) { + + /** + * The object type, which is always \"embedding\". + * + * Values: embedding + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "embedding") embedding("embedding") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt new file mode 100644 index 000000000..e14dbb5b9 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param code + * @param message + * @param `param` + * @param type + */ +@Serializable +data class Error( + @SerialName(value = "code") @Required val code: kotlin.String?, + @SerialName(value = "message") @Required val message: kotlin.String, + @SerialName(value = "param") @Required val `param`: kotlin.String?, + @SerialName(value = "type") @Required val type: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt new file mode 100644 index 000000000..37dde0d3e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt @@ -0,0 +1,14 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param error */ +@Serializable data class ErrorResponse(@SerialName(value = "error") @Required val error: Error) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt new file mode 100644 index 000000000..170a8c29f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt @@ -0,0 +1,88 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The `FineTune` object represents a legacy fine-tune job that has been created through the API. + * + * @param id The object identifier, which can be referenced in the API endpoints. + * @param createdAt The Unix timestamp (in seconds) for when the fine-tuning job was created. + * @param fineTunedModel The name of the fine-tuned model that is being created. + * @param hyperparams + * @param model The base model that is being fine-tuned. + * @param `object` The object type, which is always \"fine-tune\". + * @param organizationId The organization that owns the fine-tuning job. + * @param resultFiles The compiled results files for the fine-tuning job. + * @param status The current status of the fine-tuning job, which can be either `created`, + * `running`, `succeeded`, `failed`, or `cancelled`. 
+ * @param trainingFiles The list of files used for training. + * @param updatedAt The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + * @param validationFiles The list of files used for validation. + * @param events The list of events that have been observed in the lifecycle of the FineTune job. + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class FineTune( + + /* The object identifier, which can be referenced in the API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The name of the fine-tuned model that is being created. */ + @SerialName(value = "fine_tuned_model") @Required val fineTunedModel: kotlin.String?, + @SerialName(value = "hyperparams") @Required val hyperparams: FineTuneHyperparams, + + /* The base model that is being fine-tuned. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always \"fine-tune\". */ + @SerialName(value = "object") @Required val `object`: FineTune.`Object`, + + /* The organization that owns the fine-tuning job. */ + @SerialName(value = "organization_id") @Required val organizationId: kotlin.String, + + /* The compiled results files for the fine-tuning job. */ + @SerialName(value = "result_files") + @Required + val resultFiles: kotlin.collections.List, + + /* The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. */ + @SerialName(value = "status") @Required val status: kotlin.String, + + /* The list of files used for training. */ + @SerialName(value = "training_files") + @Required + val trainingFiles: kotlin.collections.List, + + /* The Unix timestamp (in seconds) for when the fine-tuning job was last updated. 
*/ + @SerialName(value = "updated_at") @Required val updatedAt: kotlin.Int, + + /* The list of files used for validation. */ + @SerialName(value = "validation_files") + @Required + val validationFiles: kotlin.collections.List, + + /* The list of events that have been observed in the lifecycle of the FineTune job. */ + @SerialName(value = "events") val events: kotlin.collections.List? = null +) { + + /** + * The object type, which is always \"fine-tune\". + * + * Values: fineMinusTune + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "fine-tune") fineMinusTune("fine-tune") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt new file mode 100644 index 000000000..44d8836f8 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt @@ -0,0 +1,35 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Fine-tune event object + * + * @param createdAt + * @param level + * @param message + * @param `object` + */ +@Serializable +@Deprecated(message = "This schema is deprecated.") +data class FineTuneEvent( + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + @SerialName(value = "level") @Required val level: kotlin.String, + @SerialName(value = "message") @Required val message: kotlin.String, + @SerialName(value = "object") @Required val `object`: FineTuneEvent.`Object` +) { + + /** Values: fineMinusTuneMinusEvent */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "fine-tune-event") fineMinusTuneMinusEvent("fine-tune-event") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt new file mode 100644 index 000000000..bacb6c644 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt @@ -0,0 +1,56 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + * + * @param batchSize The batch size to use for training. 
The batch size is the number of training + * examples used to train a single forward and backward pass. + * @param learningRateMultiplier The learning rate multiplier to use for training. + * @param nEpochs The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + * @param promptLossWeight The weight to use for loss on the prompt tokens. + * @param classificationNClasses The number of classes to use for computing classification metrics. + * @param classificationPositiveClass The positive class to use for computing classification + * metrics. + * @param computeClassificationMetrics The classification metrics to compute using the validation + * dataset at the end of every epoch. + */ +@Serializable +data class FineTuneHyperparams( + + /* The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. */ + @SerialName(value = "batch_size") @Required val batchSize: kotlin.Int, + + /* The learning rate multiplier to use for training. */ + @SerialName(value = "learning_rate_multiplier") + @Required + val learningRateMultiplier: kotlin.Double, + + /* The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. */ + @SerialName(value = "n_epochs") @Required val nEpochs: kotlin.Int, + + /* The weight to use for loss on the prompt tokens. */ + @SerialName(value = "prompt_loss_weight") @Required val promptLossWeight: kotlin.Double, + + /* The number of classes to use for computing classification metrics. */ + @SerialName(value = "classification_n_classes") val classificationNClasses: kotlin.Int? = null, + + /* The positive class to use for computing classification metrics. */ + @SerialName(value = "classification_positive_class") + val classificationPositiveClass: kotlin.String? = null, + + /* The classification metrics to compute using the validation dataset at the end of every epoch. 
*/ + @SerialName(value = "compute_classification_metrics") + val computeClassificationMetrics: kotlin.Boolean? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt new file mode 100644 index 000000000..27761ddc1 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt @@ -0,0 +1,108 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + * + * @param id The object identifier, which can be referenced in the API endpoints. + * @param createdAt The Unix timestamp (in seconds) for when the fine-tuning job was created. + * @param error + * @param fineTunedModel The name of the fine-tuned model that is being created. The value will be + * null if the fine-tuning job is still running. + * @param finishedAt The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + * value will be null if the fine-tuning job is still running. + * @param hyperparameters + * @param model The base model that is being fine-tuned. + * @param `object` The object type, which is always \"fine_tuning.job\". + * @param organizationId The organization that owns the fine-tuning job. + * @param resultFiles The compiled results file ID(s) for the fine-tuning job. You can retrieve the + * results with the [Files API](/docs/api-reference/files/retrieve-contents). 
+ * @param status The current status of the fine-tuning job, which can be either `validating_files`, + * `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + * @param trainedTokens The total number of billable tokens processed by this fine-tuning job. The + * value will be null if the fine-tuning job is still running. + * @param trainingFile The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). + * @param validationFile The file ID used for validation. You can retrieve the validation results + * with the [Files API](/docs/api-reference/files/retrieve-contents). + */ +@Serializable +data class FineTuningJob( + + /* The object identifier, which can be referenced in the API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + @SerialName(value = "error") @Required val error: FineTuningJobError?, + + /* The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. */ + @SerialName(value = "fine_tuned_model") @Required val fineTunedModel: kotlin.String?, + + /* The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. */ + @SerialName(value = "finished_at") @Required val finishedAt: kotlin.Int?, + @SerialName(value = "hyperparameters") + @Required + val hyperparameters: FineTuningJobHyperparameters, + + /* The base model that is being fine-tuned. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The object type, which is always \"fine_tuning.job\". */ + @SerialName(value = "object") @Required val `object`: FineTuningJob.`Object`, + + /* The organization that owns the fine-tuning job. 
*/ + @SerialName(value = "organization_id") @Required val organizationId: kotlin.String, + + /* The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). */ + @SerialName(value = "result_files") + @Required + val resultFiles: kotlin.collections.List, + + /* The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. */ + @SerialName(value = "status") @Required val status: FineTuningJob.Status, + + /* The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. */ + @SerialName(value = "trained_tokens") @Required val trainedTokens: kotlin.Int?, + + /* The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). */ + @SerialName(value = "training_file") @Required val trainingFile: kotlin.String, + + /* The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). */ + @SerialName(value = "validation_file") @Required val validationFile: kotlin.String? +) { + + /** + * The object type, which is always \"fine_tuning.job\". + * + * Values: fineTuningPeriodJob + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "fine_tuning.job") fineTuningPeriodJob("fine_tuning.job") + } + /** + * The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + * `running`, `succeeded`, `failed`, or `cancelled`. 
+ * + * Values: validatingFiles,queued,running,succeeded,failed,cancelled + */ + @Serializable + enum class Status(val value: kotlin.String) { + @SerialName(value = "validating_files") validatingFiles("validating_files"), + @SerialName(value = "queued") queued("queued"), + @SerialName(value = "running") running("running"), + @SerialName(value = "succeeded") succeeded("succeeded"), + @SerialName(value = "failed") failed("failed"), + @SerialName(value = "cancelled") cancelled("cancelled") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt new file mode 100644 index 000000000..f38e9b884 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt @@ -0,0 +1,33 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. + * + * @param code A machine-readable error code. + * @param message A human-readable error message. + * @param `param` The parameter that was invalid, usually `training_file` or `validation_file`. This + * field will be null if the failure was not parameter-specific. + */ +@Serializable +data class FineTuningJobError( + + /* A machine-readable error code. */ + @SerialName(value = "code") @Required val code: kotlin.String, + + /* A human-readable error message. 
*/ + @SerialName(value = "message") @Required val message: kotlin.String, + + /* The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. */ + @SerialName(value = "param") @Required val `param`: kotlin.String? +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt new file mode 100644 index 000000000..39fe52082 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt @@ -0,0 +1,44 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Fine-tuning job event object + * + * @param id + * @param createdAt + * @param level + * @param message + * @param `object` + */ +@Serializable +data class FineTuningJobEvent( + @SerialName(value = "id") @Required val id: kotlin.String, + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + @SerialName(value = "level") @Required val level: FineTuningJobEvent.Level, + @SerialName(value = "message") @Required val message: kotlin.String, + @SerialName(value = "object") @Required val `object`: FineTuningJobEvent.`Object` +) { + + /** Values: info,warn,error */ + @Serializable + enum class Level(val value: kotlin.String) { + @SerialName(value = "info") info("info"), + @SerialName(value = "warn") warn("warn"), + @SerialName(value = "error") error("error") + } + /** Values: fineTuningPeriodJobPeriodEvent */ + @Serializable + enum class `Object`(val value: kotlin.String) 
{ + @SerialName(value = "fine_tuning.job.event") + fineTuningPeriodJobPeriodEvent("fine_tuning.job.event") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobHyperparameters.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobHyperparameters.kt new file mode 100644 index 000000000..f93df17e8 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobHyperparameters.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * + * @param nEpochs + */ +@Serializable +data class FineTuningJobHyperparameters( + @SerialName(value = "n_epochs") + @Required + val nEpochs: + com.xebia.functional.openai.models.ext.finetune.job.FineTuningJobHyperparametersNEpochs +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt new file mode 100644 index 000000000..1028588d6 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt @@ -0,0 +1,29 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + * @param parameters + * @param description A description of what the function does, used by the model to choose when and + * how to call the function. + */ +@Serializable +data class FunctionObject( + + /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ + @SerialName(value = "name") @Required val name: kotlin.String, + @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject, + + /* A description of what the function does, used by the model to choose when and how to call the function. */ + @SerialName(value = "description") val description: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Image.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Image.kt new file mode 100644 index 000000000..a295152c2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Image.kt @@ -0,0 +1,33 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents the url or the content of an image generated by the OpenAI API. 
+ * + * @param b64Json The base64-encoded JSON of the generated image, if `response_format` is + * `b64_json`. + * @param url The URL of the generated image, if `response_format` is `url` (default). + * @param revisedPrompt The prompt that was used to generate the image, if there was any revision to + * the prompt. + */ +@Serializable +data class Image( + + /* The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + @SerialName(value = "b64_json") val b64Json: kotlin.String? = null, + + /* The URL of the generated image, if `response_format` is `url` (default). */ + @SerialName(value = "url") val url: kotlin.String? = null, + + /* The prompt that was used to generate the image, if there was any revision to the prompt. */ + @SerialName(value = "revised_prompt") val revisedPrompt: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt new file mode 100644 index 000000000..6ca68fb7d --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt @@ -0,0 +1,21 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param created + * @param `data` + */ +@Serializable +data class ImagesResponse( + @SerialName(value = "created") @Required val created: kotlin.Int, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt new file mode 100644 index 000000000..23f89e7c1 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListAssistantFilesResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt new file mode 100644 index 000000000..c4e0d93cc --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListAssistantsResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt new file mode 100644 index 000000000..a27f00943 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` + * @param `object` + */ +@Serializable +data class ListFilesResponse( + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "object") @Required val `object`: ListFilesResponse.`Object` +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt new file mode 100644 index 000000000..75c8bb4f5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` + * @param `object` + */ +@Serializable +data class ListFineTuneEventsResponse( + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "object") @Required val `object`: ListFineTuneEventsResponse.`Object` +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt new file mode 100644 index 000000000..3aa054c39 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` + * @param `object` + */ +@Serializable +data class ListFineTunesResponse( + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "object") @Required val `object`: ListFineTunesResponse.`Object` +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt new file mode 100644 index 000000000..7b5dfb730 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` + * @param `object` + */ +@Serializable +data class ListFineTuningJobEventsResponse( + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "object") @Required val `object`: ListFineTuningJobEventsResponse.`Object` +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt new file mode 100644 index 000000000..5a4965db9 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListMessageFilesResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt new file mode 100644 index 000000000..49fbcaf3e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListMessagesResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListModelsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListModelsResponse.kt new file mode 100644 index 000000000..a079c8f12 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListModelsResponse.kt @@ -0,0 +1,28 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + */ +@Serializable +data class ListModelsResponse( + @SerialName(value = "object") @Required val `object`: ListModelsResponse.`Object`, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListPaginatedFineTuningJobsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListPaginatedFineTuningJobsResponse.kt new file mode 100644 index 000000000..1858fe9df --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListPaginatedFineTuningJobsResponse.kt @@ -0,0 +1,30 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `data` + * @param hasMore + * @param `object` + */ +@Serializable +data class ListPaginatedFineTuningJobsResponse( + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean, + @SerialName(value = "object") @Required val `object`: ListPaginatedFineTuningJobsResponse.`Object` +) { + + /** Values: list */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "list") list("list") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunStepsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunStepsResponse.kt new file mode 100644 index 000000000..38a3cac89 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunStepsResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListRunStepsResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunsResponse.kt new file mode 100644 index 000000000..e21d10d4a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListRunsResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListRunsResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListThreadsResponse.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListThreadsResponse.kt new file mode 100644 index 000000000..be70e4d30 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ListThreadsResponse.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `object` + * @param `data` + * @param firstId + * @param lastId + * @param hasMore + */ +@Serializable +data class ListThreadsResponse( + @SerialName(value = "object") @Required val `object`: kotlin.String, + @SerialName(value = "data") @Required val `data`: kotlin.collections.List, + @SerialName(value = "first_id") @Required val firstId: kotlin.String, + @SerialName(value = "last_id") @Required val lastId: kotlin.String, + @SerialName(value = "has_more") @Required val hasMore: kotlin.Boolean +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObject.kt new file mode 100644 index 000000000..df8232866 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObject.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * References an image [File](/docs/api-reference/files) in the content of a message. + * + * @param type Always `image_file`. + * @param imageFile + */ +@Serializable +data class MessageContentImageFileObject( + + /* Always `image_file`. 
*/ + @SerialName(value = "type") @Required val type: MessageContentImageFileObject.Type, + @SerialName(value = "image_file") @Required val imageFile: MessageContentImageFileObjectImageFile +) { + + /** + * Always `image_file`. + * + * Values: imageFile + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "image_file") imageFile("image_file") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObjectImageFile.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObjectImageFile.kt new file mode 100644 index 000000000..63f845f26 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentImageFileObjectImageFile.kt @@ -0,0 +1,19 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param fileId The [File](/docs/api-reference/files) ID of the image in the message content. */ +@Serializable +data class MessageContentImageFileObjectImageFile( + + /* The [File](/docs/api-reference/files) ID of the image in the message content. 
*/ + @SerialName(value = "file_id") @Required val fileId: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObject.kt new file mode 100644 index 000000000..fcc240a7f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObject.kt @@ -0,0 +1,50 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A citation within the message that points to a specific quote from a specific File associated + * with the assistant or the message. Generated when the assistant uses the \"retrieval\" tool to + * search files. + * + * @param type Always `file_citation`. + * @param text The text in the message content that needs to be replaced. + * @param fileCitation + * @param startIndex + * @param endIndex + */ +@Serializable +data class MessageContentTextAnnotationsFileCitationObject( + + /* Always `file_citation`. */ + @SerialName(value = "type") + @Required + val type: MessageContentTextAnnotationsFileCitationObject.Type, + + /* The text in the message content that needs to be replaced. 
*/ + @SerialName(value = "text") @Required val text: kotlin.String, + @SerialName(value = "file_citation") + @Required + val fileCitation: MessageContentTextAnnotationsFileCitationObjectFileCitation, + @SerialName(value = "start_index") @Required val startIndex: kotlin.Int, + @SerialName(value = "end_index") @Required val endIndex: kotlin.Int +) { + + /** + * Always `file_citation`. + * + * Values: fileCitation + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "file_citation") fileCitation("file_citation") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObjectFileCitation.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObjectFileCitation.kt new file mode 100644 index 000000000..a1e17e2f2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFileCitationObjectFileCitation.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param fileId The ID of the specific File the citation is from. + * @param quote The specific quote in the file. + */ +@Serializable +data class MessageContentTextAnnotationsFileCitationObjectFileCitation( + + /* The ID of the specific File the citation is from. */ + @SerialName(value = "file_id") @Required val fileId: kotlin.String, + + /* The specific quote in the file. 
*/ + @SerialName(value = "quote") @Required val quote: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObject.kt new file mode 100644 index 000000000..77750a10e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObject.kt @@ -0,0 +1,47 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A URL for the file that's generated when the assistant used the `code_interpreter` tool to + * generate a file. + * + * @param type Always `file_path`. + * @param text The text in the message content that needs to be replaced. + * @param filePath + * @param startIndex + * @param endIndex + */ +@Serializable +data class MessageContentTextAnnotationsFilePathObject( + + /* Always `file_path`. */ + @SerialName(value = "type") @Required val type: MessageContentTextAnnotationsFilePathObject.Type, + + /* The text in the message content that needs to be replaced. */ + @SerialName(value = "text") @Required val text: kotlin.String, + @SerialName(value = "file_path") + @Required + val filePath: MessageContentTextAnnotationsFilePathObjectFilePath, + @SerialName(value = "start_index") @Required val startIndex: kotlin.Int, + @SerialName(value = "end_index") @Required val endIndex: kotlin.Int +) { + + /** + * Always `file_path`. 
+ * + * Values: filePath + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "file_path") filePath("file_path") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObjectFilePath.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObjectFilePath.kt new file mode 100644 index 000000000..80ec8cefd --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextAnnotationsFilePathObjectFilePath.kt @@ -0,0 +1,19 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param fileId The ID of the file that was generated. */ +@Serializable +data class MessageContentTextAnnotationsFilePathObjectFilePath( + + /* The ID of the file that was generated. */ + @SerialName(value = "file_id") @Required val fileId: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObject.kt new file mode 100644 index 000000000..4b374223e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObject.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The text content that is part of a message. + * + * @param type Always `text`. + * @param text + */ +@Serializable +data class MessageContentTextObject( + + /* Always `text`. */ + @SerialName(value = "type") @Required val type: MessageContentTextObject.Type, + @SerialName(value = "text") @Required val text: MessageContentTextObjectText +) { + + /** + * Always `text`. + * + * Values: text + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "text") text("text") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectText.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectText.kt new file mode 100644 index 000000000..6380241ca --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectText.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param `value` The data that makes up the text. + * @param annotations + */ +@Serializable +data class MessageContentTextObjectText( + + /* The data that makes up the text. 
*/ + @SerialName(value = "value") @Required val `value`: kotlin.String, + @SerialName(value = "annotations") + @Required + val annotations: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectTextAnnotationsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectTextAnnotationsInner.kt new file mode 100644 index 000000000..2f16c688a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageContentTextObjectTextAnnotationsInner.kt @@ -0,0 +1,49 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type Always `file_citation`. + * @param text The text in the message content that needs to be replaced. + * @param fileCitation + * @param startIndex + * @param endIndex + * @param filePath + */ +@Serializable +data class MessageContentTextObjectTextAnnotationsInner( + + /* Always `file_citation`. */ + @SerialName(value = "type") @Required val type: MessageContentTextObjectTextAnnotationsInner.Type, + + /* The text in the message content that needs to be replaced. 
*/ + @SerialName(value = "text") @Required val text: kotlin.String, + @SerialName(value = "file_citation") + @Required + val fileCitation: MessageContentTextAnnotationsFileCitationObjectFileCitation, + @SerialName(value = "start_index") @Required val startIndex: kotlin.Int, + @SerialName(value = "end_index") @Required val endIndex: kotlin.Int, + @SerialName(value = "file_path") + @Required + val filePath: MessageContentTextAnnotationsFilePathObjectFilePath +) { + + /** + * Always `file_citation`. + * + * Values: citation,path + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "file_citation") citation("file_citation"), + @SerialName(value = "file_path") path("file_path") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageFileObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageFileObject.kt new file mode 100644 index 000000000..ba1058095 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageFileObject.kt @@ -0,0 +1,47 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * A list of files attached to a `message`. + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `thread.message.file`. + * @param createdAt The Unix timestamp (in seconds) for when the message file was created. + * @param messageId The ID of the [message](/docs/api-reference/messages) that the + * [File](/docs/api-reference/files) is attached to. 
+ */ +@Serializable +data class MessageFileObject( + + /* The identifier, which can be referenced in API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `thread.message.file`. */ + @SerialName(value = "object") @Required val `object`: MessageFileObject.`Object`, + + /* The Unix timestamp (in seconds) for when the message file was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. */ + @SerialName(value = "message_id") @Required val messageId: kotlin.String +) { + + /** + * The object type, which is always `thread.message.file`. + * + * Values: threadPeriodMessagePeriodFile + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread.message.file") threadPeriodMessagePeriodFile("thread.message.file") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObject.kt new file mode 100644 index 000000000..d47a9916d --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObject.kt @@ -0,0 +1,90 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a message within a [thread](/docs/api-reference/threads). + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `thread.message`. 
+ * @param createdAt The Unix timestamp (in seconds) for when the message was created. + * @param threadId The [thread](/docs/api-reference/threads) ID that this message belongs to. + * @param role The entity that produced the message. One of `user` or `assistant`. + * @param content The content of the message in array of text and/or images. + * @param assistantId If applicable, the ID of the [assistant](/docs/api-reference/assistants) that + * authored this message. + * @param runId If applicable, the ID of the [run](/docs/api-reference/runs) associated with the + * authoring of this message. + * @param fileIds A list of [file](/docs/api-reference/files) IDs that the assistant should use. + * Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 + * files can be attached to a message. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ +@Serializable +data class MessageObject( + + /* The identifier, which can be referenced in API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `thread.message`. */ + @SerialName(value = "object") @Required val `object`: MessageObject.`Object`, + + /* The Unix timestamp (in seconds) for when the message was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The [thread](/docs/api-reference/threads) ID that this message belongs to. */ + @SerialName(value = "thread_id") @Required val threadId: kotlin.String, + + /* The entity that produced the message. One of `user` or `assistant`. */ + @SerialName(value = "role") @Required val role: MessageObject.Role, + + /* The content of the message in array of text and/or images. 
*/ + @SerialName(value = "content") + @Required + val content: kotlin.collections.List, + + /* If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String?, + + /* If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of this message. */ + @SerialName(value = "run_id") @Required val runId: kotlin.String?, + + /* A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be attached to a message. */ + @SerialName(value = "file_ids") + @Required + val fileIds: kotlin.collections.List = arrayListOf(), + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */ + @SerialName(value = "metadata") @Required val metadata: kotlin.String? +) { + + /** + * The object type, which is always `thread.message`. + * + * Values: threadPeriodMessage + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread.message") threadPeriodMessage("thread.message") + } + /** + * The entity that produced the message. One of `user` or `assistant`. 
+ * + * Values: user,assistant + */ + @Serializable + enum class Role(val value: kotlin.String) { + @SerialName(value = "user") user("user"), + @SerialName(value = "assistant") assistant("assistant") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObjectContentInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObjectContentInner.kt new file mode 100644 index 000000000..8bb9c7aa3 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/MessageObjectContentInner.kt @@ -0,0 +1,37 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type Always `image_file`. + * @param imageFile + * @param text + */ +@Serializable +data class MessageObjectContentInner( + + /* Always `image_file`. */ + @SerialName(value = "type") @Required val type: MessageObjectContentInner.Type, + @SerialName(value = "image_file") @Required val imageFile: MessageContentImageFileObjectImageFile, + @SerialName(value = "text") @Required val text: MessageContentTextObjectText +) { + + /** + * Always `image_file`. 
+ * + * Values: imageFile,text + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "image_file") imageFile("image_file"), + @SerialName(value = "text") text("text") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Model.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Model.kt new file mode 100644 index 000000000..6346f9596 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/Model.kt @@ -0,0 +1,46 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Describes an OpenAI model offering that can be used with the API. + * + * @param id The model identifier, which can be referenced in the API endpoints. + * @param created The Unix timestamp (in seconds) when the model was created. + * @param `object` The object type, which is always \"model\". + * @param ownedBy The organization that owns the model. + */ +@Serializable +data class Model( + + /* The model identifier, which can be referenced in the API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The Unix timestamp (in seconds) when the model was created. */ + @SerialName(value = "created") @Required val created: kotlin.Int, + + /* The object type, which is always \"model\". */ + @SerialName(value = "object") @Required val `object`: Model.`Object`, + + /* The organization that owns the model. */ + @SerialName(value = "owned_by") @Required val ownedBy: kotlin.String +) { + + /** + * The object type, which is always \"model\". 
+ * + * Values: model + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "model") model("model") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt new file mode 100644 index 000000000..6ac4376ae --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt @@ -0,0 +1,52 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param model + * @param name The name of the assistant. The maximum length is 256 characters. + * @param description The description of the assistant. The maximum length is 512 characters. + * @param instructions The system instructions that the assistant uses. The maximum length is 32768 + * characters. + * @param tools A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * @param fileIds A list of [File](/docs/api-reference/files) IDs attached to this assistant. There + * can be a maximum of 20 files attached to the assistant. Files are ordered by their creation + * date in ascending order. If a file was previosuly attached to the list but does not show up in + * the list, it will be deleted from the assistant. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. 
Keys can be a
+ * maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */
+@Serializable
+data class ModifyAssistantRequest(
+ @SerialName(value = "model") val model: kotlin.String? = null,
+
+ /* The name of the assistant. The maximum length is 256 characters. */
+ @SerialName(value = "name") val name: kotlin.String? = null,
+
+ /* The description of the assistant. The maximum length is 512 characters. */
+ @SerialName(value = "description") val description: kotlin.String? = null,
+
+ /* The system instructions that the assistant uses. The maximum length is 32768 characters. */
+ @SerialName(value = "instructions") val instructions: kotlin.String? = null,
+
+ /* A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. */
+ @SerialName(value = "tools")
+ val tools: kotlin.collections.List<AssistantObjectToolsInner>? = arrayListOf(),
+
+ /* A list of [File](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. */
+ @SerialName(value = "file_ids")
+ val fileIds: kotlin.collections.List<kotlin.String>? = arrayListOf(),
+
+ /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+ @SerialName(value = "metadata") val metadata: kotlin.String?
= null
+)
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt
new file mode 100644
index 000000000..7cff6d2f4
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt
@@ -0,0 +1,23 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys can be a
+ * maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */
+@Serializable
+data class ModifyMessageRequest(
+
+ /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+ @SerialName(value = "metadata") val metadata: kotlin.String? = null
+)
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt
new file mode 100644
index 000000000..6bc728f3b
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt
@@ -0,0 +1,23 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys can be a
+ * maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */
+@Serializable
+data class ModifyRunRequest(
+
+ /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+ @SerialName(value = "metadata") val metadata: kotlin.String? = null
+)
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt
new file mode 100644
index 000000000..f4d37bce9
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt
@@ -0,0 +1,23 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format.
Keys can be a
+ * maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */
+@Serializable
+data class ModifyThreadRequest(
+
+ /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+ @SerialName(value = "metadata") val metadata: kotlin.String? = null
+)
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt
new file mode 100644
index 000000000..bbee5509e
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt
@@ -0,0 +1,95 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * The `File` object represents a document that has been uploaded to OpenAI.
+ *
+ * @param id The file identifier, which can be referenced in the API endpoints.
+ * @param bytes The size of the file, in bytes.
+ * @param createdAt The Unix timestamp (in seconds) for when the file was created.
+ * @param filename The name of the file.
+ * @param `object` The object type, which is always `file`.
+ * @param purpose The intended purpose of the file. Supported values are `fine-tune`,
+ * `fine-tune-results`, `assistants`, and `assistants_output`.
+ * @param status Deprecated. The current status of the file, which can be either `uploaded`,
+ * `processed`, or `error`.
+ * @param statusDetails Deprecated.
For details on why a fine-tuning training file failed + * validation, see the `error` field on `fine_tuning.job`. + */ +@Serializable +data class OpenAIFile( + + /* The file identifier, which can be referenced in the API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The size of the file, in bytes. */ + @SerialName(value = "bytes") @Required val bytes: kotlin.Int, + + /* The Unix timestamp (in seconds) for when the file was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The name of the file. */ + @SerialName(value = "filename") @Required val filename: kotlin.String, + + /* The object type, which is always `file`. */ + @SerialName(value = "object") @Required val `object`: OpenAIFile.`Object`, + + /* The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, `assistants`, and `assistants_output`. */ + @SerialName(value = "purpose") @Required val purpose: OpenAIFile.Purpose, + + /* Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. */ + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "status") + @Required + val status: OpenAIFile.Status, + + /* Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. */ + @Deprecated(message = "This property is deprecated.") + @SerialName(value = "status_details") + val statusDetails: kotlin.String? = null +) { + + /** + * The object type, which is always `file`. + * + * Values: file + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "file") file("file") + } + /** + * The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + * `assistants`, and `assistants_output`. 
+ * + * Values: fineMinusTune,fineMinusTuneMinusResults,assistants,assistantsOutput + */ + @Serializable + enum class Purpose(val value: kotlin.String) { + @SerialName(value = "fine-tune") fineMinusTune("fine-tune"), + @SerialName(value = "fine-tune-results") fineMinusTuneMinusResults("fine-tune-results"), + @SerialName(value = "assistants") assistants("assistants"), + @SerialName(value = "assistants_output") assistantsOutput("assistants_output") + } + /** + * Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + * `error`. + * + * Values: uploaded,processed,error + */ + @Serializable + enum class Status(val value: kotlin.String) { + @SerialName(value = "uploaded") uploaded("uploaded"), + @SerialName(value = "processed") processed("processed"), + @SerialName(value = "error") error("error") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt new file mode 100644 index 000000000..5b7da0129 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt @@ -0,0 +1,127 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents an execution run on a [thread](/docs/api-reference/threads). + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `thread.run`. + * @param createdAt The Unix timestamp (in seconds) for when the run was created. 
+ * @param threadId The ID of the [thread](/docs/api-reference/threads) that was executed on as a + * part of this run. + * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) used for execution + * of this run. + * @param status The status of the run, which can be either `queued`, `in_progress`, + * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + * @param requiredAction + * @param lastError + * @param expiresAt The Unix timestamp (in seconds) for when the run will expire. + * @param startedAt The Unix timestamp (in seconds) for when the run was started. + * @param cancelledAt The Unix timestamp (in seconds) for when the run was cancelled. + * @param failedAt The Unix timestamp (in seconds) for when the run failed. + * @param completedAt The Unix timestamp (in seconds) for when the run was completed. + * @param model The model that the [assistant](/docs/api-reference/assistants) used for this run. + * @param instructions The instructions that the [assistant](/docs/api-reference/assistants) used + * for this run. + * @param tools The list of tools that the [assistant](/docs/api-reference/assistants) used for this + * run. + * @param fileIds The list of [File](/docs/api-reference/files) IDs the + * [assistant](/docs/api-reference/assistants) used for this run. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maxium of 512 characters long. + */ +@Serializable +data class RunObject( + + /* The identifier, which can be referenced in API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `thread.run`. */ + @SerialName(value = "object") @Required val `object`: RunObject.`Object`, + + /* The Unix timestamp (in seconds) for when the run was created. 
*/ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. */ + @SerialName(value = "thread_id") @Required val threadId: kotlin.String, + + /* The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String, + + /* The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. */ + @SerialName(value = "status") @Required val status: RunObject.Status, + @SerialName(value = "required_action") @Required val requiredAction: RunObjectRequiredAction?, + @SerialName(value = "last_error") @Required val lastError: RunObjectLastError?, + + /* The Unix timestamp (in seconds) for when the run will expire. */ + @SerialName(value = "expires_at") @Required val expiresAt: kotlin.Int, + + /* The Unix timestamp (in seconds) for when the run was started. */ + @SerialName(value = "started_at") @Required val startedAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run was cancelled. */ + @SerialName(value = "cancelled_at") @Required val cancelledAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run failed. */ + @SerialName(value = "failed_at") @Required val failedAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run was completed. */ + @SerialName(value = "completed_at") @Required val completedAt: kotlin.Int?, + + /* The model that the [assistant](/docs/api-reference/assistants) used for this run. */ + @SerialName(value = "model") @Required val model: kotlin.String, + + /* The instructions that the [assistant](/docs/api-reference/assistants) used for this run. 
*/
+ @SerialName(value = "instructions") @Required val instructions: kotlin.String,
+
+ /* The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. */
+ @SerialName(value = "tools")
+ @Required
+ val tools: kotlin.collections.List<AssistantObjectToolsInner> = arrayListOf(),
+
+ /* The list of [File](/docs/api-reference/files) IDs the [assistant](/docs/api-reference/assistants) used for this run. */
+ @SerialName(value = "file_ids")
+ @Required
+ val fileIds: kotlin.collections.List<kotlin.String> = arrayListOf(),
+
+ /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+ @SerialName(value = "metadata") @Required val metadata: kotlin.String?
+) {
+
+ /**
+ * The object type, which is always `thread.run`.
+ *
+ * Values: threadPeriodRun
+ */
+ @Serializable
+ enum class `Object`(val value: kotlin.String) {
+ @SerialName(value = "thread.run") threadPeriodRun("thread.run")
+ }
+ /**
+ * The status of the run, which can be either `queued`, `in_progress`, `requires_action`,
+ * `cancelling`, `cancelled`, `failed`, `completed`, or `expired`.
+ * + * Values: queued,inProgress,requiresAction,cancelling,cancelled,failed,completed,expired + */ + @Serializable + enum class Status(val value: kotlin.String) { + @SerialName(value = "queued") queued("queued"), + @SerialName(value = "in_progress") inProgress("in_progress"), + @SerialName(value = "requires_action") requiresAction("requires_action"), + @SerialName(value = "cancelling") cancelling("cancelling"), + @SerialName(value = "cancelled") cancelled("cancelled"), + @SerialName(value = "failed") failed("failed"), + @SerialName(value = "completed") completed("completed"), + @SerialName(value = "expired") expired("expired") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt new file mode 100644 index 000000000..da5a86ad8 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt @@ -0,0 +1,39 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The last error associated with this run. Will be `null` if there are no errors. + * + * @param code One of `server_error` or `rate_limit_exceeded`. + * @param message A human-readable description of the error. + */ +@Serializable +data class RunObjectLastError( + + /* One of `server_error` or `rate_limit_exceeded`. */ + @SerialName(value = "code") @Required val code: RunObjectLastError.Code, + + /* A human-readable description of the error. 
*/ + @SerialName(value = "message") @Required val message: kotlin.String +) { + + /** + * One of `server_error` or `rate_limit_exceeded`. + * + * Values: serverError,rateLimitExceeded + */ + @Serializable + enum class Code(val value: kotlin.String) { + @SerialName(value = "server_error") serverError("server_error"), + @SerialName(value = "rate_limit_exceeded") rateLimitExceeded("rate_limit_exceeded") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt new file mode 100644 index 000000000..e532a7769 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt @@ -0,0 +1,38 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Details on the action required to continue the run. Will be `null` if no action is required. + * + * @param type For now, this is always `submit_tool_outputs`. + * @param submitToolOutputs + */ +@Serializable +data class RunObjectRequiredAction( + + /* For now, this is always `submit_tool_outputs`. */ + @SerialName(value = "type") @Required val type: RunObjectRequiredAction.Type, + @SerialName(value = "submit_tool_outputs") + @Required + val submitToolOutputs: RunObjectRequiredActionSubmitToolOutputs +) { + + /** + * For now, this is always `submit_tool_outputs`. 
+ * + * Values: submitToolOutputs + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "submit_tool_outputs") submitToolOutputs("submit_tool_outputs") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt new file mode 100644 index 000000000..8b2439141 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Details on the tool outputs needed for this run to continue. + * + * @param toolCalls A list of the relevant tool calls. + */ +@Serializable +data class RunObjectRequiredActionSubmitToolOutputs( + + /* A list of the relevant tool calls. */ + @SerialName(value = "tool_calls") + @Required + val toolCalls: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObject.kt new file mode 100644 index 000000000..8e02217d0 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObject.kt @@ -0,0 +1,38 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * Details of the message creation by the run step.
+ *
+ * @param type Always `message_creation`.
+ * @param messageCreation
+ */
+@Serializable
+data class RunStepDetailsMessageCreationObject(
+
+ /* Always `message_creation`. */
+ @SerialName(value = "type") @Required val type: RunStepDetailsMessageCreationObject.Type,
+ @SerialName(value = "message_creation")
+ @Required
+ val messageCreation: RunStepDetailsMessageCreationObjectMessageCreation
+) {
+
+ /**
+ * Always `message_creation`.
+ *
+ * Values: messageCreation
+ */
+ @Serializable
+ enum class Type(val value: kotlin.String) {
+ @SerialName(value = "message_creation") messageCreation("message_creation")
+ }
+}
diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt
new file mode 100644
index 000000000..a62832d51
--- /dev/null
+++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt
@@ -0,0 +1,19 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/** @param messageId The ID of the message that was created by this run step.
*/ +@Serializable +data class RunStepDetailsMessageCreationObjectMessageCreation( + + /* The ID of the message that was created by this run step. */ + @SerialName(value = "message_id") @Required val messageId: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt new file mode 100644 index 000000000..a2e9986dd --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt @@ -0,0 +1,44 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Details of the Code Interpreter tool call the run step was involved in. + * + * @param id The ID of the tool call. + * @param type The type of tool call. This is always going to be `code_interpreter` for this type of + * tool call. + * @param codeInterpreter + */ +@Serializable +data class RunStepDetailsToolCallsCodeObject( + + /* The ID of the tool call. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of tool call. This is always going to be `code_interpreter` for this type of tool call. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsCodeObject.Type, + @SerialName(value = "code_interpreter") + @Required + val codeInterpreter: RunStepDetailsToolCallsCodeObjectCodeInterpreter +) { + + /** + * The type of tool call. This is always going to be `code_interpreter` for this type of tool + * call. 
+ * + * Values: codeInterpreter + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "code_interpreter") codeInterpreter("code_interpreter") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt new file mode 100644 index 000000000..8965898f4 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt @@ -0,0 +1,31 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The Code Interpreter tool call definition. + * + * @param input The input to the Code Interpreter tool call. + * @param outputs The outputs from the Code Interpreter tool call. Code Interpreter can output one + * or more items, including text (`logs`) or images (`image`). Each of these are represented by a + * different object type. + */ +@Serializable +data class RunStepDetailsToolCallsCodeObjectCodeInterpreter( + + /* The input to the Code Interpreter tool call. */ + @SerialName(value = "input") @Required val input: kotlin.String, + + /* The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. 
*/ + @SerialName(value = "outputs") + @Required + val outputs: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.kt new file mode 100644 index 000000000..25cc5e8e9 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.kt @@ -0,0 +1,43 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type Always `logs`. + * @param logs The text output from the Code Interpreter tool call. + * @param image + */ +@Serializable +data class RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner( + + /* Always `logs`. */ + @SerialName(value = "type") + @Required + val type: RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsInner.Type, + + /* The text output from the Code Interpreter tool call. */ + @SerialName(value = "logs") @Required val logs: kotlin.String, + @SerialName(value = "image") + @Required + val image: RunStepDetailsToolCallsCodeOutputImageObjectImage +) { + + /** + * Always `logs`. 
+ * + * Values: logs,image + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "logs") logs("logs"), + @SerialName(value = "image") image("image") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObject.kt new file mode 100644 index 000000000..5d094df1f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObject.kt @@ -0,0 +1,36 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param type Always `image`. + * @param image + */ +@Serializable +data class RunStepDetailsToolCallsCodeOutputImageObject( + + /* Always `image`. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsCodeOutputImageObject.Type, + @SerialName(value = "image") + @Required + val image: RunStepDetailsToolCallsCodeOutputImageObjectImage +) { + + /** + * Always `image`. 
+ * + * Values: image + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "image") image("image") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObjectImage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObjectImage.kt new file mode 100644 index 000000000..66c597359 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputImageObjectImage.kt @@ -0,0 +1,19 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param fileId The [file](/docs/api-reference/files) ID of the image. */ +@Serializable +data class RunStepDetailsToolCallsCodeOutputImageObjectImage( + + /* The [file](/docs/api-reference/files) ID of the image. */ + @SerialName(value = "file_id") @Required val fileId: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputLogsObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputLogsObject.kt new file mode 100644 index 000000000..00f208216 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeOutputLogsObject.kt @@ -0,0 +1,38 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Text output from the Code Interpreter tool call as part of a run step. + * + * @param type Always `logs`. + * @param logs The text output from the Code Interpreter tool call. + */ +@Serializable +data class RunStepDetailsToolCallsCodeOutputLogsObject( + + /* Always `logs`. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsCodeOutputLogsObject.Type, + + /* The text output from the Code Interpreter tool call. */ + @SerialName(value = "logs") @Required val logs: kotlin.String +) { + + /** + * Always `logs`. + * + * Values: logs + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "logs") logs("logs") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObject.kt new file mode 100644 index 000000000..859d8763c --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObject.kt @@ -0,0 +1,41 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id The ID of the tool call object. + * @param type The type of tool call. This is always going to be `function` for this type of tool + * call. 
+ * @param function + */ +@Serializable +data class RunStepDetailsToolCallsFunctionObject( + + /* The ID of the tool call object. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of tool call. This is always going to be `function` for this type of tool call. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsFunctionObject.Type, + @SerialName(value = "function") + @Required + val function: RunStepDetailsToolCallsFunctionObjectFunction +) { + + /** + * The type of tool call. This is always going to be `function` for this type of tool call. + * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObjectFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObjectFunction.kt new file mode 100644 index 000000000..9e4262adf --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsFunctionObjectFunction.kt @@ -0,0 +1,32 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The definition of the function that was called. + * + * @param name The name of the function. + * @param arguments The arguments passed to the function. + * @param output The output of the function. This will be `null` if the outputs have not been + * [submitted](/docs/api-reference/runs/submitToolOutputs) yet. 
+ */ +@Serializable +data class RunStepDetailsToolCallsFunctionObjectFunction( + + /* The name of the function. */ + @SerialName(value = "name") @Required val name: kotlin.String, + + /* The arguments passed to the function. */ + @SerialName(value = "arguments") @Required val arguments: kotlin.String, + + /* The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. */ + @SerialName(value = "output") @Required val output: kotlin.String? +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObject.kt new file mode 100644 index 000000000..ed55dbeae --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObject.kt @@ -0,0 +1,41 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Details of the tool call. + * + * @param type Always `tool_calls`. + * @param toolCalls An array of tool calls the run step was involved in. These can be associated + * with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. + */ +@Serializable +data class RunStepDetailsToolCallsObject( + + /* Always `tool_calls`. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsObject.Type, + + /* An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. 
*/ + @SerialName(value = "tool_calls") + @Required + val toolCalls: kotlin.collections.List +) { + + /** + * Always `tool_calls`. + * + * Values: toolCalls + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "tool_calls") toolCalls("tool_calls") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObjectToolCallsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObjectToolCallsInner.kt new file mode 100644 index 000000000..f0d34723a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsObjectToolCallsInner.kt @@ -0,0 +1,52 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id The ID of the tool call object. + * @param type The type of tool call. This is always going to be `code_interpreter` for this type of + * tool call. + * @param codeInterpreter + * @param retrieval For now, this is always going to be an empty object. + * @param function + */ +@Serializable +data class RunStepDetailsToolCallsObjectToolCallsInner( + + /* The ID of the tool call object. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of tool call. This is always going to be `code_interpreter` for this type of tool call. 
*/ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsObjectToolCallsInner.Type, + @SerialName(value = "code_interpreter") + @Required + val codeInterpreter: RunStepDetailsToolCallsCodeObjectCodeInterpreter, + + /* For now, this is always going to be an empty object. */ + @SerialName(value = "retrieval") @Required val retrieval: kotlin.String, + @SerialName(value = "function") + @Required + val function: RunStepDetailsToolCallsFunctionObjectFunction +) { + + /** + * The type of tool call. This is always going to be `code_interpreter` for this type of tool + * call. + * + * Values: codeInterpreter,retrieval,function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "code_interpreter") codeInterpreter("code_interpreter"), + @SerialName(value = "retrieval") retrieval("retrieval"), + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsRetrievalObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsRetrievalObject.kt new file mode 100644 index 000000000..99f49491c --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsRetrievalObject.kt @@ -0,0 +1,41 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param id The ID of the tool call object. + * @param type The type of tool call. This is always going to be `retrieval` for this type of tool + * call. 
+ * @param retrieval For now, this is always going to be an empty object. + */ +@Serializable +data class RunStepDetailsToolCallsRetrievalObject( + + /* The ID of the tool call object. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of tool call. This is always going to be `retrieval` for this type of tool call. */ + @SerialName(value = "type") @Required val type: RunStepDetailsToolCallsRetrievalObject.Type, + + /* For now, this is always going to be an empty object. */ + @SerialName(value = "retrieval") @Required val retrieval: kotlin.String +) { + + /** + * The type of tool call. This is always going to be `retrieval` for this type of tool call. + * + * Values: retrieval + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "retrieval") retrieval("retrieval") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt new file mode 100644 index 000000000..c96d0d090 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt @@ -0,0 +1,115 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a step in execution of a run. + * + * @param id The identifier of the run step, which can be referenced in API endpoints. + * @param `object` The object type, which is always `thread.run.step``. + * @param createdAt The Unix timestamp (in seconds) for when the run step was created. 
+ * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) associated with the + * run step. + * @param threadId The ID of the [thread](/docs/api-reference/threads) that was run. + * @param runId The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + * @param type The type of run step, which can be either `message_creation` or `tool_calls`. + * @param status The status of the run step, which can be either `in_progress`, `cancelled`, + * `failed`, `completed`, or `expired`. + * @param stepDetails + * @param lastError + * @param expiredAt The Unix timestamp (in seconds) for when the run step expired. A step is + * considered expired if the parent run is expired. + * @param cancelledAt The Unix timestamp (in seconds) for when the run step was cancelled. + * @param failedAt The Unix timestamp (in seconds) for when the run step failed. + * @param completedAt The Unix timestamp (in seconds) for when the run step completed. + * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maxium of 512 characters long. + */ +@Serializable +data class RunStepObject( + + /* The identifier of the run step, which can be referenced in API endpoints. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The object type, which is always `thread.run.step``. */ + @SerialName(value = "object") @Required val `object`: RunStepObject.`Object`, + + /* The Unix timestamp (in seconds) for when the run step was created. */ + @SerialName(value = "created_at") @Required val createdAt: kotlin.Int, + + /* The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. */ + @SerialName(value = "assistant_id") @Required val assistantId: kotlin.String, + + /* The ID of the [thread](/docs/api-reference/threads) that was run. 
*/ + @SerialName(value = "thread_id") @Required val threadId: kotlin.String, + + /* The ID of the [run](/docs/api-reference/runs) that this run step is a part of. */ + @SerialName(value = "run_id") @Required val runId: kotlin.String, + + /* The type of run step, which can be either `message_creation` or `tool_calls`. */ + @SerialName(value = "type") @Required val type: RunStepObject.Type, + + /* The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. */ + @SerialName(value = "status") @Required val status: RunStepObject.Status, + @SerialName(value = "step_details") @Required val stepDetails: RunStepObjectStepDetails, + @SerialName(value = "last_error") @Required val lastError: RunStepObjectLastError?, + + /* The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. */ + @SerialName(value = "expired_at") @Required val expiredAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run step was cancelled. */ + @SerialName(value = "cancelled_at") @Required val cancelledAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run step failed. */ + @SerialName(value = "failed_at") @Required val failedAt: kotlin.Int?, + + /* The Unix timestamp (in seconds) for when the run step completed. */ + @SerialName(value = "completed_at") @Required val completedAt: kotlin.Int?, + + /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ + @SerialName(value = "metadata") @Required val metadata: kotlin.String? +) { + + /** + * The object type, which is always `thread.run.step``. 
+ * + * Values: threadPeriodRunPeriodStep + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread.run.step") threadPeriodRunPeriodStep("thread.run.step") + } + /** + * The type of run step, which can be either `message_creation` or `tool_calls`. + * + * Values: messageCreation,toolCalls + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "message_creation") messageCreation("message_creation"), + @SerialName(value = "tool_calls") toolCalls("tool_calls") + } + /** + * The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + * `completed`, or `expired`. + * + * Values: inProgress,cancelled,failed,completed,expired + */ + @Serializable + enum class Status(val value: kotlin.String) { + @SerialName(value = "in_progress") inProgress("in_progress"), + @SerialName(value = "cancelled") cancelled("cancelled"), + @SerialName(value = "failed") failed("failed"), + @SerialName(value = "completed") completed("completed"), + @SerialName(value = "expired") expired("expired") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectLastError.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectLastError.kt new file mode 100644 index 000000000..dda51e62f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectLastError.kt @@ -0,0 +1,39 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The last error associated with this run step. Will be `null` if there are no errors. 
+ * + * @param code One of `server_error` or `rate_limit_exceeded`. + * @param message A human-readable description of the error. + */ +@Serializable +data class RunStepObjectLastError( + + /* One of `server_error` or `rate_limit_exceeded`. */ + @SerialName(value = "code") @Required val code: RunStepObjectLastError.Code, + + /* A human-readable description of the error. */ + @SerialName(value = "message") @Required val message: kotlin.String +) { + + /** + * One of `server_error` or `rate_limit_exceeded`. + * + * Values: serverError,rateLimitExceeded + */ + @Serializable + enum class Code(val value: kotlin.String) { + @SerialName(value = "server_error") serverError("server_error"), + @SerialName(value = "rate_limit_exceeded") rateLimitExceeded("rate_limit_exceeded") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectStepDetails.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectStepDetails.kt new file mode 100644 index 000000000..2075a7e6f --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObjectStepDetails.kt @@ -0,0 +1,46 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The details of the run step. + * + * @param type Always `message_creation``. + * @param messageCreation + * @param toolCalls An array of tool calls the run step was involved in. These can be associated + * with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. + */ +@Serializable +data class RunStepObjectStepDetails( + + /* Always `message_creation``. 
*/ + @SerialName(value = "type") @Required val type: RunStepObjectStepDetails.Type, + @SerialName(value = "message_creation") + @Required + val messageCreation: RunStepDetailsMessageCreationObjectMessageCreation, + + /* An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. */ + @SerialName(value = "tool_calls") + @Required + val toolCalls: kotlin.collections.List +) { + + /** + * Always `message_creation``. + * + * Values: messageCreation,toolCalls + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "message_creation") messageCreation("message_creation"), + @SerialName(value = "tool_calls") toolCalls("tool_calls") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObject.kt new file mode 100644 index 000000000..e586daa74 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObject.kt @@ -0,0 +1,41 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Tool call objects + * + * @param id The ID of the tool call. This ID must be referenced when you submit the tool outputs in + * using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + * @param type The type of tool call the output is required for. For now, this is always `function`. + * @param function + */ +@Serializable +data class RunToolCallObject( + + /* The ID of the tool call. 
This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. */ + @SerialName(value = "id") @Required val id: kotlin.String, + + /* The type of tool call the output is required for. For now, this is always `function`. */ + @SerialName(value = "type") @Required val type: RunToolCallObject.Type, + @SerialName(value = "function") @Required val function: RunToolCallObjectFunction +) { + + /** + * The type of tool call the output is required for. For now, this is always `function`. + * + * Values: function + */ + @Serializable + enum class Type(val value: kotlin.String) { + @SerialName(value = "function") function("function") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObjectFunction.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObjectFunction.kt new file mode 100644 index 000000000..4c79f80ea --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunToolCallObjectFunction.kt @@ -0,0 +1,27 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * The function definition. + * + * @param name The name of the function. + * @param arguments The arguments that the model expects you to pass to the function. + */ +@Serializable +data class RunToolCallObjectFunction( + + /* The name of the function. */ + @SerialName(value = "name") @Required val name: kotlin.String, + + /* The arguments that the model expects you to pass to the function. 
*/ + @SerialName(value = "arguments") @Required val arguments: kotlin.String +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequest.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequest.kt new file mode 100644 index 000000000..8db73f8a6 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequest.kt @@ -0,0 +1,21 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** @param toolOutputs A list of tools for which the outputs are being submitted. */ +@Serializable +data class SubmitToolOutputsRunRequest( + + /* A list of tools for which the outputs are being submitted. */ + @SerialName(value = "tool_outputs") + @Required + val toolOutputs: kotlin.collections.List +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequestToolOutputsInner.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequestToolOutputsInner.kt new file mode 100644 index 000000000..8355b2c6d --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/SubmitToolOutputsRunRequestToolOutputsInner.kt @@ -0,0 +1,26 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param toolCallId The ID of the tool call in the `required_action` object within the run object + * the output is being submitted for. + * @param output The output of the tool call to be submitted to continue the run. + */ +@Serializable +data class SubmitToolOutputsRunRequestToolOutputsInner( + + /* The ID of the tool call in the `required_action` object within the run object the output is being submitted for. */ + @SerialName(value = "tool_call_id") val toolCallId: kotlin.String? = null, + + /* The output of the tool call to be submitted to continue the run. */ + @SerialName(value = "output") val output: kotlin.String? = null +) diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ThreadObject.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ThreadObject.kt new file mode 100644 index 000000000..b45b23191 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ThreadObject.kt @@ -0,0 +1,48 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Represents a thread that contains [messages](/docs/api-reference/messages). + * + * @param id The identifier, which can be referenced in API endpoints. + * @param `object` The object type, which is always `thread`. 
+ * @param createdAt The Unix timestamp (in seconds) for when the thread was created.
+ * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
+ *   for storing additional information about the object in a structured format. Keys can be a
+ *   maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */
+@Serializable
+data class ThreadObject(
+
+  /* The identifier, which can be referenced in API endpoints. */
+  @SerialName(value = "id") @Required val id: kotlin.String,
+
+  /* The object type, which is always `thread`. */
+  @SerialName(value = "object") @Required val `object`: ThreadObject.`Object`,
+
+  /* The Unix timestamp (in seconds) for when the thread was created. */
+  @SerialName(value = "created_at") @Required val createdAt: kotlin.Int,
+
+  /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
+  @SerialName(value = "metadata") @Required val metadata: kotlin.String?
+) {
+
+  /**
+   * The object type, which is always `thread`.
+ * + * Values: thread + */ + @Serializable + enum class `Object`(val value: kotlin.String) { + @SerialName(value = "thread") thread("thread") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContent.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContent.kt new file mode 100644 index 000000000..c103d9b20 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContent.kt @@ -0,0 +1,23 @@ +package com.xebia.functional.openai.models.ext.chat + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface ChatCompletionRequestUserMessageContent { + @Serializable + @JvmInline + value class TextContent(val s: String) : ChatCompletionRequestUserMessageContent + + @Serializable + @JvmInline + value class ChatCompletionRequestUserMessageContentTextArray( + val array: List + ) : ChatCompletionRequestUserMessageContent + + @Serializable + @JvmInline + value class ChatCompletionRequestUserMessageContentImageArray( + val array: List + ) : ChatCompletionRequestUserMessageContent +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImage.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImage.kt new file mode 100644 index 000000000..dc98128a5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImage.kt @@ -0,0 +1,18 @@ +package com.xebia.functional.openai.models.ext.chat + +import kotlinx.serialization.Required +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +data class ChatCompletionRequestUserMessageContentImage( + 
@SerialName(value = "type") @Required val type: Type, + @SerialName(value = "image_url") + @Required + val imageUrl: ChatCompletionRequestUserMessageContentImageUrl +) { + @Serializable + enum class Type(val value: String) { + @SerialName(value = "image_url") imageUrl("image_url") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImageUrl.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImageUrl.kt new file mode 100644 index 000000000..9e7bb4bbd --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentImageUrl.kt @@ -0,0 +1,27 @@ +package com.xebia.functional.openai.models.ext.chat + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +data class ChatCompletionRequestUserMessageContentImageUrl( + + /* Either a URL of the image or the base64 encoded image data. */ + @SerialName(value = "url") val url: String? = null, + + /* Specifies the detail level of the image. */ + @SerialName(value = "detail") val detail: Detail? = Detail.auto +) { + + /** + * Specifies the detail level of the image. 
+ * + * Values: auto,low,high + */ + @Serializable + enum class Detail(val value: kotlin.String) { + @SerialName(value = "auto") auto("auto"), + @SerialName(value = "low") low("low"), + @SerialName(value = "high") high("high") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentText.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentText.kt new file mode 100644 index 000000000..55a8814d3 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/ChatCompletionRequestUserMessageContentText.kt @@ -0,0 +1,16 @@ +package com.xebia.functional.openai.models.ext.chat + +import kotlinx.serialization.Required +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +data class ChatCompletionRequestUserMessageContentText( + @SerialName(value = "type") @Required val type: Type, + @SerialName(value = "text") @Required val text: String +) { + @Serializable + enum class Type(val value: String) { + @SerialName(value = "text") text("text") + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestModel.kt new file mode 100644 index 000000000..7dbc523f4 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestModel.kt @@ -0,0 +1,19 @@ +package com.xebia.functional.openai.models.ext.chat.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateChatCompletionRequestModel(val value: String) { + @SerialName(value = "gpt-4") `gpt_4`("gpt-4"), + @SerialName(value = "gpt-4-0314") 
`gpt_4_0314`("gpt-4-0314"), + @SerialName(value = "gpt-4-0613") `gpt_4_0613`("gpt-4-0613"), + @SerialName(value = "gpt-4-32k") `gpt_4_32k`("gpt-4-32k"), + @SerialName(value = "gpt-4-32k-0314") `gpt_4_32k_0314`("gpt-4-32k-0314"), + @SerialName(value = "gpt-4-32k-0613") `gpt_4_32k_0613`("gpt-4-32k-0613"), + @SerialName(value = "gpt-3.5-turbo") `gpt_3_5_turbo`("gpt-3.5-turbo"), + @SerialName(value = "gpt-3.5-turbo-16k") `gpt_3_5_turbo_16k`("gpt-3.5-turbo-16k"), + @SerialName(value = "gpt-3.5-turbo-0301") `gpt_3_5_turbo_0301`("gpt-3.5-turbo-0301"), + @SerialName(value = "gpt-3.5-turbo-0613") `gpt_3_5_turbo_0613`("gpt-3.5-turbo-0613"), + @SerialName(value = "gpt-3.5-turbo-16k-0613") `gpt_3_5_turbo_16k_0613`("gpt-3.5-turbo-16k-0613") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestStop.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestStop.kt new file mode 100644 index 000000000..81b7f7cba --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/chat/create/CreateChatCompletionRequestStop.kt @@ -0,0 +1,13 @@ +package com.xebia.functional.openai.models.ext.chat.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateChatCompletionRequestStop { + @Serializable @JvmInline value class StringValue(val s: String) : CreateChatCompletionRequestStop + + @Serializable + @JvmInline + value class ArrayValue(val array: List) : CreateChatCompletionRequestStop +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestModel.kt new file mode 100644 index 000000000..1a5b09e76 --- /dev/null +++ 
b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestModel.kt @@ -0,0 +1,18 @@ +package com.xebia.functional.openai.models.ext.completion.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateCompletionRequestModel(val value: String) { + @SerialName(value = "babbage-002") `babbage_002`("babbage-002"), + @SerialName(value = "davinci-002") `davinci_002`("davinci-002"), + @SerialName(value = "gpt-3.5-turbo-instruct") `gpt_3_5_turbo_instruct`("gpt-3.5-turbo-instruct"), + @SerialName(value = "text-davinci-003") `text_davinci_003`("text-davinci-003"), + @SerialName(value = "text-davinci-002") `text_davinci_002`("text-davinci-002"), + @SerialName(value = "text-davinci-001") `text_davinci_001`("text-davinci-001"), + @SerialName(value = "code-davinci-002") `code_davinci_002`("code-davinci-002"), + @SerialName(value = "text-curie-001") `text_curie_001`("text-curie-001"), + @SerialName(value = "text-babbage-001") `text_babbage_001`("text-babbage-001"), + @SerialName(value = "text-ada-001") `text_ada_001`("text-ada-001") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestPrompt.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestPrompt.kt new file mode 100644 index 000000000..c2886e0b8 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestPrompt.kt @@ -0,0 +1,22 @@ +package com.xebia.functional.openai.models.ext.completion.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateCompletionRequestPrompt { + + @Serializable @JvmInline value class StringValue(val v: String) : CreateCompletionRequestPrompt + + @Serializable + @JvmInline + value class 
StringArrayValue(val v: List) : CreateCompletionRequestPrompt + + @Serializable + @JvmInline + value class IntArrayValue(val v: List) : CreateCompletionRequestPrompt + + @Serializable + @JvmInline + value class IntArrayArrayValue(val v: List>) : CreateCompletionRequestPrompt +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestStop.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestStop.kt new file mode 100644 index 000000000..3a3adbe20 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/completion/create/CreateCompletionRequestStop.kt @@ -0,0 +1,16 @@ +package com.xebia.functional.openai.models.ext.completion.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateCompletionRequestStop { + + @Serializable + @JvmInline + value class StringValue(val v: String = "<|endoftext|>") : CreateCompletionRequestStop + + @Serializable + @JvmInline + value class StringArrayValue(val v: List) : CreateCompletionRequestStop +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/edit/create/CreateEditRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/edit/create/CreateEditRequestModel.kt new file mode 100644 index 000000000..cf269cb58 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/edit/create/CreateEditRequestModel.kt @@ -0,0 +1,10 @@ +package com.xebia.functional.openai.models.ext.edit.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateEditRequestModel(val value: String) { + @SerialName(value = "text-davinci-edit-001") `text_davinci_edit_001`("text-davinci-edit-001"), + @SerialName(value = "code-davinci-edit-001") 
`code_davinci_edit_001`("code-davinci-edit-001") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestInput.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestInput.kt new file mode 100644 index 000000000..c5c581999 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestInput.kt @@ -0,0 +1,22 @@ +package com.xebia.functional.openai.models.ext.embedding.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateEmbeddingRequestInput { + + @Serializable @JvmInline value class StringValue(val v: String) : CreateEmbeddingRequestInput + + @Serializable + @JvmInline + value class StringArrayValue(val v: List) : CreateEmbeddingRequestInput + + @Serializable + @JvmInline + value class IntArrayValue(val v: List) : CreateEmbeddingRequestInput + + @Serializable + @JvmInline + value class IntArrayArrayValue(val v: List>) : CreateEmbeddingRequestInput +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestModel.kt new file mode 100644 index 000000000..3643630e6 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/embedding/create/CreateEmbeddingRequestModel.kt @@ -0,0 +1,9 @@ +package com.xebia.functional.openai.models.ext.embedding.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateEmbeddingRequestModel(val value: String) { + @SerialName(value = "text-embedding-ada-002") `text_embedding_ada_002`("text-embedding-ada-002") +} diff --git 
a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestHyperparametersNEpochs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestHyperparametersNEpochs.kt new file mode 100644 index 000000000..ffd826f90 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestHyperparametersNEpochs.kt @@ -0,0 +1,25 @@ +package com.xebia.functional.openai.models.ext.finetune.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateFineTuneRequestHyperparametersNEpochs { + + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : + CreateFineTuneRequestHyperparametersNEpochs { + init { + require(v == "auto") { "Only Auto is supported" } + } + } + + @Serializable + @JvmInline + value class IntValue(val v: Int) : CreateFineTuneRequestHyperparametersNEpochs { + init { + require(v in 1..50) { "Only values between 1 and 50 are allowed" } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestModel.kt new file mode 100644 index 000000000..34f44bd72 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/create/CreateFineTuneRequestModel.kt @@ -0,0 +1,12 @@ +package com.xebia.functional.openai.models.ext.finetune.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateFineTuneRequestModel(val value: String) { + @SerialName(value = "ada") ada("ada"), + @SerialName(value = "babbage") babbage("babbage"), + @SerialName(value = "curie") curie("curie"), + @SerialName(value = "davinci") 
davinci("davinci") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobHyperparametersNEpochs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobHyperparametersNEpochs.kt new file mode 100644 index 000000000..ec4e83ae0 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobHyperparametersNEpochs.kt @@ -0,0 +1,23 @@ +package com.xebia.functional.openai.models.ext.finetune.job + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface FineTuningJobHyperparametersNEpochs { + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : FineTuningJobHyperparametersNEpochs { + init { + require(v == "auto") { "Only Auto is supported" } + } + } + + @Serializable + @JvmInline + value class IntValue(val v: Int) : FineTuningJobHyperparametersNEpochs { + init { + require(v in 1..50) { "Only values between 1 and 50 are allowed" } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobRequestHyperparametersNEpochs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobRequestHyperparametersNEpochs.kt new file mode 100644 index 000000000..8e9f511de --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/FineTuningJobRequestHyperparametersNEpochs.kt @@ -0,0 +1,24 @@ +package com.xebia.functional.openai.models.ext.finetune.job + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface FineTuningJobRequestHyperparametersNEpochs { + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : + FineTuningJobRequestHyperparametersNEpochs { + init { + require(v == "auto") { "Only Auto is 
supported" } + } + } + + @Serializable + @JvmInline + value class IntValue(val v: Int) : FineTuningJobRequestHyperparametersNEpochs { + init { + require(v in 1..50) { "Only values between 1 and 50 are allowed" } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersBatchSize.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersBatchSize.kt new file mode 100644 index 000000000..18cc6a106 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersBatchSize.kt @@ -0,0 +1,24 @@ +package com.xebia.functional.openai.models.ext.finetune.job.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateFineTuningJobRequestHyperparametersBatchSize { + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : + CreateFineTuningJobRequestHyperparametersBatchSize { + init { + require(v == "auto") { "Only Auto is supported" } + } + } + + @Serializable + @JvmInline + value class IntValue(val v: Int) : CreateFineTuningJobRequestHyperparametersBatchSize { + init { + require(v in 1..256) { "Only values between 1 and 256 are allowed" } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersLearningRateMultiplier.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersLearningRateMultiplier.kt new file mode 100644 index 000000000..1dbb8d07a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersLearningRateMultiplier.kt @@ -0,0 
+1,25 @@ +package com.xebia.functional.openai.models.ext.finetune.job.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateFineTuningJobRequestHyperparametersLearningRateMultiplier { + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : + CreateFineTuningJobRequestHyperparametersLearningRateMultiplier { + init { + require(v == "auto") { "Only Auto is supported" } + } + } + + @Serializable + @JvmInline + value class DoubleValue(val v: Double) : + CreateFineTuningJobRequestHyperparametersLearningRateMultiplier { + init { + require(v > 0) { "Only values greater than 0 are allowed" } + } + } +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersNEpochs.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersNEpochs.kt new file mode 100644 index 000000000..97414d07e --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestHyperparametersNEpochs.kt @@ -0,0 +1,24 @@ +package com.xebia.functional.openai.models.ext.finetune.job.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateFineTuningJobRequestHyperparametersNEpochs { + @Serializable + @JvmInline + value class AutoValue(private val v: String = "auto") : + CreateFineTuningJobRequestHyperparametersNEpochs { + init { + require(v == "auto") { "Only Auto is supported" } + } + } + + @Serializable + @JvmInline + value class IntValue(val v: Int) : CreateFineTuningJobRequestHyperparametersNEpochs { + init { + require(v in 1..50) { "Only values between 1 and 50 are allowed" } + } + } +} diff --git 
a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestModel.kt new file mode 100644 index 000000000..fe7c6f705 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/finetune/job/create/CreateFineTuningJobRequestModel.kt @@ -0,0 +1,11 @@ +package com.xebia.functional.openai.models.ext.finetune.job.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateFineTuningJobRequestModel(val value: String) { + @SerialName(value = "babbage-002") `babbage_002`("babbage-002"), + @SerialName(value = "davinci-002") `davinci_002`("davinci-002"), + @SerialName(value = "gpt-3.5-turbo") `gpt_3_5_turbo`("gpt-3.5-turbo") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/create/CreateImageRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/create/CreateImageRequestModel.kt new file mode 100644 index 000000000..a1c8500f2 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/create/CreateImageRequestModel.kt @@ -0,0 +1,10 @@ +package com.xebia.functional.openai.models.ext.image.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateImageRequestModel(val value: String) { + @SerialName(value = "dall-e-2") `dall_e_2`("dall-e-2"), + @SerialName(value = "dall-e-3") `dall_e_3`("dall-e-3") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/edit/create/CreateImageEditRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/edit/create/CreateImageEditRequestModel.kt new file mode 100644 index 
000000000..6f049a976 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/image/edit/create/CreateImageEditRequestModel.kt @@ -0,0 +1,9 @@ +package com.xebia.functional.openai.models.ext.image.edit.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateImageEditRequestModel(val value: String) { + @SerialName(value = "dall-e-2") `dall-e-2`("dall-e-2") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestInput.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestInput.kt new file mode 100644 index 000000000..49746c36a --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestInput.kt @@ -0,0 +1,14 @@ +package com.xebia.functional.openai.models.ext.moderation.create + +import kotlin.jvm.JvmInline +import kotlinx.serialization.Serializable + +@Serializable +sealed interface CreateModerationRequestInput { + + @Serializable @JvmInline value class StringValue(val v: String) : CreateModerationRequestInput + + @Serializable + @JvmInline + value class StringArrayValue(val v: List) : CreateModerationRequestInput +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestModel.kt new file mode 100644 index 000000000..cfcaf98c5 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/moderation/create/CreateModerationRequestModel.kt @@ -0,0 +1,10 @@ +package com.xebia.functional.openai.models.ext.moderation.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + 
+@Serializable +enum class CreateModerationRequestModel(val value: String) { + @SerialName(value = "text-moderation-latest") `text_moderation_latest`("text-moderation-latest"), + @SerialName(value = "text-moderation-stable") `text_moderation_stable`("text-moderation-stable") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/speech/create/CreateSpeechRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/speech/create/CreateSpeechRequestModel.kt new file mode 100644 index 000000000..575f5b55b --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/speech/create/CreateSpeechRequestModel.kt @@ -0,0 +1,10 @@ +package com.xebia.functional.openai.models.ext.speech.create + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateSpeechRequestModel(val value: String) { + @SerialName(value = "tts-1") `tts_1`("tts-1"), + @SerialName(value = "tts-1-hd") `tts_1_hd`("tts-1-hd") +} diff --git a/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/transcription/create/CreateTranscriptionRequestModel.kt b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/transcription/create/CreateTranscriptionRequestModel.kt new file mode 100644 index 000000000..305e63e03 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com/xebia/functional/openai/models/ext/transcription/create/CreateTranscriptionRequestModel.kt @@ -0,0 +1,13 @@ +package com.xebia.functional.openai.models.ext.transcription.create + +import io.ktor.utils.io.charsets.* +import io.ktor.utils.io.core.* +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +enum class CreateTranscriptionRequestModel(val value: String) { + @SerialName(value = "whisper-1") `whisper_1`("whisper-1"); + + fun asByteArray(): ByteArray = this.value.toByteArray(Charsets.UTF_8) +} 
diff --git a/settings.gradle.kts b/settings.gradle.kts index 6816a46bc..aaaf34756 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -27,6 +27,9 @@ project(":xef-filesystem").projectDir = file("filesystem") include("xef-tokenizer") project(":xef-tokenizer").projectDir = file("tokenizer") +include("xef-openai-client") +project(":xef-openai-client").projectDir = file("openai-client") + include("xef-openai") project(":xef-openai").projectDir = file("openai")