diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/BaseChat.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/BaseChat.kt
index 9963caf63..1933ecdb6 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/BaseChat.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/BaseChat.kt
@@ -17,9 +17,9 @@ interface BaseChat : LLM {
       "accessing maxContextLength requires model's context length to be of type MaxIoContextLength.Combined"
     )
 
-  @Suppress("OVERRIDE_DEPRECATION") fun countTokens(text: String): Int
+  fun countTokens(text: String): Int
 
-  @Suppress("OVERRIDE_DEPRECATION") fun truncateText(text: String, maxTokens: Int): String
+  fun truncateText(text: String, maxTokens: Int): String
 
-  @Suppress("OVERRIDE_DEPRECATION") fun tokensFromMessages(messages: List<Message>): Int
+  fun tokensFromMessages(messages: List<Message>): Int
 }
diff --git a/core/src/commonTest/kotlin/com/xebia/functional/xef/conversation/ConversationSpec.kt b/core/src/commonTest/kotlin/com/xebia/functional/xef/conversation/ConversationSpec.kt
index 557b1fdc5..0b5b6129e 100644
--- a/core/src/commonTest/kotlin/com/xebia/functional/xef/conversation/ConversationSpec.kt
+++ b/core/src/commonTest/kotlin/com/xebia/functional/xef/conversation/ConversationSpec.kt
@@ -63,7 +63,7 @@ class ConversationSpec :
       )
     val vectorStore = scope.store
 
-    val modelAda = TestModel(responses = messages)
+    val modelAda = TestModel(responses = messages, contextLength = MaxIoContextLength.Combined(2049))
 
    val totalTokens =
      modelAda.tokensFromMessages(