Merge pull request #1854 from SciPhi-AI/feature/add-agent-content-tool
Feature/add agent content tool
emrgnt-cmplxty authored Jan 22, 2025
2 parents df93b8c + 4f0b50c commit 9d7fb9e
Showing 158 changed files with 3,830 additions and 4,967 deletions.
10 changes: 10 additions & 0 deletions js/sdk/src/v3/clients/retrieval.ts
@@ -157,6 +157,8 @@ export class RetrievalClient {
* @param taskPromptOverride Optional custom prompt to override default
* @param includeTitleIfAvailable Include document titles in responses when available
* @param conversationId ID of the conversation
* @param maxToolContextLength Maximum context length for tool replies
* @param tools List of tool configurations
* @returns
*/
@feature("retrieval.agent")
@@ -168,6 +170,8 @@ export class RetrievalClient {
taskPromptOverride?: string;
includeTitleIfAvailable?: boolean;
conversationId?: string;
maxToolContextLength?: number;
tools?: Array<Record<string, any>>;
}): Promise<any | AsyncGenerator<string, void, unknown>> {
const data: Record<string, any> = {
message: options.message,
@@ -189,6 +193,12 @@ export class RetrievalClient {
...(options.conversationId && {
conversation_id: options.conversationId,
}),
...(options.maxToolContextLength && {
max_tool_context_length: options.maxToolContextLength,
}),
...(options.tools && {
tools: options.tools,
}),
};

if (options.ragGenerationConfig && options.ragGenerationConfig.stream) {
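For context, a minimal usage sketch of the two new options from the r2r-js SDK. The server URL and the shape of each `tools` entry are assumptions; the diff only types `tools` as `Array<Record<string, any>>`.

```typescript
// Sketch only: assumes a locally running R2R server and the r2r-js client;
// the exact shape of each `tools` entry is not defined in this diff.
import { r2rClient } from "r2r-js";

const client = new r2rClient("http://localhost:7272");

async function askAgent(): Promise<void> {
  const response = await client.retrieval.agent({
    message: { role: "user", content: "Summarize the ingested documents." },
    ragGenerationConfig: { model: "openai/gpt-4o-mini", stream: false },
    // Added in this PR: cap on the context length passed back from tool calls.
    maxToolContextLength: 16000,
    // Added in this PR: per-request tool configurations (entry shape assumed).
    tools: [{ name: "content" }],
  });
  console.log(response);
}

askAgent();
```

When `ragGenerationConfig.stream` is true, the same call instead yields an async generator of string chunks, matching the method's `Promise<any | AsyncGenerator<string, void, unknown>>` return type.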
8 changes: 4 additions & 4 deletions llms.txt
@@ -763,7 +763,7 @@ generation_config = { model = "openai/gpt-4o-mini" }

[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search", "web_search"]
tools = ["local_search", "web_search"]

[database.graph_creation_settings]
entity_types = []
@@ -2266,15 +2266,15 @@ The RAG agent is configured through the `r2r.toml` file. By default, it uses loc
```toml
[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search"]
tools = ["local_search"]
```

**Enable Web Search:**

```toml
[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search", "web_search"]
tools = ["local_search", "web_search"]
```

### Using the RAG Agent
@@ -11006,7 +11006,7 @@ default_max_collections_per_user = 10

[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search"]
tools = ["local_search"]

[agent.generation_config]
model = "openai/gpt-4o"
2 changes: 1 addition & 1 deletion py/all_possible_config.toml
@@ -158,7 +158,7 @@ concurrent_request_limit = 128 # Global concurrency limit for completion request
# -------------------------------------
[agent]
system_instruction_name = "rag_agent" # The "system" message or prompt name
tool_names = ["local_search", "retrieval"] # Tools accessible to the agent
tools = ["local_search", "retrieval"] # Tools accessible to the agent

[agent.generation_config]
model = "azure/gpt-4o" # e.g. "openai/gpt-3.5-turbo", "ollama/llama3.1"
34 changes: 17 additions & 17 deletions py/cli/utils/docker_utils.py
@@ -285,23 +285,23 @@ def check_set_docker_env_vars(
or os.environ.get("PYTEST_CURRENT_TEST")
)

if not is_test:
for var in env_vars:
if value := os.environ.get(var):
warning_text = click.style("Warning:", fg="red", bold=True)

if value == env_vars[var]:
continue

prompt = (
f"{warning_text} It's only necessary to set this environment variable when connecting to an instance not managed by R2R.\n"
f"Environment variable {var} is set to '{value}'. Unset it?"
)
if click.confirm(prompt, default=True):
os.environ[var] = ""
click.echo(f"Unset {var}")
else:
click.echo(f"Kept {var}")
# if not is_test:
# for var in env_vars:
# if value := os.environ.get(var):
# warning_text = click.style("Warning:", fg="red", bold=True)

# if value == env_vars[var]:
# continue

# prompt = (
# f"{warning_text} It's only necessary to set this environment variable when connecting to an instance not managed by R2R.\n"
# f"Environment variable {var} is set to '{value}'. Unset it?"
# )
# if click.confirm(prompt, default=True):
# os.environ[var] = ""
# click.echo(f"Unset {var}")
# else:
# click.echo(f"Kept {var}")


def get_compose_files():
85 changes: 0 additions & 85 deletions py/core/__init__.py
@@ -3,11 +3,8 @@
# Keep '*' imports for enhanced development velocity
from .agent import *
from .base import *
from .database import *
from .main import *
from .parsers import *
from .pipelines import *
from .pipes import *
from .providers import *

logger = logging.getLogger()
@@ -34,45 +31,32 @@


__all__ = [
## AGENT
# Base
"R2RAgent",
"R2RStreamingAgent",
# RAG Agents
"R2RRAGAgent",
"R2RStreamingRAGAgent",
## BASE
# Base abstractions
"AsyncSyncMeta",
"syncable",
# Completion abstractions
"MessageType",
# Document abstractions
"Document",
"DocumentChunk",
"DocumentResponse",
"IngestionStatus",
"KGExtractionStatus",
"KGEnrichmentStatus",
"DocumentType",
# Embedding abstractions
"EmbeddingPurpose",
"default_embedding_prefixes",
# Exception abstractions
"R2RDocumentProcessingError",
"R2RException",
# KG abstractions
"Entity",
"KGExtraction",
"Relationship",
# LLM abstractions
"GenerationConfig",
"LLMChatCompletion",
"LLMChatCompletionChunk",
"RAGCompletion",
# Prompt abstractions
"Prompt",
# Search abstractions
"AggregateSearchResult",
"WebSearchResponse",
"GraphSearchResult",
@@ -83,99 +67,52 @@
"select_search_filters",
"SearchMode",
"HybridSearchSettings",
# User abstractions
"Token",
"TokenData",
# Vector abstractions
"Vector",
"VectorEntry",
"VectorType",
"IndexConfig",
## AGENT
# Agent abstractions
"Agent",
"AgentConfig",
"Conversation",
"Message",
"Tool",
"ToolResult",
## API
# Auth Responses
"TokenResponse",
"User",
## LOGGING
# Run Manager
"RunManager",
"manage_run",
## PARSERS
# Base parser
"AsyncParser",
## PIPELINE
# Base pipeline
"AsyncPipeline",
## PIPES
"AsyncPipe",
"AsyncState",
## PROVIDERS
# Base provider classes
"AppConfig",
"Provider",
"ProviderConfig",
# Auth provider
"AuthConfig",
"AuthProvider",
# Crypto provider
"CryptoConfig",
"CryptoProvider",
# Email provider
"EmailConfig",
"EmailProvider",
# Database providers
"LimitSettings",
"DatabaseConfig",
"DatabaseProvider",
# Embedding provider
"EmbeddingConfig",
"EmbeddingProvider",
# LLM provider
"CompletionConfig",
"CompletionProvider",
## UTILS
"RecursiveCharacterTextSplitter",
"TextSplitter",
"run_pipeline",
"to_async_generator",
"generate_id",
"increment_version",
"validate_uuid",
## MAIN
## R2R ABSTRACTIONS
"R2RProviders",
"R2RPipes",
"R2RPipelines",
"R2RAgents",
## R2R APP
"R2RApp",
## R2R APP ENTRY
# "r2r_app",
## R2R ASSEMBLY
# Builder
"R2RBuilder",
# Config
"R2RConfig",
# Factory
"R2RProviderFactory",
"R2RPipeFactory",
"R2RPipelineFactory",
"R2RAgentFactory",
## R2R SERVICES
"AuthService",
"IngestionService",
"ManagementService",
"RetrievalService",
"GraphService",
## PARSERS
# Media parsers
"AudioParser",
"BMPParser",
"DOCParser",
@@ -188,7 +125,6 @@
"PPTParser",
"PPTXParser",
"RTFParser",
# Structured parsers
"CSVParser",
"CSVParserAdvanced",
"EMLParser",
@@ -203,26 +139,9 @@
"XLSParser",
"XLSXParser",
"XLSXParserAdvanced",
# Text parsers
"MDParser",
"HTMLParser",
"TextParser",
## PIPELINES
"SearchPipeline",
"RAGPipeline",
## PIPES
"SearchPipe",
"EmbeddingPipe",
"ParsingPipe",
"QueryTransformPipe",
"RAGPipe",
"StreamingRAGPipe",
"VectorSearchPipe",
"VectorStoragePipe",
"GraphStoragePipe",
"MultiSearchPipe",
## PROVIDERS
# Auth
"SupabaseAuthProvider",
"R2RAuthProvider",
"JwtAuthProvider",
Expand All @@ -232,16 +151,12 @@
"BcryptCryptoConfig",
"NaClCryptoConfig",
"NaClCryptoProvider",
# Database
"PostgresDatabaseProvider",
# Embeddings
"LiteLLMEmbeddingProvider",
"OpenAIEmbeddingProvider",
"OllamaEmbeddingProvider",
# LLM
"OpenAICompletionProvider",
"LiteLLMCompletionProvider",
# Ingestion
"UnstructuredIngestionProvider",
"R2RIngestionProvider",
"ChunkingStrategy",
(Diffs for the remaining changed files are not shown here.)
