From 9d0b0bb4f0a73900583065cc41f190ad28de7888 Mon Sep 17 00:00:00 2001
From: Kurtis Massey <55586356+kurtismassey@users.noreply.github.com>
Date: Sat, 17 Aug 2024 06:42:47 +0000
Subject: [PATCH] Logging and format
---
caddy_chatbot/src/app.py | 27 ++-
caddy_chatbot/src/caddy_core/components.py | 53 ++++--
caddy_chatbot/src/caddy_core/models.py | 3 +-
.../caddy_core/services/retrieval_chain.py | 9 +-
.../src/caddy_core/services/router.py | 20 +-
.../src/caddy_core/utils/monitoring.py | 48 +++++
caddy_chatbot/src/caddy_core/utils/prompt.py | 61 ++++--
.../integrations/google_chat/structures.py | 11 +-
.../integrations/microsoft_teams/content.py | 128 +++++--------
.../integrations/microsoft_teams/responses.py | 2 +-
.../microsoft_teams/structures.py | 176 +++++++++---------
.../microsoft_teams/verification.py | 15 +-
infra/template.yaml | 1 +
tests/ragas/ragas_pipeline.py | 32 ++--
tests/test_embedding_local.py | 5 +-
utils/route_loader.py | 42 +++--
16 files changed, 363 insertions(+), 270 deletions(-)
create mode 100644 caddy_chatbot/src/caddy_core/utils/monitoring.py
diff --git a/caddy_chatbot/src/app.py b/caddy_chatbot/src/app.py
index 708c54c..1bf584b 100644
--- a/caddy_chatbot/src/app.py
+++ b/caddy_chatbot/src/app.py
@@ -3,6 +3,7 @@
from caddy_core import components as caddy
from caddy_core.services import enrolment
+from caddy_core.utils.monitoring import logger
from integrations.google_chat.structures import GoogleChat
from integrations.google_chat.verification import (
@@ -16,6 +17,7 @@
app = FastAPI(docs_url=None)
+
@app.get("/health")
def health():
return JSONResponse(status_code=status.HTTP_200_OK, content={"status": "Online"})
@@ -26,17 +28,22 @@ def google_chat_endpoint(event=Depends(verify_google_chat_request)) -> dict:
"""
Handles inbound requests from Google Chat for Caddy
"""
+ logger.info("New Google Chat Request")
google_chat = GoogleChat()
user = event["user"]["email"]
domain = user.split("@")[1]
domain_enrolled, office = enrolment.check_domain_status(domain)
if domain_enrolled is not True:
+ logger.info("Domain not enrolled")
return google_chat.responses.DOMAIN_NOT_ENROLLED
+ logger.info("Domain is enrolled")
user_enrolled, user_record = enrolment.check_user_status(user)
if user_enrolled is not True:
+ logger.info("User is not enrolled")
return google_chat.responses.USER_NOT_ENROLLED
+ logger.info("User is enrolled")
included_in_rct = enrolment.check_rct_status(office)
if included_in_rct is True:
@@ -258,24 +265,24 @@ async def microsoft_teams_endpoint(request: Request):
query = microsoft_teams.format_message(event)
caddy.temporary_teams_invoke(microsoft_teams, query, event)
case "invoke":
- match event["value"]["action"]["verb"]:
- case "proceed":
+ match event["value"]["action"]["verb"]:
+ case "proceed":
# TODO Handle Proceed Route
print("Adviser choice was to proceed")
microsoft_teams.update_card(event)
return microsoft_teams.responses.OK
- case "redacted_query":
+ case "redacted_query":
# TODO Handle edit original query
print("Adviser choice was to edit original query")
- redacted_card = microsoft_teams.messages.create_redacted_card(event)
+ redacted_card = microsoft_teams.messages.create_redacted_card(event)
microsoft_teams.update_card(event, card=redacted_card)
return microsoft_teams.responses.OK
- case "approved":
- microsoft_teams.handle_thumbs_up(event)
- return microsoft_teams.responses.OK
- case "rejected":
- microsoft_teams.handle_thumbs_down(event)
- return microsoft_teams.responses.OK
+ case "approved":
+ microsoft_teams.handle_thumbs_up(event)
+ return microsoft_teams.responses.OK
+ case "rejected":
+ microsoft_teams.handle_thumbs_down(event)
+ return microsoft_teams.responses.OK
@app.post("/microsoft-teams/supervision")
diff --git a/caddy_chatbot/src/caddy_core/components.py b/caddy_chatbot/src/caddy_core/components.py
index ceb7bc7..9792c68 100644
--- a/caddy_chatbot/src/caddy_core/components.py
+++ b/caddy_chatbot/src/caddy_core/components.py
@@ -20,6 +20,7 @@
responses_table,
users_table,
)
+from caddy_core.utils.monitoring import logger
from caddy_core.services.retrieval_chain import build_chain
from caddy_core.services import enrolment
from caddy_core.services.evaluation import execute_optional_modules
@@ -47,6 +48,7 @@ def rct_survey_reminder(event, user_record, chat_client):
def handle_message(caddy_message, chat_client):
+ logger.info("Running message handler")
module_values, survey_complete = check_existing_call(caddy_message)
if survey_complete is True:
@@ -105,10 +107,12 @@ def remove_role_played_responses(response: str) -> str:
"""
adviser_index = response.find("Adviser: ")
if adviser_index != -1:
+ logger.info("Removing role played response")
return True, response[:adviser_index].strip()
adviser_index = response.find("Advisor: ")
if adviser_index != -1:
+ logger.info("Removing role played response")
return True, response[:adviser_index].strip()
return False, response.strip()
@@ -128,12 +132,12 @@ def format_chat_history(user_messages: List) -> List:
for message in user_messages:
human = message.get("llmPrompt")
ai = message.get("llmAnswer")
-
+
if human and ai:
history_langchain_format.append((human, ai))
elif human:
history_langchain_format.append((human, ""))
-
+
return history_langchain_format
@@ -150,9 +154,14 @@ def get_chat_history(message: UserMessage) -> List:
response = responses_table.query(
KeyConditionExpression=Key("threadId").eq(message.thread_id),
)
-
- sorted_items = sorted(response["Items"], key=lambda x: x.get("messageReceivedTimestamp", x.get("llmPromptTimestamp", "")))
-
+
+ sorted_items = sorted(
+ response["Items"],
+ key=lambda x: x.get(
+ "messageReceivedTimestamp", x.get("llmPromptTimestamp", "")
+ ),
+ )
+
history = format_chat_history(sorted_items)
return history
@@ -220,10 +229,8 @@ def store_message(message: UserMessage):
def store_response(response: LlmResponse):
responses_table.update_item(
- Key={
- "threadId": str(response.thread_id)
- },
- UpdateExpression="set responseId = :rId, llmAnswer = :la, llmResponseJSon = :lrj, llmPromptTimestamp = :lpt, llmResponseTimestamp = :lrt, route = :route",
+ Key={"threadId": str(response.thread_id)},
+ UpdateExpression="set responseId = :rId, llmAnswer = :la, llmResponseJSon = :lrj, llmPromptTimestamp = :lpt, llmResponseTimestamp = :lrt, route = :route, context = :context",
ExpressionAttributeValues={
":rId": response.response_id,
":la": response.llm_answer,
@@ -231,6 +238,7 @@ def store_response(response: LlmResponse):
":lpt": str(response.llm_prompt_timestamp),
":lrt": str(response.llm_response_timestamp),
":route": response.route,
+ ":context": response.context,
},
)
@@ -340,7 +348,7 @@ def check_existing_call(caddy_message) -> Tuple[Dict[str, Any], bool]:
def send_to_llm(caddy_query: UserMessage, chat_client):
query = caddy_query.message
-
+
domain = caddy_query.user_email.split("@")[1]
chat_history = get_chat_history(caddy_query)
@@ -493,7 +501,8 @@ def send_to_llm(caddy_query: UserMessage, chat_client):
llm_prompt_timestamp=ai_prompt_timestamp,
llm_response_json=json.dumps(response_card),
llm_response_timestamp=ai_response_timestamp,
- route=route or "no_route"
+ route=route or "no_route",
+ context=[doc_without_embeddings(doc) for doc in caddy_response["context"]],
)
store_response(llm_response)
@@ -543,6 +552,12 @@ def send_to_llm(caddy_query: UserMessage, chat_client):
store_approver_received_timestamp(supervision_event)
+def doc_without_embeddings(doc):
+ doc_dict = doc.dict()
+ doc_dict.pop("state", None)
+ return doc_dict
+
+
def store_approver_received_timestamp(event: SupervisionEvent):
responses_table.update_item(
Key={"threadId": event.thread_id},
@@ -567,6 +582,7 @@ def store_approver_event(thread_id: str, approval_event: ApprovalEvent):
ReturnValues="UPDATED_NEW",
)
+
def temporary_teams_invoke(chat_client, query, event):
"""
Temporary solution for Teams integration
@@ -591,11 +607,16 @@ def temporary_teams_invoke(chat_client, query, event):
chain, ai_prompt_timestamp = build_chain(CADDY_PROMPT)
- caddy_response = chain.invoke({
- "input": query,
- "chat_history": [],
- })
+ caddy_response = chain.invoke(
+ {
+ "input": query,
+ "chat_history": [],
+ }
+ )
_, caddy_response["answer"] = remove_role_played_responses(caddy_response["answer"])
- chat_client.send_adviser_card(event, card=chat_client.messages.generate_response_card(caddy_response["answer"]))
\ No newline at end of file
+ chat_client.send_adviser_card(
+ event,
+ card=chat_client.messages.generate_response_card(caddy_response["answer"]),
+ )
diff --git a/caddy_chatbot/src/caddy_core/models.py b/caddy_chatbot/src/caddy_core/models.py
index d435e68..7c79f8b 100644
--- a/caddy_chatbot/src/caddy_core/models.py
+++ b/caddy_chatbot/src/caddy_core/models.py
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Union, List, Any
from datetime import datetime
import uuid
import pydantic
@@ -36,6 +36,7 @@ class LlmResponse(pydantic.BaseModel):
llm_prompt_timestamp: datetime
llm_response_timestamp: datetime
route: str
+ context: Union[List[Any], None]
class SupervisionEvent(pydantic.BaseModel):
diff --git a/caddy_chatbot/src/caddy_core/services/retrieval_chain.py b/caddy_chatbot/src/caddy_core/services/retrieval_chain.py
index 981662d..e4b2d05 100644
--- a/caddy_chatbot/src/caddy_core/services/retrieval_chain.py
+++ b/caddy_chatbot/src/caddy_core/services/retrieval_chain.py
@@ -18,6 +18,8 @@
from opensearchpy import RequestsHttpConnection
from requests_aws4auth import AWS4Auth
+from caddy_core.utils.monitoring import logger
+
import re
import os
from datetime import datetime
@@ -62,6 +64,7 @@ def find_most_recent_caddy_vector_index():
# Initialize most_recent_index with the original index name as a fallback
most_recent_index = opensearch_index
+    logger.info(f"Index initialised with: {opensearch_index}")
# Pattern to match indexes of interest
pattern = re.compile(opensearch_index + r"_(\d{8})$")
@@ -70,6 +73,7 @@ def find_most_recent_caddy_vector_index():
index_list = client.indices.get("*")
most_recent_date = None
+ logger.info(f"Checking through {len(index_list)} indexes")
for index_name in index_list:
match = pattern.match(index_name)
if match:
@@ -77,14 +81,17 @@ def find_most_recent_caddy_vector_index():
extracted_date_str = match.group(1)
try:
extracted_date = datetime.strptime(extracted_date_str, "%Y%m%d")
+ logger.info(f"Date extracted from index: {extracted_date}")
# Update most recent date and index name if this index is more recent
if most_recent_date is None or extracted_date > most_recent_date:
+ logger.info(f"Setting as most recent date: {extracted_date}")
most_recent_date = extracted_date
most_recent_index = index_name
except ValueError:
# If the date is not valid, ignore this index
continue
+ logger.info(f"Most recent index is: {most_recent_index}")
return most_recent_index
@@ -142,7 +149,7 @@ def build_chain(CADDY_PROMPT):
)
llm = BedrockChat(
- model_id=os.environ.get("LLM"),
+ model_id=os.getenv("LLM"),
region_name=alternate_region,
model_kwargs={"temperature": 0.3, "top_k": 5, "max_tokens": 2000},
)
diff --git a/caddy_chatbot/src/caddy_core/services/router.py b/caddy_chatbot/src/caddy_core/services/router.py
index 10b390f..5c9edb7 100644
--- a/caddy_chatbot/src/caddy_core/services/router.py
+++ b/caddy_chatbot/src/caddy_core/services/router.py
@@ -1,7 +1,8 @@
-import os
+import os
import boto3
from semantic_router import Route, RouteLayer
from semantic_router.encoders import BedrockEncoder
+from caddy_core.utils.monitoring import logger
session = boto3.Session()
credentials = session.get_credentials()
@@ -12,23 +13,24 @@
region="eu-west-3",
)
-dynamodb = boto3.resource('dynamodb')
+dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.getenv("ROUTES_TABLE_NAME"))
+
def get_routes_dynamically():
+ logger.info("Fetching routes")
response = table.scan()
routes = []
- for item in response['Items']:
- utterances = item['utterances']
+ for item in response["Items"]:
+ utterances = item["utterances"]
if isinstance(utterances[0], list):
utterances = utterances[0]
- route = Route(
- name=item['name'],
- utterances=utterances
- )
+ route = Route(name=item["name"], utterances=utterances)
routes.append(route)
+ logger.info(f"Fetched {len(routes)} routes")
return routes
+
routes = get_routes_dynamically()
-get_route = RouteLayer(encoder=embeddings, routes=routes)
\ No newline at end of file
+get_route = RouteLayer(encoder=embeddings, routes=routes)
diff --git a/caddy_chatbot/src/caddy_core/utils/monitoring.py b/caddy_chatbot/src/caddy_core/utils/monitoring.py
new file mode 100644
index 0000000..df9f03b
--- /dev/null
+++ b/caddy_chatbot/src/caddy_core/utils/monitoring.py
@@ -0,0 +1,48 @@
+import logging
+import sys
+
+
+class Colours:
+ RESET = "\033[0m"
+ BOLD = "\033[1m"
+ BLACK = "\033[30m"
+ RED = "\033[31m"
+ GREEN = "\033[32m"
+ YELLOW = "\033[33m"
+ BLUE = "\033[34m"
+ MAGENTA = "\033[35m"
+ CYAN = "\033[36m"
+ WHITE = "\033[37m"
+
+
+class ColourFormatter(logging.Formatter):
+ COLOURS = {
+ "DEBUG": Colours.BLUE,
+ "INFO": Colours.MAGENTA,
+ "WARNING": Colours.YELLOW,
+ "ERROR": Colours.RED,
+ "CRITICAL": Colours.BOLD + Colours.RED,
+ }
+
+ def format(self, record):
+ log_message = super().format(record)
+ return f"{self.COLOURS.get(record.levelname, '')}{log_message}{Colours.RESET}"
+
+
+def setup_logger(name, level=logging.INFO):
+ """
+ Function to setup a colour-coded logger
+ """
+ logger = logging.getLogger(name)
+ logger.setLevel(level)
+
+ formatter = ColourFormatter("%(name)s | %(asctime)s | %(levelname)s | %(message)s")
+
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ return logger
+
+
+logger = setup_logger("CADDY")
diff --git a/caddy_chatbot/src/caddy_core/utils/prompt.py b/caddy_chatbot/src/caddy_core/utils/prompt.py
index edbd03a..1afa96f 100644
--- a/caddy_chatbot/src/caddy_core/utils/prompt.py
+++ b/caddy_chatbot/src/caddy_core/utils/prompt.py
@@ -1,33 +1,60 @@
import os
import boto3
+from botocore.exceptions import ClientError
+from datetime import datetime, timezone, timedelta
+
+from caddy_core.utils.monitoring import logger
from caddy_core.services.router import get_route
+
def get_prompt(prompt_name):
- dynamodb = boto3.resource('dynamodb')
- table = dynamodb.Table(os.getenv('PROMPTS_TABLE_NAME'))
-
- response = table.get_item(
- Key={
- 'PromptName': prompt_name
- }
- )
- return response['Item']['Prompt'] if 'Item' in response else None
+ dynamodb = boto3.resource("dynamodb")
+ table = dynamodb.Table(os.getenv("PROMPTS_TABLE_NAME"))
+
+ response = table.get_item(Key={"PromptName": prompt_name})
+ logger.info(f"Fetched prompt: {prompt_name}")
+ return response["Item"]["Prompt"] if "Item" in response else None
def retrieve_route_specific_augmentation(query):
refresh_session_token()
route = get_route(query).name
-
+ logger.info(f"Route returned: {route}")
+
prompt_name = f"{route.upper()}_PROMPT"
route_specific_augmentation = get_prompt(prompt_name)
-
+
if route_specific_augmentation is None:
- route_specific_augmentation = get_prompt('FALLBACK_PROMPT')
-
+ logger.info("Route not found, using fallback prompt")
+ route_specific_augmentation = get_prompt("FALLBACK_PROMPT")
+
return route_specific_augmentation, route
+
def refresh_session_token():
- session = boto3.Session()
- credentials = session.get_credentials()
- if credentials.token is not None:
- os.environ["AWS_SESSION_TOKEN"] = credentials.token
\ No newline at end of file
+ expiration_timestamp = os.environ.get("AWS_CREDENTIAL_EXPIRATION")
+ if expiration_timestamp:
+ expiration = datetime.fromisoformat(expiration_timestamp)
+ if datetime.now(timezone.utc) < expiration - timedelta(minutes=5):
+ logger.info("Credentials still valid, no refresh required")
+ return
+
+ try:
+ sts_client = boto3.client("sts")
+ temporary_credentials = sts_client.get_session_token(DurationSeconds=3600)
+
+ credentials = temporary_credentials["Credentials"]
+
+ os.environ["AWS_ACCESS_KEY_ID"] = credentials["AccessKeyId"]
+ os.environ["AWS_SECRET_ACCESS_KEY"] = credentials["SecretAccessKey"]
+ os.environ["AWS_SESSION_TOKEN"] = credentials["SessionToken"]
+
+ expiration_iso = credentials["Expiration"].astimezone(timezone.utc).isoformat()
+ os.environ["AWS_CREDENTIAL_EXPIRATION"] = expiration_iso
+
+ logger.info(
+ f"Refreshed credentials, new expiry time: {os.environ['AWS_CREDENTIAL_EXPIRATION']}"
+ )
+ except ClientError as e:
+ logger.error(f"Failed to refresh credentials: {e}")
+ raise
diff --git a/caddy_chatbot/src/integrations/google_chat/structures.py b/caddy_chatbot/src/integrations/google_chat/structures.py
index b0372db..2137c3c 100644
--- a/caddy_chatbot/src/integrations/google_chat/structures.py
+++ b/caddy_chatbot/src/integrations/google_chat/structures.py
@@ -604,7 +604,9 @@ def create_card(self, llm_response) -> Dict:
reference_links_section = {"header": "Reference links", "widgets": []}
- urls = re.findall(r"[(?:SOURCE_URL:)?(http[s]?://[^>]+)]", llm_response)
+ urls = re.findall(
+ r"[(?:SOURCE_URL:)?(http[s]?://[^>]+)]", llm_response
+ )
processed_urls = []
ref = 0
@@ -626,7 +628,8 @@ def create_card(self, llm_response) -> Dict:
f"[{url}]", f'[{ref} - {resource}]'
)
llm_response = llm_response.replace(
- f"[SOURCE_URL:{url}]", f'[{ref} - {resource}]'
+ f"[SOURCE_URL:{url}]",
+ f'[{ref} - {resource}]',
)
reference_link = {
@@ -874,8 +877,8 @@ def create_approved_card(
Returns:
Supervisor approved card
"""
- card["cardsV2"][0]["card"]["sections"].insert(0,
- self.responses.approval_json_widget(approver, supervisor_notes)
+ card["cardsV2"][0]["card"]["sections"].insert(
+ 0, self.responses.approval_json_widget(approver, supervisor_notes)
)
return card
diff --git a/caddy_chatbot/src/integrations/microsoft_teams/content.py b/caddy_chatbot/src/integrations/microsoft_teams/content.py
index 25c1c50..3cd31e0 100644
--- a/caddy_chatbot/src/integrations/microsoft_teams/content.py
+++ b/caddy_chatbot/src/integrations/microsoft_teams/content.py
@@ -1,17 +1,21 @@
from typing import List, Dict
import re
-CADDY_PROCESSING = [ {
- "type": "Container",
- "items": [ {
- "type": "TextBlock",
- "text": f"🦉 Processing request...",
- "weight": "bolder",
- "size": "medium" },
- ]
- },
+CADDY_PROCESSING = [
+ {
+ "type": "Container",
+ "items": [
+ {
+ "type": "TextBlock",
+                "text": "🦉 Processing request...",
+ "weight": "bolder",
+ "size": "medium",
+ },
+ ],
+ },
]
+
def create_pii_detected_card(query: str) -> List[Dict]:
"""
Takes a message query and returns a PII Detected field with optional redaction input
@@ -24,24 +28,24 @@ def create_pii_detected_card(query: str) -> List[Dict]:
"type": "TextRun",
"text": "PII Detected",
"color": "attention",
- "weight": "bolder"
+ "weight": "bolder",
},
{
"type": "TextRun",
"text": " Please ensure all queries to Caddy are anonymised.",
- "italic": True
- }
- ]
+ "italic": True,
+ },
+ ],
},
{
"type": "RichTextBlock",
"id": "buttonText",
"inlines": [
{
- "type": "TextRun",
- "text": "Choose whether to proceed anyway or edit your original query"
+ "type": "TextRun",
+ "text": "Choose whether to proceed anyway or edit your original query",
}
- ]
+ ],
},
{
"type": "ActionSet",
@@ -51,39 +55,21 @@ def create_pii_detected_card(query: str) -> List[Dict]:
"type": "Action.Execute",
"title": "Proceed without redaction",
"verb": "proceed",
- "data": {
- "action": "proceed"
- }
+ "data": {"action": "proceed"},
},
{
"type": "Action.ToggleVisibility",
"title": "Edit original query",
"targetElements": [
- {
- "elementId": "queryText",
- "isVisible": True
- },
- {
- "elementId": "redactedQuerySubmission",
- "isVisible": True
- },
- {
- "elementId": "redactionButtons",
- "isVisible": False
- },
- {
- "elementId": "buttonText",
- "isVisible": False
- }
+ {"elementId": "queryText", "isVisible": True},
+ {"elementId": "redactedQuerySubmission", "isVisible": True},
+ {"elementId": "redactionButtons", "isVisible": False},
+ {"elementId": "buttonText", "isVisible": False},
],
- }
- ]
- },{
- "type": "Input.Text",
- "id": "queryText",
- "isVisible": False,
- "value": query
+ },
+ ],
},
+ {"type": "Input.Text", "id": "queryText", "isVisible": False, "value": query},
{
"type": "ActionSet",
"id": "redactedQuerySubmission",
@@ -93,15 +79,14 @@ def create_pii_detected_card(query: str) -> List[Dict]:
"type": "Action.Execute",
"title": "Submit Redaction",
"verb": "redacted_query",
- "data": {
- "action": "redacted_query"
- }
+ "data": {"action": "redacted_query"},
},
- ]
- }
+ ],
+ },
]
return PII_DETECTED
+
def create_redacted_card(event) -> List[Dict]:
"""
Takes in a redaction event and created a redacted query card
@@ -115,34 +100,22 @@ def create_redacted_card(event) -> List[Dict]:
"type": "TextRun",
"text": "Query redacted: ",
"color": "good",
- "weight": "bolder"
+ "weight": "bolder",
},
- {
- "type": "TextRun",
- "text": redacted_query,
- "italic": True
- }
- ]
+ {"type": "TextRun", "text": redacted_query, "italic": True},
+ ],
}
]
return REDACTED
+
def generate_response_card(llm_response):
"""
Creates a Teams Adaptive card given a Caddy response
"""
caddy_response = [
- {
- "type": "TextBlock",
- "text": llm_response,
- "wrap": True
- },
- {
- "type": "ActionSet",
- "id": "referenceLinks",
- "actions": [
- ]
- },
+ {"type": "TextBlock", "text": llm_response, "wrap": True},
+ {"type": "ActionSet", "id": "referenceLinks", "actions": []},
{
"type": "ActionSet",
"id": "approvalButtons",
@@ -151,20 +124,16 @@ def generate_response_card(llm_response):
"type": "Action.Execute",
"title": "👍",
"verb": "approved",
- "data": {
- "action": "approved"
- }
+ "data": {"action": "approved"},
},
{
"type": "Action.Execute",
"title": "👎",
"verb": "rejected",
- "data": {
- "action": "rejected"
- }
+ "data": {"action": "rejected"},
},
- ]
- }
+ ],
+ },
]
pattern = r"[(?:SOURCE_URL:)?(http[s]?://[^>]+)]"
@@ -187,24 +156,25 @@ def generate_response_card(llm_response):
ref = ref + 1
llm_response = llm_response.replace(
- f"[{url}]", f'[{ref} - {resource}]({url})'
+ f"[{url}]", f"[{ref} - {resource}]({url})"
)
llm_response = llm_response.replace(
- f"[SOURCE_URL:{url}]", f'[{ref} - {resource}]({url})'
+ f"[SOURCE_URL:{url}]", f"[{ref} - {resource}]({url})"
)
reference_link = {
"type": "Action.OpenUrl",
- "title": f'{ref} - {url}',
- "url": url
+ "title": f"{ref} - {url}",
+ "url": url,
}
caddy_response[1]["actions"].append(reference_link)
processed_urls.append(url)
-
     llm_response = llm_response.replace("<b>", "**").replace("</b>", "**")
-    llm_response = llm_response.replace('<i>', "_").replace("</i>", "_")
+    llm_response = llm_response.replace("<i>", "_").replace(
+        "</i>", "_"
+    )
caddy_response[0]["text"] = llm_response
return caddy_response
diff --git a/caddy_chatbot/src/integrations/microsoft_teams/responses.py b/caddy_chatbot/src/integrations/microsoft_teams/responses.py
index b8e2e8c..e26892d 100644
--- a/caddy_chatbot/src/integrations/microsoft_teams/responses.py
+++ b/caddy_chatbot/src/integrations/microsoft_teams/responses.py
@@ -4,4 +4,4 @@
# --- Status Responses --- #
NO_CONTENT = Response(status_code=status.HTTP_204_NO_CONTENT)
ACCEPTED = Response(status_code=status.HTTP_202_ACCEPTED)
-OK = Response(status_code=status.HTTP_200_OK)
\ No newline at end of file
+OK = Response(status_code=status.HTTP_200_OK)
diff --git a/caddy_chatbot/src/integrations/microsoft_teams/structures.py b/caddy_chatbot/src/integrations/microsoft_teams/structures.py
index 2adc2a4..66a6edb 100644
--- a/caddy_chatbot/src/integrations/microsoft_teams/structures.py
+++ b/caddy_chatbot/src/integrations/microsoft_teams/structures.py
@@ -14,94 +14,92 @@ def __init__(self):
self.reaction_actions = {
"like": self.handle_thumbs_up,
"dislike": self.handle_thumbs_down,
- } # TODO check works in teams with the emojis
+ } # TODO check works in teams with the emojis
- def send_adviser_card(self, event, card = None):
+ def send_adviser_card(self, event, card=None):
"""
Takes an incoming request from Teams Chat and returns a given response card
"""
if card is None:
card = self.messages.CADDY_PROCESSING
-
- conversation_id = event['conversation']['id']
- activity_id = event['id']
- service_url = event['serviceUrl']
-
- response_url = f"{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}"
-
+
+ conversation_id = event["conversation"]["id"]
+ activity_id = event["id"]
+ service_url = event["serviceUrl"]
+
+ response_url = (
+ f"{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}"
+ )
+
headers = {
"Authorization": f"Bearer {self.access_token}",
- "Content-Type": "application/json"
+ "Content-Type": "application/json",
}
-
+
response_activity = {
"type": "message",
- "from": event['recipient'],
- "conversation": event['conversation'],
- "recipient": event['from'],
+ "from": event["recipient"],
+ "conversation": event["conversation"],
+ "recipient": event["from"],
"replyToId": activity_id,
"attachments": [
{
- "contentType": "application/vnd.microsoft.card.adaptive",
- "content": {
- "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
- "type": "AdaptiveCard",
- "version": "1.0",
- "body": card
+ "contentType": "application/vnd.microsoft.card.adaptive",
+ "content": {
+ "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
+ "type": "AdaptiveCard",
+ "version": "1.0",
+ "body": card,
+ },
}
- }]
+ ],
}
-
- response = requests.post(
- response_url,
- json=response_activity,
- headers=headers
- )
+
+ response = requests.post(response_url, json=response_activity, headers=headers)
print(response.json())
- def update_card(self, event, card = None):
+ def update_card(self, event, card=None):
"""
Updates an existing teams message given a card and action event
"""
if card is None:
card = self.messages.CADDY_PROCESSING
-
- conversation_id = event['conversation']['id']
- activity_id = event['id']
- service_url = event['serviceUrl']
- reply_to_id = event['replyToId']
-
- response_url = f"{service_url}/v3/conversations/{conversation_id}/activities/{reply_to_id}"
-
+
+ conversation_id = event["conversation"]["id"]
+ activity_id = event["id"]
+ service_url = event["serviceUrl"]
+ reply_to_id = event["replyToId"]
+
+ response_url = (
+ f"{service_url}/v3/conversations/{conversation_id}/activities/{reply_to_id}"
+ )
+
headers = {
"Authorization": f"Bearer {self.access_token}",
- "Content-Type": "application/json"
+ "Content-Type": "application/json",
}
-
+
response_activity = {
"type": "message",
- "from": event['recipient'],
- "conversation": event['conversation'],
- "recipient": event['from'],
+ "from": event["recipient"],
+ "conversation": event["conversation"],
+ "recipient": event["from"],
"replyToId": activity_id,
"attachments": [
{
- "contentType": "application/vnd.microsoft.card.adaptive",
- "content": {
- "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
- "type": "AdaptiveCard",
- "version": "1.0",
- "body": card
+ "contentType": "application/vnd.microsoft.card.adaptive",
+ "content": {
+ "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
+ "type": "AdaptiveCard",
+ "version": "1.0",
+ "body": card,
+ },
}
- }]
+ ],
}
-
- response = requests.put(
- response_url,
- json=response_activity,
- headers=headers
- )
+
+ response = requests.put(response_url, json=response_activity, headers=headers)
print(response.json())
@@ -124,7 +122,7 @@ def format_message(self, event):
)
return "PII Detected"
-
+
self.send_adviser_card(event)
# TODO Format Message into Caddy event
@@ -134,78 +132,82 @@ def handle_reaction_added(self, event):
"""
Handles reactions added to a message, specifically for the sueprvisor space but currently applied to all
"""
- reaction_type = event['reactionsAdded'][0]['type']
- reply_to_id = event['replyToId']
+ reaction_type = event["reactionsAdded"][0]["type"]
+ reply_to_id = event["replyToId"]
# Fetch original message or log activity based on reply_to_id if needed
- response_text = f"Reaction '{reaction_type}' added to message with ID {reply_to_id}"
+ response_text = (
+ f"Reaction '{reaction_type}' added to message with ID {reply_to_id}"
+ )
self.send_advisor_message_from_supervisor(event, response_text)
- # TODO define a send_advisor_message_from_supervisor methods
+ # TODO define a send_advisor_message_from_supervisor methods
# TODO return caddy message from supervisor channel to advisor
-
def handle_reaction_removed(self, event):
"""
Handles reactions removed from a message, currently unsure if we need this
"""
- reaction_type = event['reactionsRemoved'][0]['type']
- reply_to_id = event['replyToId']
+ reaction_type = event["reactionsRemoved"][0]["type"]
+ reply_to_id = event["replyToId"]
# Fetch original message or log activity based on reply_to_id if needed
- response_text = f"Reaction '{reaction_type}' removed from message with ID {reply_to_id}"
+ response_text = (
+ f"Reaction '{reaction_type}' removed from message with ID {reply_to_id}"
+ )
self.send_advisor_message_from_supervisor(event, response_text)
-
def handle_thumbs_up(self, event, removed=False):
"""
Handle thumbs up reaction = an approval from supervisor
"""
action = "removed" if removed else "added"
- self.send_advisor_message_from_supervisor(event, f"Message approved {action} for message with ID {event['replyToId']}", 'share')
-
+ self.send_advisor_message_from_supervisor(
+ event,
+ f"Message approved {action} for message with ID {event['replyToId']}",
+ "share",
+ )
def handle_thumbs_down(self, event, removed=False):
"""
Handle thumbs down reaction = no approval from supervisor, caddy message not sent
"""
action = "removed" if removed else "added"
- self.send_advisor_message_from_supervisor(event, f"Answer not approved {action} for message with ID {event['replyToId']}", 'donotshare')
-
+ self.send_advisor_message_from_supervisor(
+ event,
+ f"Answer not approved {action} for message with ID {event['replyToId']}",
+ "donotshare",
+ )
def send_advisor_message_from_supervisor(self, event, text, type):
"""
Sends a simple text message in response to an event.
"""
- if type == 'donotshare':
- print('No approval from supervisor')
+ if type == "donotshare":
+ print("No approval from supervisor")
+
+ conversation_id = event["conversation"]["id"]
+ service_url = event["serviceUrl"]
- conversation_id = event['conversation']['id']
- service_url = event['serviceUrl']
-
response_url = f"{service_url}/v3/conversations/{conversation_id}/activities"
-
+
headers = {
"Authorization": f"Bearer {self.access_token}",
- "Content-Type": "application/json"
+ "Content-Type": "application/json",
}
-
+
response_activity = {
"type": "message",
- "from": event['recipient'],
- "conversation": event['conversation'],
- "recipient": event['from'],
- "text": text
+ "from": event["recipient"],
+ "conversation": event["conversation"],
+ "recipient": event["from"],
+ "text": text,
}
-
- response = requests.post(
- response_url,
- json=response_activity,
- headers=headers
- )
+
+ response = requests.post(response_url, json=response_activity, headers=headers)
print(response.json())
- # TODO make this have the details from caddy and the supervisor comments
\ No newline at end of file
+ # TODO make this have the details from caddy and the supervisor comments
diff --git a/caddy_chatbot/src/integrations/microsoft_teams/verification.py b/caddy_chatbot/src/integrations/microsoft_teams/verification.py
index edcb5a0..03af34f 100644
--- a/caddy_chatbot/src/integrations/microsoft_teams/verification.py
+++ b/caddy_chatbot/src/integrations/microsoft_teams/verification.py
@@ -1,4 +1,4 @@
-import os
+import os
import requests
APP_ID = os.environ.get("MicrosoftAppId", "")
@@ -7,26 +7,23 @@
url = "https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token"
headers = {
"Host": "login.microsoftonline.com",
- "Content-Type": "application/x-www-form-urlencoded"
+ "Content-Type": "application/x-www-form-urlencoded",
}
authentication_data = {
"grant_type": "client_credentials",
"client_id": APP_ID,
"client_secret": APP_PASSWORD,
- "scope": "https://api.botframework.com/.default"
+ "scope": "https://api.botframework.com/.default",
}
+
def get_access_token():
"""
Fetches an access token using the Bot credentials
"""
- response = requests.post(
- url,
- headers=headers,
- data=authentication_data
- )
-
+ response = requests.post(url, headers=headers, data=authentication_data)
+
if response.status_code == 200:
response_data = response.json()
access_token = response_data.get("access_token")
diff --git a/infra/template.yaml b/infra/template.yaml
index afc5595..7a49141 100644
--- a/infra/template.yaml
+++ b/infra/template.yaml
@@ -337,6 +337,7 @@ Resources:
- ecr:GetAuthorizationToken
- ecr:GetDownloadUrlForLayer
- ecr:BatchGetImage
+ - sts:GetSessionToken
Resource: '*'
- PolicyName: !Sub CaddyLoggingPolicy-${StageName}
PolicyDocument:
diff --git a/tests/ragas/ragas_pipeline.py b/tests/ragas/ragas_pipeline.py
index 8287706..6bd9a10 100644
--- a/tests/ragas/ragas_pipeline.py
+++ b/tests/ragas/ragas_pipeline.py
@@ -1,7 +1,7 @@
from datasets import Dataset
from langchain_openai.chat_models import AzureChatOpenAI
from langchain_openai.embeddings import AzureOpenAIEmbeddings
-from ragas import evaluate
+from ragas import evaluate
import dotenv
from ragas.metrics import (
@@ -11,14 +11,14 @@
context_recall,
)
-from model_answers import questions, ground_truths, contexts, answers
+from model_answers import questions, ground_truths, contexts, answers
questions_mini = questions
ground_truths_mini = ground_truths
contexts_mini = contexts
answers_mini = answers
-from model_answers_bedrock import questions, ground_truths, context, answers
+from model_answers_bedrock import questions, ground_truths, context, answers
questions_bedrock = questions
ground_truths_bedrock = ground_truths
@@ -28,17 +28,22 @@
def create_ragas_dataset(questions, ground_truths, contexts, answers):
data_samples = {
- 'question': questions,
- 'answer': answers,
- 'contexts' : contexts,
- 'ground_truth': ground_truths
+ "question": questions,
+ "answer": answers,
+ "contexts": contexts,
+ "ground_truth": ground_truths,
}
dataset = Dataset.from_dict(data_samples)
return dataset
-bedrock_ragas_dataset = create_ragas_dataset(questions_bedrock, answers_bedrock, contexts_bedrock, ground_truths_bedrock)
-mini_ragas_dataset = create_ragas_dataset(questions_mini, answers_mini, contexts_mini, ground_truths_mini)
+
+bedrock_ragas_dataset = create_ragas_dataset(
+ questions_bedrock, answers_bedrock, contexts_bedrock, ground_truths_bedrock
+)
+mini_ragas_dataset = create_ragas_dataset(
+ questions_mini, answers_mini, contexts_mini, ground_truths_mini
+)
ENV = dotenv.dotenv_values()
@@ -78,16 +83,15 @@ def create_ragas_dataset(questions, ground_truths, contexts, answers):
def ragas_evaluate(dataset, model, embeddings):
result = evaluate(
dataset,
- metrics=[
- answer_relevancy
- ],
+ metrics=[answer_relevancy],
llm=model,
embeddings=embeddings,
is_async=True,
- #max_concurrent=1 # Add this line
+ # max_concurrent=1 # Add this line
)
return result
+
print(ragas_evaluate(bedrock_ragas_dataset, azure_model, azure_embeddings))
-print(ragas_evaluate(mini_ragas_dataset, azure_model, azure_embeddings))
\ No newline at end of file
+print(ragas_evaluate(mini_ragas_dataset, azure_model, azure_embeddings))
diff --git a/tests/test_embedding_local.py b/tests/test_embedding_local.py
index 3d58b7c..5653404 100644
--- a/tests/test_embedding_local.py
+++ b/tests/test_embedding_local.py
@@ -1,6 +1,7 @@
from sentence_transformers import SentenceTransformer
+
sentences = ["This is an example sentence", "Each sentence is converted"]
-model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
+model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
embeddings = model.encode(sentences)
-print(embeddings)
\ No newline at end of file
+print(embeddings)
diff --git a/utils/route_loader.py b/utils/route_loader.py
index 1efa80c..87fdabc 100644
--- a/utils/route_loader.py
+++ b/utils/route_loader.py
@@ -2,7 +2,7 @@
TABLE_NAME = "caddyRoutes"
-dynamodb = boto3.resource('dynamodb')
+dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(TABLE_NAME)
routes_data = [
@@ -17,7 +17,7 @@
"What documents do I need to provide for benefits and tax credits?",
"Can you help me with a benefits and tax credits claim issue?",
"Where can I find more information about benefits and tax credits?",
- ]
+ ],
},
{
"name": "benefits_and_universal_credit",
@@ -30,7 +30,7 @@
"How can I calculate my entitlement to Universal Credit?",
"Can you help me with a Universal Credit claim issue?",
"Where can I find more information about Universal Credit?",
- ]
+ ],
},
{
"name": "charitable_support_and_food_banks",
@@ -43,7 +43,7 @@
"How can someone find local food banks?",
"What other resources or services are available for individuals in need?",
"Are there any restrictions or limitations on the frequency of accessing food banks?",
- ]
+ ],
},
{
"name": "consumer_goods_and_services",
@@ -56,7 +56,7 @@
"What are the regulations for online shopping?",
"How can I avoid scams and fraudulent sellers?",
"Can you provide guidance on product warranties and guarantees?",
- ]
+ ],
},
{
"name": "debt",
@@ -69,7 +69,7 @@
"How can someone deal with overwhelming debt?",
"What are the rights and protections for individuals in debt?",
"Can you provide guidance on bankruptcy and insolvency?",
- ]
+ ],
},
{
"name": "education",
@@ -84,7 +84,7 @@
"What resources are available for adult education and vocational training?",
"How can my client address issues related to bullying or discrimination in educational settings?",
"Where can I find more information about educational rights and regulations?",
- ]
+ ],
},
{
"name": "employment",
@@ -99,7 +99,7 @@
"What are the rights and protections for individuals facing unfair dismissal?",
"How can someone access support for workplace health and safety issues?",
"Where can I find more information about employment rights and regulations?",
- ]
+ ],
},
{
"name": "financial_services_and_capability",
@@ -113,7 +113,7 @@
"What resources are available for financial planning and retirement?",
"How can someone protect themselves from financial scams and fraud?",
"Where can I find more information about financial services and capability?",
- ]
+ ],
},
{
"name": "gva_and_hate_crime",
@@ -126,7 +126,7 @@
"How can someone access emergency accommodation or safe houses?",
"Can you provide guidance on safety planning for victims of gender-based violence and hate crimes?",
"Where can I find more information about gender-based violence and hate crimes?",
- ]
+ ],
},
{
"name": "health_and_community_care",
@@ -147,7 +147,7 @@
"How can someone navigate the process of obtaining medical equipment or assistive devices?",
"What are the rights and protections for individuals receiving healthcare and community care services?",
"Where can I find more information about healthcare rights and regulations?",
- ]
+ ],
},
{
"name": "housing",
@@ -160,7 +160,7 @@
"What are the options for dealing with eviction?",
"Can you provide guidance on housing benefits and assistance programs?",
"Where can I find more information about housing rights and regulations?",
- ]
+ ],
},
{
"name": "immigration_and_asylum",
@@ -175,7 +175,7 @@
"What are the requirements for obtaining citizenship or permanent residency?",
"How can someone address issues with their immigration status?",
"Where can I find more information about immigration and asylum rights and regulations?",
- ]
+ ],
},
{
"name": "legal",
@@ -189,7 +189,7 @@
"What are the rights and protections for individuals in the justice system?",
"How can someone navigate the process of obtaining legal documents or records?",
"Where can I find more information about the legal system, courts, and justice system?",
- ]
+ ],
},
{
"name": "relationships_and_family",
@@ -204,7 +204,7 @@
"What resources are available for couples or families going through relationship breakdown?",
"How can someone access counseling or mediation services for family disputes?",
"Where can I find more information about relationship and family rights and regulations?",
- ]
+ ],
},
{
"name": "tax",
@@ -219,7 +219,7 @@
"What are the tax implications of owning a business?",
"How can someone navigate the process of paying taxes as an employee (PAYE)?",
"Where can I find more information about tax laws and regulations?",
- ]
+ ],
},
{
"name": "travel_and_transport",
@@ -234,7 +234,7 @@
"What are the rights and protections for passengers using public transportation?",
"How can someone address issues with delays or cancellations of trains or buses?",
"Where can I find more information about travel and transportation services?",
- ]
+ ],
},
{
"name": "utilities_and_communications",
@@ -249,14 +249,16 @@
"What are the rights and protections for consumers in the energy and communications sectors?",
"How can someone navigate the process of installing and using prepayment meters?",
"Where can I find more information about energy crisis management and resources?",
- ]
- }
+ ],
+ },
]
+
def load_routes_to_dynamodb():
for route in routes_data:
table.put_item(Item=route)
print("All routes have been loaded into the DynamoDB table.")
+
if __name__ == "__main__":
- load_routes_to_dynamodb()
\ No newline at end of file
+ load_routes_to_dynamodb()