From 6353fcd256d67c1c222d66ed274a9a07fb1f65b2 Mon Sep 17 00:00:00 2001
From: iusztinpaul
Date: Sat, 1 Jun 2024 19:05:10 +0300
Subject: [PATCH] fix: Default values
---
course/module-4/Makefile | 2 +-
course/module-4/finetuning/settings.py | 2 +-
course/module-5/Makefile | 9 +++++++--
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/course/module-4/Makefile b/course/module-4/Makefile
index 0f3723a..d3111d6 100644
--- a/course/module-4/Makefile
+++ b/course/module-4/Makefile
@@ -21,7 +21,7 @@ help:
list: help
-install:
+install: # Install the project dependencies.
@echo "Installing ..."
PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry install
diff --git a/course/module-4/finetuning/settings.py b/course/module-4/finetuning/settings.py
index 4a8d9c0..1abdd6f 100644
--- a/course/module-4/finetuning/settings.py
+++ b/course/module-4/finetuning/settings.py
@@ -12,7 +12,7 @@ class AppSettings(BaseSettings):
COMET_PROJECT: str = ""
DATASET_ARTIFACT_NAME: str = "posts-instruct-dataset"
- FINE_TUNED_LLM_TWIN_MODEL_TYPE: str = "decodingml/llm-twin:1.1.0"
+ FINE_TUNED_LLM_TWIN_MODEL_TYPE: str = "decodingml/llm-twin:1.0.0"
CONFIG_FILE: str = "./finetuning/config.yaml"
MODEL_SAVE_DIR: str = "./training_pipeline_output"
diff --git a/course/module-5/Makefile b/course/module-5/Makefile
index 9bab734..1f9a318 100644
--- a/course/module-5/Makefile
+++ b/course/module-5/Makefile
@@ -1,11 +1,16 @@
help:
@grep -E '^[a-zA-Z0-9 -]+:.*#' Makefile | sort | while read -r l; do printf "\033[1;32m$$(echo $$l | cut -f 1 -d':')\033[00m:$$(echo $$l | cut -f 2- -d'#')\n"; done
+install: # Install the project dependencies.
+ @echo "Installing ..."
+
+ PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring poetry install
+
deploy-llm-microservice: # Deploy the Qwak model.
- qwak models deploy realtime --model-id "llm_twin" --instance "gpu.a10.2xl" --timeout 50000 --replicas 2 --server-workers 2
+ poetry run qwak models deploy realtime --model-id "llm_twin" --instance "gpu.a10.2xl" --timeout 50000 --replicas 2 --server-workers 2
undeploy-llm-microservice: # Undeploy the Qwak model.
- qwak models undeploy --model-id "llm_twin"
+ poetry run qwak models undeploy --model-id "llm_twin"
call-inference-pipeline: # Call the inference pipeline.
poetry run python main.py
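
Note: the FINE_TUNED_LLM_TWIN_MODEL_TYPE default changed above only applies when
no value is supplied through the environment or an .env file, so pinning it to
1.0.0 still lets a deployment select another tag explicitly. A minimal sketch of
that behaviour, assuming AppSettings is built on pydantic-settings v2 (the exact
import path used in the repo may differ):

    import os

    from pydantic_settings import BaseSettings  # assumption: pydantic-settings v2


    class AppSettings(BaseSettings):
        # Defaults mirror the values patched in module-4/finetuning/settings.py.
        DATASET_ARTIFACT_NAME: str = "posts-instruct-dataset"
        FINE_TUNED_LLM_TWIN_MODEL_TYPE: str = "decodingml/llm-twin:1.0.0"
        CONFIG_FILE: str = "./finetuning/config.yaml"
        MODEL_SAVE_DIR: str = "./training_pipeline_output"


    # With nothing set in the environment, the patched default is used.
    print(AppSettings().FINE_TUNED_LLM_TWIN_MODEL_TYPE)  # decodingml/llm-twin:1.0.0

    # An environment variable of the same name still overrides the default.
    os.environ["FINE_TUNED_LLM_TWIN_MODEL_TYPE"] = "decodingml/llm-twin:1.1.0"
    print(AppSettings().FINE_TUNED_LLM_TWIN_MODEL_TYPE)  # decodingml/llm-twin:1.1.0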