Commit c0aea94
⬆️ LLM parameters exposed in config.json
GiulioRossetti committed Nov 21, 2024
1 parent 9f05b44 commit c0aea94
Showing 3 changed files with 14 additions and 9 deletions.
13 changes: 8 additions & 5 deletions config_files/config.json
@@ -2,14 +2,18 @@
   "servers": {
     "llm": "http://127.0.0.1:11434/v1",
     "llm_api_key": "NULL",
+    "llm_max_tokens": -1,
+    "llm_temperature": 1.5,
     "llm_v": "http://127.0.0.1:11434/v1",
     "llm_v_api_key": "NULL",
+    "llm_v_max_tokens": 300,
+    "llm_v_temperature": 0.5,
     "api": "http://127.0.0.1:5010/"
   },
   "simulation": {
     "name": "simulation",
     "client": "YClientWithPages",
-    "days": 3,
+    "days": 30,
     "slots": 24,
     "starting_agents": 180,
     "percentage_new_agents_iteration": 0.07,
@@ -42,12 +46,11 @@
   },
   "actions_likelihood": {
     "post": 0.2,
-    "image": 0,
-    "news": 0,
+    "image": 0.15,
+    "news": 0.15,
     "comment": 0.5,
     "read": 0.2,
-    "share": 0.0,
-    "reply": 0,
+    "share": 0.2,
     "search": 0.1,
     "cast": 0.0
   }
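The four new llm_* keys move generation parameters that were previously hard-coded in the client into the servers block. A minimal sketch of reading them back, assuming the file is parsed with Python's standard json module (the load_config name is illustrative, not part of the repository):

import json

def load_config(path="config_files/config.json"):
    # Parse the configuration file edited in the diff above.
    with open(path) as f:
        return json.load(f)

servers = load_config()["servers"]

# Newly exposed parameters; a max_tokens of -1 means no response-length limit.
print(servers["llm_max_tokens"], servers["llm_temperature"])      # -1 1.5
print(servers["llm_v_max_tokens"], servers["llm_v_temperature"])  # 300 0.5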
4 changes: 2 additions & 2 deletions y_client/classes/annotator.py
@@ -22,8 +22,8 @@ def __init__(self, config):
             max_consecutive_auto_reply=1,
             llm_config={
                 "config_list": self.config_list,
-                "temperature": 0.5,
-                "max_tokens": 300,
+                "temperature": config['temperature'],
+                "max_tokens": config['max_tokens'],
             },
             human_input_mode="NEVER",
         )
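Annotator.__init__ now takes temperature and max_tokens from its config argument instead of the former constants (0.5 and 300). A hedged sketch of what a caller might pass, assuming the dict is assembled from the llm_v_* keys shown above (this wiring is an assumption; it is not part of this commit):

# Hypothetical assembly of the annotator's config dict; the real caller
# may include further keys that Annotator consumed before this change.
annotator_config = {
    "temperature": config["servers"]["llm_v_temperature"],  # 0.5, the old default
    "max_tokens": config["servers"]["llm_v_max_tokens"],    # 300, the old default
}
annotator = Annotator(annotator_config)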
6 changes: 4 additions & 2 deletions y_client/classes/base_agent.py
@@ -81,6 +81,8 @@ def __init__(
                 "url": config["servers"]["llm_v"],
                 "api_key": config["servers"]["llm_v_api_key"],
                 "model": config["agents"]["llm_v_agent"],
+                "temperature": config["servers"]["llm_v_temperature"],
+                "max_tokens": config["servers"]["llm_v_max_tokens"]
             }
         self.is_page = is_page
 
@@ -157,8 +159,8 @@ def __init__(
         self.llm_config = {
             "config_list": [config_list],
             "seed": np.random.randint(0, 100000),
-            "max_tokens": -1,  # max response length, -1 no limits. Imposing limits may lead to truncated responses
-            "temperature": 1.5,
+            "max_tokens": config['servers']['llm_max_tokens'],  # max response length, -1 no limits. Imposing limits may lead to truncated responses
+            "temperature": config['servers']['llm_temperature'],
         }
 
         # add and configure the content recsys
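With both hunks applied, the agent's vision and text LLM settings are driven entirely by config["servers"]. A self-contained sketch of the resulting text-LLM structure, with config_list standing in for the model/endpoint entry that __init__ assembles earlier (the helper name is illustrative):

import numpy as np

def build_llm_config(config, config_list):
    # Mirrors the updated base_agent.py: a fresh random seed per agent,
    # with generation parameters sourced from config.json rather than constants.
    return {
        "config_list": [config_list],
        "seed": np.random.randint(0, 100000),
        # -1 disables the limit; tight limits may truncate responses
        "max_tokens": config["servers"]["llm_max_tokens"],
        "temperature": config["servers"]["llm_temperature"],
    }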
