From 55411cefad9e0dc383121ca402abc1472c7e847a Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:05:33 -0400 Subject: [PATCH 1/5] base logic --- package-lock.json | 20 +++++- package.json | 3 +- src/cli/commands/index/processRepository.ts | 74 +++++++++++++++++++++ src/cli/utils/traverseFileSystem.ts | 1 + src/types.ts | 3 + 5 files changed, 97 insertions(+), 4 deletions(-) diff --git a/package-lock.json b/package-lock.json index ebd487b..f35613e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@context-labs/autodoc", - "version": "0.0.7", + "version": "0.0.8", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@context-labs/autodoc", - "version": "0.0.7", + "version": "0.0.8", "license": "MIT", "dependencies": { "@dqbd/tiktoken": "^1.0.2", @@ -22,7 +22,8 @@ "marked": "^4.3.0", "marked-terminal": "^5.1.1", "minimatch": "^7.4.3", - "ora": "^6.2.0" + "ora": "^6.2.0", + "ts-md5": "^1.3.1" }, "bin": { "doc": "dist/index.js" @@ -5043,6 +5044,14 @@ "node": ">=8.0" } }, + "node_modules/ts-md5": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/ts-md5/-/ts-md5-1.3.1.tgz", + "integrity": "sha512-DiwiXfwvcTeZ5wCE0z+2A9EseZsztaiZtGrtSaY5JOD7ekPnR/GoIVD5gXZAlK9Na9Kvpo9Waz5rW64WKAWApg==", + "engines": { + "node": ">=12" + } + }, "node_modules/tsconfig-paths": { "version": "3.14.2", "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz", @@ -8835,6 +8844,11 @@ "is-number": "^7.0.0" } }, + "ts-md5": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/ts-md5/-/ts-md5-1.3.1.tgz", + "integrity": "sha512-DiwiXfwvcTeZ5wCE0z+2A9EseZsztaiZtGrtSaY5JOD7ekPnR/GoIVD5gXZAlK9Na9Kvpo9Waz5rW64WKAWApg==" + }, "tsconfig-paths": { "version": "3.14.2", "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz", diff --git a/package.json b/package.json index 982ed97..82f67ec 100644 --- a/package.json 
+++ b/package.json @@ -40,7 +40,8 @@ "marked": "^4.3.0", "marked-terminal": "^5.1.1", "minimatch": "^7.4.3", - "ora": "^6.2.0" + "ora": "^6.2.0", + "ts-md5": "^1.3.1" }, "devDependencies": { "@types/commander": "^2.12.2", diff --git a/src/cli/commands/index/processRepository.ts b/src/cli/commands/index/processRepository.ts index 290a054..a93f9c9 100644 --- a/src/cli/commands/index/processRepository.ts +++ b/src/cli/commands/index/processRepository.ts @@ -1,5 +1,6 @@ import fs from 'node:fs/promises'; import path from 'node:path'; +import { Md5 } from 'ts-md5'; import { OpenAIChat } from 'langchain/llms'; import { encoding_for_model } from '@dqbd/tiktoken'; import { APIRateLimit } from '../../utils/APIRateLimit.js'; @@ -69,6 +70,19 @@ export const processRepository = async ( linkHosted, }): Promise => { const content = await fs.readFile(filePath, 'utf-8'); + + //calculate the hash of the file + const newChecksum = await calculateChecksum(filePath, [content]); + + //if an existing summary.json file exists, it will check the checksums and decide if a reindex is needed + const reindex = await reindexCheck( + path.join(outputRoot, filePath), + newChecksum, + ); + if (!reindex) { + return; + } + const markdownFilePath = path.join(outputRoot, filePath); const url = githubFileUrl(repositoryUrl, inputRoot, filePath, linkHosted); const summaryPrompt = createCodeFileSummary( @@ -140,6 +154,7 @@ export const processRepository = async ( url, summary, questions, + checksum: newChecksum, }; const outputPath = getFileName(markdownFilePath, '.', '.json'); @@ -195,6 +210,16 @@ export const processRepository = async ( const contents = (await fs.readdir(folderPath)).filter( (fileName) => !shouldIgnore(fileName), ); + + //get the checksum of all the files in the folder + const newChecksum = await calculateChecksum(folderPath, contents); + + //if an existing summary.json file exists, it will check the checksums and decide if a reindex is needed + const reindex = await 
reindexCheck(folderPath, newChecksum); + if (!reindex) { + return; + } + // eslint-disable-next-line prettier/prettier const url = githubFolderUrl(repositoryUrl, inputRoot, folderPath, linkHosted); const allFiles: (FileSummary | null)[] = await Promise.all( @@ -259,6 +284,7 @@ export const processRepository = async ( folders: folders.filter(Boolean), summary, questions: '', + checksum: newChecksum, }; const outputPath = path.join(folderPath, 'summary.json'); @@ -366,3 +392,51 @@ export const processRepository = async ( */ return models; }; + +//reads all the files, and returns a checksum +async function calculateChecksum( + folderPath: string, + contents: string[], +): Promise { + const checksums: string[] = []; + for (const fileName of contents) { + const filePath = `${folderPath}/${fileName}`; + const fileData = await fs.readFile(filePath, 'utf-8'); + const checksum = Md5.hashStr(fileData); + checksums.push(checksum); + } + const concatenatedChecksum = checksums.join(''); + const finalChecksum = Md5.hashStr(concatenatedChecksum); + return finalChecksum; +} + +//checks if a summary.json file exists, and if it does, compares the checksums to see if it needs to be re-indexed or not. 
+async function reindexCheck( + fileOrFolderPath: string, + newChecksum: string, +): Promise { + let summaryExists = false; + try { + await fs.access(path.join(fileOrFolderPath, 'summary.json')); + summaryExists = true; + } catch (error) {} + + if (summaryExists) { + const fileContents = await fs.readFile( + path.join(fileOrFolderPath, 'summary.json'), + 'utf8', + ); + const fileContentsJSON = JSON.parse(fileContents); + + const oldChecksum = fileContentsJSON.checksum; + + if (oldChecksum === newChecksum) { + console.log(`Skipping ${fileOrFolderPath} because it has not changed`); + return false; + } else { + return true; + } + } + //if no summary then generate one + return true; +} diff --git a/src/cli/utils/traverseFileSystem.ts b/src/cli/utils/traverseFileSystem.ts index f2e15a7..c2afbf8 100644 --- a/src/cli/utils/traverseFileSystem.ts +++ b/src/cli/utils/traverseFileSystem.ts @@ -36,6 +36,7 @@ export const traverseFileSystem = async ( await dfs(folderPath); await processFolder?.({ + inputPath, folderName, folderPath, projectName, diff --git a/src/types.ts b/src/types.ts index eb85968..a462d41 100644 --- a/src/types.ts +++ b/src/types.ts @@ -25,6 +25,7 @@ export type FileSummary = { url: string; summary: string; questions: string; + checksum: string; }; export type ProcessFileParams = { @@ -47,9 +48,11 @@ export type FolderSummary = { folders: FolderSummary[]; summary: string; questions: string; + checksum: string; }; export type ProcessFolderParams = { + inputPath: string; folderName: string; folderPath: string; projectName: string; From 8ec3137af9f66b44708bcebbb7df8806c078624d Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:06:56 -0400 Subject: [PATCH 2/5] should be false --- src/cli/commands/init/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/commands/init/index.ts b/src/cli/commands/init/index.ts index 084073e..450ea8a 100644 --- 
a/src/cli/commands/init/index.ts +++ b/src/cli/commands/init/index.ts @@ -47,7 +47,7 @@ export const makeConfigTemplate = ( chatPrompt: '', contentType: 'code', targetAudience: 'smart developer', - linkHosted: true, + linkHosted: false, }; }; From 7cd76b879b3048cdff179d266a2ddf52817323e1 Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:47:18 -0400 Subject: [PATCH 3/5] reindex and run checks --- .autodoc/docs/data/docstore.json | 2 +- .autodoc/docs/data/hnswlib.index | Bin 195160 -> 195092 bytes .../json/src/cli/commands/estimate/index.json | 9 +- .../src/cli/commands/estimate/summary.json | 18 +- .../commands/index/convertJsonToMarkdown.json | 9 +- .../cli/commands/index/createVectorStore.json | 9 +- .../json/src/cli/commands/index/index.json | 9 +- .../cli/commands/index/processRepository.json | 9 +- .../json/src/cli/commands/index/prompts.json | 9 +- .../json/src/cli/commands/index/summary.json | 54 ++-- .../json/src/cli/commands/init/index.json | 9 +- .../json/src/cli/commands/init/summary.json | 18 +- .../cli/commands/query/createChatChain.json | 9 +- .../json/src/cli/commands/query/index.json | 9 +- .../json/src/cli/commands/query/summary.json | 27 +- .../docs/json/src/cli/commands/summary.json | 144 +++++---- .../json/src/cli/commands/user/index.json | 9 +- .../json/src/cli/commands/user/summary.json | 18 +- .autodoc/docs/json/src/cli/spinner.json | 9 +- .autodoc/docs/json/src/cli/summary.json | 220 +++++++------- .../docs/json/src/cli/utils/APIRateLimit.json | 9 +- .../docs/json/src/cli/utils/FileUtil.json | 9 +- .autodoc/docs/json/src/cli/utils/LLMUtil.json | 9 +- .../docs/json/src/cli/utils/WaitUtil.json | 9 +- .autodoc/docs/json/src/cli/utils/summary.json | 58 ++-- .../src/cli/utils/traverseFileSystem.json | 9 +- .autodoc/docs/json/src/const.json | 9 +- .autodoc/docs/json/src/index.json | 9 +- .autodoc/docs/json/src/langchain/hnswlib.json | 9 +- .autodoc/docs/json/src/langchain/summary.json 
| 18 +- .autodoc/docs/json/src/summary.json | 274 ++++++++++-------- .autodoc/docs/json/src/types.json | 9 +- .autodoc/docs/json/tsconfig.json | 5 +- .../src/cli/commands/estimate/index.md | 36 +-- .../src/cli/commands/estimate/summary.md | 24 +- .../commands/index/convertJsonToMarkdown.md | 70 ++--- .../cli/commands/index/createVectorStore.md | 26 +- .../markdown/src/cli/commands/index/index.md | 63 ++-- .../cli/commands/index/processRepository.md | 48 +-- .../src/cli/commands/index/prompts.md | 36 ++- .../src/cli/commands/index/summary.md | 46 ++- .../markdown/src/cli/commands/init/index.md | 46 +-- .../markdown/src/cli/commands/init/summary.md | 31 +- .../src/cli/commands/query/createChatChain.md | 58 ++-- .../markdown/src/cli/commands/query/index.md | 43 ++- .../src/cli/commands/query/summary.md | 44 +-- .../docs/markdown/src/cli/commands/summary.md | 98 +++++-- .../markdown/src/cli/commands/user/index.md | 43 ++- .../markdown/src/cli/commands/user/summary.md | 45 ++- .autodoc/docs/markdown/src/cli/spinner.md | 63 ++-- .autodoc/docs/markdown/src/cli/summary.md | 51 ++-- .../markdown/src/cli/utils/APIRateLimit.md | 34 +-- .../docs/markdown/src/cli/utils/FileUtil.md | 48 +-- .../docs/markdown/src/cli/utils/LLMUtil.md | 54 ++-- .../docs/markdown/src/cli/utils/WaitUtil.md | 51 ++-- .../docs/markdown/src/cli/utils/summary.md | 65 +++-- .../src/cli/utils/traverseFileSystem.md | 61 ++-- .autodoc/docs/markdown/src/const.md | 32 +- .autodoc/docs/markdown/src/index.md | 31 +- .../docs/markdown/src/langchain/hnswlib.md | 40 +-- .../docs/markdown/src/langchain/summary.md | 28 +- .autodoc/docs/markdown/src/summary.md | 48 +-- .autodoc/docs/markdown/src/types.md | 37 ++- .autodoc/docs/markdown/tsconfig.md | 39 +-- .../commands/index/convertJsonToMarkdown.ts | 4 +- src/cli/commands/index/processRepository.ts | 37 ++- 66 files changed, 1355 insertions(+), 1161 deletions(-) diff --git a/.autodoc/docs/data/docstore.json b/.autodoc/docs/data/docstore.json index 7d5f8cd..dc401f7 
100644 --- a/.autodoc/docs/data/docstore.json +++ b/.autodoc/docs/data/docstore.json @@ -1 +1 @@ -[["0",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts)\n\nThe `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.\n## Questions: \n 1. **What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. **What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. 
In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/estimate/index.md"}}],["1",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate)\n\nThe `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe main steps involved in the `estimate` function are:\n\n1. Setting the output path for the JSON files generated during the process.\n2. Updating the spinner text to display \"Estimating cost...\".\n3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stopping the spinner once the dry run is complete.\n5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Displaying the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository.\n\nBy providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/estimate/summary.md"}}],["2",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts)\n\nThe `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. 
It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.\n## Questions: \n 1. 
**What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/convertJsonToMarkdown.md"}}],["3",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts)\n\nThe code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. 
It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.\n## Questions: \n 1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. 
If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/createVectorStore.md"}}],["4",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts)\n\nThe code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. 
This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n## Questions: \n 1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. 
**What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/index.md"}}],["5",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts)\n\nThe `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. 
It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.\n## Questions: \n 1. 
**Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. **Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/processRepository.md"}}],["6",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts)\n\nThe code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. 
These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. 
For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.\n## Questions: \n 1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. **Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/prompts.md"}}],["7",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index)\n\nThe code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. 
It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\nFor example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory.\n\nThe `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility.\n\nThe `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n\nIn summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating 
documentation in various formats, and creating vector files for the documentation. This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/index/summary.md"}}],["8",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts)\n\nThis code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. 
A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.\n## Questions: \n 1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/init/index.md"}}],["9",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init)\n\nThe `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. 
A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/init/summary.md"}}],["10",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts)\n\nThis code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. 
The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.\n## Questions: \n 1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. 
Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/query/createChatChain.md"}}],["11",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts)\n\nThis code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. 
The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.\n## Questions: \n 1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. **How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. 
It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/query/index.md"}}],["12",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query)\n\nThe `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nIn `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input.\n\nExample usage of `makeChain`:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nIn `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. 
This chain object is responsible for generating responses based on the user's input.\n\nThe main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nExample usage of the chatbot interface:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/query/summary.md"}}],["13",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands)\n\nThe code in the `src/cli/commands` folder is responsible for handling various command-line tasks in the Autodoc project. It contains several subfolders, each dedicated to a specific command or functionality, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations.\n\nFor instance, the `estimate` subfolder contains a function that allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input and performs a dry run of the `processRepository` function. It then calculates the total estimated cost and displays it to the user. 
This helps users make informed decisions about whether to proceed with the indexing process or not.\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n // ...configuration options...\n};\n\nestimate(config);\n```\n\nThe `index` subfolder contains code for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n // ...configuration options...\n};\n\nautodoc.index(config);\n```\n\nThe `init` subfolder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values.\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThe `query` subfolder contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThe `user` subfolder is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs).\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIn summary, the code in the `src/cli/commands` folder plays a crucial role in the Autodoc project by providing various command-line functionalities, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. These functionalities help developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/summary.md"}}],["14",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts)\n\nThis code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. 
It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n## Questions: \n 1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/user/index.md"}}],["15",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user)\n\nThe `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\n```typescript\nfunction makeConfigTemplate(llms: string[]): ConfigTemplate {\n // ...\n}\n```\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. 
The configuration object is then saved to the user configuration file in JSON format.\n\n```typescript\nconst configTemplate = makeConfigTemplate(selectedLLMs);\nawait fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2));\n```\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n\nThis code is essential for setting up the user's environment and preferences for the Autodoc project. It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query.\n\nFor example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage.\n\nIn summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs.","metadata":{"source":".autodoc/docs/markdown/src/cli/commands/user/summary.md"}}],["16",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/spinner.ts)\n\nThis code provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. 
The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.\n## Questions: \n 1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the terminal, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. 
**What are the different states of the spinner and how are they updated?**\n\n The spinner can have different states such as spinning, stopped, failed, succeeded, and displaying information. The functions `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` are used to update the spinner's state and text accordingly.\n\n3. **How does the `updateSpinnerText` function work and when should it be used?**\n\n The `updateSpinnerText` function updates the spinner's text with the provided message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. This function should be used when you want to change the spinner's text while it is spinning or start it with a new message.","metadata":{"source":".autodoc/docs/markdown/src/cli/spinner.md"}}],["17",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli)\n\nThe `spinner.ts` file in the `.autodoc/docs/json/src/cli` folder provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. 
For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.","metadata":{"source":".autodoc/docs/markdown/src/cli/summary.md"}}],["18",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts)\n\nThe `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi<T>(apiFunction: () => Promise<T>): Promise<T>`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails.\n\nWhen `callApi` is called, it adds the `executeCall` function to the `queue`. 
The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. If there are available slots, it calls the `dequeueAndExecute` method.\n\nThe `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id) {\n // Simulate an API call\n return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000));\n}\n\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\n\n// Usage\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call.\n## Questions: \n 1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. 
The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method work?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/APIRateLimit.md"}}],["19",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts)\n\nThis code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders.\n\n1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example:\n\n ```javascript\n getFileName(\"example.txt\"); // returns \"example.md\"\n getFileName(\"example\"); // returns \"example.md\"\n ```\n\n2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. 
For example:\n\n ```javascript\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", false); // returns \"https://github.com/user/repo/blob/master/example.md\"\n ```\n\n3. `githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example:\n\n ```javascript\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", true); // returns \"https://github.com/user/repo/folder\"\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", false); // returns \"https://github.com/user/repo/tree/master/folder\"\n ```\n\nThese utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure.\n## Questions: \n 1. **What does the `getFileName` function do?**\n\n The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists.\n\n2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. 
If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository.\n\n3. **Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/FileUtil.md"}}],["20",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts)\n\nThis code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0.\n\n```javascript\n{\n name: LLMModels.GPT3,\n inputCostPer1KTokens: 0.002,\n outputCostPer1KTokens: 0.002,\n maxLength: 3050,\n llm: new OpenAIChat({ ... }),\n inputTokens: 0,\n outputTokens: 0,\n succeeded: 0,\n failed: 0,\n total: 0,\n}\n```\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. 
It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost for all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.\n## Questions: \n 1. **Question**: What is the purpose of the `models` object and what are the different models available?\n **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model.\n\n2. **Question**: How does the `printModelDetails` function work and what information does it display?\n **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table.\n\n3. 
**Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost?\n **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/LLMUtil.md"}}],["21",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts)\n\nThe code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed.\n\nExample usage:\n\n```javascript\n// Wait for 2 seconds and then log \"Hello, world!\"\nwait(2000, \"Hello, world!\").then(console.log);\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. If the function does not return `true` after 200 checks, the promise is rejected.\n\nThe function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. 
If the function has been called 200 times without returning `true`, the promise is rejected.\n\nExample usage:\n\n```javascript\n// Check if a certain element is visible on the page\nconst isElementVisible = () => document.querySelector(\"#my-element\").offsetParent !== null;\n\n// Wait for the element to become visible, then log \"Element is visible!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\"));\n```\n\nIn summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner.\n## Questions: \n 1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code.\n\n3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?**\n\n Currently, there is no error handling or customization options for the `forTrue` function. The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. 
To add customization, additional parameters could be added to the function signature and used in the implementation.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/WaitUtil.md"}}],["22",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/utils)\n\nThe code in the `.autodoc/docs/json/src/cli/utils` folder provides utility functions and classes that help manage various aspects of the autodoc project, such as rate-limiting API calls, handling file and folder paths, managing language models, and traversing file systems.\n\n`APIRateLimit.ts` contains the `APIRateLimit` class, which is designed to manage and limit the number of concurrent API calls made by the application. This is useful when the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. For example:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\n`FileUtil.ts` provides utility functions for handling file and folder paths, such as generating file names and GitHub URLs for files and folders. These functions can be used to manage and navigate the documentation structure. For example:\n\n```javascript\ngetFileName(\"example.txt\"); // returns \"example.md\"\ngithubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project. It provides functions like `printModelDetails` and `totalIndexCostEstimate` to manage and analyze the usage and costs of different language models. 
For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations in the larger project. They can be used in various parts of the project to handle timing and conditional logic in an asynchronous manner. For example:\n\n```javascript\nwait(2000, \"Hello, world!\").then(console.log); // Waits for 2 seconds and then logs \"Hello, world!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\")); // Waits for an element to become visible, then logs \"Element is visible!\"\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. It is designed to be used for processing and generating documentation for a given project. 
For example:\n\n```javascript\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\ntraverseFileSystem(params);\n```\n\nIn summary, the code in this folder provides various utility functions and classes that help manage different aspects of the autodoc project, making it easier to handle tasks such as rate-limiting, file and folder management, language model management, asynchronous operations, and file system traversal.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/summary.md"}}],["23",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts)\n\nThe `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used in the larger project for processing and generating documentation for a given project.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties:\n\n- `inputPath`: The root folder path to start traversing.\n- `projectName`: The name of the project being documented.\n- `processFile`: An optional callback function to process files.\n- `processFolder`: An optional callback function to process folders.\n- `ignore`: An array of patterns to ignore files and folders.\n- `filePrompt`: An optional prompt for processing files.\n- `folderPrompt`: An optional prompt for processing folders.\n- `contentType`: The type of content being processed.\n- `targetAudience`: The target audience for the documentation.\n- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version.\n\nThe function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. 
If an entry is a file and is a text file, it calls the `processFile` callback if provided.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nimport { traverseFileSystem } from './autodoc';\n\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\n\ntraverseFileSystem(params);\n```\n\nThis example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions.\n## Questions: \n 1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. 
It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory.","metadata":{"source":".autodoc/docs/markdown/src/cli/utils/traverseFileSystem.md"}}],["24",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/const.ts)\n\nThe code in this file is responsible for managing the user configuration file for the Autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`. This constant represents the name of the user configuration file that will be used by the Autodoc project.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which resolves a sequence of paths into an absolute path. It takes three arguments:\n\n1. `os.homedir()`: This function returns the current user's home directory. It ensures that the user configuration file is stored in the user's home directory, making it user-specific.\n2. `'./.config/autodoc/'`: This string specifies the subdirectory within the user's home directory where the configuration file will be stored. The `.config` directory is a common location for storing configuration files on Unix-based systems, and the `autodoc` subdirectory is used to keep the Autodoc configuration files organized.\n3. `userConfigFileName`: This constant is used as the file name for the user configuration file.\n\nThe `userConfigFilePath` constant will store the absolute path to the user configuration file, which can be used by other parts of the Autodoc project to read or write user-specific settings.\n\nIn summary, this code is responsible for defining the location and name of the user configuration file for the Autodoc project. It ensures that the configuration file is stored in a user-specific directory and follows a standard naming convention. 
This allows the Autodoc project to easily manage user-specific settings and preferences.\n## Questions: \n 1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as the `path.resolve()` function used to construct the `userConfigFilePath`. The `node:os` module is imported to provide operating system-related utility methods, such as `os.homedir()` which returns the current user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. The `os.homedir()` function from the `node:os` module returns the correct home directory path for the current user, regardless of the operating system. Additionally, the `path.resolve()` function from the `node:path` module handles path separators and other OS-specific details, ensuring the correct file path is generated.","metadata":{"source":".autodoc/docs/markdown/src/const.md"}}],["25",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/index.ts)\n\nThe code is a CLI (Command Line Interface) tool for the Autodoc project, which helps in generating documentation for a codebase. It uses the `commander` package to define and manage commands, and `inquirer` for interactive prompts. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`.\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. 
If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM (Language Model), and creates a locally stored index. It prompts the user to confirm before starting the indexing process.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration; otherwise, it creates a new one.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. It requires both `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also handles unhandled promise rejections by logging the error stack, showing an error spinner, stopping the spinner, and exiting with an error code.\n\nOverall, this CLI tool simplifies the process of generating documentation for a codebase by providing an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities.\n## Questions: \n 1. **Question:** What is the purpose of the `autodoc.config.json` file and how is it used in the code?\n **Answer:** The `autodoc.config.json` file is used to store the configuration for the Autodoc repository. It is read and parsed in various commands like `init`, `estimate`, `index`, and `q` to provide the necessary configuration for each command's execution.\n\n2. **Question:** How does the `estimate` command work and what does it do?\n **Answer:** The `estimate` command reads the `autodoc.config.json` file, parses it into a configuration object, and then calls the `estimate` function with the configuration. The purpose of this command is to estimate the cost of running the `index` command on the repository.\n\n3. 
**Question:** What is the purpose of the `user` command and how does it handle user configuration?\n **Answer:** The `user` command is used to set the Autodoc user configuration. It reads the user configuration file specified by `userConfigFilePath`, parses it into a configuration object, and then calls the `user` function with the configuration. If the configuration file is not found, it calls the `user` function without any configuration, allowing the user to set up their configuration.","metadata":{"source":".autodoc/docs/markdown/src/index.md"}}],["26",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/langchain/hnswlib.ts)\n\nThe `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method takes an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method is responsible for initializing the index, resizing it if necessary, and adding the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. 
It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, this class can be used to efficiently store and search for similar documents based on their embeddings, which can be useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.\n## Questions: \n 1. **Question:** What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer:** The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question:** How does the `addDocuments` method work and what is its purpose?\n **Answer:** The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and the corresponding documents to the HNSW index and the `docstore` respectively.\n\n3. 
**Question:** How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer:** The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input. It checks if the query vector has the same length as the number of dimensions and if `k` is not greater than the number of elements in the index. It then performs a k-nearest neighbors search on the HNSW index using the query vector and returns an array of `[Document, number]` tuples, where each tuple contains a document from the `docstore` and its corresponding distance score to the query vector.","metadata":{"source":".autodoc/docs/markdown/src/langchain/hnswlib.md"}}],["27",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/langchain)\n\nThe `hnswlib.ts` file in the `.autodoc/docs/json/src/langchain` folder contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.\n\nThe `HNSWLib` class extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index. It takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. The `Embeddings` object is responsible for converting text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. 
The `addVectors` method initializes the index, resizes it if necessary, and adds the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nHere's an example of how this code might be used:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, the `HNSWLib` class can be integrated with other components to build efficient and scalable systems for document similarity search, clustering, and recommendations based on text embeddings.","metadata":{"source":".autodoc/docs/markdown/src/langchain/summary.md"}}],["28",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src)\n\nThe `.autodoc/docs/json/src` folder contains the core components of the Autodoc project, which aims to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The main files in this folder are `const.ts`, `index.ts`, and `types.ts`.\n\n`const.ts` manages the user configuration file for the Autodoc project. 
It defines the location and name of the user configuration file, ensuring that it is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences.\n\n`index.ts` is a CLI (Command Line Interface) tool for the Autodoc project, which simplifies the process of generating documentation for a codebase. It provides an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`. For example:\n\n```bash\nautodoc init\nautodoc estimate\nautodoc index\nautodoc user\nautodoc q\n```\n\n`types.ts` defines the types and interfaces for the Autodoc project, providing the foundation for processing code repositories and generating documentation using OpenAI's language models. It includes types such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more.\n\nThe `cli` subfolder contains the `spinner.ts` file, which provides a utility for managing a command-line spinner using the `ora` library. This utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\nstopSpinner();\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nThe `langchain` subfolder contains the `hnswlib.ts` file, which implements a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems. For example:\n\n```javascript\nconst embeddings = new Embeddings(/* ... 
*/);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn summary, the code in this folder provides the core components and utilities for the Autodoc project, enabling the automatic generation of documentation for code repositories using OpenAI's language models. The CLI tool simplifies the process, while the types and interfaces lay the foundation for processing and generating documentation. The additional utilities, such as the spinner and HNSWLib, enhance the user experience and provide efficient search capabilities.","metadata":{"source":".autodoc/docs/markdown/src/summary.md"}}],["29",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src/types.ts)\n\nThis code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders within the repository.\n\nThe code starts by importing `OpenAIChat` from the `langchain/llms` package. 
It then defines several types and interfaces that are used throughout the project:\n\n- `AutodocUserConfig`: Represents the user configuration for the autodoc project, including the LLM models to be used.\n- `AutodocRepoConfig`: Represents the configuration for a specific repository, including its name, URL, root directory, output directory, LLM models, and other settings.\n- `FileSummary` and `FolderSummary`: Represent the summaries and questions generated for files and folders, respectively.\n- `ProcessFileParams`, `ProcessFolderParams`, and `TraverseFileSystemParams`: Define the parameters for processing files, folders, and traversing the file system, respectively.\n- `ProcessFile` and `ProcessFolder`: Define the function types for processing files and folders, respectively.\n- `LLMModels`: Enumerates the available LLM models, such as GPT-3.5-turbo, GPT-4, and GPT-4-32k.\n- `LLMModelDetails`: Represents the details of an LLM model, including its name, cost per 1K tokens, maximum length, and other statistics.\n\nFor example, when using this code in the larger project, you might define a `ProcessFile` function that takes a `ProcessFileParams` object as input and generates a summary and questions for the file using the specified LLM model. Similarly, you could define a `ProcessFolder` function that processes all files and subfolders within a folder, generating summaries and questions for each.\n\nThe `TraverseFileSystemParams` type allows you to configure how the file system is traversed, including specifying which files and folders to ignore, and what prompts to use for generating summaries and questions.\n\nOverall, this code provides the foundation for the `autodoc` project by defining the types and interfaces needed to process code repositories and generate documentation using OpenAI's language models.\n## Questions: \n 1. 
**Question:** What is the purpose of the `LLMModels` enum and how is it used in the code?\n **Answer:** The `LLMModels` enum defines the available language models for the autodoc project. It is used in the `AutodocUserConfig` and `AutodocRepoConfig` types to specify which language models should be used for processing files and folders.\n\n2. **Question:** What are the `ProcessFile` and `ProcessFolder` types and how are they used in the code?\n **Answer:** `ProcessFile` and `ProcessFolder` are types for functions that process a file or a folder, respectively. They are used as optional parameters in the `TraverseFileSystemParams` type, allowing developers to provide custom processing functions when traversing the file system.\n\n3. **Question:** What is the purpose of the `TraverseFileSystemParams` type and how is it used in the code?\n **Answer:** The `TraverseFileSystemParams` type defines the parameters required for traversing the file system. It is used to pass configuration options, such as input path, project name, custom processing functions, and other settings, to a function that will traverse the file system and process files and folders accordingly.","metadata":{"source":".autodoc/docs/markdown/src/types.md"}}],["30",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/tsconfig.json)\n\nThis code is a configuration file for the TypeScript compiler in a project. The purpose of this configuration is to define various options and settings that the TypeScript compiler should use when transpiling TypeScript code into JavaScript. This is important for ensuring that the compiled output is consistent and compatible with the intended runtime environment.\n\nHere's a brief explanation of the key options set in this configuration:\n\n- `\"rootDir\": \"src\"`: Specifies the root directory containing the TypeScript source files. 
This tells the compiler where to look for the input files.\n- `\"outDir\": \"dist\"`: Specifies the output directory for the compiled JavaScript files. This is where the transpiled code will be saved.\n- `\"strict\": true`: Enables strict type checking, which enforces stronger type safety and helps catch potential issues during development.\n- `\"target\": \"es2020\"`: Sets the target ECMAScript version for the compiled output. In this case, the output will be compatible with ECMAScript 2020 (ES11) features.\n- `\"module\": \"ES2020\"`: Specifies the module system to use in the compiled output. This setting is aligned with the target ECMAScript version.\n- `\"sourceMap\": true`: Generates source map files alongside the compiled output. This helps with debugging by mapping the compiled code back to the original TypeScript source.\n- `\"esModuleInterop\": true` and `\"allowSyntheticDefaultImports\": true`: These options enable better compatibility with different module systems and allow for more flexible import statements.\n- `\"moduleResolution\": \"node\"`: Sets the module resolution strategy to Node.js-style, which is the most common approach for resolving module imports in JavaScript projects.\n- `\"declaration\": true`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled output. These files provide type information for the compiled code, which can be useful for other TypeScript projects that depend on this one.\n- `\"skipLibCheck\": true`: Skips type checking of declaration files, which can speed up the compilation process.\n\nIn the larger project, this configuration file ensures that the TypeScript compiler produces consistent and compatible JavaScript output, making it easier to integrate the compiled code with other parts of the project or with external dependencies.\n## Questions: \n 1. 
**What is the purpose of the `rootDir` and `outDir` options in the configuration?**\n\n The `rootDir` option specifies the root folder of the source files, while the `outDir` option specifies the output directory for the compiled files.\n\n2. **What does the `strict` option do in the configuration?**\n\n The `strict` option enables a set of strict type-checking options in the TypeScript compiler, ensuring a higher level of type safety in the code.\n\n3. **What is the significance of the `target` and `module` options in the configuration?**\n\n The `target` option sets the ECMAScript target version for the compiled JavaScript output, while the `module` option specifies the module system to be used in the generated code. In this case, both are set to \"es2020\", indicating that the output will be ECMAScript 2020 compliant.","metadata":{"source":".autodoc/docs/markdown/tsconfig.md"}}]] \ No newline at end of file +[["0",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts)\n\nThe `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. 
This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.\n## Questions: \n 1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. **What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. 
**How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\estimate\\index.md"}}],["1",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\estimate)\n\nThe `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost.\n\nUpon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. 
The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\estimate\\summary.md"}}],["2",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\convertJsonToMarkdown.ts)\n\nThe `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. 
The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.\n## Questions: \n 1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. 
**Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\convertJsonToMarkdown.md"}}],["3",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\createVectorStore.ts)\n\nThe code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. 
It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n## Questions: \n 1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. 
**Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\createVectorStore.md"}}],["4",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\index.ts)\n\nThe code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. 
The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.\n## Questions: \n 1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. 
**What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\index.md"}}],["5",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\processRepository.ts)\n\nThe `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. 
If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.\n## Questions: \n 1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. 
**Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\processRepository.md"}}],["6",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts)\n\nThis code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.\n## Questions: \n 1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\prompts.md"}}],["7",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\index)\n\nThe code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process.\n\nThe main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks:\n\n1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMS (Language Learning Management System), and create JSON files with the results. These JSON files are stored in the `output/docs/json/` directory.\n\n2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory.\n\n3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. 
These vector files are stored in the `output/docs/data/` directory.\n\nThroughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions.\n\nHere's an example of how this code might be used:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory.\n\nThe `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project.\n\nIn summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\index\\summary.md"}}],["8",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts)\n\nThis code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. 
The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n## Questions: \n 1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. 
It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\init\\index.md"}}],["9",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\init)\n\nThe `index.ts` file in the `.autodoc\\docs\\json\\src\\cli\\commands\\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. 
It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\nThis code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. 
It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\init\\summary.md"}}],["10",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts)\n\nThis code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. 
This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.\n## Questions: \n 1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. 
If provided, it will be called with each new token generated during the chat process.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\query\\createChatChain.md"}}],["11",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts)\n\nThis code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. 
The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n## Questions: \n 1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. **What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. It is used to keep track of the conversation between the user and the chatbot. 
When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\query\\index.md"}}],["12",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\query)\n\nThe `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate.\n\nThe main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n\nThe `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. 
The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project.\n\nIn summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\query\\summary.md"}}],["13",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands)\n\nThe code in the `.autodoc\\docs\\json\\src\\cli\\commands` folder is responsible for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. The folder contains several subfolders, each with a specific purpose.\n\n### estimate\n\nThe `estimate` function provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input and performs a dry run of the repository processing to calculate the estimated cost. 
Example usage:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\n### index\n\nThe code in this folder processes a given repository and generates documentation in JSON, Markdown, and vector formats. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository, creating Markdown files, and creating vector files. Example usage:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\n### init\n\nThe `init` function initializes the configuration of the Autodoc project. It prompts the user to input necessary information to set up the project and creates the `autodoc.config.json` file in the project root. Example usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\n### query\n\nThe `query` folder contains code for creating a chatbot that can answer questions about a specific software project. The main entry point is the `query` function, which takes an `AutodocRepoConfig` object and an `AutodocUserConfig` object as input. 
Example usage:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\n### user\n\nThe `user` folder manages the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs). Example usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the code in this folder is essential for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\summary.md"}}],["14",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts)\n\nThis code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. 
This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n## Questions: \n 1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. 
If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\user\\index.md"}}],["15",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\user)\n\nThe `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. 
The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nThis code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\commands\\user\\summary.md"}}],["16",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\spinner.ts)\n\nThis code is responsible for managing a spinner, which is a visual element that indicates a process is running in the background. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. 
This ensures that there will only be one spinner active at any given time.\n\nThere are several functions exported by this module to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: This function stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.\n## Questions: \n 1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface, providing a visual indication of a running process. 
In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **How does the `updateSpinnerText` function work?**\n\n The `updateSpinnerText` function takes a message as an input and updates the spinner's text with the given message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message.\n\n3. **What are the differences between `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions?**\n\n These functions are used to update the spinner's state and message based on the outcome of a process. `spinnerError` is called when there is an error, and it stops the spinner with a failure message. `spinnerSuccess` is called when the process is successful, and it stops the spinner with a success message. `spinnerInfo` is used to display an informational message without stopping the spinner.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\spinner.md"}}],["17",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli)\n\nThe code in the `spinner.ts` file, located in the `.autodoc\\docs\\json\\src\\cli` folder, is responsible for managing a spinner, a visual element that indicates a background process is running. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe module exports several functions to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: Updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: Stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. 
`spinnerError(message?: string)`: Stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: Stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. `spinnerInfo(message: string)`: Displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\summary.md"}}],["18",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\utils\\APIRateLimit.ts)\n\nThe `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once.\n\nThe class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. 
It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit.\n\nWhen `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit. If both conditions are met, it dequeues the next API call and executes it.\n\nThe `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchSomeData(id) {\n // Call the API using the rate limiter\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once.\n## Questions: \n 1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. 
It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How can the maximum number of concurrent calls be configured?**\n\n The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. If no value is provided, the default value is set to 50.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\APIRateLimit.md"}}],["19",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\utils\\FileUtil.ts)\n\nThis code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files.\n\n1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension.\n\n Example usage:\n\n ```\n getFileName('example.txt'); // returns 'example.md'\n getFileName('example', '_', '.html'); // returns 'example.html'\n ```\n\n2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. 
It returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true);\n // returns 'https://github.com/user/repo/example.md'\n ```\n\n3. `githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true);\n // returns 'https://github.com/user/repo/folder'\n ```\n\nThese utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project.\n## Questions: \n 1. **What is the purpose of the `getFileName` function?**\n\n The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended.\n\n2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders.\n\n3. 
**What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). Depending on the value of `linkHosted`, the functions will return different URL structures.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\FileUtil.md"}}],["20",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\utils\\LLMUtil.ts)\n\nThis code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed.\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. 
For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nprintModelDetails(Object.values(models));\n```\n\nAnd the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models:\n\n```javascript\nimport { models, totalIndexCostEstimate } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```\n## Questions: \n 1. **Question:** What is the purpose of the `models` object and how are the different GPT models being used?\n **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration.\n\n2. **Question:** How does the `printModelDetails` function work and what information does it display?\n **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost.\n\n3. 
**Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost?\n **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\LLMUtil.md"}}],["21",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\utils)\n\nThe `.autodoc\\docs\\json\\src\\cli\\utils` folder contains utility functions and classes that assist in managing API rate limits, handling file and folder paths, managing language models, traversing file systems, and controlling asynchronous operations. These utilities can be used throughout the autodoc project to ensure consistent behavior and improve code organization.\n\n`APIRateLimit.ts` provides the `APIRateLimit` class, which manages and limits the number of concurrent API calls made by the application. This is useful when working with rate-limited APIs or preventing server overload. Example usage:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function fetchSomeData(id) {\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\n`FileUtil.ts` offers utility functions for generating file names and GitHub URLs for documentation files. These functions ensure consistent naming and URL generation across the project. 
Example usage:\n\n```javascript\ngetFileName('example.txt'); // returns 'example.md'\ngithubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); // returns 'https://github.com/user/repo/example.md'\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project utilizing OpenAI's GPT models. Functions like `printModelDetails` and `totalIndexCostEstimate` can be used to manage and analyze the usage and costs of different LLMs. Example usage:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\nprintModelDetails(Object.values(models));\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processing files and folders based on provided parameters. This is useful for generating documentation or performing tasks that require processing files and folders in a directory structure. Example usage:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => { /* Process file logic */ },\n processFolder: (params) => { /* Process folder logic */ },\n ignore: ['node_modules/**', '.git/**'],\n});\n```\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used to control the flow of asynchronous code execution. 
Example usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n```\n\nIn summary, the utilities in this folder enhance the autodoc project by providing consistent behavior, improving code organization, and managing various aspects of the project, such as API rate limits, file and folder paths, language models, file system traversal, and asynchronous operations.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\summary.md"}}],["22",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\utils\\traverseFileSystem.ts)\n\nThe `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. These properties include:\n\n- `inputPath`: The root path to start the traversal from.\n- `projectName`: The name of the project being processed.\n- `processFile`: An optional callback function to process a file.\n- `processFolder`: An optional callback function to process a folder.\n- `ignore`: An array of patterns to ignore during traversal.\n- `filePrompt`, `folderPrompt`: Optional prompts for user interaction.\n- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing.\n\nThe function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. 
It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items.\n\nFor each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided.\n\nThe traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => {\n // Process file logic here\n },\n processFolder: (params) => {\n // Process folder logic here\n },\n ignore: ['node_modules/**', '.git/**'],\n});\n```\n## Questions: \n 1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library.\n\n3. 
**What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\traverseFileSystem.md"}}],["23",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\cli\\utils\\WaitUtil.ts)\n\nThe code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax.\n\n### wait\n\nThe `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\ndelayedEcho(); // Output: Start -> (1 second delay) -> End\n```\n\n### forTrue\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). 
If `fn` does not return `true` within this time, the `Promise` is rejected.\n\nThis function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nlet condition = false;\n\nsetTimeout(() => {\n condition = true;\n}, 3000);\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n\nwaitForCondition(); // Output: Waiting for condition... -> (3 second delay) -> Condition met!\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution.\n## Questions: \n 1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved.\n\n2. **How does the `forTrue` function work?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected.\n\n3. **What is the use case for the `forTrue` function?**\n\n The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. 
This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing.","metadata":{"source":".autodoc\\docs\\markdown\\src\\cli\\utils\\WaitUtil.md"}}],["24",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\const.ts)\n\nThe code in this file is responsible for managing the user configuration file for the autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`, which represents the name of the user configuration file. This file is expected to store user-specific settings for the autodoc project in JSON format.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which combines the provided arguments into an absolute file path. The `os.homedir()` function is used to get the current user's home directory, and `./.config/autodoc/` is appended to it as the folder where the user configuration file should be stored. Finally, the `userConfigFileName` constant is appended to the path, resulting in the complete file path for the user configuration file.\n\nBy exporting both `userConfigFileName` and `userConfigFilePath`, other parts of the autodoc project can easily access and use these constants to read or write user-specific settings. 
For example, when the autodoc application starts, it can read the user configuration file from the specified path, and apply the settings accordingly.\n\nHere's a code example of how these constants might be used in another part of the autodoc project:\n\n```javascript\nimport { userConfigFilePath } from './path/to/this/file';\n\n// Read user configuration from the file\nconst userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8'));\n\n// Apply user settings\napplyUserSettings(userConfig);\n```\n\nIn summary, this code is responsible for defining the name and file path of the user configuration file for the autodoc project, allowing other parts of the project to easily access and manage user-specific settings.\n## Questions: \n 1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules being imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as resolving the absolute path to the user configuration file. The `node:os` module is imported to provide operating system-related utility methods, such as getting the user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. 
The `os.homedir()` method returns the home directory of the current user, which is platform-specific, and the `path.resolve()` method takes care of handling the correct path separators for the current operating system.","metadata":{"source":".autodoc\\docs\\markdown\\src\\const.md"}}],["25",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\index.ts)\n\nThis code is the main entry point for the Autodoc CLI tool, which provides a set of commands to help developers automatically generate documentation for their codebase. The tool uses the `commander` library to define and handle commands, and `inquirer` for interactive prompts.\n\nThe available commands are:\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM, and creates a locally stored index. Before starting the indexing process, it prompts the user for confirmation. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. 
It requires both the `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also listens for unhandled promise rejections and handles them gracefully by showing an error spinner, stopping the spinner, and exiting with an error code.\n\nIn the larger project, this CLI tool serves as the primary interface for users to interact with Autodoc, allowing them to easily generate and manage documentation for their codebase.\n## Questions: \n 1. **What is the purpose of the Autodoc CLI Tool?**\n\n The Autodoc CLI Tool is designed to help developers automatically generate documentation for their codebase by traversing the code, writing docs via LLM, and creating a locally stored index.\n\n2. **How does the `estimate` command work and what does it return?**\n\n The `estimate` command reads the `autodoc.config.json` file and estimates the cost of running the `index` command on the repository. It provides an estimation of the resources required to generate the documentation.\n\n3. **What is the role of the `user` command and how does it interact with the user configuration file?**\n\n The `user` command is responsible for setting the Autodoc user configuration. It reads the user configuration file (if it exists) and allows the user to update or create a new configuration. This configuration is then used in other commands, such as the `query` command, to interact with the Autodoc index.","metadata":{"source":".autodoc\\docs\\markdown\\src\\index.md"}}],["26",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\langchain\\hnswlib.ts)\n\nThe `HNSWLib` class in this code is a specialized vector store that uses the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. It is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. 
The main purpose of this class is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe constructor of the `HNSWLib` class takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow for persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn the larger project, the `HNSWLib` class can be used to efficiently store and search for documents based on their content similarity, which can be useful for tasks such as document clustering, recommendation systems, or information retrieval.\n## Questions: \n 1. 
**Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them using the provided `Embeddings` instance. It then adds the resulting vectors and documents to the HNSW index and the `InMemoryDocstore`, respectively.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` most similar vectors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding similarity score to the query vector.","metadata":{"source":".autodoc\\docs\\markdown\\src\\langchain\\hnswlib.md"}}],["27",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\langchain)\n\nThe `hnswlib.ts` file in the `.autodoc\\docs\\json\\src\\langchain` folder contains the `HNSWLib` class, which is a specialized vector store utilizing the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. This class is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. Its primary purpose is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe `HNSWLib` class constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. 
The `Embeddings` object is responsible for converting documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods enable persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nIn the larger project, the `HNSWLib` class can be employed to efficiently store and search for documents based on their content similarity, which can be beneficial for tasks such as document clustering, recommendation systems, or information retrieval.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nThis code snippet demonstrates how to create an `HNSWLib` instance, add documents to the index, and perform a similarity search. 
The results can then be used for various purposes, such as finding related documents or generating recommendations based on content similarity.","metadata":{"source":".autodoc\\docs\\markdown\\src\\langchain\\summary.md"}}],["28",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src)\n\nThe `.autodoc\\docs\\json\\src` folder contains the core components of the autodoc project, which is designed to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The folder consists of three main files: `const.ts`, `index.ts`, and `types.ts`, as well as two subfolders: `cli` and `langchain`.\n\n`const.ts` defines the name and file path of the user configuration file for the autodoc project. This file stores user-specific settings in JSON format. Other parts of the project can easily access and use these constants to read or write user-specific settings. For example:\n\n```javascript\nimport { userConfigFilePath } from './path/to/this/file';\n\n// Read user configuration from the file\nconst userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8'));\n\n// Apply user settings\napplyUserSettings(userConfig);\n```\n\n`index.ts` serves as the main entry point for the Autodoc CLI tool, providing a set of commands for developers to generate and manage documentation for their codebase. The available commands include `init`, `estimate`, `index`, `user`, and `q`. The CLI tool uses the `commander` library for command handling and `inquirer` for interactive prompts.\n\n`types.ts` defines the types and interfaces for the autodoc project, such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more. 
These types are used to configure and run the autodoc tool, allowing users to generate documentation for their code repositories using OpenAI's LLMs.\n\nThe `cli` subfolder contains the `spinner.ts` file, which manages a spinner for visual feedback during background processes. It exports functions like `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` for easy interaction with the spinner.\n\nThe `langchain` subfolder contains the `hnswlib.ts` file, which provides the `HNSWLib` class for efficient similarity search using the Hierarchical Navigable Small World (HNSW) algorithm. This class is used to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content. Example usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn summary, the code in this folder is responsible for the core functionality of the autodoc project, including user configuration management, CLI tool commands, type definitions, spinner management, and efficient similarity search using the HNSW algorithm.","metadata":{"source":".autodoc\\docs\\markdown\\src\\summary.md"}}],["29",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/src\\types.ts)\n\nThis code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. 
The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders in the repository.\n\nThe `AutodocUserConfig` and `AutodocRepoConfig` types define the configuration options for the user and repository, respectively. These include settings such as the LLM models to use, repository URL, output directory, and content type.\n\n`FileSummary` and `FolderSummary` types represent the generated summaries for files and folders, including their paths, URLs, and checksums. The `ProcessFileParams` and `ProcessFolderParams` types define the parameters required for processing files and folders, such as the file or folder name, path, and content type.\n\n`ProcessFile` and `ProcessFolder` are function types that take the respective parameters and return a promise. These functions are responsible for processing the files and folders, generating summaries, and updating the documentation.\n\n`TraverseFileSystemParams` type defines the parameters for traversing the file system, including the input path, project name, and optional `processFile` and `processFolder` functions. It also includes settings for ignoring certain files or folders and content type preferences.\n\nThe `LLMModels` enum lists the available language models, such as GPT-3.5 Turbo, GPT-4, and GPT-4 32k. The `LLMModelDetails` type provides information about each model, including the cost per 1K tokens, maximum length, and success/failure statistics.\n\nIn the larger project, these types and interfaces would be used to configure and run the `autodoc` tool, allowing users to automatically generate documentation for their code repositories using OpenAI's language models. For example, a user could provide an `AutodocRepoConfig` object to configure the tool, and then use the `TraverseFileSystem` function to process the repository and generate the documentation.\n## Questions: \n 1. 
**What is the purpose of the `AutodocUserConfig` and `AutodocRepoConfig` types?**\n\n The `AutodocUserConfig` type is used to define the user configuration for the autodoc project, which includes an array of LLMModels. The `AutodocRepoConfig` type is used to define the repository configuration for the autodoc project, which includes various properties such as name, repository URL, root, output, LLMModels, and more.\n\n2. **What are the different LLMModels available in the `LLMModels` enum?**\n\n The `LLMModels` enum lists the available language models for the autodoc project. Currently, there are three models: GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k).\n\n3. **What is the purpose of the `ProcessFile` and `ProcessFolder` types?**\n\n The `ProcessFile` type is a function type that takes a `ProcessFileParams` object as input and returns a Promise. It is used to process a single file in the autodoc project. The `ProcessFolder` type is a function type that takes a `ProcessFolderParams` object as input and returns a Promise. It is used to process a folder in the autodoc project.","metadata":{"source":".autodoc\\docs\\markdown\\src\\types.md"}}],["30",{"pageContent":"[View code on GitHub](https://github.com/context-labs/autodoc/tsconfig.json)\n\nThe code provided is a configuration file for the TypeScript compiler in a project. It specifies various options that control how the TypeScript compiler should process the source code and generate the output JavaScript files. This configuration file is typically named `tsconfig.json` and is placed at the root of a TypeScript project.\n\nThe `compilerOptions` object contains several key-value pairs that define the behavior of the TypeScript compiler:\n\n- `rootDir`: Specifies the root directory of the source files. In this case, it is set to \"src\", meaning that the source files are located in the \"src\" folder.\n- `outDir`: Specifies the output directory for the compiled JavaScript files. 
In this case, it is set to \"dist\", meaning that the compiled files will be placed in the \"dist\" folder.\n- `strict`: Enables strict type checking, which helps catch potential issues in the code.\n- `target`: Specifies the ECMAScript target version for the output JavaScript files. In this case, it is set to \"es2020\", meaning that the output files will be compatible with ECMAScript 2020 features.\n- `module`: Specifies the module system to be used. In this case, it is set to \"ES2020\", meaning that the output files will use the ECMAScript 2020 module system.\n- `sourceMap`: Generates source map files, which help in debugging the compiled code by mapping it back to the original TypeScript source files.\n- `esModuleInterop`: Enables compatibility with ECMAScript modules for importing CommonJS modules.\n- `moduleResolution`: Specifies the module resolution strategy. In this case, it is set to \"node\", meaning that the Node.js module resolution algorithm will be used.\n- `allowSyntheticDefaultImports`: Allows default imports from modules with no default export.\n- `declaration`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled JavaScript files, which can be useful for other projects that depend on this one.\n- `skipLibCheck`: Skips type checking of declaration files, which can speed up the compilation process.\n\nOverall, this configuration file helps ensure that the TypeScript compiler processes the source code according to the specified options, resulting in compiled JavaScript files that are compatible with the desired ECMAScript version and module system, while also providing useful features like source maps and strict type checking.\n## Questions: \n 1. **What is the purpose of the `rootDir` and `outDir` options in the configuration?**\n\n The `rootDir` option specifies the root directory of the input files, while the `outDir` option specifies the output directory for the compiled files.\n\n2. 
**What does the `strict` option do in the configuration?**\n\n The `strict` option enables a wide range of type checking behavior that results in stronger guarantees of program correctness.\n\n3. **What is the significance of the `target` and `module` options in the configuration?**\n\n The `target` option specifies the ECMAScript target version for the output code, and the `module` option specifies the module system used in the output code. In this case, both are set to \"es2020\", which means the output code will be compatible with ECMAScript 2020 features and module system.","metadata":{"source":".autodoc\\docs\\markdown\\tsconfig.md"}}]] \ No newline at end of file diff --git a/.autodoc/docs/data/hnswlib.index b/.autodoc/docs/data/hnswlib.index index 617b3e4f44be7207a770b40c036969d72aca47dd..e0b61c6ce5632dab52232394ab774ff164a418a3 100644 GIT binary patch literal 195092 zcmbrmc{r8P*T-ElMIzt!Oga3S4|9`%RoBrpDp#OgV zpWip6?|O7G_@AF!{m=LR{`_xK#~A~f-*Yeie_b!`qehoTbWx{^23<7iqD2>Nx-_PX z4qbHV(u6MJ@%8D_lrF|}`M-W?UCChH1CpRctFNkVs<%wz;Q(y*v5V^8^&YU{=6+R& zlty^vfeJJASHm&?r`)o)zS7{1PA)V%ix1{EakHDT6EqK`V)BiH@cfJfdckM$kbx=g zTQ(PmJhFwP+%xF7X9O6UW`liFI_lg0a*e*SmbZh(SkYG#oyTNgPUCO*anx{hov5LN zcFpJgUp0V@dU@bqFhf2*?y{_(&=$`*uE1tjHp-@jDg0(oD(s%HhfVwN33sjf#QiGA zfrY6y1YBq$-DtH1f2r+Z-BfewxMi+CEtuW1dB>WDHG$%|{%l}{7aUyp0?%mbD@~22 zVp;wGmXa}njlQJKJpXQhkgF{*bV5I7{;?r+atVT+c2#Us$PLV&kq%Xl`k>wW=NS1p z4(G<7VSA4sfY-5uIIR&dn_)b++aHJ>T+RF2So3G@fpG9;GikL(AZ8ieP!(ZQxcTZT zGj(}}@7~@8onK)vRO>iDx+)Ui8$X1LwT)o(9Hr8#6jr)7RkIcpxaX+00 zwsnv)ViX*(wTRI%^M(fPr8!M?aohJ+&^f&qbZl%dy}3Ob<5v~p+}Cpz{}Wfie`y9R zt(pmI)<}|XyQA1<{-0cphCg{_lj~qJ@Cwl5vso{KV5(#yIkwt^v`>8I(-SON*8?t8 zH<$K*9f=1g590|321pO{V_^B8QH=S_L(LBVxLE(g<-Sx$H^p-+rWtzDo#iXBYqRRy zI6EiYQRFWFoH`vp=X~P>{fzi0Epv$;2S;v=23h|K`|&p)edDX-?L!->^n#t;=oqkC zYY;rUG#U=Y%;Q^Ima$bW%<_+YJ%n=14(Pb67PWHsGM_=?*yi?Xtm)D1DBGLrGQ--1sMj&Fpsz@2wst72lnXJu`pe*B8SvqxuW( zec4NOy;}mf1%+`V{~|tSr5`?8^aW@=RhMjaz{#;a2C#v!V?|?lwdg!|owx<>4lm~O 
zLKE11*@GQ2h~Wd@zT~v$3Jkd|XH+j`JyuWQJ3?Bcq2_+pGjk=2?qe?*oHkTG&YXi) zyS78qi)ldT4}LojaP4Oan4k5WNp+U^PiG{o45>rIwGSBWIdr_%7&f=x1U5l)WYPgt zwmpPjq5g0-`xTFzHI8p)`?z`-Q$*dVpY9DWo|{okDb$PV(w+A2CS(^}B&!*FQncIvwWuxyJdJx&Ly9c%+y7Z|>&C;#k` z%O2Y(c+&P2#wC1~sh=d$E9F?{J}mKJHJaHDflYU;|?PtpHOV3wZ!|Gnv(tG2{*zb1?6xQ5h)Tf;46HX>X$?Ll=0Mcb>a_b%R2>+qlwC@#s zx&9n&YNO$bNd-JPR|n}m%v75UjoJBbrJ~>5^p+(;PLwOQbvXsa!whiEp&~F@Xod+6 zVMu;}r$?R#(p=R3>ICx)Yxtcy&R<&P03Y}osD9+nU6O?_h}u)__%|6g>}?3r)*9yT z+6;epjOU^^`a5OA!vXK{&Fxq6i8c+T!`@9K^`06$V9F}?`J0+VdXJq7RpSqf|{(30$v9BFM^BGo?#t@z^+%2Enzu=s8j_8oAYJxp(ax8n99`32llkHitT zOOSjBQ|vUQ)@dOy&QkiE9iSBh{aHXJ-a8n*5gGkepKH}7wRO(thT z#q6`Ji?0q;lum%qBY$uS_l3S?k)o$^y{|9BO@`6nSJDe6zc_{2PZl8Up(2hcZ1Z5Q zSG$DA_q0KA%v8@*>%mA~qPR7ABKDAH6!yX9cpWbX?8$W=(;1pQjKLn^0cgLYInw>H z=$aiI3LYh`@A^UIt=-%vM%zgNL#N`Aunv6bBty{;{MCRw(p6(1 z|7Vv2OrU+Xxzc&b0qDKU3bOsPawp79W*LP=I8@64e!M-RDt9!6+=bf8%nWpIr_!b)nxoPd7#%i0YU`KZp^|5#?$lLGSg9H7aAZeClqwgr` z)zDNJLiq2ondEoJcBm#*Fd|hevonN z0iU;0Ls2=sV44xTIC+Tb<@hm>v|9rkl`qb5zcf>so@0;K`#LEu8_tj>_EoLZe#KU_ zeZUv%Xp-j~MJxYkJl460ozdKZ z`r*!Z7cet8h1F{OMbbNXSr!P-SMTQ$-u^i2rv(=JH)0;8K)jU+^OJSq#KLU8=*wOh zl-QN8PmSeE`n82gN;=EB7%Lt}H7c{ExDRw17?0HRc)r^Z6im?A61l8D7>Uaip`$xb z9)Y2ydYCbItBPt}+PQxxPPpE$oQLps~rfi0aB3B(fU8(J#c z`sSjiYCD|zxF3i)Wb#A2qF#tk?W54!DFn;P9`mVQCuC7G9j|QztD4zB{?3T?lwn=E zv3=8SfyT$jxT{eQ@H+f~)%QY+02uB={yOFnqcuaZpVS|!h6btVN588WlgM}4O{dz+ z0_qv?%p3?=PuESJOC38T;hG? 
zL&En}LPN{V8}RVO-IbrlC-83KdQp41URL{nd1uTTCF}nae7+ujy z9`i{bS2%wG?lPQo`2t@VvH{1a9bu7=8=?1jS596C-cH)mzIS^#-JcUnadDhso{sGN zk8~i$fIlUVgl{rY+e1DnIJzQ+GPic!Q6N1;p@rM`<+9;E`n1<&NX&;p z_a|ZVz^=-O*yDIl-4IODT!gnG^*&zIk7so3vg`BlT+~P(tAPsfAd=?ucbTqAiuKUmULX#NXZForEGnig~C6U?d`&UWNuSDv>q_>GcX;v8on-SPP!bU3YhYX z8>c*lo{7Yk>)ePte=(|3gzRtj$Edyl1>9iL0*^f9Cyp_uO-Y2$}slSmp7=yh(?9r=4B+bqhJYDR26rUz~k!|IzNEOpk%&{jzCoAMSH*Gp`wcgt^X7K#f%gG3VM{>@fSg z@K~8V2bXL-hesN|1YhHbdM4wA(eTJ;Vo%~o*5;Nzr`y2ZJeiZ03C@s2Hr+7cEB^TF z4yg~%%9M$r;lBvEQTb>JMB!A{$?AnuNL7 zQ@WC5p}cq=ODvHJ6TWL#pQ zF$<};u~7X}z5YD)>ne@yn9iPEy2B~A%N8^8kho7C;3Fw?zPbC??^Uh)ex0`n-U^g0 zxYO)QoH&(l+`Jlzov2nAsOYfPg2y9O6jrlR2KD<s$V_ebM#9hCvG4}_m{$~25}7Wt_i5+gFoRn(*3+}fR=EKmEeh~1daIprv@ zHeZOfIsK^@-Ei&Ifk=J{bbd%2fYcuf^(m)bs_zTpYovSyZ-y94rS4PeGg!juDU>B= z)Mua*3tbdqbzd0Ys)JPpR(_Vh4rtD^iVd%2~LXwS;Ttol$Uj z$klyl_*Ij#uNU@rauNK^d${*j{E}t9Om99WoPG@FqPB@S1$LU)BTwXA@@*gnL%G#+ zQH!kh%XA=(pgBlS+!W%C-(7ux<_1XotqiQVPOOon+E%4aeyWd47o=f+hn?_2@`EW$ zeI&uNUtg55a`TNatVOc3--&wDmf4%U0e{zBKwrVS=G`d!(`6k8T6yKHRhWGz(gh1JoDLqb3x@dRLZZlAazH^~|5o-pVN} z;N`kEe83P}nBMCz?>@W&R8BL=pQo!*+!Dq4@|40QNcob{yaqFFO~juSO>@;RR?r+| zG8b%dUp~jk>!Ine4K#-dgf7YvW{* zl_}3)sNPO2tQqSj=1qO%ww%^UWDLr#8jA2`HM`DVMuTe~IDNtF~1!&G8*hgM8 zXOvW{u}%?kBA{7W{b-qqzytzDZ)QVv8cs9CB1ObMpoBvYz&BvF7kY zAa0<#8>J#|ke@8S%xT6T{Ed@eGMZa(haWp}tXXrmcB~$Dc+wh$e*D>4z-@iqaNj!< z!GiVkixsZ{XeJ_b7%1EDbLKwyK3rR54o3T?$_%y@y2vw^=_xd;VxQNIRW6?|6>I{V zhjvr!0*<)R+!5HT&X9D(5{Ava0o@Jz2SKmdri{3Vc;+Qr_uUJ+#Mxuru^=AOXDp8$JcBJ=&;dM35$U)o|H~?oot@77 zW&MvWt!LlQ-!?&9!s7b8Ol-v8B_<)|VqM?m(tE@r zakh%;1*tyqVo*5_h@DvPH8fuV;d{h&Dy+?dLiI)X)3}o)xW=(tCl%?ZU}YdLg1d?T z;C)3;pzEP^s|(O?(|k^PhV$F|Lh0oL=r<||b`<3U%}tSJy>Q9p0QXB)L441HjAqcV z(X*1h%go_{uk|>w4lKKQjop}Nqr4dsjl>ur_B!*;<$Aq)G4}&TMfiZ2V~hE%)4^(b zESXb2MXD``&YjL8ikpMr`%VKbP~^%cle^HIcRIhgq=49@0*Hxm!SsKevSB@=);#@# zZ{y?{gx5b!ybfnf4~sb}jy84|d6dpEk<+ZMK0hbxo`W0nnaI?5%zrAbZo*=&dx35L~=idLff2(F)U{myV!`QfNx8j&&7SYxd z?e-2~ZMtrA?O64jrxh0RD!*B{(`pS|Q}2emS9Mc5>&Ei1S2dW~GL%(~*a7MjQgOwy 
zU>H4TyUJZX45i-kT?scb6jyXXte7wpGAGe4<3vun9_%4|8aqaSJp z#N$FuEyZ?D0}RFHit6M}Ird~KyFD=s+*^!fUj}dFUd9KppzUaEser5PoQ=G!$9(WI z=m!OD?_i6>``pXm0=)m;94soWq@4X3xO8}bwrP+n+OB-VYpW~e4R$M8V)K7Y=VGo( z@3{lCzx*Ay2J0y|T?gaO^lIj1V#t3N&v9EeeiQsUS;Uw5Ph@scA5<&0Oo7WMo#mHt zKIrY31HMgz@Yti%xLc0R*=+cG#k^(`wom4b)n-GjEXcEO15Td+8FHOg1lK;PL@*!iVPaFbIY znr!VMwKCCA_N-}*Q|--^$)8&DAA#DEuhjyyPcVaR2RB000BwF}Su4rLGeb3cRw7So zZ3<7jD(Khy1e70jL(4Dc`RG|zaC(+2*NSWntxnT;k@k z<2vT+b;g$=Ca|&nOWvr@W^Q`eL|V{5#@{95=omGS_EMU3s}m-^Z3DZ$l_;a77yf^x+YHn#OI6uuy3V-K~I_C%Bjn=l(%AKKLZM=uAxfF;wSpmpz7318e z_xPCg=Rta{saXDMCeiv~olZFGZQT^99{8C#N6>S-zWiD)$Tcu;D^U%~dm_fc^o75f z-QH4so!pVHJe&ZQP{e3OLU3o2}Z+@v(*m4 zk9r^(ba}uZ)kX5}8)BK&pN%+r?PzKFf?TBfL7OR~;kx-a?7uCOjoEk#&u#)~+u33) zX!8QMtRG%~$@{L&&#d=>j#aBwhN@t`d59g}_WuTVw>!yn8`qpWWEF`%Z zin?Q^QLfY%>`&B;F$aEo)}2rgS*l367P|g0IusshTga zhVY6IIc84^u5a=c{Kiy^K9{L~cy3N#_N=>u667}&tJl_wV}YJ(@!%BtnbxS8MEAq- zr-wkDb^@&x;;C#!Lv ziB!Y*_upc6uxA(^|GXCdsk`IC7QXURr#NPxITgFQXW^wiS+HNb5mr9xEgev$1NA#} zWw+gCu1aBFCRJegyBoO6{xF|hV<2TY=_?Me%9*8hE$z`{Q%2v-BEK@82mH)uIi-4@bC&XY4fl>{M>-% z(4|d2J2bwj6g41(AOBnn;yhpN2iRa(2!k(ZNQIKW>h;-Y@^#H*q~Wx++fJ>CL`etrg@9jx#sv$LHQcXhW$#U z<2l6}y3EP`zN!>n9c;-+)7jAjkGb%f83E4V@M;{?X`aBRoriJKaj<-(C;d~;=OYJo zgxK?+NfUyxQ%xgSpdQ2atc#%I+=7dp&%yd%9l6efPw-;XW$?Cp%Sg-dnpzQ(jXej5*n&BVV8Y7R{3l{Xh%1dg!ac*d(d&G0__3dy~8lAPXey{fp~Gp zcllMqP!wx7MYkRF@fw759Jn&ck9TQjDedae9H^(@fY)@G+AfVZPH)RP$W4@J-vm4` ze54dL?rH9apFwiy^@sB7HUbJFwRW3!eLtr34d^(Sj;b|#m!3FEHCgVqiY%4pxAMf_Zt|8^5z{a3}u?cWVg zOpk)jNi}%xy`Rx}a?%#1r}@&1WmHUUF0!|)nXJz?72mDqM_Uru{MeHDPsdbmNe zZY9v$*$JxGuEi~Lw+Wr*)Q`&h8W)I3oe$&>#3JkQ*QqS>qLXao)Zw`5=zegCwt!1} zx?!QiE+~&u%O%ZMwMd)*i@JQvy*)M^w|{WwR97hWMf4Hr7v>(^j)Mx1aAFlc?|FaH zvRZ7@zZVb1-4NuhK^i}a(K<_) z!rx4Vu5C}SACXf9!)2c^cnnm>IPOFpBW{CEHJeH2XTZoo$G~v#6evzF$G7)R@#^`J z+<0CW>^|Wvv(~??TE1j2Jh&VR9j;UZ)f%J62Q@Z;_9K$3dH>_#_soRfsg7(bA>P!Z z`)0!8)?rZN{(=!R5Z7%-tsUoJ-s9Qq@7q+;!H4Yjf{_Yowcr@xV=A-kk6=9WIhy5C z=DL*(LN5j5B>Y|lO`g)hJ}qJW{I3bm2i?+Ot{FOAb_!jLuT!=mF%)iWKMV4{hp-s| 
zXQ9RRKt?qvldj6@lVYHg#t=^X$uF)s0f8R1%<+>kD=C$6Ncm7Wc1d6GE?_ifAh5>+dzQOjJ?3wcQ8F~C=K@G?|G5*+NjMoYTs^%-}3ZG!ib{;KqA3*qW( zM~V6o=sLwU^%kD^y9WZcUaPOgW<7FIE0W$@6hBwqcku}<9UFxngXh7Q0c~Jw>}cp0 zxD3CoVX$@YOn7YYj(_;t4&BW>BvISrVvC`3<1{3H;>7a!sqHh;$uZPZ(@C2SQ%!Aw z9a?wr@B0{VEBFONzxCq6PhaN@$1Kk=K$;?L8k8e`&mVXEq>6MOB9rFv-OER@pZgLp zl`rELcdQVP&+_#$V9v4YxOMLf{M4?D@6bKPN3XpD!sjxKC3f_%#IC-zK1XC$5j(Su*c{ecOntq@PRKKyeU$&Z2Vs|Zdaz2M0`o**%? zlIb`CXYQB;-=n<+6V&TKZ|j{9Gy?=1%HF08fwTpw7r@u8hE0%T1&;&i51Z2JAzM?K zgIngbl<8dHa9A$Sbi9cl8vDR$zX`+~Yp8~f@HrZc19nq zV$^TYiV@r94l1;xn%m63`no8hr{bSh@nOHLl#fl1seBr}!AuheNqwQOG`zE&a^AOt zB5H2tx<<<2`ye-S-K$U?L0;oBMjj0=Da&DznHMt}ItfcPI@Y#PWOetYTSF?1r- zR!^er_J@mHB30Yt$*!5a`?vw@rt1|To|CRzYp;y*D?pz{{<2`plqH9N7#C`vzjqVo zD>6q(@g&ICEdbZZbYj_ks9UPZzOCU%9)-pG3>2zQX8CA4^+^@d<4M$Gvda!HunP)e zk?y5HddCtQ43MkloMxwn9YpF~V)7x_A;%KQ^Y~`Z%N#43gLnJ;oY;gtxNuzLF>KWl zl*SoGIIr4?_gGw6&-wTuhy`!#+~j8mNoAiO(K1j;OQX2PDbx#=2FjnMo3GMjOnd^YKNvt7~hVr=L>-fY% zQ@K5FI>%QV1cxf(SZSSsGP6S3B=2681;mArWZfP|cr3&Rl_Nm;Fa+jgI7%6Y!{G3W zP4(QjI{yMv&f>8dDupr*@6_m_e6ss~>>!7W{*%oP9pMI+MpR#$iBk^I|D(9zfaQyh zs)!BPkH8Qhy@h@Y9N=EkI>AA?r0;^YVOGQyBo2kU+wO7FA<;wp-p)ffWBDc8<3ZR> zDyq*oLKlcjrHV9rI@XUkCTJJ4nQ{lWWxat{`;ie-vPIVMNW6xt^ZO}BcJ2jQC)Ee7 zkF5Rv5YlWNBu{73?wPlx(7q#ou}7s|a|QA}hHhj(Xb zHn9~-PT8W!&DYK5fPGvVs4sj1#OMLf5Y?`9@4s>S^bPCbGAjtdaOQyuk{XwHE%SJ}ePm=h}9 z;-m7Gwa@BV*V@pozCOuAkm_Ec`2#oyqzd1Kko^ZlhQox{N#s8p zgYY|~%&)pqas`gvzYBs{gqA0&oz0!Tv>X?=)^(%#4j27FeTza%U#B!ingsy$vd~jm zDm~Uc#UPGB|x8Idt zH){h|?A>KH@7BivsBS4&St)c5Ks~@jj>?r!G17ZRd05r*=2A|+ATlPuoN-Ad{Fbx@ zCYHH_lW!x1o&%_cV0^NAy>=x0yNBecO<{Mo{NnZ}YPje< zVl2MPA&8Sl)Ylv7GElBS%DR-ZPjHhh7gRLUVv)T$(7X#L$Y#=m3l8XI5(&gmz#jaS zJGb(twF$u+HqqQ}_e0t1&vu*{`j027InylaG5QyKxZU(x%%|GNf?vOIT+l!T!QEv;Z*;e^pA&KUBHZI z-{iXk`iSFYG@nHBKVGNP1gqyB%%$h&H>}Sx|B|lgp)yhgzl+TH`%G)$Uz~ai)4xrC z@9R3anHM!?&H?R}I?LOf=3ffUlHgo#Perink3ISDb$WB<%8eM_)jk~FRkY#X0w%M` z?}}(opTY8ZB}jY0iGdaBF%;ZD+^z_IB^E@}!+U^uTx2u$%tc3a+V4ISn*IKhClbRj z>IdG`=oONIz5-_d6?pN-tq&tEpI#KSimZlfDlC|BEBB!`0hj 
zmL(NBG!rZbr$;oTJUo}riWrR6#wN*=_534evi|< zLq(Z|Sm-()aQ1@9@A{zN)#XcCpzsVabL`T_9hUh&U`f9{QZ#M> z?|3HyO_MG|*wqh|?Ku}LF6Pcbepjg{gZS9Jy!^Ii|c zsZhKx0#+CHK$#yK|avvYmPdXkKy_-`+V7 zjmBIdPjR7{-x)D8s-IN`daNZj-a&KdxzN>Z25S*!sEE%(h_7b<`PM-E{J;5Jgc04( zNRfcnTkEe$KjJp15i~x4tprKVEEji+}^3SO7@J~?6P-f`1j@)Og!HS{J&IVuZ^k9 zaQJ2RuVZ_>V6DTHZiNt&x1A-tFyxP#wUFAq?t!b%^u>-XC-4t0Cs@Gl*?6M_vE?gNzutc0_xGQJ%F#o9@yo93a(d7H znBTrNT(h*0c6WS%+9O@ipw1fx_)L-XM--~{>Y_K~c8Fnl6BTY<;sx%u&0y>WTlC#!F7;{K zQZYPl41LBpL;mA3_@jH3&-Brj&6=Ait6%zJrsYzwEpP=JqoWWP-ID)%vr1KL*B$*9 z6@!Pofa|sI%Y#ncg?FdSq)(nd_{usX)i~TD7ibTIwb7p;r=bOYeK(7DJ<~y=+k=T@O(7RPx{VJj@|_A8#jagvo_$9QMIh~ z&L})r=cvj#yqUe~eSuz=nZ~aiuv3m^X~Rw1e)!Qh4R+qIV&OMA%zkDeZHiirqQ0sA z^B?A0$@}ArV2r<>vc0B7F7r}X4ISmlwzs93?wvjOA(Uk|RY>;ay? z+o98o>&%^IYWMeh@U+=(JkmzOe)k6{2d~an)$IGo-dq^Ze5yiKYoizQMO*fuUAC$C zJ)}8XV(J5Rpx^W44;->*jG{V>hF?AijZe_)Xzg1oe{43gAF)4KdXqGvJ@{>mks|g{ z)NXHjzueq6{jkrJF_3X?0v~iF8)sTpxSn}4h1F~f<_UwYfNn}>#ka`CRr0+GW1|`? zc@rg`a=sWU8m-~SLYjkX)Dw0y>lZA3(g`QO@4#Ak-^5deez?76FRr_j1Yyo!`4yUx z@)Hg?KcjJd9n(4RQ$0Gvl+t@RRo=^NXSCusZ7)FcgS{cm>WwPtf;*@!55)W*>f&5* z=%`}18MExADmM#x`NV@zdn6k9lVZ#d%H@@xRq*iDJ_Z_RaNy@)nbsE~w|>LfnHyo0 z>27etO0mvZx=2?^a@7TPw7JI*>c_yXVTD}l`T-U53SzAbGeB*pKYQ)p8-%u4$DHDCKipw$b+m!@T5iAR z0+I${Hk4!B#j|kfV4|oq7~dihyI))i#wOdv-XX0MSEf&fkZsRdSih|h-7$Tgj?on!oCg*Ro(9d!!C!((zh{D{7sYuv)&?RRqR0x_yf&)Z3NR@f!H~( zgazNqA`PvBPumXT_A%c0%LgQ?6{XFaj_io%Lq>kVI@tWiE)xu;v!iq+QHPU;`vKKI zGry)-&8SKfL~2J&QxJop~9wnzrTj%+2U4e#ye_ym&|Z?~DXIs14lvRy=zj4FP@kas(`g_ZRzs#kFlukP z#mmi4uqSPvaM!8laiGmXNS>;tc$HTI)ugNDG9B!?d56$?iFBRqx#Y-5H}F^4R!+V_ z`((!(pYXz+D;$u#k(V61gi#gqG0gF{YJJ#n((D|VXLJs1Yi^_OY-_pGC4V$}Zy*iS zT?CawMoKx|T~YWStqmSOxr+%cCavJ=qdGx_;SN>cs$DqzI^e>Fn`m7#n7Cis6f?Y$ z+yzn}1Oe49`}*!BlGX}e;-@WY*@;;mxc=#=>uL;;q+K6G!%h>LY zGl6Q6^gu`2vT~p3DXI%2XOiW&X$WZ+FmmlsMqUNFOO2EsU954L}5pb~R6k85 zI%ae?`KLO}kAkE2K8}qqa4X~ekatZR7VqDH)U!Z8%l%gvN(&pRDZ-~mT#RRfhFIrn z|4f9tZHM8rmo~!ZkUR%wMzj^$p_&)02hn|EnUWF?>%+>ReeqLH8Y%pkk#8xgcG?m# 
zne^J#7Fri}hOM1l)d0>)jm~--)PdI96I-}!f>;GB6iV=;Zh`tfoiwy_kxvnKh zT3641FH`;^X)CYR+KA+7&|T|beg8z|t7D?BX~l4|rg(1j6SFzR7wddOA2la$NIbMe;3hul4@pJeptFZD

*HCS?#G~#Cd4{PZ*6U)I zTUaunDgRuRC%B53SqE)yt`t5BEf<}Dhs&??WAaJ9W_S~bec4zE)}YrPYELlIZ;(eC zIM+bQ=`&C!^M? z!jtv`>A2)~U^?V&uV9m`FY)_jkBBQ2>>bb;#~&JjF@rs@#(k`6OPVuglh9`AcBh(t zE;p>mWyFr64um(0<7200UXiU<-h%-SMMypjLL(`=@CkGF{Kvl>Z*$5A{F7&Zio6UL z_kGC)>z}00D$$-`Wbq+>%`%>KGbrK{zn9>k<#E_xeKK2P@DNC+xc!r8;`3Ozu`n9c z+eC`B7R*3w8-ynNl5xaGQ$;W=t`|IN`xf70Mu- zG!^=8$RdrI1?$H<0o4a5KF%dxL&_0+ir!W_)*nF6>5_5oKK5%e3%$(3f!3Nk_PNW6 zXIWivA^h5!gUM63V)~+KNFGZ)x|vbW@ji|+d^UN26)P^HsAUiN1FV~NhehrQLBU(e z14kic3fypds9+eXsc0s2QEhocsXcQf|Ire>%>wT3rN^0w=CNr&-UH;x3b7pCeiH)s z%Y3-tvUi~gP!?k#h1ed$!`4$(P}ZtH zPRdGnbzC`6en7ACN!TD$s?P_vEK(>pSjv?{Zpp+Td|LBN01YQx@z{mDv>9Cb=ZxzP zs>4+;SMW&M!VaLdl5lSW@01=$KKK<25AB9KMjAkDPis~o7#)`0(^tg)B$#GE=@lCo z@~g9oxC$+A(R{*qB&XaA7J)xl$k`eY8p+2+*2KFD5}?X0p3%PZ z(kln?@cX{?d^I7i8(eJ%#FAk^EDx6!Z=&^TDZNZthpGEUt9n1^jn4*GB3(!I>7&rh z0OCJpp*Yu!h&-e|#cSs_=aOStdn2)=IGXb{<{-_SkKP>ucugG)5* zGYICcQNF~VR-AlD)IOYC_!h6PJS}D!%4&Oq-17zjf-g9+Brl7J09AE4)t#l_L>1*2 zxyZhQO#94y4n9D=ph;kMdnUv#%fp)4Bbl^vBNzJEY41Maqvgm#hE@lh$4QFwl*-Pcr6pg|9(D=z*)a_Qq z#D3pVUxGBlAoltMg?=yK6Za6gM@4yE?5T%$uA2jSM$12DRS(x zo|LbGgf>e{rx($Cr4vPdWhbAVMcQ*l*-ORe|7BE{*lU=c(0W)E?n!e9bENE#l$9A} z15SD<7#8#Lzj0#|J=X9@g5WAVf5yknj^3PZCB-p1CuLG(q#QjY7X>4@k97pf7_7BL zG55^@z_0)t9{p;g&{P;R@i!-Z;l$63_>|S`TgXY5fcO<|r;ZnF z#Xo#skCA({(ebe*%|@Cj3u+e7yeb5K&Z^9%wZW|=h7#pL>20-Beg0{`r=Mikcc^P^ zMW#3o%3}?rQ7HpKx7&0atabyQ+ZpU>n zqOXBM9D!nU}Kj zJPGy$|3l(Jr00Oz9+#ozmHx{9rcEWW2Q;HW;k`@uOu?N;J#l!4(dhP97qT-Bk|ylK z)S}@MaV?KJG8{?YVB@iLd^>Of3sBt<8cFj(dp7^6Em97Hqgf$rOQ0-fg7tF?F^8eK z5m$v40C8u%hI|^UBQ0T-Aso7v(7=rlq{vM(A{|$cMyKj50gwUCPP5Ma+A`|73&EoiO!rA`8+i!i$mK18I#+8l(_gg2lOZlmYfrPtR9qE+&|p$0f*^ z+-e*Ut3%b#d1C(LmOJS*arAH?E$94+XFV_Mygwbur-(~xdGjtS>X|rw(K8n2I0|Vd z$Z75dg1@J}J5|qzG(VNfYCXk_2Z+x=@B`(l`kXmiHx?^fW(!S$IWa?FeBB$M8KTfG z@E`V%W)&GoGcx5z>`J8h4VIN0#}l!SkoJ~l63_X2h5mtlnZ{Ry;Ov@()mkrb}1XARFCa z8tnUdlV|$0QN-sU#FzNnP5*seLR|O%_ir?{6c6%${4&0Mjy;<34x)+cO8W=m+I4kbOIK^wTn=AkJw1>a42ENxQLD!d?;bEpRxD;f==AD~a)G>OWU;I62 
z@o^)5y7`f>C`)B&hNCc}iyFoi?2x}b-Or`K>u~Brdu4P&Eq4o@FMCydRXOz3R+?Wp zpR=Yo3~h$eYjk<;Xn1X!>i7OjsBhE}Zs|0}q_O@`apXKEt{j7V-fQFXYq>aORuwOM zSSi!-C~x1d0S%WsU}xzME6;V5=9^{W&F2Uc_Fm)F?U%7_EuN}e@9gGIUz(v^=W*C( ziU$;g*kMN&!+w_sszzL$i9S0raoXOEI5j!~*kvuYtz{Vx(;W#vZng%EHUX;bPCGF# z)rgKi)y-KRi_4!J!PAdBLfw+9IBQ%C{}n$Lcc1vch8bo+V9!7DHQU`>tm)Vf>Qbs_ zA~y2R;IZ-vx65XCfS+m23lp~R5kC&VuBXL3?eB6JO>@sKt-{$n^_x86@@gFYcauuj z+D$RNLVq(TtQzz7rogf0=Na7>XRHch-PH`(q=XY}bMLqE`bj^~S-FH}*NnN{Ts38V z+D=$(5Q!-(L(n|m1m~IbqO}+%c~vZfl$F{F?Jq>M55!ZIOm^Se8O0jf^>tQ)Hfl%{ zZiIk&`cc+*-a&dF>kyo}sf~Q9lH<$1S&$p@4fBt0209+dJ#Q^7etiM2Sf4?sr#6aC zpWQ4rY8v!g7zG=qyoBhN_B{J|8SC3`EC%*mgPVKLg{7y~U^6XCICJa?0^Gk4tD0Mf=?$6gLW-fD+`~` zfTOKPV3QZ?u~cU)yi=P19fxWv?^?K`{}3HWcZ_7T2M}FQ3Hiqxsc2pCh? zx4SD``Sz0~Pq@!ATL*z%w_-?fzb4iyU)tk{H@~F;t(Ed@N}kNNPsU*4mfkaq;BB*V zv^u3NRYcANeOoiWrBxlX$Ta1bRK0n-)f;itt3zzMS21pk+Xd;G{ox?-`bWzc(DX^> zO7t1}y!lBuw___boRtrY+cdxroi*V}WDEqPU&SkZcID2TwTFB9ZGqYSJ4o*xOu*4$ zAC5a%32ve9neT=yD0?zQrdq+a-sf41v;A4Brxl!iW{%#E>F=wg?L?OX7qQMm-$oNevHe;&KTJ%_iDB3c+p>tkBuK=)L6W55|0 zu9*y34NPGL4pu(3(uToC&YS zN5JhrI@HIbNfScw=!9@=YGRHN8Y%qD*KN3Y-EMYdY&>{1>IzMIv{0r-8{i7O!miN! zgTgEev2mwtJo_vT?Y^U1c^|)Ustx5wRa2-triIIMA7ImWkHB}_3r^1s8a=9c%p)C%G>Dnz zI*aFm6XsKnuBiVXgPux#)LDEK^zppt2 zyI#dIGcA0}qJjDb{{~*+lc#^>_jiv$Na-kPENaSkm!|<~1>SibkIx4^VWL*3=P~hg z1I4!IT@Y({?cQ3_vvQzgmUewP3y)5z{w>+eU_op2be|nb7w9soeJ2!}5EyR68ce^=3%%a+xB`8g6ylwGVWJ~^exu;z70+Z^ zdmdSp1J{g>kY-QfMnA2vgX>Yyn!1bja4_1Aq}PQ{rGxcsUtSUU0)lNmvYNU$7+Ydb z=U9uq;`Jnd1Nuzey>4vPy>0k|xJ77;+K8#JtMrY$#7GU2qN0&Bm5~N=Zi4c|A`kqR32UviHi!9;G6sK@!O-t7KE1*YlKBBxEF%6|%RFk$FE~ z-PiZJ?mzGQ{&ijN>*LdSpYwW-$9TTac^yWWA@CHvm+gjvgPT0N_g@Ha0yMC%{s5j^ z6N@6|Z=Lor!VgZ^f!_{H68+_|G;SQDV{&mWPos6^?%OxO7mYZ{?1mczw3HbGhCTUI}|6*Jv99h<*3lFd>t!!+CeeA&|Z zIL*&YK7QdHWIG%u%=^ovlLwJvAf3M64y`iRqxkQg-4kUI+kQ4VN{8DM@UnR?9(+6w z1;z{R+%xZhcpp$MCSXptdbp*^O#N}&7$|R$UGF*AElR|TO&22JEjN7}0n0a@WxicT z$key!zPDBpz5CgCJy@?kjOoV`kuU@_M;(K#KVOmJtC}2r&MUJzqgBv6*s-CrbXey! 
zr+S6g{!JL=Sf#q7d`*XLThsZ&rF8GWDN_TR^JbZxH-KZ~0}Y@?@2Y{#8I9LE~J9g`nEaGFZ|MSY{EPBWN`>&D0M zO9QO(TZR6`^1{blV{kP-pK6Gs4!OYm2VJ?Q?=2j0wWYea+!Ux^p;w>YAYv)bo3JPU zT)cK&tu=dPt%{mVETPX@zBa6@i;}^OfocT(1GQO6_uX|`!W8$x(U&F4nXr7ExZHVQlO0|G$qGil{kF-yt~n`A{{f zx)10MZ6F6-^nkF6JHAK@!3yQC+N9X=V) zjH`l(rF%JPGd|~O4RL!0@k_a)(S1I62i``CiF(DLH_kG3L+jP9Ncm=&6S9auq7+&S z5Kdrk$jAH(I|*+~*^14_uz}B0X3$j|X1f}~keZV~Sj~w$gofl46ZP@xEX)`yscV@E zeS5{^=N=qKF-;KoBU3-|X{7-11sj~D~-|1Lma#c5b@ z`i(?2rV0%y;^yPF1TfE7rP@JzuOOAahhK`T!F<7N{EyO=s~&X$Wu(< zw$9+D%*|R~;3G7WPhqI-YOd9<6J#|h=YLZU0&zZ0=rTs$jq#je&dF4pd=MvB8goGjL!28b_VHXl#@HtkAK$-zYr@Kl5KPm5?Yi2mq z#f|z1x;)#&KE0N~wf%Z9-S`^zbXh5#AFfIpUVLLZu3hm!r|vSj@yONcn1_+j9f%iEK`vIoB}3JDrLr zq4~LJF7#iPr?tGN3+-!kHymw7JdxZCckpX}FVVg*r;%_F8#SNCsUH=Q%gH_pys{4B zz4~0_#1)FaTeiYa?S>C}ZPoVA#<31}hrmqd9XQS4Bi_P~BA24C`J5j<5b zX@6z5?r}cVHw#5Cv~hXKy{B~&K1H@$WF*%({l|U0U*NuWV;N~imFf-+>#MjZ^#g;w z*`)KEVA$q=9;DM?Z9q07ZO*3IUc;9b&pCM+JU2KP7F@c=5!-{j;g z;Nhg1q`7Nh_SakZ`CX3kXXGjrcu74A9gGb@=-j6Zwj)n)?W(;m-+X05rtt&`kf_}MxQ@N$F^{GIav zNl#+MTYbsdDGO7tm?NDNBkX{csk=G72k)Zx7XAcl&$m{a+_=fwnH}Og_cT#!&u0h^ zDARdLH=hFRpSGTKx1BO7YOJzSaAOZEYZgOb_%5 znKBXly-gu~;bahAVP4Y+-r~VVcrzi^P2 zjr!4W8@}w=Q)nsX%v#DDewPb>ik^2HV(Q-OB5r82IZkM86k4Xda4d9Ndqf&cv#k~h zOF?+C$lkPn#;?&#_(#%8kXuj)y@Kt9MyHy*gub4B{DMyi>7tX^!8l&To%Wt8##p^u zSoU@#*iYJnB2L6zbzY2m8I#N!@HRbsh2~>|1Is@6gUx1r>^*ifBOk~M`>lX7$0)un z`zW#v52S5n`h1(Z1BCAUx7Ac4?!{i=;V3+EN%I;><>5GGx|ZzMGMIE{A}>F8o%C`S z+!O4N^D~J-8G~hgzQG0$U$% zY$-j6%aeXCUdFyu6=H0o36z&|m~-Phj346*ek~v1ol9_B? z2xsf5mbnEI6;t+_`&q*QnvT$9j1 z6?_%hoAyL)c$v{!IMovq>mv=$#qU!ONqV|fNU>mxkE*z(QML_m9(iUG}HJo#dvnWEP>U^Z1Y`67*1S#n~~lXvjtY{fAa!736GeF>$r$Lup}*n zv~oL@`WEX?>Ba~%q5HzK%)ixDT(zJV2<%!Mtv8gY^C@u@F0D}0984jpmj8Xv-1U&6Kntg5hyob>Q6vRqRXvk>BF zX1?G(;pyD`@Eze8bZ;LYH}@pp3uEA;^*VOys*K{8f&>^oW$h`9>KLB) z@FuM~tM1qf^GETWflEQen0im8_`(#|`;wfJqtN@p*@>n3Rx<{YE{(ufhErM1B7Md6 z%Og%4syrC~TdE#?jf=kOaQ-XY`eDXgS^y{gAbb|vvCP+V+NehCU+@PcUBovZo(*Mh zD@AXs!`?@dhb6yN-KWk2kv^gsPh{Pk;knB*zC7PRKHMtPgU$g-+j7EN)+%(R{50S? 
z3qG!|I*w>2XV;r7=3tmP2-RZxyf3E3IBb6DB#0RmeL*x8&02SvN1UiH^&eu6YR?q92ar*+KN$rrH$p>E`m<0B7=^WsK^Y(Od+HgGf^@ zz*W&F1izD?Si;XQ-wMu7ZOMP1MDp>lztucWGcHcr28-wF$y26B^M0LLA?XS^+Wi3| zELST|48|k+`>?O2j1$*s&^?~@Nu$*hc+R5TgSo842(HAVFa2QY?rxI(BwLj<0FW+_ z4~*1jG#ixut#zh2&k@>#-H*A%;@cSW#pkkt=9}s`-^Gk{iYoL4#ex~UnvJAMq%Bq3 z#9S($u!;#@7}&Bgkk4j9U(xIgm0dn~yonZ0>VBEiTA^l=C4a`Ah1K83or1 zZ8$M3IiIizqWl@od+RPA{%@F=d&)FZ0O~`qOmI<6u?CeS4tw~Wgjkc z@@OHi_+*cgRoVX|R;8Bg&63dM_gto9AGo&w(vMW5Szem!Pym>$;gu^=xE==~`);lT&^7h+BFe6mCw6#hd)_khBO-nstcLs$QU(y3r3B0%D;ClQ4#as;dv4Y zZ>c$|gTOLX+yf!L#C;L}`}hBz8}a|;c@g5XjQ`zRA>Qx*&)3JJ_4s#ZXL^kcb5Eu$POX+cS#?$N$wGLxHK8>ZhgY+0@isv zym|t+*bIi@4es-zgLC*_p^_Fk)GUJrm4*&Si!$0_*f^$A?nIuTwC z*uzF0Z3qk9=U|_!d9;7_en!Ut=Sv+SdWQlry-JYIK`Q9)0ta%laOMzAh&`SO4VHz% zd!s+n27THS^ul$;wBc2hQ@zlB>1J*9lPYR4OaE!H!(wDi_J~G-7$q< z^0^D|eeBh}M&`KpPbF6Q>Z5+p4cJ5X%^!T03tA>4!NG6>JDGpegli|w*gbMwfLWICb%2w zsRk)Kae2yB=x=yb+0?F#tvF~XuPyDZOe*}NBy0`G?3LE?&Eaow$p1Rtl{ZG@2D7XF=5+1zQ&?yJ1d3m&xFey0^(L|WCp5j&98Pvx3Hj1Fc;>hW_58AV;q0dBg*gEI);3aq zKMDa_2me<372O~7f{fGQbdITbhObqqu9(Q*(U9@^hik^-v^YbIc->I_Rn!#z?YoVo z>OGdeP*=WpWj$Nj;2;)`y2&VSn7pm6Oy?qUgmG6svQg79aNQ6tC!ZiQl#whn>P2s@^*zVBL>$HfZx5zPrmL{CDD)vL}c< z>FX%qOB0pf|B98}v9lU7-L#apSKV7J5NQc+9(xVO8q znC?aR_1PM?J2rcLh=c9H)d66$3X?O|e zEl8Gh%A7=8kg%N2`KzSmJp~OidO_v$AXwKD38eCm;5W7x%$E${Pgojr<$pt;M&}`lT?sEGs z^fHaaHfLw^xGQrZw6iZ9iY>-=wgK|%IWgc@`4EiP+d|5JJyGyWRrgOgp>GRGugi4l zk=X&98WN6+7OK*z$!);m-b;4&3C(A6Eal2JdnKwNoY~2O^{m|vTJAlVX~O_aYjgx# z#FlcZTS?$1@rcNE9Z!xKmW&OSeS+eWAa=^P5Hd2(z^~7{XeRK06>Tp;${}hwwvws7 zsIIs3_46wDTnB5|)NTL}hO?#@ZiA`)NjzeI1ejMQ!0?aCuj4nl{aP;gfDt~Ut=VJ# zc6)Pm$?s>PZ=jjwTqfeOdZ2|IXluqa#_g3pX4T+}%{STAVcuw|6^03uM8Ejc7foJV@tc+1@Bykhxg=;SR%#XS6I!AChH(?we~JxzOC!H(+EZ3wUsi`h z0(&HiyJ~p-roc?kEKMuIL2tFgeRB`$b6ng+M6 zXTY4q`>VU*tM-hk@4s=<2>8fALoHd;22XjKvKr|t)xn^nw*Wz2ep)`zR@~*V;yhI+ik-;P3Q^98#L?izwAOf*C{(FMclanT`ZEJ z{O(#9t-A{h){NoFBk$v*pWRvfh!ZG!;n(rOD0aP#dD}inv81@Yfst9$IrTM|b*y9!!u5fkVnO$j%;3c(6-assTnAJ_;DG(iq53vZ 
zF4aa6bpMsXTGD*Wm^0UxiDTKoWw9uIYOnU*)DVs2!#Hbo5+*P8;gnC@#ue@zcNUBn zRdd1>Q3w21-Y!o4g!ebpz_6$PvAlNy(A9r8^a$w;T8?S>U(ajYzw;98=#+$Jz0$C6 z@?lQvu8Y&z+qVR!g8QFC3iTbQ_s5hMmN4T>|2mu{Y=JIqCBdygIuUZo>r{28Ynu+# zqGSJKbdC8ZSnBkdWzbAW9Cy(|S=H_nfnPJ%fYF~2w0N`z?%i`!DgS&yRiLcN3#CO< zw}9^bvO0V|`&HuIYB~b_JtHi~u@4!L287%bX6miQgK*)lGTBlgr$keY$J%B>bxM)1*6Z&pp zM+XB6JSU!4Dr>jEi&ObHn&z#f(=n`%8<1AR8rO~Br#+KiZ;2aU)ae?51BA`6>vsn} zrQ0^r9jmFH(m{L90@QF4!=vdrtGBlsO0d#aSmaio( z#J|zAp}eM@M7ibN?&=HPK;Z}Uy1bS&FHIuN?!ZO=eLT9Fa5NvUe`<@QSvmC{EcnG# z(jv@K>l+ZJsebQv;>re27+qikgjq-!0^dp(!mIY2W!Y)T^4^IoX428T(U<8t6~uWR zblTR{2x)~nTo*c_@yC2|&gz`24T1U;W;E>rr$(%%{=2{~HZK*rp09H2qFjF#2`zu@ zL(-1Wd`tmSf7I!imm01DE7*6-hU%WMP9QX1=Eu(Hxy3{s_jeE6oNEgH&|j!O;XB^2^x%))nW>}i zdUz+u<486payR&ywZo{;cC51LMDj2Vg=T`e+H}ov zr*vi5(*>A#W)vo15ROp~&BGI>9m$(rgVb@>a@f!cCir@?RVUgTHx7y8xWGm4LkD=mJ_p(-_b3Ye zEby{=%61U_w&1-9@Z|e^YV>@G)(Rm>7t!MFK~5Mf5vNGq?ZZ)cvOt&&#rIp#xm7BH zv;G^_n6P7#YV2Q*#Bumw(`q!H&P9$PyYCOSKR8jEe(MFlO(Rm$uspa$F_7~o zt`tX4mQ>GMqTG;nxPXM4>JEG=bR>89FNHLRrYvGT*I^e5?5Fk1#2fr*w_hl{LBn4L zGU;j2!;EwQccA&kh=BuPW_KUqP55}r^~&D1Q-bK(u5 z3n6)17EErk0d6-mkwq@4&YAhf#yCSO5&Qp|&GeiuU`bd#dC={>NOd83&m1ZEgpQ}9 z3JqVW<)wU>+mRCn0r4S_J`%V}`ldbUx+bdqep}KC1t5+cXuFMc?{I$R)Jzn)DLZzb zAO5i$w8vj#q#Jqm%3V@}*?k$sR}%OlFTMleS-EvQP4ShkC4HqHG$5Z(`?zdWfix7} zpY<2}8@j2YSBblkxTlVHY;I`*c{ACh!!b@g!6)uLCUiL8twW-t?`6T!EcXmO1L#LN zHd>ef!p~EGia0@ZQZwR^M_}4yKBh;wQNFtH%ZXR`^yiK&;G_vlERJW5>E5o!M!Vrx z;bOeIsyTeSVT9W*J;qCOYdF0(YR~x(It5u`{bt9I<{Je9^MzNPQ|YWyoypIq7BS~j zp#Wp_Fh8IH-1M>%{R6_^dmbC7h8#OgTE7opqj^)Awc1#|kU;xBh1H|EKqZ*XbCk)` zV#qO9wtnXX;k`g$XV0du>ij|Nq%quGeTZWsZR-3o%|2Ll@&|NT%a_H zaL4oRIzlV3&KVnNFBLPSHDSNqfzW(VDkJSL5e}hRJx~%@OS2A%G+;jA4fWv%Nnof) z(oz(jH*EZJu+%r^{ELh310?Iad_I2`2om&?74=TH)6nQZvsU zshc^JJ~$)MtU;_%jvC}EkG)}nLfhQ&mRO|Beye} zNieEwTzmH&i!RvBLYqC}f!^WNYYX|i3O_t^>Mg(Ba5J{^K1sS|H4?rf#SO{-@x2!3 zIQe%*x?XUDe1>ZS)r~@W9C{3nXRZ+%Y@_u-)cs(@$)mv0Zp(1cfmAH6SPnTmwAH7- zrb#sCPzVP>)U#_u1dw;eI3+?fiC664)c8EpCti6ebg@_84+7~qPClOITtzT+`AHD-Q@j`{;=yQ!h8s+4fZ_r)$CQaj 
z_&U85+8gRD>l7BnM4o488F9nYS}5jyE>1F->Pa~S zkz4AUT&g=L+U}=PUtq+u*R-B5JP0*}X94PQBrX+rilk*YodYu-wiJmsIlW}k7*yYD zf%G!XcXOz}8_Tz?s)1%{jAmyTHBnn8t%O2b8KhXr#owMHX*d-AA*jR=UFV!t2DolP znola^*Jbi3Ky{1N$xft68O9#B6SEV-{vXQi&$p0t315467OxJU%0iNQPs`B^>i&wI7rWQl>58(!ODuXx>_JV#RR9*e1nOZ_eZr2Ty%80Fh{|xgFa&rr?8b?_qfJZ~TDi7i?~p1U$hQ2i|_nUzB%-0*A%euEiPX*xCzH zX1B*<-&gS~u8H{B>nZD<+!uGWp9&Q%(_mm@9o6R?D7W-2pv&-~e5k_}z5o{CTsyi3 z13?glfzWKAgO*^l@6J_KH@2xTLi zAHi2+gD~yYPr3$E;{UxEj`kI8c_X*0u*vb9)WmQT@0hWT@3AapMusEF!z+%oWB z5>_mmj@y3fsJ)Nvgzn4BnEB?-%>2hhzUFOTb|}PDesDD&y_Yw_4@hK3v{{tJJbb>98u)rhz&qGMVCThp- z8}VeNh59|QKl9)z;Ay!NwKB8WdjI9{%lsg_IJ&<4@4yaLGwde2F1^G@479{s2a~yYuO~G(VC~{I{HskkyECQ?ifRja;Ql%IdsPSi zuk%cZ8ekxIY+ZnD%pZAC@ysUNklr>zqD;IqfH7Hd#HFOHRJD>!q%yO^ETO@TY7HsX>R zd$~oM3OIkgjdbqBP1daIA>#jMIJVSQU7Fp6Us&=KlUDWf=(cq_vySYhJ|1zDEoi!h z@7j0}SC%*8wG|9nZN3kA3B!@Kea3g+ZJyumWLI@`ZByRhAWB^5@`>;i*kIMqTaH<12#;a>xys38SSkC<0WfktbJ>+_u4Aq=-yK&6aYSy6T zdR|((7K@rUL#@}3km>^_DE*{OdhdDIz5T4@@K4tKd;pXce*;lN^8=#slgS#$tbY^^ z+h;+=U|n_jrwlCUd4)}`r?A6~g5Y46ix^|GT{^4V9OpNs{nD?b;_vYf_~S55)pd)x zyzwQqET~wFnL*Bjk4EGz% zPws(+5qbD=)L$-o^LNoC%yYS}toxy%-W$_M*87&iBlc|pS~u%v+MB(|eT;t{_v5g4 zqtW$7EahS}_wC>V_X9d2^)QsBr@(x-j&wbYE}EOaT(@ke_-WzeofDDr#Hhw)sz<`Y7%+MK5=#y*rF|!l!>naq zd~diu=A5%pf3G?QNl(gI@_`+=Fl;Bt7n{iy4?h!%>SL4a^|*Cuj%RKD6cqUPc!V3= zoHG+Iy_|+!c`yvRZOSeznII1us$zcQQ$YEIj#?Qku+>_KdK<~xoy^UzDgVVoGmYe% zaozccj+U}M?k9Y)2LF*Ju&4eah@5$ViT6y2ZJ-K%;6wL7&bnG0-K`JI7`zvW7bxGo z(TipRA*HWKpU3cNPH`SP+tYPD(Z$lBdB(Uh_aqoyvggjTFXx}wwGgQ0*yQ5Nto?)Q z5Ha(EG_&PtpyOk9m(_UY`a8la4Vm5p%iaI*MLYAr_OgqdJGqc~gumxoZsbD6!+EHA zwwT>H)dac?ISraUUgO@je)7E@CmGeRyrgC?h#ud=cB0^_EjPM~xN++jy|E&-K#Bj- z7DuEE2EswA?W>R-@)*V*?#~^cI4Z$Ku1tR`Qw>Alh4YvlGL~(Ae-o{$G?4fLE=G-mX&;`lz2R|e zQO!)q*s)((JL;zLps5dk@u`tIclt}9`eW~P8bXuMG~&HXAWmd=A3Edn&_g0W>a+OX zQ2$gP?rmC6ItZC5s~=qMH2IA6pF+JRCDbh;CUC+d}s#5kFaXnR&4#H7xY_mSo9G8=5MAFMsX{f zu24DsDkI!r9j48LwtDCIje}Jn_&Ta%KCrCrNSKSN$68od<;pF7SjahhuVTSUCP&Pi zBa6CwWwr+on9heW-NKPL1jY~R$@gA7!!(yQfi-VSnZQJ$1s3h}5cSBYet7@MP5Jew 
zm(gU!Rz^KSKG#lq(DXcr{v=!lObb#8$9X`>c(v;FK}MLw#zeWpu7yK!!BImz-@K|0 zH{KsV3nKTqYnrOHiP|c~K_Wg@ZWOnd4-QFVmkMK;`$oFIz9=8J2H(T{LoNb`U{>lz z=j}2TSj;WyPHG{Yy!ehsGjtdr?%LOglWow z)6=PjB7n|Q;GED$e3I|%{PUgKvyruxd}>@3UQ;@-EldA{;n#ae+kVEt0{9NZD@%;2IYPB+NKO}$d!n?KDj^NRTU&F=)3D&{7hFx&1I{@xx1A#`7~ ze#9zv;Y2J@Pm)dx$J;mC(cbk3Fw-&;Kg={xhYg$ry%#OQCmQ>Jjt_))vh!?z@HSn| zZ#w0%G7E`QU!wRv9iRP}-w9sknu3PAJqV1CXv26~_X3K`K>jE88}yqz64JEF1dcOL zODna^LcycQO8MD$J@AC;2k{j_Dytt!+RR1%I>j9i-YS6I!xa#ixw3p6C%wR^R+S0q zi{W|vYWO z)7OU+Y6+wGLe%liJTKuc7rH8EZvZ2$Dho|#QB{JYQuI`>k2#X|)eA_+qHD7*bE;ox zaH_sIFT$A#NII6z6@!Vq!%CF8Jn6T-h9AM#YM_Bk3$=KI$F6 zed?Yv*uxf|2M#BWJT7`4C{B1<)xhEJOh^NSvAI`2BIyiXYyOdurjZpLX?Nj6RGyH;vQB3} z)z#ZbxflKe^mh&c58fP3KOfKHzJ$V!;v>X8wzAL^q`iPNBYvCzlM}|O#kTht@iu<( z+Kw|)@3D@ruAy=HYZSU<=BXUEE^-E(8|aUvhv}Ygio+EBe~RGgXEp0#!HB*{dVw@$ zGJ8M36Ft60G2#!Q_1GYKUcAuZA`Z$t6-BRT+clC&7qixhDz9Dy-{6#azatMQ?Ks;M@A@W9(w2D+qa95bS(DVYXgQgXOOh9d}MDl zCXQb$z27`bDfrt$j%dS?G%lErng#7@M{@ znq=!DVGg#QKTRe7MA+4U^kOiQ-%_R(Hvr-T)DC!A=f|iHc=gV7B>s^}a{#RY-@m1M zyiA1TOsfXTZs4@YF%gRL}Z-<;km41ZR_W+lZ+@ zuOs;(B#p;9CK= z!Z0gOwfem1Dd-;Z3yZq_hxhCrBWWmU)t}u+-Z+0ljtt8#t%uq#jb+l~bw0IV+%7Cg zYmCF!Z{VbnNn2fnUNbs^^4V0>CT_T##D!lV?1IUwn}eunsvFg((IT z)FbNIcW0?b_p_F7pHm)dScJ|L@Cc_pZZzmV)oTh8POH@Ob^77bR9}g3j#J$r9aH2- zu8#1NOBY-a91Y~ZK^->*q=`fFOtzgwolQe=)RL~4=a)&G(3k4wu;(1@$DDM495rAA z5Po4Zv%BzD?-n=jXdt*jBF`@74x|U8u*Yv_MtV^22BfuqE%YnDGiCtv>i-ZQ;y+GW zNq97ca>#{eh`oFlCn-_T{d56{_)^@FJU#dxn=I-C$cKt^r#NrrCO)BfDQX)E9U|6R zPd|>dnl8OxFDA6?ve4dwBZ2felBVH=*TUQI2B*U7Gy~}a{1Y1{30+NI1tWA`NX^WK zvqk|n_+rH;h5SQ)!1L3L{>_>Nt_H&MIxnGn&s%6mMw*+K*lxs`H^cFecM*&6$|cPe zfo{RHugpvXZoRvyN`4fmZb@gG;oR3fRq}~)>?K)@;=d{Qe{9bZxpZ_$kQr< zV^^K8#Tw^AM!F5{^Mgb$v#e8Ox?|X=d zIgi#}v81m(`EE9ulV^o*^Os6gQz)>wWA`RXM2rt3Y~pKltkt6C$>cS1vG9T;m@hty z8;pv z!C=8L5Tn_LTSpeM+aKz4n&q(Wr@d4YpHGbDENW4=1ta^MEXwE?QcfZClwmD)Bi4i`HIpGjC3B0I?pPyK?85fKwh0Qls=I#D^ zjL)4u6(&EOMDvdnpt%sva$hgCJRgm;cA5NVUe@nB!t=9-o2=@%)z5ks`(cy~pQnBxAkln+H||ry?Q)XD z|HE?UO`er!jYw}81GehY=y 
zElmnRiXn`dI!@pk$FvGsn})obdt>J#kEnlk30&}`87^2AxboqhE{k~=r}yJDvy(_0 zv1c{9;-y-9=>g$BCtibA$B#0?7S#O~3=Mvr!XecUVe6J~!S6`3UyRz^LndshTLXDg z;U|IeE;Ku7-lg1Qcs%xgV~AotBRIQ%bqL&W6bm^r>FMWC$(94is;it@}@8CE7ckX2W4Qt*>CWJmx{A$PM~vNN7>2!DOSl>agyzO zNPR}1<7i)FosR{;6D@oGx%oL(_ShTfcX+JZJn*;>gVOeR7`M&|$K2cicPCxq>o(b` z!KY7iHf|W)8{Z8UuhGU69?$YSpRB+ED=oR3Srh)%MH^Q98qckB%31cCI397)2ONSQ zvR6m9qw;7l&~bRw&OYFOtr@iIMZD}_iT~!FhsC`vGV!gPK4u){$}9}x`WC!gV-dv-zU`uTWjdOfx9>UO++s5RTm*5H|qHGHV+0W4mc&KmFP zE>G)d!*n{VhKru%u(hzI?DMJyrw*#Gp4+=y8C7)vp67RkR_57IX4ybK)xLyx@0SX- z``^I$%+(O@cMsV(#94=CQ_QmBUdE-ICO2)2Nv$IF=eTTP0Um$KMtE=KOT!Tu~6 z!!Pt&EQgduV&76dHC%+xG$?*Nk16b*{4lM89rJ89`xOUN1#_yNF zN2_49{(zCFT{{Rr2S!NiH*ADO(MA}MVI)&bAt!bMTz`-cY2kNqmS!^2@8i_wm+|31 z9o5<85=(ui3!(QC_%Qno)DIK5h~2PvW1u``3`R!(RE>)P0wg($7Dbx5GX@`$=0iZ}}Wi%M#U#SQl>HmB&1%yn=k|?fg{VT~an(yLc?s z0LRT41qO$n;gWHd*z@TS^qBq`-tMnY&$7~2ss8y4CoT2;g!Wi)vOgsKt3tgS72J5+ z;e1**6r_HKy??xMkmXl)b20>O6fN^{+lqZSqF} ztJypMah}xs3e}lP>tb@3ThQkV?K^9f$4>-u+|ZVuSGKVRDYp1Bxd;Oy-Q+Ld$6@Um zPxxLEi2T<keY}l|#yOmjp0))&!4vePgjR4vLyAg#UbQn}TF2eW zT-5`Yco%?(M?2j|@U+2pUiQycqL|}_iFdj4blS`Qs5u-R8-j!rc=${sy!NydR==g| zw)UiAkbXD$(U+IF@xeT{>edTM@3kF#^2kPj8u&P}8Bkw>#rHgx(POjcW9Jt5`N^k9q0jbUcldo&JNgad8< zvJNd`#Q#gfpPd)Sr1L(*33I7mtfBB&FPZ9!)_R|do?f@K0=zn1h9%x!vcMI)Gl>c; zGKXbwi2YRE)YDG6JgMt?NUaWM<{lcVK~YBxO)Ep|qrqTmE6e^zPAY^mjPeERmIh%= zVk`Ana0R=WcM&cP&1Sa`WnkfTd(su1U~D{xwjD=8QdeE@3SEz{bDHMY6qYcmH+WXB zsoe9iKVDp*C#SqNmkE2oaace8p!0F!``$3@+DcA+%(PlD!n?UZ|MsMH;bNzcZ0x*e zKsC=sR7?0b>^2Box!leT^^S)z>OapGz1#C!TVEjM8qOYQtI|12V`%>a;^{os*6sL} zMqZS^Xj<0*B&^4e=H>iZbw?nbf=%ci_`mb};bf~xK$x#c6CcyMRZjKHikDi*!?fDr z#2)>5LFyv>o$AG|yq%0gT0Vsty8F1iU=!42%)>c<>ocl(@pnRJu@x;O*njsrr<#Jv z3>o1SKffjq-jCfU>Ik=C0_^s8Q+rD2)uoxQQ(Tr%UG;G z;}|v@G6diGS7Ys?t)#uo3GbXRwWzJy;ny(;IckSr-}}O)!M4iDLC3Mf{Xsyn6&M08 zeRkriin)w%fp`93pc1Dj?|YW9xx1@5od^87w-y|hxbtNeS1?oSA(H+i?(|nF|J-v; zcl@SH*UxUL0OEalNt+koIP$iXs4+=86h9NXBs?RY*++YNwO}8D(~)!<--eCB(nwqW zkwp`Vl5#v$ybU&**uY8mQTQW^u0d-tAE-anHp>g}rWeE;1 
z{R&0iAJ~KmUBPpYwcv8Xyv~fURp6!6QfCfWze(nV&!p8_s?poKp_@gTBJ|p{re`7h zjlMj)ek>C2l0NDOrGDBf)g`p;e+;PCP+HxU5l^w>!!E!z8-18W`>~`;e}Lk{oaSy5 ze2KL7{1*C)1s~%ox=#3E?N5l@TgeU2IYDBp6Qr;ADxU*2>gulHXici`xj?y=H5-QU z&F}1Sm{tzn>GzZO{MS}sJ-5tXEc6N|jVI~{2X=o5Gd|rxz1Nv+m`)cYZk4Q$K43F+ z+d#jAwtQyUY-l#*7Q6nylj_A#Ig{N9uf7UKfw7PEbE&^L7^UYD&#xsc^+4TI|0L4t z^1S6Az*Orw7ucEHI01$R(0#y(foQyP7%#J2!#8%WFLWjT%p6~bEpyd8)R<(2RL5NS zhUqSixnAi7+&x*F7k8Vi2)-E{)LNyUg;p$98nZYI_g25byBX^wszbOiu1u-xdz~(@ zV_r7Aoz_DR$VlX4;~9i3d`CJpk2Nl|R!w`P1NE(x*V-PZnBQkv$$v2BBb;bYctY5HDKvub zal8m!<3@>dVm=L3xWNY^VF^6@qoe*E_LYls7B!a8-JW-wT^~FA>Ht+E=$gHH$`oDDzVK!1Na;j5Ud9N0hjN2hiH#mY6Uq%>> zk!O!F`yCFrcEux{Fd>%-&owrum25JT_V!;{ru5Dm02#KYpt3kfa-fmz?)$Vk1Y)E`jEcO>bl%;lOYYY@HQHMlKXWoQM011OK^cU%Tdn*gtm=7B)($xHPv3hj-< zUR(jw9u{&~j44uH>M*}uuM})KAsR?KGU85lX{a-lxaA{x1x&O=iL`@Cngs+toS!ld zVK@p;MH<0UJ{12}YAD;OW#6(?>i2gMBxV^gTGc^pUG|Tq$l| zIOFDbT+(I=KHIm9(cePM(22sIh`NVQ_5R?t^dr!ep0#O|-W41+$H9=66F~Sh;#Jga zv)v(;c6WEz&lUtF#6Sf_6cq#&0~L|8*S$qC5Cgl#4(!6N z_gc(6Gv6QYeBU3>e9Sl&oU`{@_kGpbYb`ZCz(5zgZXNUYhNA+tZ`3S0jmtU}HNm+I ztzg0vGWth&8e_R-MdX{qE{Yk6WwkkD5@cM7VrIWwE++JXsy{I(Vu)5~|68SB%{oo} zxWR!78a%wLx@ij~+qPC7NGnaa!&}H!X$x|E1GJ|V*ustC zCG9>mSS~8IQ-aSKd4olHg{__OdM=oB;iS@5&8@GCK=+|TX zpmQ$xqzW|g{3r&esCgk|2?{HfRy910EK&ZVuEF1!#^bwkPkN;spEG=PGgWIG)1;7z z`^A_$v0etBJMLS`5q^)LT?63Du^(3Ead`cGz>5UsQ;q@~2%1jVe{O0j4IUg6NK+nE zq_I5<5PYF!{(u!M2Am?*tI)8dY$ zF7%S`T6M|s-ji&L%kPi3^Y>>p&DJMBtC~{0&fqtRpW$cscac%nN$-WVbI4TmQmtZT zoeD9$?!!JxY8P%*c@6lV)U}tge^1aDz`Vb5S!4<;-+E_AlALDjhj&_|a8E4T*ocpv zT`T%NZ3$m_Rc0O9K>-JC8MO{>{40Vc*EbgxtETV-*X2ept{0~I=0|QE*qxKJCb32!$^t5bklSi@<>VIr{Nnmb+@-@*9QGI`ohlH~Hx84e{~M42~@w!Cm$1=JgQl{fs$-&r?2&woWWf?ZdE^Gi#E7 z=cUp8T?zi4L)siMpc_>0!xNSw#m`5o?t>rlXXmbGwSB&4l%ApH1-qC^hgIv@lC!h> zDqm0Oo~h8^n^YF1bdY#{E1Xg;xXOox8pESh;-6c(QijbQ?M*;MskF;m-y+J3@#GU{ zm2Q)H+#gVHJd3>h-FiyzQd-u@eUeols#*p<@#p#*d3o4g={RsQue)|raWHM|Y_WWC zh(pd*la+Ucj;Bs%d|8d-nJi787&~~Ke4H`?}eI)u!DlLVcQ<)AtYgbB~8VY@z zDrST>VAXH$M(pP$mj<)Sz&gcl6ki{@XV 
z?fSTd?|=Qs@P$T))z^4_-#n(%x=590n#lZAW(Q5V5*VFNSNB-{U0L(zw%d#xKmxOo zHT@HX_D`Z=arP>gQGQwnM+oJ2RA!*+&YGJU++qezKFBYU_T~6}rQNPxb_Sny0_Ty+ zVTH<(&?BsJP27jk$|jMZohk8Dm%3%n;oX)+U0?jemsyuCQkUbqea#QI7f{&C>{{+%6 zBaXt)cnZbQcwZg(z}0%5(BNwc{lf5VoLOoB@}hXoy*$4}j={KJWITIy$E3Xai7}7l zp3`M?^avv}BaAPy=;aJw&pmoXsG6dfT<7KOHe(s{Vafh@k(WLxLznL)u&QrMR5)P_ zCHjz{!Hk*vg6U9CL3RHc+;zS)_fPFlDwjnKFmh3qT|j@7*O3Kl$cHtIH8-o3EF5;- zRbwn}F0V(J1N_VvG3a^;Gp4);H@r|;28B49*Y@wBfz}iQ*$16mQpZ&59@LlmJVE{W z|F%z)`aI(Q>$3=a%m4c8YBtU~i>;(*oGC5qr4G^7l-l~5xN7c>=4ks&(Y5z9_L){#jB0&E8;9p3sxDeYTjoevcIQ;~nefGu@-SKs zZSumf@jb{-zb8{)kHw7t=W36v^4M!B-iE%>d>SV5iB*^6{Mt{s)esl@-Y&hW=5mR~49g|UEZ@nCuKY(ma+Rluzg_V8eHEd1K17Sl+nTSNJ?2!qyymJ^ zt!18~C5_vQN^tn79spiMeoo{_~pBsbi%Q$KETn=?D2jQPujAE`;6^J z?`{tk&(mfT=Q>PJYtQ50LFdJ?F*t|h)i*JJ7S5A7>L$;$DxrH7bu-uO_#%gRUgSxA z%F99VdGztwBY5)gBeb>6TfL1U zoqyyyrX-hZ)Jt12cRd%kjMnq#OJYm6`BXbAPz>?xqI zkEgRYU9GBPEW9?qyrH#Pe@`xZI+XfU93ut|{w_xPtf7<=&nV>Bay%EY$5O0PBZ>M` zW3Jo%+RP)XPu84fHPfx3!|3?etMs^32mWOVKR=a73BNg62DEj}8T%+POfI@rOj>XEkoCipFqfNoTc6$Z?Oa3p z7gm#6-^-wOtnIaT9eXX4}}B;pH9)%)5SRSP81@vy9{0=cX-t7s!XFo9a=% zJ^0+VWKIvxPamiZ&ys7z=Zgzj&(~b<`&Bpox%Fnx>(Hr3cLe8ZC|>_~FFHQiLKQAvX5SMVc;>j}TDE&dzCW`e6MjDusEaGvza6nX!sc(!aabsUQGZ@%x9tz$#T8tq8!(|XZ&$7<$} zvd3hXj{WHB(PXM$CXXmRdk)vmvZs!b9@eV4&rzO#&qVd4)u@MJW~q1aw0mX^`n0k# zeRB^xBnAiQ}oYsNMP2bNo^Sj8=S6z5#-(lQr<~myGS4;GH zy@QA5kEPBHtJC{2Ep^l<-`tf#Q|ry*(N{O|FV7Ejb5~RL8|W-EeB5RCnrEow&@`^u z5TL%VB@u>kJ+i7Q|cj{^rRWzKbzs3Y%5W1ro-Ii*0>Lg+8*S zeph(W-~cs7)b`U~%YrL% zb1`egWKwt#a?DPRGi5)-{=hFgB73aHF3uq$vPwN&aZ&pvBV>i{TXN>Let09CFP2QR zb`Pap34VMxu(rOiLoHJ8i@K&vx0>wp?l>R3QzDX7}hoZ;(x1#LM z53;|Xn?C*QN0MJ$Y1zK0oI1Sev4VE|Ix5=K?#JFILOAGQDYkZeFIxt+;oZ$7bCdL3D4@&~G3&4U*AI;A-mHnnAA zYSE2nrRmDVeOiN;$7qJzdj5hn0^_rvsDG0&A7Zr{^O7jETB@b`lr6l;K1Z50!I+wt7kQmCDyw0ZkTId1e~Zct@YGHOfaNsr;$qjYfHa@laA1Ft+6 zCF{+9%TpT{mcBo38!xBD(4Ca`r21XeJGfk4^Sw`NMn-D;8|2qj&Dn@%GT(>1rcZJ% zk^Qi?`D|HPBd~j-XfBtD?ZaCD`(m}f=ikVaWe>~uuZrlsx>sk^gBaRn1C_WkTziJ6 z=H`FjPW?~)G8$BIA^basXGKvB&sIV{oj{lu0oZhlquwUp6 
z0ygX5c`?7KGu>`nnFf&6AzU7|7v4PQKb94m_jJTrJ zTTzacX82{XH`gY`ioCRDXx{o_poWcQ4F+S5jk-eabZ8BaN%uME5SBzQ>1o zX>K#`hT%q@qT!6|)~--T zZNT%!ibp5pb*6grC&^a}i1a`QdYjRPzWJTdW-o56Rj%(~wrF{UvSY5%kZ;)*)Q6tj zEx)OFWK-8W?C|@V=vbqe4xATC2Mp_9*X-@SL@e7pnfn>jH;I zvf@gG3D1k$=;q4O`t)^mHEplx(q(EeX8^W?d>%4~OalAXV!jzY zL@j&fA=JCl#gsTYH<>5R7 z2pUsDD~UsqE%@={{CcIBodVic)et}M8bG)|xznwJ!b8=Y^s((h8uG^hKlc`WZ>K|- zpg-s5V71bu3gdxY^soPA#VLB;4Ufg$l50uLH+oFE;Y^uc0k1^K%y~TXxgmWf%u;=+ zU+c7)oeS&8z>bLzHD78g{r1Z9qwdS_a>Mvh&?p&yI#GHR-7a@MZ%x0ZbjaasXbdiJ zeTJ-iq%8p>O}9Ja;6Jue*62beaEJ#+bm!5j<3+Pt+tqs${DA;ZXwNSc(GO4CD3{4VVhs;2Db9n^w#G715()4 z8$x5Z_tR3`ODN70=u7Sp{EKcc3ggZTTtxEn_WUzGh87mSDwMB!zUHuOJF6p={&9j= z?C(vo=6#L+yGKDAtC}O~ESE~BfHTSI`w9^nzY_mHSnFBv2m>3TLw#sxvG-IhqAk@P zWionH;(NG9t|+#VqZQ<>6~l#H`iXpZCU$$?#@UX+@a44 z3BHGR-mOIomo4W*k*UJJf+T2e`uQo0p~tA@ldSm`BuIX&$i3 z$az2TD#KIYJ&s9eIRo!Qo&yF-xvz$Krv=Lemfx)IWA44%R&cB07c()gzxICLVS+a_ z&sFv(rGwO%gUe)e)v-!zY0z;ZtiRBeZw)M%Dy*T8gsQ7YJ!6!vV&Ik11vETs9nboB zh4SulVui11o})O*_o}qLMhtIA%bq@D<>{cWN$K0_Nkw&x7eR|E|H|*joZy|~9OVo* zXY*CVvvhK+5Byb%(feTtzprOw+J<1Cqktg{9U+V7slvx%(#dy0EGd86vGhaj=J3w& z9Hq=}el7L9yVlb43#;I>uENt4lapOevBHY2ZGOlK1w9BFixSJPQgtEWFGRhoMX85% z57j-A20wdCs6E#mQ4o^~k_z3({PTl}*$R~TuX2tK_#%wo^ z7QDdlvqzzE2w8%TaWSh;DW}J+6SQrupUMX$xWNk^(>Pu20`=X0jzwZ46Iz)wj*QLW zL)<6Ls#S~Ob)kE0RgR;t?-I=q+ZU4PH=~Ntm!Lg`(hwMHUi+sM?`Uy=!Qm451UMih zM{mQA)2P3#a`eUYO9f=HSQ}(L=LF_lmT$X^z-Ow?`J1;dbVLYE?|f7Mr@6uQ44J$< zUE#O#SwiVBXcC#!FomG;l)uI2Va@2+Yk_AS&NDI!m0cjW8KUlmKYVc~=mh9+D>q91 z3xD7YZMstNq2e8-<>m7ZCvx=Mi(Brrba@lyB^h%<+qXnoQSuyQV>;_@iW;h0u@KbFvd}*wb2r z*39wa9c!#%#nJEAn_|c}YoNP{Cn$5^t`1F*fvd+|A ztn|_3XL*$W)PWBo>gZmv1p7)td#HI5s23A+t34UtlY$3zlE6oX|E429p7OoBUIsEh z>2bapynw6Obzl(H%)d#7Ii<@>T}LG=@4L|_l7V|BvNHm=vC5awGdg^_KxSjPn*BrN z9j0gTR|2|AxNhz%flaE1RX+>ow6j{X5l?uxjFix=qD#ly5*k*!Kl3nsm_9%Ty9djl zjj<{VG4{nqgWI#^s#T|i%C9g_68)$R&0m#~MQPBH0-2M6+GON~^uQ*9{hTYAXJ4ODp@{B>1$-Ml91 zQGVXY+Of$2ta>{9<~vfgp3t-cUthaQK(laqeg?k$$LL$-FZF%*o?29GIldS`rDuQB 
zpux0~nLiEX4N(&eOr)JrJyb?RBlZ}UqZ{k7gV%oSxi($yafnmdqwyB|GC=ES{fsA6 zqI55%6A3@h8}cfa|O<8vUdLdk8t2;zmLy zr!oc|ngm|miK6mWU}#!;|7w-WHY74Zz5Ge5>Q#0)y%y)WcV}c>M(C6V=###jHt(jc z@~f$d$Dx5Wsq#R0AF;F86vEgWN(%#c1vxanusG7}CTET-t2ubp;S#;- z5b%oBcqt4nP|O?GHjD_YaU^)=AUBONd7DdDQdtmmn$m-&TJxa3)MpX@ zvmQeIUG3APKATW$Bh>e(&m;cd)E5q#b9lj>>&0eW@HI+oZIofM{2?t5%(VZC)$+-d|d`r5!nMT_Hz)*hc8WDqW{D z1-v+TMmWE2{FuAF_2n&-%Hr%}XP)Vm%RGH7gu71ds4pnJoM*^(6m;=39p9f@cdKYG zoSIkT+ug&o-M1gm$YEYIZERkVr?9Qwuf}uU{CgSoI2}f#>m8EM(mu*bl_y#n%{{no2e6xy>KmuvmjaH|+d=kC-uF@N+m>j`IUTgNxA z->3LXZMe(725Nk$_urMg=V)=NxG9;v23F9=9LZxIFF07}lRUhl>V2bk@k_YeIi2G6 z<~O^2-6W1hKP0<)E%^N9()y||7Fy!zK@)~L(!82h;pDZ7N{oDN_$+%*?N+s-Yn2*f z981zXV>_LxR#0DbEsZP{=3#v(^B1fy$|@d1d%aGQQ>9*8{*jJa(8+Vs*eAmU;`I$0tX9FwVlWvYY z!w>oga$fDOuw7h+j#aNG&}VXd&yVUF%&X0V1g>4{;WmTa-r=mep}l47wlQ?qZ4Ff! zUWQ{XPo&cAH&OE)m8smz)zk>Su}PPH{3Oo?yjL&YJvLpQzr2yNmpnJRwSQn#ZIvbx zUcHu$CSS2ED2;PP4`&LGD+TnauRGKAUCYJixbE`3-#TtJ5l{crn?o2!T2cEJt$9;H zM~#z13bOF%5M1X0a-DdaTfWcW8JlMb+u1(ISU0LMphAb9VouJ9vcFo$dV|`~rBRcO z?7)A(fkkY$y)WBd86+;fK1xlG+~AKHN9B{5`=n!s<6LRxd3spx37bo{Giq0FZTZ0% zJN^0L6uP$V0*$R!hFU}xl=C7la*N1db~<>Jam`e6?p9uQHb7kycfYtF@0-eYM;xt| zCY^=<@L>64pbeG%RGlBy@T2nGR?^$3A>5XH*nLkc?tC;_TsU;vn0Tm{>PcPI*OO2m zsulKBp6XlFY;dTUmcPIau^Bw+c)Nz_1oOk4MpcMIc zzN|HLSw)>XhX{-hwX_YCb++f#=au-!<+~Ne_-_>oyGG;L-(Ay&Gv-SUobZi*m262h z??$WnqT2E0f=Q2(eeDc*)M97R9C)wdqmX?rB81tR*$|U>vt?}hW*(? 
zl>+mDXWFq-^PTddeJJ>9A?1BhQfAkRRvbWuw;kl0zAk(}X_Nq_t9hqRy<>Sm+-CMI zbd22!b>Sb?XEFGZfUU-efEfA6xdS`OKH#26W7U*#6gg}GHPhpGyTe*p+Bm?t2EK46 zmp*?j)+aaFFGlr^5@A`NG|Ynp-;f%A#U)N&MdX{i*h8h=AMNm`!E#^KQ1)3ih3Xbq z%KaNH7rU3aDqhf?w-(~wpWgC}OTMhIH1fkJ;8`A;&>}_NygpdSU#lqH-i7Uc71e+LcyHMsUQI`jYhB9b zS9O8s`69*Fem^{2d-0A;2jO_B z7+D%NK(7=uBNtzU&X`G!CU++f?86Y3v`s>9NN^l^`!1v{lQz>Kw~=zpF;{`!0=5=b zW1&Ct+$7?kO?%IrFpbkPV&h zm`)GRm6v!;;Os$aazB-~?^rK@i(<|zcN#orgQ{sxcr`&3Uh$v#nG2b~4zu&&0`&bx zJ&7JPpdDCY>E1Ha81>5cX7!}r|22_QHWVG5FVTZKa3XcD?j2H z(CV~r>v7{je|r$rL=Q;ue2Wlw;BV(EQe`ezurR=q0dkGI6wO~QdQvHtW#J8@)s1DWzmfrD=P;cZvyr2dg zppS!giXYX#ko|H8eRJO@=*chA{FqG8K@6SA=m9bD%qp{gBRdmyPm7agGH^>^oK0vd zxqoF50>%lY)haz3PBq7e2-J;GT4Am}fuJ3YzBk<{{@D^4`ozxc6}E=2ob0YJkHA&v z3kSW};}fhfPT_RnZS|o=*3qn8VZe*3boucP_P@G~-B(WIyOGDVQCBiV%AUCt7?DT! zo;#AN_VbaaG*fG_$cvVwu2=7^W30(7Pm+k~V@hxXO~36z(Ag4oC|~z#sK!v=Fc81m z_K~0CuJD}C$N2K&Dq^{a0#9r;T3hDQ=Ds7eJmVkImM$GQrR!#bHsV3U2eab9r9T}= z-S{01ddNL!Ys=Mjr#7L1{_>{VtTg!Fx(vgH(74O($&v zJhA4`0rOtaguYex$Vnq+i9#(hjHbul)5hetg!!_bak?xE*Nvp>!HJYw0ek7-{%Us8 z6yNYv`)x|cTGc)6NvlOZ&bJxZu9tgR)ZCF7Lf%!k(e~vn=-jFAnyb%Rf!hMTr$sNSPKq0$4Mh88@v^`; zP3p)g*U!0UOqqAYfEMKVccp26_)dmC=7*`v_?A9ULNoKnFa1#vS3Wf zu|sqE@V6z0B<|!9i-Wb|zsE|{t$v|$rbyeaVH|L-4)plc*B7j+R^hz~-bE_i3N22k z6P|v1lfrDCvLT3}si?+`s_NR!7kgr9c*(;0(`P3se{Nj|H|6+Q*B}pqhveBK`-mBr zY)s|X+FV=8BLXTj>ITxz|zk3F4DtUz>XE)+rMVkxE11s)@ zPt~g&9z@VO3fD~KTPN)%F4a9z)wu!PB63$qQdlo4Sc9o`rYi;6_-LppVGi#iJZg{0 z@lfakT0dtX?{dyZuBA&DYvUdfyg2GCgl%W%Hb3=TC6nw*i{yuUpa&b%tHBup-h@ux zy(Qs+`EAsL9R06qdT~-VFKT^X3@FvW0EQ@BftR15bh_qYUQ(VyTj?50vEG$A-?8`{ zTmyc~4&RQWw)+7O@^TqGCkXu_y5!GbRr_M|0hOU}$28*bY$ryxLLF}7^L%R;^Ty|T zN0sjhT#!IxwzoGc)^O7g7^CTI?kE9|P0)SZXwGYi-qV45tbAf@-EMOC*!FVU>H(IX zJ6qC)^`H4&l?3`T+ns6*enVn&Jrnw!l*T}wKyQoAR`bfr!(u!bdd@6jk2Ru~vEFL< zU|M{xwhmuJsHN?<@FBT%aHa9O@o@0M9|2D!rVbn_1O9y{WDOebNn9_p zh1)L-g}1TQ?=)x$ukl-}d-ID$>FA#o?uodhipV{N5PBOqPi<~`q&GzbWC`@X(j%g+ zdm4E5wM6E?p*^=?pS)JA#uwQT!M{j&Y%gdz10J0|Crl8?NLYsU6u>i)w`H~lt!%=l z3FtHlFCltz@^i{Ue={n);QKG1(J 
zZpQr+lE5c<`QP@x^7wuqo;vU;dc;?UkCz4RbY;P1(U`7^Ij94F76`j6+fU%}v5 zz39Bt0$GK~DqdNQy;*w498zQPcvA!~%@fD3W(08W|BedfbMGyh#F!ItZ_!&|wl1x>Q1YnLV1F zCiqo>pEVtJcjWUO+tY?7N5z&dX9e&DYj%oI%AOy5Vf_r1S>)*068VF8XmWL4U%9jn z{l_oL>d+wVj0=a_oMhB*bVp@G1ieQ^2V#w{_{1Smlg-Ev&y*(RPP?!Va^(W%Sii5lF!Q0xTExZd zn{wD1UB|zxzB7S`60ge_v-_ATgTggYuIgQo5%g!J9gx*9JPXXxY6Xg4odeKOH!TnX^$JI>_nh$ zHMh|6df^q{3A`Ybuj;aLi-3QYTaTnEO~L0|BoG%_VNw1*&yRFM-%IFciM)=0?eL1& zH*BU(E(bOMudjOCn*e;&=@?V{%w^D3E;kj z{@`3q?4`9`4T)%+O+k2ZoSV#p?n(Mzm_Z~9=Z;$ zeZ|mD{D6`)mCO4UYR1U$Sgm(ZU;lGn#Q&_5P@h5k&)E_G&wZNy|J-~09K(54JcxYmWEe=Os2(J$qe zzlqlSse>%vpB!ic@rEG;dnyZ?y6ZwpMm`vjqF&yxlT7 z_c~gDI_9dyS?y*~xptnEyx}10wXr{3$CuRV*Gb|2u%tc;XQC9kxWs6>zq`aWbMMvt z`DC?+^gVYKx;Uth7<(+5CKf8ejaqNPGd7{zh>uX`%_*YWgH6)o@oe(`{)G#MYutKw zjF>p)JbiX+BsMSKg!PTbsqDh3Y#fc}&^6yUZCMm|Z#7oT?EgYqPG8^)-wSZpxL*3D z^_#hbdzNhbCYRoFYhnFcP(eOkwx{Pk{4;S}k{^?MH2*F>Kb6Qdk)j@WmriFRUA#@YUC+t?N7020^i@>5*hh4U=)f8fq70%`iTj~q6-tDfiI zb#CcbQxvnWDx6bY^zqLl*?byfz20{hZ|t)|I~@0w9+X(lBP$itdkpPpT=8B@Rq7>+ zRfpFI|Dd|GV#Z-{7VEuM7d=B?qDS%L8lF_D@Jb4*cbl45eM3i9x?+F7M_g;xXX;sV zCE2AuqQET!d20AwIbvo#z7y)rMbfaBPm3Dn$5j@s{5MClL_!8_IJwID%lDcbKHkOL z7&e5qef=X7c7#)^R*O&zdY1imuAkYC53D`QwxL&O-MS^T?WzM;cHd5q4{fAtk5$Abnf zkf<}B(cqodp!^QbSLYeYhNq$cvD1g~}S!Tn~I69-Pyx~o^&xU-U1WsjsjPqD8;x^A@TXDhw0=QaDc?`jTSpQ_E8 z-GSP^S;Ds_UgzT{)><)sv~QCSxxV_W^?90yevO_+=wb0MUkKq|xIvy4yl!1Cy=hNZ z{xGf?|E`zFG3R>A9|^W{Qm@7IDz<^)&?Z8pzn;JYr?e-JGA4BhbfOv?9YnrC>*dS4 z$o>YFwy0W)U6W}9y=Ghs%?jO1t%1W+sfu~$Y;n3Xdjhw?KK7nd zThmexoKH*>vo<$w$x6Kq$asOi`y|SoI8Ly@7WDJ zFs=x_czaaFCk`Ob71i{x4wK~Ej+Lms^D6f3U(xJyrh>u_>g+Sn3-!TE?Q_$e#-(&M z-seWG6u_h-F;gqhsi+@vwtbqyNDJ=QI9+5JX9U|(SckV-3HPz=xTL8*Y1(jV)ViGB zW5XK$+h;HBd9zI}xG|RT3wujGlBhY_yxc|N8n8dBD<|wIsaLs?+Z2^UME2o>+KS&U zL|+FA{G7sAEg+$~IVNC`ru93^Lx=j~OuFx~<=A%Aenc7m+UT*|U2zCkZ#@#dQWfLW z6m{|uYY6AFPrmQMI9fA#Zk=O9n_}g{!Z^(4>pj`*MJc^h=SO1ExMxNl7ptt*r~;!e z#U=vF zBIQO=ozng!)3Exzxb@2B3d1@1^H_x``l`dl=#Axo1sp4!Er-Rap|Nsn{*F{~{u-+1 z>u5GTcUV^Ty@fO5Jj4h8XxaKSKC=UsHu19X_h$bIjJ?&pPd$CHxl(rPeVBn;96R_b 
z0V^=}5n{yZ;-ZYdJN4e9tDZ16oGc=%?(D`jJ%Si~Art+U@xmI8a>=Uuz=jO$p?Q{6 zZ!}%|UKS7P#X+C)>SnG0OKkaEI_AfCwra8X@bETYjyfcP4JPoKf$>=1)Jywqlu=kI z%4`b~uKD_iy6>HJI%3a-CpwWwbR}vK)`T(7eBoXk4UOtc+cveOPWx9VtXFfz39j|1 zm1jYE?)F>k+**foeQ7FscUw+H{uLp`pLAy~#$gZk;VqrqfS z_n)`QMUlQ|gPgSw`{->>Vc;wF5Pd`$&vp34&8!FZqmWk_95}>YEU+z2&;JDz@JIC> z_`D|FiyUdbj4WrOPt4dBA0)1c^Ch2?;56P|yA=;zItzX0!`8N;V*2gIvdxB@*rTba zDZ5ppL4$sa7T3=y>|%Y=S1~HdLH`u*NaYpZ)-T(&4vk~X`ZPse7N9lj#N z=)$p98u(KxPIT~p%Lg|PWp!^W-JC7Zb>Q1)RLRUj zZqV>YiR8DWj0Q}n^rq``t`XWrhvqT|e6Pd1cjs_jj+OMz8(=R=B<~|e-vn>h?D~5#LTqu z5}E`0xRMS`P<14~Twb4pVWmFp7B8K=6yMRO#)CPw{1QUFGWeOx ze+%X04fSc$EkT$cd1mGn?l&iwUiNS!>RtLc^-H>H?f-5gU8gom-wW_Hr!UH)e;IAW zn!i5<^p*_3+G6xDe8Uo@!x?iTYHY-F+HWcm%$MeXCgg414$o17`z7_@nNK=1-pewn#2$vajx~?JaQ*hl~>!s#~iVTL@gv@Eh_;n=r=Q6Z%2>;9rRjSzB68Ma|_Kud7gG z_7;Y|Gq;Zp5;tnhr6>O5a`ar@DqU!6vo&<1%|J1JP?Ff*Ax?23M;~uSO2a>!xJn!= zo+zeon?`kNkK=pIYs(^O?P>6VDmr-9RJvd3xu>^7WseQTl>Vj1g}buSAEOqfvBK%_golI|b z?X}WKyM1(OL7;5xc^TSsvINhm>*oBu2J_m=*+%h>Hl#d7+8-|+-YMs2rq~XlW-qW7 z>YYaT9+`G(IvuYY4vpI00OrZq!9Acg-pWC9A80pk-xAPx3Ue*cTn2haCgk!qm3LCQ z@e$S$;2I3n0C4}EhF-*Z0-YFRZomhz@^4?itrYMd42(6uwA-Jf(bwj-HQ|rwY_U;I z2F9&IKj)XXpG+1nzg^?&Ggq+t!55UfShR?G^MYSGB@^_8;z8rZ+a~0)O4pQk#XN$) z9n5W;(x7XGvz$~A92ZDC{mygwktIy@kWfAJ*)7TIdDx!mQAQfUIzgcyG6vK%S}rIza;ZZ6u+@OH+H;VBHw0iEj0JGbsP z;1?O%gPhA=p($Cv7{0}Tm($zU>`B8u=T;cTBl>0t=okSHqN{Pn+%sx}qI;SQjjo+3 z9m$8|X3J_B9h7%a-%khDc2L-1D6bekbu^c4QkJYW_X}t(6BtOw_A>;GhOVlsKeSbz z<~2df;@5jVI;SG3EI|37=TCMj4=;g%44-NqKjCC1%o!u#^T|IbK})Z6kl-B@|MS?G zV?t?Mb$`*WbM)07@znOxX5A7Mj+ocX;^u3n%Y$!< zNV@YwL#+zT_Jag;={MjM#8sbga3`R}Le1n{M zZpUcLs6}n%v-siW@J9`fW&|&%n_na;-`TriW&P{jc+fbPAAu2yIjG%qZ^D)DyjDMUsWIds&Kf50sgEKLG za(t!2oOHV`%7dWyhQI?C(SZ-tu)1XA5YS5A;PSkjwM62cf*KXGkA==zAVwE!#lRv1 zzKMYE^lI`u5&SSYCqHQ7Wu@(bKeY)R$H}|)W2NE;jG?&E<{oPPgVM3gXPR?F>!$D; z!^G%XaT33id*W?6aIUC^+($-a?0`4_2kJMp8S(67|5%fKO}KSbZA8ZwOC zToOqAe!QaRR|GJ^@efl9VK$G~XhY+q&e z;rask4CVV!3};b1R(&bT-`y{7M);$~{gek_rKi4Jc2>GZ`a6}B%l{j}*Ta7c=xVQ_ 
zL-R7Uq_}sr1;&<4m=*S3;Y$m!rMzu*&^hR+$VRp(DfOrR{oB|Mr*U+%fs6Y0pSG@a$snOeG!J z7ULSk-~;e%Ej|$NUyXxA9}CQ>rm~7Ry^5>8BX}WF+T1A_&(eX9fmQG-PdR4)4D+q~ zQ^m*1PnyWTc$eoMu5+Xd!#_eBN63Cx8tE73YD({z&}sZC=rS^nmgbTM)xc*p=>3fo zd_1l$D-ZIx@FiKQ-Xet?8gdjG(Brwf5A$nEJD%|+f)u~NGnmNxayY5~DD1%!(UIPi z^;R{a@(dlENEhz8>vGRvuH4dJA{$`%cm2f6Lb}4j{P#~v;H<)I*{WnUe$eXwWmPvxUl=unb-?hChpLzFLK;D(r=br{&@Zi3fL^ekNu zUw~(HLl{|}5u0Z-=l|)BoMQ|7JB1T`l|;rOfM1LZhB^)yL0yACQQD*HWVf`i*z))? zb?A0MWe3O-i!l0`o|<(SenrA7>*Z6jm%}u2_)~_K;q?W}D4)sTdRe=2 z4*~B-$lyrf2eL7EmRu&d&V&z=;5~u7KzVZxujgc{wGQe_t&31=A=Kv&>PxMKQ2(bs zhfwP!)Mpdwa|-p@#sBTR2>kQ^`b%@F%UAZcqr;0+xmqEA?pVNyn%#Ax@3RW>v+ez< zM)(!}JLD2iIM7)P8(acje9@q`p^vEfmF>`fC1B zz?)CMTt;q}Q`qR&l=fKbQ2w8V%&q&dreN-s5`}s-d%ht=S5BawH8||wh|!?dh^s$<3!?|a{90*VYDzTT+|zLhwpXSMc(iH_)nJr z?%?Vn@w>Ku@e^(4C7g+{v@Q8Ps?6PnI8&h+*q^8IamM)Y-23zS@7WL@nU?M*akXPa-62W*;qq-dJsaz-$F7kWOYT?JmOkx`lU}<|P_K?PDfi`}TC1LC*xT(N zeN1UaTkV!x&Sf^E^`oj%?O}FQZ%lu#`=+OnG-((1!n;Lh9;~7NL))82_4Ix5!wrUt zL`51*A(B!Qz3bMyzqo!jylCLfHu7Y+qqPiMteb$b zb1h}t^jGlu@_MLh5r~CN{aMZ<1N3;BOLOuUwkCYQUwSK9FE1_i)r?aT%{zPxPvOyj z4b&>#{TNg^6t;eD3cJoH*4D$PlLK&uRReX^9*$JkxNUJyI5=w<``pYF=yPD8aW0Jq z>Agdy1xCT|CEZj5`E(n4wr|-iWmw1}JiV<>etgwmc-Sog%TwFQRGYluKnrL$vPKeP z;rRLn(mCUq-3Qq{@4D*I)ZX%+>4j47UPsXHSP|=xiP*GN96CEiu^xDj|LYwK8^-wI zGsB&v{ucg-74UDSo-$YJL&1SW6tA5b7mXut6@#VaLh)aux(2#H z#Vkz9^H0=Kc}S?ed|;eDjjM{22I}IgRVN;c`@V0)4$bG)f#mdt7uIRs(tX1Hn9~hLvUI9`Z&!-OQjl;1{lP` zyiK(C>me6R4sEDf=Y~SamqzlVi}#sP@I2m^?tkm@_YJts?*P+puExL5`Xh}2yemG* zjYc2WTyGSEci#pQ1{}d3@1r#|rtCwiuBubujir6;mDQ2^;6v(K*!QOit=%#NRoO{h z`$Hw66-`LH)bR*@tVedj|_q#}Z+b)TTJ079)mk zdWFHSjpao}Mgp&7&!ERRw{amKwsbf<%>vohgy7n~cr{cT4i4W2_b)l%XLYXPem55E zm!Ic70~@Ic_6D-Ry|2fA!p`>#@@rn0$~10J=j1ibc|#4mRlS*arH3!Su3iJPEhOdK z)^6&gPxMUsfY~^C&NIF*!V#xF{144nZQ)nW97A;3jE2jnFzbK-yy`U$mz7M!?^aW> zdR7#K4?ijDmYp#&koW9~N2?X8z+mawi3QkVMhflKkih!!NtpBV0-9~m<(ZfJqEdAq zza>9rMT^=KPB!7`?|yP)mWs{dt)PGR=}2{fXKXJ3oeOFjMB-N)+DC1O5w^bTEaz4` 
zfP3gWO~VCez+==dy3YF>obGV^cg+bR()IA}gtKs?ZLN8qimdIyaNUPQ6J*StEn)s^M7#X!A=%`tPYcBtX2AVuVLG7g#*nYTYBD&QU5dX zzTnt7P^`Uxht4xqhtKuLw}X!=^=&4h!-R04G2s_JWbm@$QAl-x?K_&to6~X`^$4T+ zR#N9!OUuvFdbJfxlq0zh*s_u*u-hdI4d3>`4w=BGj2i&!eoepxZ*O_cGJR2xP*+{V za&>0WEl$b&v-%EWTF-(i-L}|VcO%kg@S&S#(sPblN~3#+*^-J%!q*hoa^x=0O54G@ zJUfa{QtYTMys)Kj1C{>E9-h_^=g@QTZ9+w_$x{n^@UW5Vab{d&Bs_)8?)6~A%O|2% zSmjR}oa(v@l0zeL%l4MI?$=}3W0w!aOI)W@7;#@VC+vnTdTaO=JuSGA`3>nkaOF%_ zPU8qw(Psn($;3zExN!WosnB~I?K3^SH*w2-_WSE3JlebiUd>gZSAfbN^ge+NjF%!| z8oRS#ED{#7+_BN{`$i})=mr?MHx|~e8HXo7w&i&hO`*rWy|8B4Mu@v&3`L zT;hi`4sh$e6HFam!7SDv1H&5|vF*U#@@^Lg5I9klv5IRNG=#Jd1EA&ebeJFF!?Xid zBjGlb2F}27`W9@_2nT-f#V8QtZkv1oc0Cvaqx+3fZB5fT)u(3SD}|-~c*nQjs?X1w zIl#_-&D9LP1`j^$s`~YO&MptNhSS^DF{*8J9#Fyw|IoxX62r!2FrQNmRsEKu+kykIa6p6<9Qzj1OU-Hh-w(^gx~?~9k--4?IMM_H};q3g5Y z-t;P5Ts@CzLrY$-Wr3)F_9P{ft($XBbJi@2k&XbP(cM6+7~~)C8`okg=?}G#a#`X* zsJ}W~^s2PQTE!vLBY^%*%stj$U0>~b(Hewqv)7x1^nM^*Vkz<8S)+UQwR+^r;BvhD z?GIs6SE)x+ZI$LhrvDdnq3#+6jPQ&NeHO(XM;oi2w|k+z-cu#wTyGe8wLQ>lYdKbE zAi+mXPSe`|RM%bEI#IWru!MK-5RZ;aKOu1)^Iz9WCA_NT6~cbyJY@!>JaWWqKycI( zGegyC;u^N-(Lf~K1~K)z;Gz*(KpIr;q!TTR*QJkfg#{abFz-xDHh1j-E-<9)+*RNf zIhk-~9SnPY9Fn%BLh55(xiV@xQq5uX-w4<-!Vu$2Jj8M4M|GN@Ce^R6RxYG-y?+`> zufntV?&?qDkNn%uy0X@pom})qUd49y+BX%de;nqdE#bz-W`xUEMQ_5zS&mZc!Ohe@ zjyn`OR?-m#Kugqu-OBAyxNkWKPOO{IREq2~7dKn%g1kx2vSj~;myP;@#KWWw-*GRg z0g#3ky&;p{1v-8%G|phXUXWrq2F98gL5msfm{sgn*&Xz{$g*gcfu>q z+i=x^hkUWecGh|Ged5NgU`^{&nb9@axGP)5`=vwK4IpFaIS^wKq?(Y%*o;1pX4CzG zuS9>rIQ@9x5r8xp3T)Ip>>!(Uc?i@;q-(}OtLxFsZe>%s#J30o=C6nUuCC*jTj;sN zg41k-PZA~@WlDs}7`@3>eZ+b)(rdET;2x~vW2n$hyu4rnG#)=1)&~9G8Xs%ft+uA$ z1QsLB8*6_un@8qfWu(pXXbzb06fM5>l5MR@Iq4xFFM&O}xY04c5jtG_8&X~5Q+-M& zcdf3~fUDa)=aX(H6F#qpvk_6X_))mw6nmd*#jfwa&epCGHMjsY|@b zU~7o8@-2CS`OAJ(h9rFyfD74DqmgQQvIS)H##?!r=} zzEuUDcgxvWi}TUVFXG0V0hs=78Bm?E3#Ym_v~>IXP1aB=kT_nK9d9-Y#CHl$ zMi>uYGAlG|cF$nHCnV$2j1UxhL3}RpAvf_XrhPqhVSN35;Cp_&oIdd$r(IgQDxvsj>AV%5)2&b9HybAI-wwyFH53VPQ`2=tDb%xLxoW=^97R^^% 
zy(wj)2Z*bcSKsb&Qzu6#?|M$E)4DOQiuS;Z$TKN>t9eR)C)0p7Zq&DZ_c(V#_zo0cZUW-$fRE!3yN2&O~ zkxXzSVKXOgK+^u$C3pxvxOPngYL0{t2#~oFx-}k|*6D@PqCf=}lu{innL>kV$t)f;YE+^nzsD zYEHTz9<|X?`3*0s1ydkji&S64Wsw+Y^rn_KvW_UEiF?A@SG!0fn5fiuKt7wTu-wY2 zHf7QTSgA9Bk)PwA_a8%{&tLntN3Mj6+Tg?m%zw{4JVYBT7w-GT$y?!*Q`OMoeqD7m za;_gWgp&v2O3zS9;OeyFE!DoXj}7@WXfU${kcP+gAIOUm&!!mO;g`>?MdEy{>*yAzg$s?p7e}1@r+7d1iGJa1;S)?!IKt3FX7Vjr$~bz=9k|2A?Z&1`Q2Hi z`~w3+udubBR7g+`qR_vjB~P`JI>XM293Wg&Cp>K?{~4l(|Hhpg8Cnm2vcP`R@Aqe!zja%$vXl0Qbxy8+bbav z)^BLTJJIt>yU&)u`pJWlaF_kNbCdtRp#xiB8FAzt zpd3dDe>egnhCTxtW4`XTmMXj@&67&r0LTk->M88G>@t3Atq(OXw&8=`&ge9#g}@m` z=R|Xw#&$ip<4SW5`_$U)1(RQAWwc=2o^s~xcn^{8ehxf z4Q3`X8bej+?fSdCgwJ6);Z;mS*F_0GDL;Axji#4qL@q*}3P)WT$<3PXQ%Ex?1H+mN ztibP4eTA1}^qIWXn?XXWgZs3u>JNh}{K5DSbnj(nc)pg4Wcl(A7}u$BwWL^Z^Rg?#1IgCdtula1`azlj^K+blt$zEZ*#O$EX|p7O~G zYqYPCFY!?CX#M2NdY(thg3$N;Lq6T19cw*pgiQ6uL+e&@%Kj9>Rq{zXs?gAl$5-;0 z*it16ZVN2}$2aDR@l}7n?Ma@Jo_EjQL>f5>$@j?QSr};{%EZh_bI_Wk4()`lLE>N_ zoP#k*hZ%8$&{r@t*a0ZhA)HM>%Kn(}Ih2hc%`wLN1%kje(&k9|N;(m9m%L>KeScFF z<3>3g>U63rlb%s?9z|exkI^b|9CUOD;&0D1QAG|yS``U@naD6HH<3v{vXN7&F|zY9 zk)a^vKv*ulK+?;khvv%Ov_9N&rI>2|0~5X6DzFRTZzz1cFd1m$RU}QTAulhm6&`y| z;^eg$Wkf(djGZ_8gW!o?0Wy4vF{c{d2x*fK!*Zn`)%HfvUudjSZK>)}W2Eecce~n$ zi#*5K$*4ASB!32=PmiNs41p0(P66dwgipsXmDbD)UYad*BIRS-nf=@LqL+aDg-rPt z4;$AB-qVt~q`#Q(CsWGkI_~#H=x@pBk zjzF)MI=GHy#0{EkS`TbhlSFo&_EDkyK|a@N`2Y2pyI*puDUltamHs_W?}cSOy+r@P zyx`4j)q(M~KCXTp9oY7=H4uh#k$(su!?v8{>nppkl}B$#l;KL(VmAq1RRwq3S`9+N zbi!3xvQcmH>B(V0KABUVBnyr+-+39gubzaY=VZ#AVbkU5j4~G>eUT?}F~jB0LF9*hIk3}B*We{T? 
zH#Qq0`@93fA0A$~gONVajCGlZr0vn*eH>_ZX7FO|Pe?hIDxPx?fByGe#Q&a!_GziE#K9c^a(DBn)F>WzZW<4A*n!y>v!UqgVDw9kW9z#wWkhhN@6pIqcGwe5f1`ETsKNam@1BrTf{ZMqiaP(i5Iw;_A1R|1a9O!$>K9f5E6RPS$UEg$Z%u6Ay+s?WmI zswUtuB%FO5^)o*rU=|i8Hv@yG8(_?bNVtDwAMd<+3J#6d=8JR<*sO2eVd(RGI5lPg zj+^1l=sV;e(j5p}wGvj8WbnM%SCt(jf^q0DcQFp~>*(g=hWb$BF`1wBX(JzZwZ-MG+1Swa45#^kK7Xb`oqrwih_fr4 z%ec*Egcq=8kB8#gPUc?*8Bb%6(uZsClwbMRm6R_WBjW0=u47KUE>$(yy_idP@B!kIl8 zIE`uoS-PRTgAHQd1|v9MW1&`DruCAa>cPk0e9bp|8~JX}OlG_MGhgUf#v+tfs;=IaK@%<9}mjwTGmxCN@p{|SEfQug1NBTZ^-P?!XSQNqR zPaP1)t9EFx5;hsOhtTg^_`t|$3~@K%d)^HM=P^1m%`qlt=5uez0BipQc)G+B7RTwT z=~27#moz>F-8bbaC)JCvGo|+G3O|(pyx(1?>55qwAP?+vAIn1 z4?msEaPQgr7#(zl_NTIBd#X0F)hEY6hyJHv09_#;kx>KB)0c`~g29jbDtZg({{Em- zwR5S}O5xi7(eulmDXjUhi#W>9TGop*LGAzM!`ER4(IQa>>QVG;@soeJ-xJ+Bn9Ab3 zjqg9?p_7}U=6EK$4QM7)U5MIL55-1+<5@FYGy4`CYeDy^u0Uw8rZ+E+`^~>D>_q26 zUjD~B<$J(AuvJcSy@zpZs6L{N`3Bae*EnqHHdAWb)C@)a5k|5RPDg5MYizn6kMZBc zzv;I_fhT90J!0)<48dm8n(^K7rZo3W$U-aOX(yZhjD27bl}-v+p?F ztS>zF-VD2T?8mJ$dP2^hTc8&g3zFVsEVL*WwU7An7wRo&f;4{cCU6^2PvhX<;b2&z zNSV6_$(o_n{EwdjE3kI}!dgya2DvA1(C05x zyv?!jAZNz(0xVg=yheW_n{ch?@64 z`Hb(~6NXvxFJ73u0~Yfz){}IJc{^QneKd~~X5rMcQMfnlq7otd3ib}o?W`JzHR0iyPveOL+Y z`jj$$qYetp-|+T7csBA3JAG*eQZI19m(-`AUFiaS)@32_F}Aw)4E*I#9_FbZN8_@#S|((yqqTNRkXCZB3aTu~o$o9_hb zNwxZQChOsO0%jIz!R=Y^K^$|1!8)069`fBcG2;1JtnF}E8|(Z#2&OlzL~Y|uhkE#O z+6gI?pVct+l=Sk)L%w8zHgQx8{JrMIW39RCwF|@4 zIGaQKcBYd$VecF`{XCsGZXlnq!%m$NzZ;3Cak%u6`4k?6YxPX<*<5-ydRilu#s+At z_$5G}rQF6nZtopSbP;HR5%^}?LVO<>dAoT;SIZ?(5vtUxERHU(H_ctY>cI8RI z8_@MrDN+yOzlhUpZyG)0L!U)DiAAn|2!FGBVf()qVapp6nQEP<9?6h;O<4uhTcTFv zHX(`7-Pg#HtH zZZWT0+pBJKcY@xE8*u$eW3}Y3Bd)y>DR2#^jW1!BX7s>t`|c{?qS`00vrPDm7E2lM zU=LMjue`~dAu~4;JL*XAG`EgI`c5Lw=Y+>_c4=F|_n_O>PV^GIF0oMcR*WPq;lY!Z z>B_Hr--P7opD_EnGb^)c2alqf0F5g?-Q63u9yRbL-971b_EysiZje+J1`_p{2!g>Q*%3{p5a1n8AD6_8cfybQ(!+&$5ac0pP z9;4A`q)lbgIdbEe8Q379j{5hFF0j)N8F2;nn(`NioLCF|sE*Lj`TH%iiGQkD^Jsls zRNbG>`wWijQ%Kq%nY;V;hXy zOpQud&X=b2kTp^1EJm|YA&m)t{33ujM}Bi~C&v8rhL$TkW1rr$k#sg4ODJ~RSC8lQ 
zyP|}7(VlCrZSV~k%cldLX|#Uz!1W6iypUR-3B4pVz32f^pWFWCp}zA~?D(Juf1dtB zY0`c&%$b_SGyX+0(%%9{Wx^Ot`lL#Pm*`;^1_O?6#mn_S3*UfCQ*znwl?{Qs1dgD! z?3#LQa}9iOgt>IrlAq?TgSxTS(79eFkTxP+Sk3D?j)!xGi74hgX_PkZKGh6K&+^mH zZApg?rTYAaIe&bKTP{iiBO9xvRaBv6uRhpI=d+dc5|_SRdO~$=MOrl!Ni#El>vKR@ z%RBAprp%v7`yCs$sKtlY#cfdCw-`6*J7b5#Ax!A;FS`yg;%L$zA@aWPTsZ9LO&V8E z6`F%|A0}MzQ%KLLo#s1nIyPD09F2wIc77;~T1od_876{Gl#?oaMTf1upvfUysu?Z5 zY>f*{s@IlRPYGhAGjaE7AJRo1S@7|;vbygZE^&6pM>CDognG0d_=pdj@DhYBqjN?t z(;njZ;oZp+&E(bhIe8>5@MO4;6Q9!KwnE2>C#FBdAI*G0cpv*yHuBg~10+9yW+xiR z^jdgSxgEVvPNw}S#u9$UVrs`JibVk3%h+ND`n9~qM%HAYzMUcaRTzyGUVrkbCpd9t zck&Es!*QOt$uJeKy&6%gqe&}*OZPSTCrb7)&CqE=A7PV2bd7nEJ)91pHP3R+E5z+o z>z4!{iax^MZkJ)y*8sOaE*s!tFE2*=ND+G4$88e0fUjxMJAb2G!$&EF94-(a(Ogzqo#;#g%g3;#_ zCK`fA<07idxTB#y_z`^E zb`vKqChxsk=zVqmGe00bLVC_rJ>c1n#cnwx{00oNEhVkH4|^sJ<}}{?`&T^84RXyPeZ;6|&giO{Nvjk;g!qeA)=$=hSnvbA`y zG7%_`V6Qg&Nn5VnkVL+Kk6kfq1<-R!Epg(jNw_#}Ev=Prz$(*DVf=1Y9fG8z-! z(6m4yJm8cE5Et6woPl#`AH7DhZ_p_0eeft!zn(f@6Ay>O{=qq?Rx0H!K-d8ZomxO0 zx({6VP@yl+PrRuif1oZm?V_f9b0BQm!Oj=%WQ8u#@Za5LLN`ItwSmH`$sz|J>=7E4 zkdzxyQKgMK&uT=LAvvxcMgAoa{c`{>TOS9~IY>Cl8U>zUJvuI|#n4Cp z4M!SJnKBecJ;=x-Kn}!n(md?vM;{EB;|jSk*CeVFmHLxuItQUQbduVnwgQ1ugbB5p zDxp&%Jeb;2CLP2ms}i~#n*BCX@~t)sjN+s(@lJRbah@!%-(V!J6*&$@7A6TDfRqj3 zutb+y9Y?&w$!p-gNhevkQ3VhVaKc(XFt{9_{L_;fB&_4)V`^)KcvtvNc5IxT8b7!d z2wg(|#>uOa);J0@hm-|a%A^mt_}xNF8}41F`^U##7k-X>)F!#9muamJB&@6TB7`wW zUQspKepm1wEbI`;Vkn!YY(S^ zdpz7ehd9v~NpED-p1bf9q>}{?1MvwQc=U*qjumx5o_7Z$ zzT`%2K5?oYQNOatbebNG7daF@`|t-!ySza1gA#o{P_BT4Q?S1K58Rx*2fG#Nsg#Qd zUxt(!aH?6EIDwNV#i*>|+&8E~ulPbJd*Qqa4RV3O|-#L2kHsRIvuF&`<{ zM5+y>tcsDRm#3aJQnh~NLQP5Zqdtc`d<(qzfaF@J6Ow2fysChm`0aMvdolYVbQJlIOp)@lMj=nz5 z9hb#Iu_lBO#u2tTa>75L>`jc7@QI++N@DXW+XHzNK8DtPpxS0t6TUF7n_pm&ZUjGA z*#gg;Nn;)6oaV~?3*21)Jge*YP|kKk8yG0x>j^)_l>9>C#eT#~5{RtMCGU?!W5z|6b1|q7l1JrtEz@Cj%oylk16Z~< z0G`~fr;yj=LdOnP`ykaIr(T0On;eCn7sr95oj_!f!jDjXgjAbo?QaQVDoVM~359-J zL14G=nxs`p>r6xP_h{a?u1Y-wKWmI}U*E4nr>f!^2l3~B*CNF4;yQ%5CLx}M5YI~d 
z?;3=-CLyjvi0cyKeTMYYhVd@9xHw^tVw$CLeF{i@@za-aroFYC)H z&L7C1x_dX;*}34L0eS4bl`VZ&Hm_HdCeEAN|LcGmFCQo|TkpYLpW8~GH%GbQ_99yU zv>0!CE<;Cr!FSe=L#I}r?6!WkVm0>wZcLrUV|4$MM}L{h``NXC^^2RzbzU`-=R{}V z&CBQUZQt(L=?rI9e`)<8$2OXWk!dUsKJ!Ym*}U27Bk;N9C)TlSG1{yhf=(@5$%==VVuA2Ca)7R+14@b4<4{Ln=d<>q-D-mM> zF8)W9l(2l(E9VK*I&l&Ep4ktpkC>`w=DdZC1N2lNPkc=>M*k7yl~44=ZCCp$(xX)3 zM+^DrZxt@+?PLoq3Lw0?Gl)8PF|s3TP4{V;J1P05N#V49PdrYsNO30_QJf!hPF;;MD3I=Br8g-RBG#8ve%njxS+Hw-iQWiS-?mVc}aR)H5E6 z8)yyxMZTHLiuN<2Im0C>BhceUH|m?#YNe`|fB95C?_KeSC%bo%?W_Nj{so3&+i}g9 z`y?yeJNOlR_*;opFCD~uvc5wj`2oK}T=b9Mu=aBQ#f8#|l>HdHGL+MMVTj&$jD2ek zGw-mOE55~JSmQgi z5AVhlJ|rs^ttUCFqu&-o@Rc1v{mN-xaKOAYK5I`9=-KMZyPkBwnn4k~QM~{t)jbdU zENP#sbR#t?_cDKAodpwr-Qe>kx4@-|CwZZBD$sj1evb@+>JmcpR$yx2C3K~AMt$}r zgM~>ATfci9@A+dN7sqe8a-#H$?&oPAF`LFblO6N82py~Fn#af8NPPwsBcdc}?iMub z`4va6JjUpA@VJzLO2}WSApRG646Lce9Ks2h;dc+^9T!+&>+`7WE#rx;w_#FQ9OV#w zP@R?HI(K73bh&U4?RJ;I@na8p$ACw8xl1`L;p=&UyP14B*OF;ooP!@lx0v{@rHuyV zIc3rMXKC&6i^~C+GygVUKIV&5((yPhX*v^!`3g~(VIy8J>aM!Dy2CG9XZ2v}4e%bF!2~`LF4BIp*6e*| z6Kr^^9(>!W&G$rXhPc%Ou}6ZX+H`qySh^z)a=uMR!5N;d$Fb7q9~Htns7{+hxO$FR zG`x&!%p36o)!V@KVMBr6(xI6RcxqsOaOwE~*1x-?P+v%C>@K)}T!(Xi?&pM6P-*st z&GHJx_Q|c~uY310stI@;XaODGT%>*v$9HYLRhnOcy)6H{8y?Xehnlw&1g`V8<96`a z$|}%|UkX$o{Bge*oOlcgPhsbhEI3t>h+9oI^2@mi$|8^Nv~SctzP&huQLS)+!BIVT z@L~5}!tXkTj6PR7fBZYUmT8M0*6)L|2RD!~3c~}RfWhcH?8AC%Jmu}e^&52MG!8H= ztVSXp1o}QTuF*;OzRVF?Jd)8QEfw!)?|^+79nrM8jNe)92?)}iUonef#?56?sv@UbVfpz%aYe4iO3S^6#j;$NIp_r_1x zEy}P%lYaP4wkyhR#5rE4M3(f{2qLKsAS- zJ)bbbRz~vApOH{mes~yjayxk z|15k25>_#xPoAZwVEURV0uNPz$$uh^RKdggYo{tpx*>Z~?xYeX;<}45NPP>H4z&00 z3&7Zje`Og;D$Ue9hXS6yEYf`lb5wsdlqVYN$<{Y6BVjd`Tb*HD>{r9&fTQ^40V=r1-Z zXf-xCb6wOG-}NL7k8HBx%j$Pw)Mw~r?gr;p>5Bdr+JNdjp7dfrM*O2DE?dHGH7PhpbKXa`eAf>Dbma-A77fN&pTEYhgA zKJa^k8BqNQJm=k-grkjd9t3nYQj^V|5tmEWn7@AwIv`iA|ZJ-NcdhRHol%8@h<5>^t|6-YF0 z`1Y!nN>~T257Rw&$|@kd#?-0Re7jXJC;iN}TYW(Le`(N3Nt7PQiWZ~|AHkB8>nz;w zJj*ip2~N>tKxmXt6S~3UTSIC&_sZ|~D6qfIjF*gjfkL0p2!Dvn*HDem)nsaKMgJMr 
zs^M`n5ImyJYR9Q(CF@Bi_~x1WNe8?_%?nSqT&s!PmG+Y9ziJXA?JWDgumhS8p&KQ$ z3#W;XF5tHD*_^ON?KXK3x8KrO{T4DoZmQ#IeNY?CV{{aV$t|0Ini49YEA6g%)|Ei~KO7#V;*O$T zvMcJZeZk>Gb2+YaJ+;R^4bH+`+8?f5vTJ81c#O@q$tG=ZTi^!k<#t%2d6qg>brKkb z+U*0Cd2|ocW92U=>`@83@kXP%K$yaJE^&|-c`RWL)7t>)8X#Z7Nb5)#V}kV$j*^51 zo!4iehH9C7-yEU;Ky>sdS}cdosomlZ3=OLNwUCF z=x#9=$M8MOc4!LkzkP=yyleHTWT|(>N>PLOCaAvJ(sNfWKZ#Cg{kko>8c05C z0ZhNNfRm@gfEx)wI*}0`F!cF=|E+Ts+LZL!d>9tG9!Q_F1)C}u{f@KtR0(gYh6Xf8 z=zK-G>*EJD@u9djJ3xuav!zRJ&G#9HFc@;*uC_i)F%!*Qx>&07u{)&E_#se0;h(*GC z-lyy#G<)3z&y^QL!&~cl!rhJJ|AUZxAGRwpVs*zkK&O87)E6TULU!d~MxIWLH|$w( zhh4fC0YcM~FGTGfi_tAA9nJICks*fPv+l z@GPJFl@jXi&3#{F!tqQG>}NO-=R|h`(n1Px9ul_ke6=+aHozAb8$ML~4hp_@lDA>} z`3k(+X(dt}vR+5dBY6v1XiMs4-ZJqIBVEaTrH_6BflQI#GUTTw;opGY}`9VnxkvDC2W3;h4ly#W>4T z4XGG}v0r)s#>7JJ73SQ_xV8H3SeH}nBi;aMAJ|^pNgaN(1m@lw4zWRHR(z{ zbPF0>42E@)@37I9j;is=3Ft7dfDs;QsLt7d>T!6)A_}N)$)}mg+H`-XmZuYG;n@=L zuc&v@6s^b~>ZpRBM8D9yA^9YQ^b~2BCNk-6pgch{wtaKCY_`fsM<8*Es4uwOr;bc@ zL|hj`bKjN|UPDx$6YR_RMuL}x2UA3Slb6KFW!|uL(nLfOthF z55%Z0HH4FL%^(BZJ7tgX_ma?s7753JJWQ?sIQuSMcny5?#|`sFq}Ad_LT_D8{LZ4= z^yb7@*xlkCt;Li^eNYb)W9*Rnm5w17Le_@!nxv~(Mb9Zy#swlLaQ|3G9d>U9>CL~8 z^7SgGOawdED}rC~1HiL~kxaEOyt+(zmh!KBmt;_9o6^>;H@7gEE>lnC)Blsc2?6&R z5-2&L%%JM5di238q)Z1ypL7dtiS>M1ZiiY}+o8PX%1ii#+@wZqjNj%y~MG z_lj9ii(5(FrJOuCPRag?e-_?jr13QIBYLXASDFc3n@>2wDTCq<>)gkgcQ%0F0pZgr z&lB^ELc0q;WAr^)xmrnkbKfoJcV{&u-|irbT#fuUPtt|70}ZcJ(5!dt(wKufPL-LDJU4M??l<{D3$rp+8EXwo}Iq{ve z&EpYJMnXEF4C@{F%pT1dh5v5lqWh%XNW9Oc?w91;2~W``UPs<>HWC_7*FZm|E|B&B zp&3d0s@5TOYdxy)d6b(m((tfdIxCUSqYR@H*ffX-sy{KtOk`Fw{KmnlDeGBlEy@LY z79r(-f^#`xCyHEXUY`;ay!?1-9xn=?$y%Lol%h`6K#OS`fX-1B`A1FCP!w67@D;0D zAA;pI{z6xfw(lislJ`@>zW|a$!}y&3wL0n5^2e0d4ulJ9N(H8? 
zLeCvPHXcc{OJ5Jj($!9Xq`UX`!?GnsKstp9y>ma$nS993)$sDA zs(8LZ{4u5yCRWUn5xh^Jx~^>g-4Nn3L@=1w*>z$kidtsFf@HDNW)j zGs7{h^ek`X))V&KKnO&4c=|dIm(`8U|8^rDVzXyTH3Pil=T?)^Xjos???7{$I_MrN zN^gp*C+`6tM+xS69%Fm=e`8l%OK`Yz9W{N*EgUgr8H}3PA2vp=W7an}!mqM0{I7B) zom;tdz~wN+3~>Z+j{tnx?;)Jec!b^G(sR0Y-7&hR6Mjr<1%Ib>#}9XY@)@hzpi6{@ z{GdR_hVLS!2Ons!+o}fgvL5^K<&nu~Y4niKIUE9Ye&poYO&kjj7q)}lj-ePw*C-~v z34?@m8(82TihJjW@Ri;CB!x%n-}rd`+c}uI zhQ~luSOh<3e3_nWjpirc{lNcjnNh}?3`Gl*!1eX-{9^gBMtsi#cO4!TX29=#F)Qe1 zlZ>wq{{uP()xKysty|hen)IeFWq_w3?3^tocqW5Q*%-c|U_7iWy$-AY{N){V!q}~% z9xC+U4=gF^7jc zU%`eO!`a&hUN}&C#t#3KNESVEH~7<+su(0npc4}E(s#Rk@NADHUPsOGV1tuEA~ zd#L|jW^%J9li20CAJ8~t0S`ZOo@ZX#ik*K&K*Hi3oa!~dxIwuToSqA`?KGQtG+Xg_ za*fYC>ckqh)sZ{Xvxrn9xG=>UGK#+|lZPF^U74G>IOdRv8kPD~(f&hO=8&iPf8&bq zOyBl!BxVDw&@Qg6x4rup80}_W8ukd*`z*{zZI*mXR7%L-#i~y9jP;%#=kH z$LPM_uL7fFs()PlcPDf^oxty&3&&3l+|{YaAM=F!2{_NQE8cXhuY9~~sB<6zp9rYu7U{XaJctz_iD#PlCx<|`}lTq}iz=R{;HY3$HbaEVk zmD@VV;#`-P<<<7bsEcL!l@8X}uDk%Rck6+(jo(0!Wl8NATrbT=ng>3z#GJosRtMet z%gpye9W{DwDsvlXjcw-Ek&A;zL9BU&VtOnOjR&p5h9kdA-}ItUj7_)Cjb(wA+791X z7rPvs-?JKrcbkDL5*NOiU+S z)}t*34LQS?B;DfI|9SAb{-ZHvW__8ko$1?jmBTlep~1KD`MJDR-bTrTYq;uE8OPZiJmV=+(QUm_N9x8wvZbjb=t+ zGJm>mFbqx12a_s$Ae^u5hvrMOTt9kPK-Z4-C0UxfoAM${jX3bHXHoLTL^r{h=^9|9FuV39sq~& zcG&4sZww6_$wfc)o8K1qG$4`XS@axyBAfV@o}XErkFWch$~5PqrZwNBB)adX3-rBX2q|?#aiLoxf4aV# zRG!cVx^CITJ4}l}`W<~^qp(}#0U$n?Zv~X%MZ@k`I_r$!Aox+I58L=rUv`SBz?Saq zV3FQAeret&bg;Kn={s3-6I(t|ZzPMfcA<6Lj_~=0zTCC-M_TWhvN7vEsCl!4Q4fHQ z{UIfop1W)5o|0A&;Ob@eTRP@f z40Mh{A4tuz-r}#ax`gRXWa=Hr{&El-VC8rdOft3=yO%VXGZu7w)9-bfQoaF-|`L{H0wVm>iX&W<7jdJ3eN27 zEiIO=bHXMrYHxVAmtZ`&x$M^5ULj0lF1}9uefm0ujvtA)Vd-8&*mJv_^;#Y(ZScFJ zBrYg{9V_g>E7QJ~Hwe>ZnrC!66NFu27+W%HlDewv7VK}34}@{p>v1?-GN`92EsU@x zyBI8J9c;owd1R0&mR+3y>u8^YuAdeGy@zUZBU?WGI4`+j$0J7;BJ~8G*mPSuvmzdP zFV2II-^bzQ)7yONiwe>@8-ePC-`jk+7I&x*CBgy5Ys}HC-&(f*iv|yG%j0EN9}DdO z9i|!3zF18NM>oNVwsV0#o5|+ip}{DBARYaGi2Lfdtg^6O16w)-V^nMds~Imjq`lXx^JMzg4+M>=hrYMeftSE}Z#G`Hq;f+FG-zPc+m~%P 
zU_YL}?0JwP7hCDo_s?RzSGnSGjH_{6KJRyy?Ot@DM_;3aU+@zFjmtG=Iq4stG@|N{ z0_o70V2NuHUE2PTxPCEYs0XKSzeUhF5*#DY*SyT_f^2Z}E3Fx1DQeLJhK>MVc2GV* zzp}SGzx&%)9=;MMFXpV3J88d&yVQl+ysFOlEU$b2bmkUc!Xz+}7EF%jnlsWlsm~<& zdy5sgcm?gvC}7_kG4k9KjaDZ=X5bi)T-{v+4x3Ja;bSE5n!sb4onxCb;0ID#`~9;J zqG~y+o;2t%?ice}{`dPj_it)qwZ`>JzAwi8_fKcxjKRuc!L4mXJ*V<+?N6%NCYj1L z_^sv74%Pnt?>Rv~F|a_L{UzWRl=h|hnWd=5&bMOJ%2%c)qaF%y09_Z0c=}^&<3{;i zgdR`2az908v|47eUT)6N2#j7}Rj2dH=0ZCjlwB-JGv<_@)r!|D`_3bmUlp~WCU(Y& z243v`J%xMi_`?9E@wx$x4UAn!@9N+^V?xzsJm~TYuJ7@LhkU&zJ)H*{>1NrY(`+9~ zI^#>gi`HR@o2eA`PU^O&g+6b}^J0#?I5)^td-rgG-dB2!!Ffs}DZb$Cdmm}&Pw38a zI)0Y{FT#}uR5W5Afn)f`0*4F5`U@ouR2#ftTY*^}fQiox5Fx!arzIxp(Vt zZY9ggYVGgfjP+-HFzSusH-^5W-jf^A&7G$yed`nf4arZPZ3Xlgqqo^Aa1b9mx0u&E zCy3RBzMSZLsu&~8Yu6z7Dq-|AwTAXItgEk<=s#8*Vq3KicbH#;GRIy{QW^sP&BwmY zght!X=oQoYPsxfqioIQ}EMJ0#Ra%Sv8VsQq?|j-!j_*M~ZYKzM2dckiEW;}*-XXK$ z3@`9rX&Rbty_?||wAQr(=wQ?Yab^E8arLH^?4L&%cnloz(lh@{Eyi*5k@7_n zoTOU?-ec4|zggl%vnmzR#Fzfm$0J=ry9k?J(PGYSUEvOGbX*4=e@}k4x~uKmR)tN~ zzY2IrZG`hBhCh;dj~?^XBd_@hoAahchm|hpEpr`czS$3|Q?9D9$m6VVi`Ypo``o7w zU9d-0qZCf*Izxb02|iTc?mwQ;)7)_LX7Q=RHSW19P+H<$;Sx>#Souul8wPHCK%>um zlez1zXpEZrF8I%M16Cq%k|kCSuA)^#k_moKc`sAanL7ftCBSiHy*wD&dIit9 zb5kt6R)^Jkjz2lFb+v~R89opi8E51^zt0aFnH#;!)P=S$NjucN7i(+6IAy6{)$|&( zV#MfGB?SC3XJ4t#o98Uz5o?8_2gx?&p`M zbAc1PwISyWr7jXKl#UlT#+i6LLRapuit z^sU?+*>C(~0nZ^7he0nea8d$)M83rgMi#=rEYmaJI?B6icQ;iwz#&Qx6ZpIpbSjtF z^b~t|;cSKTj>tITm1l>KJx%w*{}kW>Bd72LunheB*BK3b&-EwXCFB=`IZ*mogO1dz z|G6fKUgKTwD{Tn#Y>IQiedW^YHRx7G-b24z>{H&C{f1;1&~ALRNon-|Y`W0lq4=4R zq2)aq&VACm64nmqLD-6pmUtH^p{~MScIYxu!prl%^afO~RYz9(+a%2-m*9=Llz?x?nA2ztdF*IQ@qy&t8olX zqK`5?RJd${SCQwBbfhoY*BE)8m{)d~=rXH3PdvVeYFKPkx^L&py>q*8y!3 zih8eM9Q^OL%J$#|E_orFVQ`9Ed)AJ;7raqgQ$xOBU>!}bt2r6|-Zzm=t~3^pE3JnG z)?-y8@Z6%rrjZ1X!;vrSseb7U=rS7}{lS?rB%#rHf5|i4ovz{x?_}O_udM!Y0p8;U zP8{A;gP@HR-fGZ*^lQQ^f)8dGw&Dy2Ym6MwUj3b1U3e!6Ii3VBn81@#d2M786f|^C zu^y~D*+iv&Z6W1XN128g=w%tveVobS=4hKoywx0gYQ8;0cdso& zMp?#Cc&fZe?=pLY!se~s%##-HlJIZ(gA4tn(tinSL-=qJ&QV9^;=J>x$^=O1Zrf8e 
zl=m0lBI0F7g~~RR4zFDv=drq0qsVSsg!1T>*7iHI%&i0bNk!_~CSEJwunKT=t^n?s zl*XA|@c`w|wiFkwOBm4ET#E+NP3^eS2wqA@D^H#^@^6}IGnHR2+fG9Z#}<2s0`JY- z;O1;{>gc8LPW-s-p{IMk*1VEx>wmYnPtc~czeN{Tp0Zc#|0HrGiMpgK_uH}3lHSky zDXlG4UJD*(cv}Pbq@%t`OJtRmNrheLHEMotshkxMuK7Q_T%5f?+evV|L_G-QF_2vt z7$+B%SR|1(BL~h}qwDD8yYh@a$@0$$h9$Gw#z@|G9HvYPvb3p zd104!H0EI!au1o!S*~N~>;8^1%eWwXpC-rx)a~gjv&Ft)?RnhnG8Z=G9JG+ zRlaOki-eGd1{RZIcz&aXylmwRd2GaE#(22L+p}~c zGD3>FY6n(=l&WUlviV$Bx;1G`c9&I^2FUjktsir2(pzn4+3TxUA4 zcm=IJSxx`1gB2e>5lVsG%NUs!J=EILl11Gm#zC9jo=Y0t181|ieMov<_+FgO-9U{} z#)xGr^C<2Xo*gWFFYE8>!ImbRg)puQUH#6~@NmAI=x(pBM}Ks96M1|XM^C3O7Y~*l zlOH`Qn!H{8u(zSLjyf~!SIrXv^`G-OyCjOSZNxs=VI1|v3TMleptSC5WMFqZXY=NS z$Ve}s<5ndN)C0fu-_8eD_laq|$K;rN((;~IQfie} z7q#DOZNvk{JaFYo(PT4m4fQ>@O>Bva7y6G3>G!-Zjmh|_ z{dl!q{QPBYyoe3vU1802i@i0(exI-W>fGKWRd4h6*y|aSYBR2n2URQ-r@Y#bSxAWR z>RysA>2qjas54(ksw^@-x8ONns}pJlwb(`y&CU#w=wqsNu)o&+^={Ps z0rv3!Ap)@1V9MYE>^Xge#^y=vRneT%jt!;(m9~m^-YjR1y>}ke|?Ir`HpKuuH(OIj`M0U-j|xZyT(f^yU_iSpZSg7VL}f{ z-w%OFakm=q`kuSQ^x(4mY}$Q!=655_zXA5=oiIUWU!I8m&l4$MYjK~3j{2(x-#Khf zE>~abs3*<+gc`ThcWqfpJ?Gcd#Uv*xTpdRWlLohKz*7sC%F17ViSWbN%l&*ap5&!* z(>7I5>o4T`KYbV&DbX7Y%;6>D?}<3?%T(&ECaae@%(I^~VwcB@WGCx2vZyM~9Ua$) zu!cH%qLqJx^)fA^8lS9to!^I@#+o=$DLg~txxvKsf=kI~70RPu{N+fOKHMk^&uM|mYA;*e(qwCFiW5;NIGNvWYPYY%9&FLb&e>|P2x=;Rj z+nri_h5#!clEvO_z-b4rJmalckx|9CHLH^-r?soGYnC&%=$1O_UcOs+o|=4( zmVNa~biH;R_E=?a?5ebr#*TO>d;Z%@hW(l$Fjii8-j)8j)r5N2=tj7Hk+ht7_KdZZ z=Zmug1_x*ZgPw>^WhFOFGBbcJJgehE{(Yz-dpg_{7z>{|h4sJKjIytLrKE$_E(@zK)gayW#CE-pc3g2z((QC3D|Z=L$l zGuh+I`lQ4Ay>)OnTQqJ;9gFO-=HVQiQ_@g;-hJ>Y3cfs+q6^21z>9b`Yw>eU%djO& zlNF^dXu&Vfl;Fp`y=jXcN>TG;>4SY&T@1f&LY?Rd9V+X~ z*Q5P2;H>nheBex(=#%_-a44gG1vpZzqr{xaUu9a+ckk;wFLXEFkKIH?RzduiM=6{V zx)A%<_vQFYe;Jl|<|eer$;d3dm}8;^+oTN^CGK=KxrcmJ_`>%eJ>nPTu@|zlyLcW_ z)eyeFS@levsi{frS~u2zEv+lTGvZm`4YK{Goy58`dd*m}=(<{i$2g2m<8p~`(iPbaxdA1clinz7M7WJ%#aXR^G?B*-l7>7qzfg*W!TsvhX9`acY)eDyrI;9FOPH zhgwlugJ!OLc|s6$%`BtTw4S=EQ>D)`Chbx^V1n)u&>4I>vpu&ycALy=RibaL_L+Lk z`^K~1rD1Q?TkICoo_}}%CtB@u<&Y^!V)j^K 
z)P)$Zat-=rmrUzk!9Wir&HYj+FfU%YJD@p{c#j4%tEhsn$9^lh*gF&3~EfsB1?%TD`HfK>g$4rU)9k zcqus1oR>GbKwTmOxZ=1DChse2N!7Z`^c?!{>>cVbB|#`G>i*b?8xOIS$6n?ON6-8- z^J{e1vpz#ubX)MlEPvi0p5^D zUsc8RTS8Nk@+#crum#zqJ>U-cm-)x!<6hubQuu*u*Y?jmEjqrMuN|9xTXVkhkxZ3RE{oKmNcRhmuA8WKf`Z#zdV~Nzi2Ea_v+5D>g1EJ0qY0c(;BoF98)Pb&@Ih8_ z>A`Kc?;%wu!yU^S@F2zgZ0+ly^so**Z=>p6)$H!Q_2|GqU9hJ{SFP^yQv{BqmI1qY zcdZD;P3-cYuJ8l*37rHuhfp_sCt{eP^u?)zL)Dx@tF0$B=fOG4IM$;qA8F;R!y{{f z7q6O>$2r~pIVIm6P7gQL5sItSdQA^rNjqown}Ew0gPj;qsk;2Qw-G<8=1FrahADr` z3TrTzM(ojRN^4N!=sm?g1iY$xn=aqX;*Ld;Wbar?53>%YQG84W%DPF=W6((THHBf& zCUmt_CF66tB~2Uk-85pqPRb(z8{wA@YQSQS!@duy?%Z%DWAmX$HHGESB06eDK-UeO8-?pL~JLA;57jI3-r6y6K^_aF~xvsj0IB49knk1TK8Hfz92FF^Z? zCU>(X_`n2Rt1B*orlF4>Yq93FiZxA6f+X-E<)@7k@BpIczsI%U9`gv=SFU>Tl-%}M z19Si7&A}hpWx-*&u+0{)dgU(5v||ql`GZuR6yBY)FTa(g15;$U)foN~ew+O!26O8z z!{y+vJt!txa%8d}DUShPEr8uxp6_m%btRl$M7L$t3kzEG#P>hOGYL(UH)gB!Kw0xt ztN>?0``^H4_9J)=3SH)6taE)LtBzbsCC40~4*8uq>xzxR%{V{k z8fBF*rcS*m;pg>rt>ZZxd$W80nJx~Mnx^y!!y~iO9v$WeQuCqi`t3Dds!wTVr#+@! z_8sMr`!1Z@aVx=h>t-Pq3=bsXaS2$gsh&nYV>k_H4h`N_>wf3D$_5l?5VAIbY(+;c zNMr=`?)@|A@}Ge*v{gMIu%@hjzrEvf?my(GIQnWc_X&PW$gIFabq(-~2|l?v2Ppjb zFaJFh?=xS@LGM)Lxzk6U`CT`i)wQi_?M5oYQeG0CRb>ptwTzz9pH)~b=dSQIb-Pxd zkrUCEZM6*OYIhcsjo0@o@Q`GiP?L&zAZU*}^4ERx6u z)S5`eH9md&GjbkyOiOyVasxrf@L?BUo;GT(0sPmqbAm{XXXCZT@Ulw@ehOS)M?fQs z*Eg<7aD@px1wGb=ua|s==T>8iwcgV&cKXye%~|DP7!M(n6Gf3x49yFl69#>KSRw-z zyTVQs>n>z*tTNQDDcGm+Y!Bu2pjQTy$}!;ous7sYrDa5GJk_z|n+w5bV_pUt+xy-Z z7tbGO=)rbQD*8*ul8irl!grO~^_K zcuiM7;G8>^O>SLJb${39d!wSMSr-DYmND+QiX2=?ShkH#`t$Zt)9b{6ytHJlRGE|SxGSdV1p}4uQF)Vr-(>`k zveV;7t`^W2TFr8n8p+%VN#Z}Bk{8H`EGO{oC2 zw2HvpIx-97E>^nwj9e`Z+PehCn~&Uhr}D4{ID(Uc*Lp$wDs8MX853}ogD=0-fh)A? 
z#Yk4(3Hp~GCqGfTzty(Xe}uYr_yjqo`vm!-{BViSD(+i&cn+^$;!ML%wIsB+XjuO- z-kI`&57XVK{^U$rfXJ`%Yr?vrP*y1rITu$N&@8M5lyMRr<;^T_`1&4d5F z7oqMysIUJ$t6JTQQ1>6ydl2eggt{m3zxN{4eF$|wLVboK{`|kZ6M-Ku;TZZ`+)mW7 z&ubZ;8dK7+-qVPEPjx5%Qb(j^&#LTLc|L#YfEwJ>NWb~#GIlLf!iciO`{@53;(mpv zwXfg%Q$n}9GRpo7eHyx&Pi*z1^jlHd$-;}=bn_08Rqn0)*6XFPtX7@UV#09lMtQzx z`%50{*nmz}z9`ei+wg#e6M4?Lm%^d>Shl#Zp8Cq89DRH)^{ebDZeA=yx3{+u)&Wh} z@xf2gcXv}7==l|AQN9=14`*_C9g=RdL+ECsQheuYPyNfEnH;&pk8G}L(j52gH%w^C z&4Qa#SmT`{%5A2Wm$`%P9O@&EAI1L1&w@CwU_WK9>msL)FU?k;_lbNzSDChS10OHZ zf_kPer9nG?$`&yf_^AI@3U&;VpI=3C=AO&!-|`R-!ZQ$#2Nr=-(>S~B4Ebqjd3`|K zco}DBCM_CP&`&<-MALJs@^tSg4r{-I`?hYP#wN1wz7wCfMC1ORi~2omv)I5Mi(zaZ zc8{m8x1)f+gnn#TeZ&6YUFm+w&N$X_zl2H?$tqzd1f^i1ur%Bd_GQm>K0Cgw>#*CX6rUnsmhr95?iBaS%y55fOX(gPsrQxY z`lbrcWI#KXM?XdJsrbJ1CI1HXGT$IWD`8x5ExbZLgp&Vucj0PUF5UKH51i~f;w1Fw zy2|ScWVIbK&rtKTbf zNO;Si`&8qb-vikrCx?1Hw4vrlG*&f}=1`eXpQ3O=GN-Ssnr!pTOE!;?G^oZmMh&z7 zlxA$cz6@u6?m)h+GNjLsWE%8-Zt)t}uDC>2T3vQaS;x&GX>8`55^jyHx%u}*aq4o0 zxS6|xo8GQZ{VNUO_!Z%N^lmiyYb~gM-N6*y^Ovx#nrezK86swkJ0LTh|B!ua9#J*V zeG=;H({nbfF&QDI4M~n|zDxI))%9lkn(@7`Z|s|ysm(m!P7J#JkMJydDYAJo`7ZCt zr-oh^GwnLF%~cZ>nw{so%$oYt(&c2B-ya-aQ5U@rZlr0q%5%oxdhA=KA8+t%E?VBY zPKEuia^9|8QvKZ2US=HJYz6g-T_++}=J3oz-o}7Y@5IN-%gN@L5DBe@vuDv*ju_Hj zgp{$QuuHKVW%*v%_PavCqf61CI!nmy9~(aQur^yCIzhNzEnUa+O~ zg?z9y;;ycw*=ut}RM8hTU&g@RUrhLHQ-9MP{!+1rNc&yc2>vgK9L78sp{pZgx`&-w zOTsl5_iUfnb+tL>&uRL%7>WL+-bc5Iyj``q&)+rm4OI?tcI*cZt>mZ!-#Go9gN_;_ zU=R7e>Pil;#}jHi$==0}XC1p{ave~XX3YP?I9jfXn7VX1f6X1hM>$4hpPR+NM42}| z*3^t#7`;H~1Kn}XznE){?=Bv~A(>X3UOiH5XnIig@(&iDepJ%U*9VDA?VL2PP?9ae zEsQ>|_sHpk9q8WAU$RfyUM{Thr$pV5m2p5uB)8>oJm+`(XCnT`BqLNLLF-%r&eoK0^f^-Ee|RB z`1dnY-<3nJhVhAy#F^zU$h`Xp#H`aka=^w39QN2*cpe^ZU~NR-vT@ujKb}tRZp-df zs?lur<74JKCC0-2r)LY`J5{;kPtm3RmH0mXHvWaoS}}$^&fcZtc;+yD>K4&b^aZxq z=@y-_-`{Ny{`^M`KK}HOv?>FQi2A=y%x87wiYM)dhw^x)<$G?yl)be%y|Rqa`vFl zrfFwwOzt;o$o&0>WUnvQdeq@hQq_4CSwJ4&9%*@%8hFimbBm+g3i->vbUJZIr@XrL zdB*X2UYP+`O&{x(B%je2DeZ+hSuTm@LG-Ug9n!snU$q(0`(%~Y)3m7`Wf+*o>7^Z{ 
z@6BuClT9VvGCGQE)6cWDg*#jJ+)Q?SQ=m6FYuj__)2uTMqQ#7PqZviJ8NY|`R(;HI z$5zREVRM9SkG7oEZ=4*o>KCE^>Dam5?EC7f^zV5PfB%ZYr+LYvgKFqD!xQAEMPVB7 zm-}q`gY)h;G(yURa#s3CaWi5S0XL0-H>QiQOLc_D-YjwJa1*^?l&7>xtf|&Y4BYHr z+)quD_7naaJ+PiOzUbrC`)M^3c+dnqC5wh@#Hq-BJk2?jKbP4~adsqyWQ+U&az6`pdQzL(L1lrTzsE*=B4T!*0+mu`!~X_!N(4 z)ous^4iPkf=2-6^P4QyW_w|bZZKmC2arf9?#>pAp`n|bxDRlZq>ht#wvMg1Dfu*wW z{dwrLg^Y2MXZRC=8s(m+OB(JswvkW#IgYw8l?OKcpt&!M;)IDg;^cuV68I;le{(XL zB{$_3FTBr09Jz{m?=3tmAIVv7`WE+=RZwklyu?WnH9wF2t2Hs8Gey)0f9YCxHGh0i znp!&dqgzGC$z$&Tz3J_JZ2hPynGY_9aeNUOGxrGKiynbJ-@{2tXcUoNdJ|>yd$Mle zrYHP25PQYk1lAsaI{k!@bU8y$KByB&?-W@dD!ldlnq{b+g(3ZdJ0 z=17fm=dsW#wi6UlUU;Wh93@kV94Vy=fZRW}uZneef>8~iH zmAP*7EQwnDZpY{?g4U+^#TA5moIP->DSe)}S>b^O{Ymyehbj#A`gA#%EDoFFUBbKM zKjk>}tLMR4wI=Z0XW639)&`V;{Vu@yntNO%9sfICq+Per^9Jl;)FOR)QW-r`htUt1 zqn>B*yOLb%mZ#uFC3)k>8w4DqFzea`OoDdU0ll|^(Q_0~-b0HYyOykcVkPvQh}f4R zdT~WL@WyAQEhYSe{Fu6e=9rIG8cs(Yuz#YP*7W2b+`nK555RtEA1mW5!lifQ$2Ln@ zX%wZaP{Ral;n|b37O?28cS{@6lYgkyWHF-1~B99+cTKS~b z+^Q90UBJ)LXCikzq8UpXaP;%90zJpGw%rl=?cLb&xgBMBjpwX#6B*ixz^NiY zWUq(e6uELJfs-|8b;`)DMXDE?|CvB)-oN!4%U^#F)08#=7ZJEeX&$mOzaz1(yrD{C zia#Budd_gH{6j*cK@)yA0aKw-j*;Rn=n6j70sgT5D48y{k>Y0eg;f;(kf;4vdDP2Z zKgv$rY+7TSv)6<(YoBMVyAc*wmm|Jk;^}*B^=5VARLyg?|1`e0=|9H$83}!caluJ4 zLhs}Jy;6cg`mCq${u)J&zboz?oQ-<7(9!$&%)<(w<)*!3WtFnnm+RJG0{)9T^KNUZ zR^4B2W%x5`{q?Bg7-*&&GPqkHf0@-+^&V%}&zIm1>eGC;@Eq|>WHiKn&>i;|>(?r4 zzVLCU6XJB4rwlE|!8gnC=`tgQ>LXw&Lm%kA%Wrc3lpONN_N7qWL2g)fl)lA069Ekj z`VySM(?p2GSVU&lMH#lRzO=BcNNS#d2f)Abj5UY$Og+=Wp&hv8v#hdvro_*ynvrVF zT8zJ>`F`j}HzUfiL)se|zik6s)!xgtlP^=>zK2YTXKlT%lDYFz;F_J%c6$F^{o5$b z0?aJ(gZ+5J2pT2{KTjVEODT>s)Ee1( zb>)n`yQxaq83Yf

?D9=EWc{=mWjb(_2D2n$&ez^DPN1q;UmMWW@I8}xC`DvB>@VQk79MexVmvs`(y|KiJOFYw;z z#y4K^>7jgQ!!4@vaR&Qb_U76BfFDla&mNU@cvoSbhy4k9nNdQsEXtgcqI{e_ZO2;p z?LwjYzE^##;vPNzx0{jmX`<*eW+$IolnFgh&QSUs9An%)c3*zJ=|HOgtQ*7^YhU-K zcppAvyUeS*PktJ`foHEBrg8^8AV5=E$cSHIBbvV7%L?1g&z6?ZD1;u=f!nOGFKgRq z^;t5b!Vbc`)6sr*lrgieNOM>(2cFD3gZ0E&EVx%t>kUmkv6M3!HZZ1%0f^LG(xJ7C1m3a2uA)M~JRbss*aFsKhxAVQHiQ z!53-?%O6Q#Dmf1QPv)(5Hf*5*2y0x+1U)L@Unp+gQsraFvEDrJ zktOCP)YNp@PJurZo$;T<8lab&pS6sxqJu*fe;5%~mw@rX(9n-WLbCx<={fwJ34V;` zd>PI5wnL2>Zo5ptI=QjSO|kJshWK`U1Q&XyD!)w1*FvWmw=NtKz&s6eB#>i>S?RY; zmT$MI+F^w&@NLC-6xF!^TvJs?7C_4TsGbSW*BQE(kr4n7nsKB@Hr?wupH3ur2&$)PD#4W;o>zg(r}beh$oH?p;-GIW4F`JTEid)6watIQ~% z_9;d$da3>nepOai>kQtfuMW8q_yLX4ij`&pZ*#Lcu1bgUiM~lnAIgMNgOdWvCrG#U zVTm(0oMX%5n?!!xN%ZIu4Lzy!t$|u3;Ep)CJ6Y8#_w*{O1G7vvQ4CG!O4grZpC^5( zbDeE|wPuSPVA2$4f~FS8sp#03S_U)*_1sp9`c7&D&l^^(8Q{U`>+f8`oC%eoe0s7T zejVrc?RfX5#rdH6J z( zv9^EcinEYO!qY4u>#y!SVALcdj3Ca%CE$m)WgS1yUaTc=KjDG`Ix+ z8t_>>vr|X5G8!5m3zt)szEe$Uzk!+meiYzpJj=A1@3t65N`svm?W-~>cJJzBz&FYP zvvHQ;+Vv_Y1!hc!{yD_f>l!0FaS~td^_0LiG05yYMYk+6sr+;1@C!oaq#LjIMXvjn zkRPKSc9ZLerKYbJvjwy#E4{5U3FtZk4+zU8juf_VA62RO8Tp|j_cFKU+bzAsv>g?p zuiwg(g&id{8TLDWAX~V%mB^yVzT*%hqe4vsdXtVFjFr(#esI4~O{9&iptyuWOx9{% z1p1y5CJq%!Yo54c5Oe@^jWe?5m()LHC|Q-SudCbzc#gbq97Fd~)05ki(D#Zr)!!?x zrm~zQyHDM@aMfj%v6)m(W&bo+SD6TM1m5KQR5+$CR2r2AHk~5C9pcXU-VEI$=hT0O zoU{sihL7N=qFGAs^Km>^r}*EY#CTPssBfK>&Q@BWO5kLQPq&1Q-6)kW&is6VfZa-W z!mn5m#!bk&DEj$q4u5}&&GVK414gO5r`YqrpA&F`vQ9sfcU}8)81^6AH24hmS84p4 z>YRN0JwKjalq7TQ`QG>zJSBBKe?4;8H2%$2(JKi1bKsc{d3GL;*^TEe9B*hrW&^m; zH-g)}+0A=HceBwpLwoPhTejL<2G78r7mupEptJ-ZS~tF}3=I62X8B>S>-;uidF~&) zrQ3e)AGg)i=hX~q7(_UqZJmanGg>WpV{&pyBgbLxJk-CPh-y24npzB{F){miN$3H( z^{W9rX)=(T94<5+jJ4;kcZ0N9KE3&C!c7{VJd`&_eUhz*m(rJnHk2ppY~)m*_T1ew zg`WQS${3?DHzt;jT)iQ9K_>SpYr&a*&*X#C2PvcNO)|O^h)oObOLaZ%TRG`1oijN9 zS`3e>?N7%J52O4;MS{FH(&Qpb{e;IL(aYzqX-l^T`lNOVeEE$zw>g~0ZDNj)+vC#4 z3)hwWvY?c)By|nP`%Rz*DQ$V0RcZZqOo&X&wc%)28@=!D?$Xlfl$LcTip$LSCPq2- 
z*7clQJm&f;?tA#7T$$1ob5vblF(F#kozOrI$=yR4T|S%6gx2I=weOSl@bdgTa5`V@ zH;D!WIN_RO-%n?t2&JL98FzIv6_`rk6U z#yeb+aOT1zcCEUKQ@ib=3VT+Pf0xDD%#;>%Cba}R;mr8AckJ}FGoI4N)BEHtvu=FR zqrX%&i5e#B360r&=zY;M3i{IHw(0wXl{_NRE;;y(18UHj9LM)H+`EntUGJ7rH7&g# zm(XL&9AsmHrJlcQu=G6ARo`m4N2obza(E`EM7`l_tya*Hf=c}H%_^F0*_h1^&&OO< zkeGY+yj$KVh(1FT;$r0R+Y5x{>wdf~riN^M-Ado+oXS-W&oNDE6(MI^9+H>x-SwFv zwu6vsjFiQTt4}OJm;7q zn@118dt~$QJ6DT6k0#P{&jUO?w>E9HtgFwjQ-dnGEtR{writG%7Q&SG7scH6=OE8S zUY0jXpdY+yJNu~hls+D>W&OI?izImj{S2x`_1eCXKSK;Tu1+mdy`}0%jq`MweYB%a z6@6LvKq~7|i2e)WmQL~VpWDx9`>?9w%(xMJXhOJfew|JS&i)ekOg?rvS58j_sh*ehg7{1`r5QJ-$S3(EsiP#?4>4Q!L&K# zhAA|_%Gezc%>9Pdml%`8+VJw%DKr?*&2FDDOfE^SVCoo+XAkjQWu-Ey@{3a#V~pIW z_D^cpzZ`MkY_DsNo}1G0vJ}2?w+XeS;Tf;$gGNNn<@X-x)TpeTQF2i&;~%pQ!g|6X zeBT&W{Wv}0nP?uJMMoXmbF-po8vZMZ)aO25aFbW~n`EOq&BbcJPo}@3GbrS=he%)0 zhtbytFj&Jp(W)qaUHzPk%XLCu^Ib>7#7qTJoih0qJUWl?#( zcej%gJ*2PyIhgxiUn@5S{t~6Uw}I~jM|+;)Pr!gTYjTnesdW$PG^4|QrF8*hPGy`7aun3!^3a?l;d2^@tT~DVwl+@iJH{S zLrtbV*Y=8#g(s-(u!H=f%M`-D8~?bLHiBI3^^-2AdBTbZywtazQETV`jPoyj+nrkc zDb7rn30a)(??#=Sns7ouMSYXHZ<;)g8u|=TxXMQ@cgfV+&(z$AV>!v%Qs2G6XFa+dRtBRZuri2Hp$Bp&dh;QN@XY0{l-6 z7gg24!(zTmTUq*5k(`QWGuydb5#9XTQ?zGG#k(f-FjtQD6MqzKVeqO1Hd92C(Zv4E zjI6`=yc*TU^J%H;MU~yYR6eeu^!n9S)v<^RY9Q{7KUvHh1=Ht~L+!t{_(f%8kE=sC zqis*r3pUGg>`!%$^q_;Wk;UuX$UBMxj~I0B)i6@?Trm9yrTfQ|8J>eaG2V+G#;sB~ zMnflTr~m4d7d2BI2^c3F!@|UOyvGh4CfEGV1WrVqcju`?yU7c?lO)#fOc$#))OEpF zTDr0V^E!`Q)6Z_vCSa-D*d2R0HMv8* zEsx1+arHRo(Fi_}>u4wrGP(p%uSIt9&;%Rx`&9plLg(hZ)$x+7F>$HF7SXibS;m?f z&|Zwk&KdY2?Yl8KhFS62>&xWM)6KbNYBW`B*MV<3IhbOurVD6D_Kw>jk_%Q-Zsu7! 
zkG(Ye4?&bGg1n0LlEV@V1<7k}KclIAz80WyPChVcH z9xp`OVU2k4wI)2nU+03*O*}Ec!PGo8QYL10P_?7KG@nSv?$j_AwL8HPD+4L_&=Luq z#}!*SE8H-egqh=BS7#CL_l5(0ofhw6tl8gF6J2BXP~+=ibp6^6ns8bWaD{*+I_eaC z|6N0mkvsN;bgJVbcEwp4=u?80pwX{BDy;{8ZD<-4>SltL5|~?phNfHQQ^cx8>qzOx zr7KseJ}t%ye^*D2%&n%bjs99$S>da=+OEDDr*Y)=H1hEnpQQNc&(q#|C*OF=y|z`V zb-n4_8rC1sHIzPUPmAD8{t@wabl8%ujt&pvVne-4N{U>@qSE79HscZ z#JXb_}o*Vl87U(;LV@B2dYXZ7Vj2%{o zYdbZgl*BTOKG$Y-b4>!SLX)Qy^Y_0ljX6BD9#@OQJ@zJdB=oaLED8k1)#hfI_a$(G zha|jY%rz@KUU{{j$nZQ&^~_#Mh3{ff6&-6Qe5)=q4R*f9s}?E!Rn7no>WWVS3hWG} zyE79@v6D+L<5*5xsW`**;}?Bfa7}{u#J|JVnQnwur&m#<2(`w)d^Ao!`<1H1oZx9V zt6lj9%oo@7@2Y$a53yPaed8vTUdNc>*++oes~gY?TrK&nC|l>K^GbM?x zw%bRCT`lR^5jPoM`z5@=U!?G@l3NLm823tC&-CUq(cTQ5XVmT^4)VjKap z*wV_z*y1-?;Sl#sj1u5G?vmSpdIfpv8C|9mu7eJn2WXZ~E%}_~AN0k^%7D(HA$b#k zGfv#&7h@mTdGfpYD+bN*vl zv4f?TksfPVtQVjgB-S2t^-P18li+4iec=UGUdRsj=G)b&PqX!r{jI~RQJuS$_=Rg(L-`iPTZ*5R7M$gi&i}Xuifv_a=59)3 z@o8Zl_@-^r&Sw}2zXdp7WEFaT9ie{cVelk|$I@b3RpfQUY7jg;Cl-}9p2t=;pyvfN zuw0ghy~;Cviu>aDZW6bC?U&>Fmy~CsQyKUx;axf4YJah)U2|R=vlE3{J!}3M{MtrVU)aH@z9*3G=N~~&^`J)Gyo6uuF6rL& z38S|7%Z~umZyV!jUM8Ne9m3v^my*J4-~jUiAKlh$g7|u*iQc|z4(GHgNo&dmnV_?i zfJs8(bV9&dR^D7oVa0v&VKClRcR9wRbFL?$hiI_V0am$($}CVj@OnFGP?-dw^ug0S zA5lJeihMHk4B7tbs6*%Soo?H?LuQr?&OgId-y9(LWwAHzmk5;Ekh+JaP|-k&CSL6$;v# zy|GgDz+I1Yf~M}xMX&Cnf6`1}UdI!7SVlB?UYt`P-!P!lfQ?Q{Px7aJWAL+KJllI6 z_44U2_B(AQ<|R1)4L9e-*X0Uww^7PF@~(wPMQFRD1fOSm7*~$7W9(G_%ZDqjFfzuZD3RZj-zUwXg%_9jdYhaF*7XklzNoXHqlw-Qw`s#%je6030 zp(jlVZK9doIfLBnv)3C-N6wznpQ-kDl}D*M*2i2g2i=iRBNuI;(|FGlSaT-kc0R#- zntU!oth5g1RrwqfFp^9^7UKV-h`OB?>iW^Y^k05`XsICn zyOovl^J1%akPKY#hzrJLOT}$%$6sgod!h8t#AGb4I$-8M+UVJ3QN87J5X{;0|WCT;DhM;t`P){U_y2)!3o^w zRUTRU-vBO+U~m>ioB2q1SP85mrO}~5WT&Bh1T+v~ZiF!*6!pDEX%Z1Jw1X^c;!BP0 zoM7OBL^dj}#8pJL;6nMk9)SOLA%Ct!nIY$;(mC)~|A)9Q@#?9I{w@s^i6XNUDN01B zd-hdi9zr2xo@X8zGNzN`wYfs(bb&GL<2-%u^y{hzuEe_i3%)AMji6yVh&1 zXFZ0#_j~U-dw-^T?>P_{Rzm*Gk&}+ar$=j<$YUCh>#LHFSHpE|9s0O%%3HWWKr|P5LgBGTw2$gfF_yf4x+w?_r8yEA5Qv_k 
zEC7tUSt8}LLI;xu+R1+vIg9L+Q~t%5wX%T1Cn$?NyME0l$CpnHWzv5V<=7&JR)|-@ z*L^Jf2a>+nm>^_ zqF0!kOt~8TFqkL&jN0*TFp%aI8Hq}@C9azd)Nvr49xU?hyw?S0LSM?1)gk!;(s21) zXc5s%l&RDDYR@@oMxl9Cv1hgTCDvgQ_aelf#oiJBFZO?+!~Cy)ZQQ4)PN6kTr`#LC zjgJiCr<__!^PpI%sn{Z2N}Yom`;RNThCISIn^NJ<&0IXt;R|Bb5YW@=&Q_)5@Yqo~ zu<@87q`16;lCYC#=lKQ01C79A_h8fx--{Ez?x6ql;NpU-?5qyQN9TQDYuH$<=(ZRd zWYP2S&eL#D>r?1(zz>T@q@#VtK2~}@Ub4v>0;{TT=im7E7+mICKzQy!?zZ9@jJ*+# zLnI9-wl>3Hx`$Era5}nm$b`uzTj8bD5gI1HM>;p$8h;-O_JpI6o}To1Ydz>p?~rE< zJCA{555Y+|CcXVT5r6NHadKxpra5&T&k7laLyJ$riK`lzm3Eguxztep7`~AiuggN; zr(u}2u@EYIlw!Pww#xnXLwt`YX6{ymPja{MsP=9)jJVKa-jwq7IrQ$hHCqnBK@b#{K>%rrK($q~SaWbRz{mOaVUfzpFUI3wQ% zyDz3^(G2PRTazUIBWeQ9TDO{~KZwCsi+ZcOS`@(n=f8Mv4`oq$B<4LE z$JKhSDEZ!J5A2JWQ5VwwHo9IKu&%H*k0^sI9qTeSAhq%5GNO!EiZn||Ww zR=*bCh2fWu^30f*#OdkG{l*}1TGBCRnoVk9<5)~Dlppq_CK(m2TUkOG#s z^M~+DT~&+M!JKLZQUk)^_atjJz%rDy|1^AAqmPkC8_MkLCwS1w1tTdRr@6_m*6n~* zv+u(_-}Usoo)y(_Cexs`>5m31$06%mo@v*848EVS18BU|3k|GTo8^Hl?#W`Nowy%b zjr@fATc1JV^HLUgVz_)Ts~1f9ejnY#Ct-3gE%oWz>+JLH1~gxL;Kuc0E^3dx*Z}#X zYdO_EigV)ww$a+-gW&4#PkdoX2)$SC2XB^|tCrh_@6mK-14I|U{!i5HOf58ucYE-ePZYn?Ye}jgHO8K9n@p$%ihT=Dk z)-4>9#7b9ffjhfQvHD&pIY_~2rpuR8coFBmAJ2ZiZ%nHvTm0wu8cuT#oh=T+MF$B&zLjwO*|%Zra}!zg zXb+yv3s(E%(AZ9V*zh;tId~5r&>|4cUdHo;8L7~@d3zWhR4Yv|HbHh^HrkZBqekU* znE0v;2v<1uP5zw|*2;rUg?Nq@eb}QkWd`^3)DH#e(&Rpda_z_nID6Mhz1hA3H04S- z_;rLzJ(*`_bV7Uwo&;-^USEYojn~*`*&MiYz=GSy?}5pEb|Uv{2mQvKlomhpg3ZBm z_#&sigag(XQyC7!7kMeUKF5*zSy@&ugmM$|!#$tF*n%KF@#`WvYWQW?MeB&o)Vl&S zrmVP48}$nJQ2yBj2%JFkmE$o*>lVF-ZmOmpYKuq z1s*sL|JrVWb90|@np>>t5<}~CJ-}Uk$0K(O2CwmZVfCPu?CSdpSZSdnj-|#sw?*m? 
zX`=BD?lxlxioP`;PzcnQSlMGD5Z;3S$7-PK!L6IG@F|D#d9n2YR$(w5{MsL8z5h9) z^@1+yltYdXc%m2vz8Noz+G(cu4z50Wg?S~eIO@p}SROF|y)S1lr#TMT&Tt@0&o={ zyB)qU_Ks0*O$fPMpQAijw{M0mX5%iNB_&3>!-qw}>ablf*(>*?7zFYY5y z9b&ugH=t^^2QHdkSl2gG%EtoX6K-$19MdZjq&}M$$*bpXMooi9G%jmp`}pSQ|8X^3 zId)SjacwOL{A=rDAU`_zmAfZCqcQpqsc#*Ux0*r3eA-`NRtaz1IhFeD0G@J+h7XAc z>dv!TH<8g8(_Ux#a_z=Wu*|J17NkVMD%v~aRZxGhKW)bQb)xrmo}1u&T#wy@&GFUb zOztYXqR}ru?lPnmuKVdMQ$29s>)V-kX*Rr4Ze!}jL(GHTZQi`N6wRtS$y4rKAwGz} z0JCQ5`xS<$*OAFIm&&@g3J}KNofk5T*YKB)l%IsmBMVUPYbs%-Ic(nWl&8h`vKqZm zd?Sa$t|6W*VM{h)V{h~*EQQ}YR3<*N;?_f@?A>cFaIc-yJ;iHKQ?>e@0b4z&iqDCo z_nPa^68H$8xB9W)>vG}fyjL)F!bYB1^o?o!9VhA%EXa!yroxPvMCCyApX z1@LgyOm?aCdw#>hwQS^m+NX5xeik^kGqjiz365vOlv9QQI6;eY;l=qtV+GP!+E2rg z_JyThd^x-e6p!d8JAO%2vKy@j!UXUP=mvzbJncvztMC7mAQ@J7aI zCg0JN3Fp`=#U0;X8(hZ^dzC8K>7fDDr!-~aHZb_!oUI%B3=Yrp!_zCh>-g`+_2Ec$ z$^51{fmQWNN8%~?oE!#(KcY{;QS&f5zKDhJwT*<%;sN~=`DS)eq3>pkciyGFr~e8) zMKzj8{c~T;u|gVwP3V=Rv>*ML5uSmycXu3CbD6vK$We&@)qtjrHEoOrB@q%C;Kvd*|@`7jVV zq)GW_Ry)!NLbdN<=8h@(;d`T@<)1Nd(JC7*P8`g%W^7~P=Lj1xV-n@ouWi{5yA8a$ z+*qC2(`1z*|l@&l_Zg^H-C*!@g^;IrSrd&`jop6;g|k&hi88 zTrg^7s1k>&!}Lc(+~2j*+2;K*qf4G+=(ttt?f#nB0=7%4(Kw{>WyQ>rcWtnZThjj1 zPv>6-pDTZe+b4qAt!Rj;Y$DG|+YP5z{>-OZfUz3M;5v0O3-rws*UJ7>ynyXD?}6a7 zuUXZMFofULES6|~Wycc@fW|-9eame&qoN<1QPh)>oydnpMpARGRtk3ahVal`%pvgclp)`PEm*__|bW@Re>SYZ`|lty*f== zn6pVTJ7)yuJ!KHJ@12^2uAv|J&iT4B%^&r*zIwL#S?IapFK;}r06hcJ@q3?>L&hU#4mYSAw@`BheD?iKY)(LIcc5MZ9Y_?wZ;3buG11t zbpP@!JHuS=WD@js^SFXDGB;Tc1z|4_bRh=rNiz#rL!y5)4lkV|i+N$rU(8w}tEm?|gVU?Pt4!59Cyoiup?x)u0(~E^X!n{4O*K+&rxL!)q&54IhQaH$ zVXS4^Nm%RML?u5%Ykq#GpEqN^r|YTNjcUMi!al__s+Yh6ZauN1M7~x!IVBfKqhMCt zXwgIXmq6nG@GzR9B$ujD$X$-$9f#fNC)nNav7$ z?Z;;non+%*X{nn>T$S8tT|mkVz-`P~OfTAv#Q&<-pj|AelP?PW+alzULUjY=vEa@A zY#!S#8AYG2pEy?zzuyD&y&ho7?C<0UZu4bT+k{@_qyF}WX1``5@e`1~0@5Xp72V$R z1X{bRra}X3S}p;fV=M4;RwUuvY9@Sp=RrN#FliQ_b{ z5?^-RFA05OxyKckRq3c6Tb+?GN%(OPx+*P3lZn5lF>_4+Wx>c(@>Lhka#3%j$YX&3r>?)gveMOByLiDKQ<$5>_$DTFgj-?ul6|?`N51GtrgONxT@M+ 
z_5BxHH=pDUVQ5f$(&Lv1S8e2kByw1apeA8to`UIY;N0r3{Pe%e*Gx(^$sV^ zn#4tBQuMw7HmjJ*Xs*z&u^E&VKLEOBoM0RUg0ILglJ|KB;fLet+Jd0-pyTXwRx?iC zg)~(M*|}^1UU<6?@7hh_#6u`JyA~SZi+BD&`axhPJRCS0egCz_)PTlvmHSPg^OM@| zTm`<@8{kvR9=NBb3xwP$WD$GUf#ZwT>hwts0kIZFMV*oT5{NV$~AaY)l_2Eqlwhd9BV z)+9QyP-I4ovL;y9aug=4$N+14?t!!!e-oO)iKCIOk5MmzUZDx<&vu8!&%QFscG$t) zhlPh@8Y)2che|ew-Y0BU@dPM~VWj(IgNs_6bc(cSw2acs5h!#g>03^cE2_%tvNVs`_A2oZFNt>m(zRIgD*%mJ?ZmuK59u0LGRj>b=1CBg zMmFNqb3l5F>M|Uk+;2+UoQVmE&XPwmtucD!g2J&aXHdh z%c2G~yUiCoj8k-{0r_FxeMAtJMuvd1cD3L%w(;0bFpRn-v=5_kfMyRogw{av3_v{& z_F+4Dzkz8;ctW|v5f)0%C5tR&sB*jxJ4DWRduAw*R+R!Y8$fZx=1BgX3olKY2FZ8Q zf)sbe*utP8H87yQX1?&2ghh-r4vOrK^aK;yE_eA+KEWkb@}Xyay|#4$(a-kFwu%}< zfhEMJB1e=+->U)rJHUw#A+$HlR7UlQ{ku$$6vF#f9&9C-%~8bxK#LgAZ+I3 zRdHYXB&51vl&x^mm_Yd`zjM}({{Nm47C`auFj&;x4lZ2YiIk_QB7cq8`-{)V8YHdF zg&!8zK)OR58*1peKN^4baLyxcjZ;3FO(X}bx`}Bm5D?TIj3fDfK%q(6vltmtF9CHTy zX*Wa4vGPgN^UaGV0*zf=uJpF@mSXUIPn{N~euKFmtksvTTd4Fs@>9#>blqv#c!ESZ zUNjO0LYwKn*kHFdA9%ln%^P_`IeRyi3oIgjtkc-!s~kHdow>O z_hv2=J%_^U#yiI`$`Q~d@PyC`u&IrL!ruxHMm!0^I}t8pvb&+2?7oxosIRPfm+x@O zGz6cvILZn8WYQFMITq!}s?h%PHog>DuSy!6Q!YVTdNEKwCNN5H4HJ4m^k<$Sy>~<2 zC;eB7z$#ILkcE%($dgmQ_^RSP2=VKG-iZ+VK8X7e|F?TTh-3b5e;qtNpWb)r%{{%W z`Lg6hd@<68b*oSBJY>YO5k(DQOlcA}Ii;(f{%{Hg(=%6>e3N0X-eGBD@i6|m@&Zd} zvmZWq?!b4`21CL=9p$Ir3-0UQ4YFK2sQ=ky@#qM8X57CX+WKlU^VvG;#lVk{;iQMd zMt_7vGemzmg8ipnZUN<#2n9HH>7>3%YZg%_%(PXj}QqyS`jIx<0zS7>V`U zhd`T$W1xQfMi|%pI7aO_jh5c?p+{gMxIfb59-m*~g!xIZFM2%uYdVt!y>5sOF;QST z?>|Ss30x`)JOlpEt|^&=Quu_*yNb299(-Q?BYj|pjq+h;Y$cXzdM z^1AO3M|;y+d}twuuL?#l8-HBo@E+Su9Rj0o1>l0FyMca&1-?wnpWm1sVxM=Cj%5yy z>`z1G3xuve6Q%JpbyYeqpfP|b!rC0oy*A26WCaYNQ|MuQ;9O8XojE z27}(VlC|2u!_=(3Jig^lu9sfHKhDX5RVQ;HWK{*wxU+y+191WEp}1{|6?CbN;0q!h zcv0vhu&b}fQ+iF~*`yWp4u&iI730?aE3o|TOCG*uC0^El%}eboCBF#_+Du+RIrs{Q zHY`NjpeJzf_#2qC)KH?aXZk&!^O09uslo3L^Ru7!fajkj;PEC1#e84T{;B+`W&Ck= zXY6mci~p_-frq)XVOf_u(uiAgxarhr$hX}VHFt)< zgN%H5Y;Op*nsE?swm=#&@&rHYeV^_p9Kh**1H^ovx!Ee7Z=iKF)?ZpA{!HVNCk?wg 
z6h!Sa^CQ^$W@oB_hETa^Gps%B%zMWS!or_taF9gLE>I2MF%8BEb0YEl&n7r-{!QE$ zeb>=qxvbLIqFdQySe8utb(U1a71EEllAbBM|L#}bl+YSiV{PRTx5}tLw&8VSJNeW& z0~~d$x0KR#8~fYX9ZVx26t;UUn^vmlom-cR*_`Q~&rDq{idu2kkR~s682bkI561sV| z!W+i*Fk(v~)7`Z&{!I3&h#myTjjYO=YSLdGd<=?6PJE{%w#;d}OJ< zaBfNUJDGUsA{@=n!mnP#Vg0~CeCxA6u%Fh$In>P<2y0oRK7T}AfowaI_qWwztzCTy zvwK3@ar-s{^-KDIp$1%*AJh85~eTHv^gNMUl$J}JLxn<>ygyAOU|I+aCkX(kU*>tWCr zT5|&wz9@M!%Bks)ocN3S41}lK-Kr=UOiG5%GClT_)4B`jeLK#98#dDobjrh?;BdFy3n4 zDNDgF$W}gd+<}u`0pc5IbH`pGje~D*SupQ`-&n?g7IpoU7;h}Uy4V(U<2&MlW(Nee z;qUF&IgJav9lZrazf*1E!bm+Jt%D`4uEC-X4@AE~<(pDR!d=2se~IRtyU)zQ(T|=% z{lnLQSKR0O+dt%_{ot)zV;uMHJ8=&1>3_lmM#9~c(@&cMFeVe=xf68)oYS zL0@{OQH%p&AOBTvINnRy$=9E5iW65{h2g_GLhyomocKWvcwHfImyew}2j0)T!w0#{ zg*gpc$}2kWC7q&=-`12Wy^W1%tc^hbsExWJvlOXUaJo-6OFGycKFoOurcb)_u0K1} zVGPwjXzcO8xCni9O->Y}zJ>GE^^nd3J1^0|6=}nN6WcDC?@X#|nft%rE!G zp@s3VVfYKdOLAKeYxUT4Q z%j?YZ*EH70bRDg^a2Q_>wiozC+_#c@|Jf`W$cn&4cU{ChsRn(nk#5?@j*sYHr^z4Z()~yO=h)9G3a0zmgWwM8 zc}8;$1ME#%;DYB&^fmpTxR&<)o^Osc_UJzCFs;2YQWe_L?dfUo^g4#J-6z&GKN9k0 z3_?5o7SdJy`uM4G6?uidc-43Y=%p_NW_3YoH#wi?FEfh7W35XVCK)g63ao8RpIi8VsMT5(s>T ziA+7Fs^zT-PjmLpR?nSl91}@aPhG z3?%Q#Zd**K^E2cLmE|3GkOx_TgbO%-S67v^3!T3O#O;a3b$lz(cK~GtXoxoznxi^@ zJ!OrfynB-_^~WTBjW&NIM}&Mv6gI!|mPKe);D1L9RO)vGYj?Kv&{T*C+0Q4uaRl-M zj4+7mGM5v^!8LjgLimSc`Tl}0RN`Gw7B0t>^rI-|)zP9{p&WoPu{TO7uAmj~2BdY- zn$}HwbRya@rK^rgW6s5SH_DoT#05CS8aAi1Z$?4-nM5FetCBWiq~i#n*E7L8Wt z28RLZVTJgg5!bN5>4C7M^J+-N6^yG0^eRqWUuz+(?-L6fEzS=l;|X zsYbBO?L(bE64*pN2BcLKq2rnjuoU`%(|0iHYi<{c68QnF@#zFK<`A^lN&LsiYXj9c zk~V>cUH4$)9)3W&K96*WEc}6Nv7Pzes4p}hh~5@Ga`13t(#GTR9O4e57;j(qbe!_t z45&vrVJCR?o(d%blX9@LvQ<$-BOx?M<$VG&YE7=3&m zWB`Pb%}_I=^Birp*x!%kKS_YOO$Pujbar^G(7PRkZ}b^{!S@#38`s2(+QTsP zV*nGHmgX7HSNmafquv5PVb9hiq`U{i*T&$=oK^U=@I&1^QN4-!M$!fRcf(c6<6L{B zIs(dIs2(10)8{g8<=R6PxeLuL`SgFZhH77&<+oeWJ^V+^E4*=2aqz`Qq=(wn;XWOo z^)yH!4qlDKTa0ux97WpG&bkx%(ruK%9AeowO=RJ{h`c;#(TZgN?FlSAm4_RiNM1BvsBV5I6(GR zUESKtb9T{w#(zGr^PA5rLcfVTgK|IaWLLnN^od8}aB+T8_QOw{YDA13QV&alYXl$C 
zdBJt#6G*z7iF`re^Q{9DD#2NVGds9_lLW?W;ihXz+6udy3Eiy0R z0aWt*s>qq%-p!)Dd3EHg`hkL{1QzC}54uCO)BtFn;9qb$lD2gWT6~7n=hd>&PZd#H zeG_Klkb)*i`dT9H6I{#5JMpLVj)vf{UQy+2?4#qjwDVxd9*5$1YP~j(MRv)zJ*Wp= zydB8v{9_AZ>79%0Qwoi>Bl)ko{&Gwz;P-8B0{M5D<_XQqyW;S{v{sD^-52pY$8`2J zhhvU$cs-_!k%njF6$oqN@Xh!uJY>~EmH5O#wp$L(hZc%_lL;?J{SM^gfU;&rng)oU zL>*yLr)Z%W@x+xQK)P2VpCCL$UA>Vu=Xa8NV7%E|Fi3AG_ya^%bt`GQs@>8QeIGnm z=yOOJilfMur>+S?%4B8ISL}szG-ZbGsHX0d*4zfvr%YsgB7dM<2fuypgj7RZcrd~v z>@cIVLYM{A>ul;8eX6~e%A(dPQXSO!f}>|j94U9e8MSeI;?jO~*G{-cW9|VtlS`5Q zoxk^V0O1SQaV=Hw=jcb%;q}M^T-1YC?Y}o#d z$M}SzlQ_0p9>>S7_*JVJ6n8v>SBh19)}f26UeUuPC#Okf_ZGvtuc7=*ZGCy*-FkRM z?uTuv)1mO&R&10ZLw%z{yt}#=d>iT^Mcx072f8eVj!RNNYwlJs?o-184i3au+f(?3 z0lrY^yA_jeRIt_^s<_KgEsWFnqC_VDk1m60pt=3d-G$$7wwuXV#hD2b0MyvnE zH^7a*TptvxIeIw$_bG}m*O)^4K|BPbFMS|+?OSDCdpA7f_8KPsJ&rkz z?x9xcR(y9Qo>|n={uX~E<}!3^-FKNZ{Q)hfuTp*-zl~=LwgIF4HkaGBQV%^V!-e-> zu;K6e(Vle~@OR!QSTQu3y?Hzv|6ShBa-HvhJH1SxC&)SosLd$%!CXQg(AUHkij zQQHk{?HNPa`MIvLT;nnSm2ZHuPLQ;v!EmUJy~1=CpJ!(J@4$YQFq3O=w z(&{evVD>o+Y2`)}v}@Etj-6BmL7fZGU_4;?xjB@XoWj2udSJF`5zxQk zJJ>c|lMni&iPrm`^VnwHW$IIOau4KtFWRZ0w`=i>)m}Dle-h58XY*{eo@9UarM;W) zC5fL^HyOLqdjg#iI|YMjQYAc!Pj{ee$M(%XGJbBf{L{!#_I}V9v(k;(yNU5^&caTz zolgsly}A;0I{Q+OXvr}rz1VWz5?B9g%ExDBiE~tSowo=~;2$5hLhYj$1)fM@Git%l z-ye^)ZO7;DC_^u~nc4xD;F0U55b?5x@2vk1{zL}RoBKTMSN{N8p|Qy~TH{_m9}gG+uZl@g;Nd8iHSzPUci6*kw-w&TMm-&wV*p z*1KK6cHhi_vjsY;s7>k#NY;L<{7h&J{;vkhy%Id|Ld|(ddiNF__Zz@8=dM8b$p8Cf zq9z*Bo)V)KTz%>;Y#uU_&T%s?z50ZSermlSgHx?3CH(mPukV5?2nm4GMh9_~f5I3S*&dy0M^aNhFQl74+B%vco5=XFg&)1;*+>Mie$8%*&@!>+zB(BXR+ zA3pUAEYwM0RR8=-&4avQUHYRud=z{%or?CFyJ5L)3=?xFzJK%1?d@s=&Kr^g;8OS52^_B@&)!|c}K;QylAiS@Q z!wpMlkB`Dm6L;VIovqe979Zr7I6r3RgtsU>`#yH_KEUjq`tN$|sZJ&DVSlj&q z`y~Gr7lE)6+elv=&*}`ru-;4@pDU?1c$fLJFlp*Sr13=K!gb*MJb*Xx+(cuekNZCz zLz@+CB|0~tKg)!tyuK0buhN_2=f*8jvt$^Yr#0^gCt=`SKWx8!864d82F}z5(wYw) z;oi=Z*g~Txi!$xSHCqI*7S?MZBKH&{eBk+-w~*=|_6_aEUcTszGs7$)uJvd9JMS8^ zwQDV}ak;C|oZ*n_bDVG&8fX8*qEF74?Q@U*ZUEI*nxnwR$9ZX7^wH_ABk{jkE#T}Z 
z9T0q&JJ}sf%1`3pN3`E}&U84w;t{x%UjotyG_Eb-VEZ@HWV=l2%LH+aV0q^NkB(l2 z)3S~#G}eNL;1l_Z8E;Z~mv1ki-f3;_}Wr`;z^Lq478g)N}H<202Q*-@psWIuiy)?qARF4iuuFA?wHin}e)>fuW;?^GcX zCi1NlE-{Uq;YjBocnk;+xY_K*DEfxR1#13{K`+1iD0I^D12MRzFaY*$u4Z;)AHv~S zGj+fHa-2On1*Ts+!avepAFa-IR~lQ^a#8OzHt^MUB5q2&MmkB0^>p6DYHo)Bybt1< z2e(0Ur)*CA0A<}f!IllhjIfQFcUs66-0YyXbFl`(P*}V6HtVb;@n1G!xNeJq3_V<7 z?)Q#hGhjMjRc55pG4RAK6ZO;lR&pofgP<{>81J_|2@l5ffL0&ABK0bgKI1MLLx>-p zu=$T4NLWOB6WW5%B!MGbz+_TZ{-)J>s#mcw_bg3^{IHu`+sBWc`rQQXT+hap$D64F z!}i;MVjdHELgagU`Q+(RxIA-A>0_zZK%xW%9C?!ueA-H4I2_9^iv+uGD0S`4ZN zsjC6ESUC?mO`KmhCOytiX8X#&FmWB{Oow8-WuK^aIK)x+qe$4PrO#_;bBNOi@#w|fjeGe>~6)pOiu zYYV?qN>EyU0}G}Nhm&@b!2jH2_edb`>uJS_{uS<3Kw5y2& zh&dS?wWZEO5FV@Rz8a{cbEIxlo&sq@px)0L+;IzUb~0P?=@Td~ifE4lBS~K)afovK z??ZgM@RihI*b;uk(^Mrc=Utw?eCZfrtUU;<( zp!{4Jvo6|Cb-x}4(LQN*jrQW9XLI07jtLk?_Xe|Lmh6DbJ5JhArM^}TcPN;J>pENbB$92C+U!UJ1!Z!65q!ywABN5OeY? zpOtz@Xh`l=yqkQz%Bd&eAKOvK$vY!A!Ik6vRH|Rn)29@|1K1PMi?q&F{u+}RX<)?1 z*ZUIpLB$DE-a-2a*!Kg(Mp2xSH5-}>Rr$TxOsK(&i(Foxwp?{!`TG1~-^f}Pj z;)nOsfb=B!93xrO$(Av??EaAx0@KyeOC*(i7VlBD7l{jy<^ZpHs8D@=7g`s!lLBLbG3&@9y#w+i@bbkW(mPu6N<9-Ky+rRO#bLcQLvdozasKV@1b)3G zhi4s*pxd23j)G$kjy#C`TnYK73i3)xXuD=A#NAC3UIum#>?05LJBPhz+-0X%Hc($R zw@}At_JSJ^9MqV=?yzV0+I)fUgp2C*Yh6@_g*&n4b|ah`V+~!t%~UK8&5^z5g#qC& z>CV);On`a;ul)z|H0Ps0dS7TDjH$`Qn_v6Fp$l^%#$pxA_|XakehQ62c+D~=kT0j2 zCJl?_yS{z*jm?%pEJG8HU0*m@R1SYe~a;8J0t(1;Hc=OJ!q`D5Ekko z@g1u;k>IGLjv=p8McKhboCHeOrB6DE6SsJp)GAavN!>uj3qORoi2BCP#wh-IW z+8uo(Z}UT2%wfZ`(LxgvKhZkyp(iBD9)NHji5ryi&!o>9u4dHV*xdgIZZf`(!qapzlEXeVyN=?1Jkq9w@XA`3x=kE-l`sx|>XXU)nb`9mnm-R-)bQ zRGJrT^}&mgeuw(oT%W`AzX2ViuEvx8brznUe{y^b#95Gi$w+PXbsnCoVzAzuiH!HnUn+mTX{I;^>VjL0|P>%B! 
zg(jx&r~TSntN*r)Bz)0Vg)SDFo&2{NbnP_hL3b`N`MH!T@R@&q8V(PCWFYA!cros< zvQWnq2s@E{IH%l0C2fxEW<&X_RwPgchi6A^6B!F8_p}iB%E&89l=(2BFY{-Ia++)P z$Kr7OGtyODFX;pm8p~azs~gs7O;P)#i5cOGA~cZB*b8EgnNDX-6q5oKZ`h-g4dh|r+3q7k-y&&KAb(2PRRnqEOl-3H4~TgbJx%?ol8%P{ zHQVT!w0KS)lv5T!{;WQJUDJ&6G+z*T72z3Q@_b*u$N*^kgl867013y~{M-YWSZpOx zE`ev?7v&!)vy=%F8Q~{*KL|s@WSO)hnjJe#eqtfkj{svbJZHWt#XM?~v z=cNIJiJjFk3$4HW!7yaN%0=ppBZ6&5AgY zDI<9eiLzfd=#wu9jwh|g2=@pZ1Hk-3XPN3$@R1^Xh)u~;Mmej<`xxOnUme^Ogr*<4 zOXY%_LT~$E&0qs{rlys=`_C^%SU~R^9{}RxYo&$p25EDEDKF1Fj8cLq?s^%%s$GbB9U}6R-bPwdqwR* zsxeOeDjOI60+AC@{mViZJ2`G=B13&Qk@jg_z8*!6L7Ye0E}z@{ZG-*hj3iG}j+%C~ z2WC5CHE-1|@S*on$p1K!7l30Qr&AxtO7vZIK8&(Hq+TS>3zochxz%yp+^xw#UJ=Ax zhG+cYl-aSbQ}%Ge6}YhU0%2hz%IubcY4@d^eyq=crt*fv&$B;|D%jCED%>#N@q`eBrFeQqDs z96xIw0s0*4I->}dKce-Kb$hbNNpv4RY8-o98V;uo8}R5Khe7Mq>Bu@Kl3iot_;O|5UH(cI}Rd@db z_wsjaSmAK&QyhRz64t|igGTdfqr&h>wF7?ji(-9pAF^!Rjh!0qhWIpHc0E>Ko!T}R z3ocsWw)cJ67uOVK6|x012aZIykyWt#v>#m6h>(8WzY6^-^yJSs$KfWQJRUJ%7%uDI z6a9U6E0Kfe!OBc8NLTXMSb85LL_G})LfmnNyb)I$JL2@uK2YS-Nj)*q71!G)V7C*q z@XWAMu*>W~&sCq2F6DUeCl&@+SQ7)sv|7UY4Q;UFv}L&b*gfgx-<1&6B#VbiJNb&A zZj#MfBlhs1soXVuzZg5|TzWg`TveaFY+{P*UYNt1xp(nsz!3b<%L)E8Jgqd}pQmUw zj$ze$uW)Vg11NvG9XoXW3lqyyQQKRF0js{U%D)X|x+dn?G8Sx`HdHq@ZzfL|ejMzx zWaxP89i;5*%~qaEzyYhgaFXja>6XPec=k9QrkpeaqkvDa|7E_!ZWZvRwMkg^!x(5G zWck{nKDoyq{bd#1X!PYw;`Mr)O^Ii`OM;_K%dOb4_ zaymQUvZnM5@YF&u9O{R+&wN1lwAN?hV?K&Ltss4k&Jwl4%M*v<#0mSKx*P3&*E7X6pahY_JI#HH`h>3g-?Mhox#xZW*2s?og`bV&gEfu>PuS^H=)P;QpsxF z9o~-i4o_-*nV;48MdPu9M?HTEF}ph8z-7&#a7YyD81;eS&ifdQZmLpWqw6kv)VTDU ziJI!3R8hA6whAp_2}=`e_;1#rPUl zz1`2M@`{+4|AXymO=jo%>WmI|sLlgXV20hSPSD?D1hg@C0k@5VMa?U|(i%8~66SQhknRXE?uk}T$N4)SNjB0&3^;rVW-QEZ1r99<1dQHIea~cjg(+$VX z=GgPIkgQ;FW(B^1G3n`vSzSnjl#zt&xPAb=h;QuhO)*{cX3SYAk%X~Cx0=G zhlgpM^7^vH#GT-HJsBTn5Pp6u1}Bi#9r<=&g2a1t)Z|xjVgGR&PBREY)0$$KjPJnxp2=t0I6rVZuh6y zd`r0FiPdY^^*fWnd&p6x#r$p1tItxXZfgh*V{b#FOU)dD-CN7cX?=x~eznTwo>o}g zXAnE6DdRN9=`gC<9BIE%WBK9PAp#@tefaY6Pc}8^~{3PGU5_jBrtKo4V_QH{MV5L%~mUEXO$w%cZ2g 
zD;Qy@;7WP?wP#>yQz|$MoFA?S!Hwk^+1P8|SxilE1FCDDG42G)OY(8$vmpX!d0>ve zLjSHz*Gj{g_g8^);!)P943)3FThnu@TX@-}GlXM3k@yA24K`-yHWksk+J#KugvO5& zY(K$X&iOu@$LvbPi?+cScJ>j2Pqc2xfFQo5avkwq3&}gOGyB~BCWHs4;_$EW%)rhO zI{hq%+|Szb;kkZzJ@6_13>qWG9ZK~r!R+uT*(@kUqMuPkc+;Fitor6(zn+Q@#5J!ibh2N;Jf7}%Xr)?P@5?z$WT&bTJpE^!Sd_C) zge?uF(z(+Vek)v}4`VJ7_`-N?7fsXe2Z^h%v8VdxGK@dVR~_3+;0ZYO6L*VwYb3{Q zq)ExH`q;CBrElN-26Trp{eB^{^XX;a*CfT!;FSo~7t*Q8Ymq+O(cHT6o9a)~Kkz9% zshuiM&fL#OTMS~m#tHITuIxZz znzfGkps15E42)s?nX8@4;I2VDsV_TT`0we%afiN&@6R&$Pe(2Qm-vha!}q-t=*hl9`<=fNZ2 zdlwB>h*p* z=Z&%3k2QE!3wyu$&fpK?hqzxkOjHU_RvgGTZyg0s98qg79u57;cCQ+cWqdkg{^X8mi)34r{PSy78rtH)v=B-cnIjXoF z-lK!+0Rf!ljL_M%ufi!c2l}esHey(>A$TndrP*w}C*#?})l}_V7qM%YMILTKgBmSm z1bGZ7qUQ!mhDPG4ElT8JI_BO09`e;Y-&pm)`d?>7!%m6t)nDYODjm%&U1l@19Cf@G zrCsw+qyJ8fqQuD!6vhBYOXxCuT@tXDt$ziH3AVo^w-V;2fgyQ1V{?J|T;Nx(;sydg z1Ct$Td-Pg{X3%=hDoCFyz7=YX74DF$$qxqJPpVnNPJ9|anwFbB1bv_nFI0qFmJ~&u zJ!15g(qBoy1HDJ>qB?2;T)Ds~*mjiB>dPa3RDG}3dXlFW`K6;;8=5@x6tz2Zfw@|g zu*Z3Pf%$f#p32L8?_cAryQTykBX1S9G7fHyQ?=rqdi1tj5)`lX&mF?=j_ikiI7dda zO-67)XEDXH@<-C5z#V~ zX!)C!Vo?M8yk5rpQ~6wL;E07;JpVHZ9V0ZaG|_vW8%0)jW3R5=1avw-vir;NVkbTG z%%<{|#|3l|*FGE0zliQg#q?9(-re4=D$L8gAA=Kb$X^!DfOjS|ISs`5P5l=7$TlbAq}8L%RPumj zlC^^eUCh8#BLenF z^fI_LkeX@Rh3orTipQ{TZ>Ie3#7nVoor|cmwkO-ftmm(ZOQHW}FmPG%iwSRu=fa$o zMn=9?i0azzmTjy15j+S7Cv9a_p9Oc9Hlgb!){npovfj)0626PjGxTrfKR%Q^iDxEs z_E}K=GViDH7g?ij4>m(7ar~(Ho@?WC`pG0!^zaZ_@nqcv3x7CAq#? 
zVsHs_v)w*$f~T&$BF2FGFk3SChM|+G^MQV>YN6)+7=_8uQZ1=*gMIRKfiVR9Rz8y6 zyL1)k8!le9G^uelcp6EaS{^6V6}^dHA^y#r&)@a#X4L+%a%Xz50H3nU_IQ?Qqe=OY zXM?@Wl`B%T3y$S=wZ=-@_bAto18R(7tT8FyY2W>>fcEDFohmZMth_9I`Y1+^Yulq8 zD74!;E^@vH`IZdgX>;#$$@ByPZN>8!`O_7*+u;9F3|~vYG=1=^EmF-(=_{8tU=p8t znxQyV0E?xXGvI(|dhaJsudz`4S8O;}Z?TmF5AWf57mvu)v;sP=gOv_F@jDazbBSZ~ zn~JmGw+ovUo3++=U)f2cdQ^}Yljyj&3GS6T01wwr!c&^4L)!O#j22Sq9HWLs&*=T4 zSN^VY^30|(?p1Lro=xJ8xmW3{BygHkezWJS#Yzu|xd)5W*;O{CU6KB#e|%%qcrWfS zYYebzF)5$DH*q~MBa%1FSgq<<+zj?+yeCh$zr_>M{u&=TSEe+J?iw%^{q|LPL;?>> z+p*bR{!!Mts!`0F>N!?fgY8Byf@WsLby=Zh8M4FB2X_{k@U3n<>7S zA2K|gE8f!U#PyQ_gqjvgqlCvEric6Qiq$(y>Jj4tmA_D!VD$KNnC{%mdyV;h>}~`8 zp5cFFGpj2S_|LA_7Yl{!Q_hjDu*$k&kEDkug9v$rP&I~(g+jwdOQj{6hW%A~f=}%U zl53iieG z6W8`@&~Pe`5%A%JHPn@6R-O`nhwrB@iqE+QM}E0zzF1v7Z@z#d6gCClA@{b?qi137 z{|9w-n^CFKw`Oe}8cg|Qf^IV0&VCVCD|xiXa@1o0$G@%4(0Gh2gxqd#)sQD4m)Z>s zKCJ4UZti@{Hv&qV+1WV+UX^u`H3z*uhVdumI z1c6=^3dg{yto#o0M@C;UG_Qb9l;o-l;4vd_Q=HDJ&uT&wT+ZWvP^S=PJkd}jZSq2l1x1?GtHHFc%mloyN7oGa68@qFuvHA3|Y z@;Hk6F$($8E?{97BYIX(rH{BqTr0jiC7q!w^SoZmP|nMF{wxcpB3TO=Yp47P!Shl2 z@EEbH;Q>}PZT+hdLo>7T+wkWST2#R6D@`Vm_ehJ#G#cRjP(YvS1y?^4IW>cLiGLP% zK3!CFZym+xPwy2zwah9HGeu0xSmagtDbr`I$~1L&d&S%4C%@(<){^1v8Ci%9k1A^? 
zJkjRP_afwt`iki!220e3gtnq)R=#p(!ew9| z8*Sb#5W)CvLh+c&50n-f)ng06%S&J*seA=JDd5kAGYbZP3%oC`zn|DJ!`4IwYwjK8 zsz=&<(Uk85Z?o!+K^3|aGAf3jB!{KR>&ki*F2bsvzmnxt?AxhuS#1#h$oR~|tiyO79|WPj(z zDnBQ1A!{R@8MzGyghb|L%fNXJ{UxEzO|>VJ`mYFn)P7Csy9xC>^}R$1{Mh5i89)C& z{tMpt%E;I5AsrbxiyK@x$Vc(4QN*Re6x%9JOz??e_g$`XUF()+Q1WX!8Go7k6&Ph$ z1r4RgDTCyU&ufgPHjBwUTVprBUG#CE13lX^P&RfQMxQ?yFh_O>!UXI+;h7O{x&9=%-+1WG?&&{PiYDHh$eN9rIS7ddy%1=#W-jGLHfP-1ee!J=t2LAo6EWvGB>*wHgC!s6ZZ+9bUmDq8=q+q?pYv;Z&(3-xTtu?VsSK*V&_!Q*Stiwahx01Nmo+L_ zvRu|ZS+29J&c|E)5S^mfToYj=h zM@H&gq^<|uf91~WPUX|Jw~l&HP#NxjE=CHnnAjX^fOCE7cj5(mS5afvYW!;U11dVxQ8c}|!^k~*-caMc zKCB_%oEXFw{_w_U+lkltcMIQ10UUSYBA@;-O-}c$N9a3wbYvh;A9qOxVNVHFZwX1Wn1P#$@R<8igzKY@+kc7(tEQ$^@?s9%^NpVMY^-(Y zvs%_#HfH(R7D8Y0MT((u&gS{tYvfU3Avsoz=P&vw)b&O@C)Yv_xA2qb z83VP(P9Fx~{?k2l<+>Z?FMd`gH-Ca&*(`nyUc=M(z2HsR|1mWVqAqeBA5Eynz#(&7 zy>?Xg?=tqN&_s?%X~cV;Vy}+T&E(oYcSP-V$j$t+X;is7GIcxl7jD{y(Hrcrq5ARA z=MJpa1o)+6o_MX+Sg+*fC`Oj7s8>0>U;f_fBXl#A4|aLP>hmAezmya_=!$Iqcs#Ij zGxk0kCZAX=q=bHT<;7+ljN*~+asPQ+?bER*;?p=ojNKn3uWsuiqmR@Tt9L)*e}wN} zHy7jXr82~Vy=4j5K|9O3(xjyY&ESor^YC#=+oQn5GFcN7n3miC3JCCWR~8f1jp#nFEa)={OhLh-2%Gi)2I_7iwE*BjNglRq{M8 zIp~SVT?$|LW)wwQ6`*b*#mT$oB$>U+RzFvMF#k;~srR1fFa3MZkONmY5q_z=s9$hB zT=!M&NrxI__!id(IR$H-tGDC<9qo10t(n`t3wz4h*OPj8lOBjt3DE{x6xZPDWjI2wbh$ewCPI)999XuC-pv1 zU-No$UQG8q#Euo;@WR9ge5B?!Idc}yJFx#x7HgX($BxgI_)Kln05=`=FK!ozp^+W( z=~do`XiM5Q%p3pCa0l}Oo}G{W5H3OkTp9ggJeaUgI@~Qn`@LGrE2T_Q7}BB7HO$F* zG2v`Ko;IWaFz~dhBl&2K7Z=)_EjPO@K|iNarS&VRcz1sWkIHsw`GLOz*U*|%qVH*g zoLbYDGxZdI$>;a;nKNhgk-_&u>F(-U1ROU@*NFyxAL6ysZS~Eg4=5}%Rd2rEoW@f& z_XXzQ46X@2Qq_0KK|AP8`zRi}zd3`OR9)g()PZ!SP>67~oXICfR7vVJtQfVK-;I7| zS_8{kQNYXl-p_~PT;cUL3~uA#du_$}TwBAsQXASc>W19&^pbG$(8*@TZ&G#aJ+ce` z%v_EA2T!m*9$`F$PuQCd$byj2!W{2+(T z31`cH{irp+&BKq_g`wo~=QRbomlTRKS1g`Ju|<8k;HI9u=0iGlX;_v^ZahWPUj%zE z$jLPVURE%#I)}-jaT^%dCVMz<;TfLe8T=%FxldKx%bQ1gu<0U!@ndA8Tf}+`U#aE* zbxFf5u94Czc2;Xd!OV~Jtz8oOagGF+v*K_6>kaboWZH|?@}^N+U;OX9z~7OD|3TV# zXPv@FZR`7qN~7?#VUwudiHZVsEzrk0@K}txihGh4h9)W8c{O_g*&MwmJf3c+Zk=As 
zrZ+1S@R~B6-pf5Z-(g%U^X3&;$@gCkrdqeJ1G8I+qY2LhuvJ43bGc__%^#aQRW0%3 zCgtf?(+}X+XL8)}E8xCx9*&$9ui^4X3UkA-RkUSSanWY}8sLGonO&|oLCeV-4YIw# z52`OXv2m_Y^8gGqru2B9S0@kRT4V0MC@dzw)T-3Xr8D@vu)b>G7qR+TenS89EZxGK z?~vb2I-e|Fj9(>v8(x*@OByz(q#kxORX7ezho?V6(0&R(}m7`#Cz~GkXog-b?q2=?S--IFSu5_>)QXau z??8R7gnp{4|M|2BxLlOj<-2Uwx0dlme<^!BJPrL2NXLFgk*8O0XsB;OX=}a370NDm zmkN(PFQ7xj-cj~!cj*`nxYmcxADp6XU)WYxwOlA?KI+Pr8r@==Q+%AdMx|$g2d4de>;Rh$f)i_m)svvv}kLDjo1n z9_lxiV*WVMzIPkx>D4z{>qalg*6MoRxOz{#DK`!nKyklD(6e$I80#&ehh)7I+l)Z> zaG~%S^<$iBUrYOyoBi zJ=Eur*OTIM#Ijxzm?41I;G?RX+Q$MKwugy1r&ez)IC=gltl>u4KDw^x*EEDt9}GT| z=MR2HpN^4E9lCoCQ7XB2UyvgC4WiU)F0R zj{PiSuJOo`>rMq~E#EZZ!a2zTy3Y(d+J@Su`4D)9ihi;o=uKd5Q8vyO(&4Q*-=$r_>Z#0WgwnYMtThx9(* z`f|tN17+N=9OG=~L;M^2Tc>Rwq1m<>2aQ?Ayo`Jl^Fy1~Gc>R(%0x*CqBMC@2Fu zsDa)x{;|^Cs$K~mO1l&6ig|Aiee7-CuX>6d?w$p{B@_68fMpEb&+slRZ(gB+s~7V3 zmF~Rl_dKceZ?m-_25R5P8M>6K7kwgq3V#509u&&UDvbG+b62jaSYB}}Srjiq;9A9j z?C*?owC#8Cj-o5Ic@DD`@0s;}1!5j^$hOT}zS4Ub)v48ywx8T7plOvxXZSAseGMHr zMT#%M3k+Q*=Wja80sH?6_)W9wZ}4t~QR3N_huG(l6<>-0*UIy%fyX7Z84qhxmp;BY zZ2a&)C!x(4H3P2cLrxzaP_NgK6t^HjG$_(RVGb`&_8_}UjZKB61v8H_G?)Bx<`C-2 z(S%Mh`TW1^$zYt5t3G*!DC}4`S`GCbzkCz<6EZ)O?2p1Px@av)tp28t0br z?yS7Zv!2If^;S&a^0bnagGBIjk)w+^UZ2x=9!haI7W&_w}p+{MI5bxf5!X+jlNX!&4@+ z>diMLjzV`&k5 z^wI&0Z;OCmk;;R2o%e&bzHh)vAKsYwFUjl4a$t(C=0U)Rp|4lbrPQnNQDgPRv>V>| zIcj6EfEK5iQ>FlC!W)(~f%}ZHno8$=zp@b7%{SxW*KyQ5_J#n?h_a`1@^YWm&lcc4 z_R#k$76KeW!GnCc<(oOY!FDpKzG_#xhVo>{7*cuYt7GcA#N|$RIO{(b9-i_^LSGQ{ z8^73}D}lWtKFo*V4{^s!Bth?}8Y7i+p!fCDyh)shEy#02e7l zNL{`+M8Bk)@D=bf=M|3=bdKtIvVOjgUml;S^dzfHW&a@yw(sVnu8n;s<;sIayclB= zb`cx6ZQ3t+rcf$!pu6<6!w9PP%f#m;3Y%Y47%--rt-&9dphBI zozY+O7@p2}dUY%#k5IgXOs$*1x=8RQvaQW|m<|4*21U~8ZpXz*9ip)}L3XyG*8Fc` zxbhY7j4q}&<-SmQa8vd!WPwZJZ(ikL67ZLiy%^v`FX#&~u$MdA+hH%jf3Aewf}dWE zmnt{K^>LNMwK$!7<2m?e1dcM`E7bZR59-Ot4N2uN|Nea<#i^wN z>cLlzrTPnsFtkjbo-Xq@g-$i{$Msayp-JHpmlASRYE$SQ2fBAKFUlf>df=uu1K6acyfhjpIL@H;IE$=bWZ^I|=J8{I4I8@NF1t6B8PZfS*F?QPKFH 
zKz3llH`DZe`*`c&7+`V~jon{J@rhLV#|FQ$c|DJL(;Hl$nDWf+S9$iPiOs{6UnW`qBI}BCpmqR8? zWE6S6;Y93dY8d$~Rq(^!5N%xM z_-pvN6oz&+kPV~eT5Is4$oM*$>iGxtUw-_kJ(~V6|7-m>Rq}&!`tHNk`0Aq{GJMW< z*7j7Pn`4P2~)&27rI*}-wuQbgkEbz)AP+v4U}Y|nQui$r)KcAPbh zPW2xnhQ-X`!VQK}rE-PLeAT*e>Zrdw4EL7bUB8Dq=oQVS8(h4vG+Zy$Yd>1=<&*24 z%ef`~$Z3udbn-uU^N3?(;c=F_zx<=uHQ{%+-!hK12f+HRSlCMYIzUhbZXpTZ(R0#Q5i> zi`I@WsdtC6Jo0Be&uQ9{Q$l<3-PB{^yn_=@E4rNr4$kDHfFG1x$d}ipM$oSOrS&>A zP@nswEaS5|XU}@x^?tjEy5%CKG)TZ_-W8VyS7+^P8!G4TCK`_JD8{6|6uI{Q)xM5smjV)`u>IHKxKf>Tt&Q&7$g*K+d+Q zY}()Lpl7anFYxcEyG3F1U-J)gaiMBS@79$tyOrNX`@IkGkeqD78i@F)IQqJ}K5xBV ziCc^d;oXy+NUDj-Ep59 z4qD67X^nPik{$LzeklJ$uM~;T-^m`^VB&70L~d+z?hf%sh!@u}zsyzV*gDwd+5=IHTk4-@rBW6Yy$6+VEe;=yPveqK;`Pu?u)8?^i4H$ zZ1??C#5!C&fv4(>@4T-xbmF`F?aYu`{fzH=v}jf17QdfSpL`GbP@g|FwH`;xo4d_^ zlz!tmSwGl;o|`ErUZ2CwqaJF*+d8tCyH=nFc#)m0j(+AEKP{>M!86kR!w0c#OFtUa z@KRoX;WP1joNFGlozQpOy|^J27QE{Ill5VfIOW)Lx;b>2*m`|CU7uH7223lgM|!=J zPez~S3z@EF=(AepH5topt;^`0Z>*&QA1(OTrS80Dc-HZ(dRu5~b}2Iu`&Pz|UdMQU zuhX9WsrAz;)ZOl_cEs_R+|l?tp+^|FBF~o|!h0hx%b9B*(e=H@442(cweH0=_LzB( zL+aZ~)T)tqtq9)#21nOjC+gh`rwxm;9zN> zZw1V`WgAJ~-61?|OGD0A2lpa`y~ManYN%_CFU4t7e1^${8OymsSTb4dTufL)0zT-| zpQp*D8TSadDAp{P%cs5U^oRHV(hnz{!6{Vl`c0YLHis&Cyb){0+OpTTk~%QV9Oqq3 z@BV0?!U2l9wMq3QCC10es1GM8U$qRX`Kb#h|60c@#w0ShLAQJ5MNKpMDD1=7Mp5{j zN^;ra@1pa&BmxIfo$f(2Yw0KP4+c-`(`gAcdIUBYjO}Mp=Qm>Vk*H%iqSI4;&r}M7- zk(|$dsDW!DaI2b6p3=%CkJr9VzU%$w=z%=`>%VH7?7!(2DXey?UdF&2^Z6nkd3?X? 
zXHQ^IU(vDhZ@F~!F!7*NYlU~(!~65F4((`W_QI}ku|`Cn zcE|BqV#%?SBGDMkzR~67!~?qs{2)I)UC)&)9+KWQgsP5hL$_RSsB2gJ!-YFq=JgNe zM_OAQp?%{&i{#Iby<64jVLtWULcea5FkdWfCJRO#7X^b0>itf|%Ti|+v&H$JGNj^e zUbCzby&9IIT}!zGUVf_PLpHr#Mt^>D4?TZwN6q#X($u^|BN^oe-ps?U?!PV?#oHXr z;~ek_je3ZCDnEuXuwN|Qu!;QdPU4(xPw3B*9TGKSs9sk50^J~vFLu@M+;JyxWnK^6 z^jpuC>38WuZgH(h(VGn16)s6#Xm(dm25+ifGw+&<^5*^3uo2>j^&;|5u`*Qu4RU`Z z%5+O6r7ue0xq6$a&f2Q&w|Q`xPgEV>aQcqr)vpTfO5Z@ICLk6CU)vRK$Hg3Efj>FtM1Co3a>W=$?8 z)Ve?eP`k|^C2*Ked)^ma$5_+%uK%>umV2ncf!^fT?kFk!xEIf$Z=%}V`B@4 zTqi#Jbc(bxa)s5-g8IYkW3pGj{FvLjnD3%GdR#ir`YNF#x#p(~e*S!oR6JTG<|aku zgvvGjs>zsw#c?mVJzE#}rGY=$YIC~x?5+t4gIQ^-C2_@QQ|re{4^m>5;O|}M3-^l~ zj6M#X^sG+J6uwYC7iad}dR6T3DQ*G_WLiaEPFUE~EdQqP zx&ARkey_EcH?x>*L ztgRYQ_eV7uI)plwE=yq-ikTlP;2E(xmAQ|@C172!08h}i;}+sYk2j#!f7{TjzK3a0xmsMfhfW!mxQ{17HwO;xYl4dinuo#9=GnWa zxO%fB3Xg6{&|Ne=YZc#Y<{`Jc|yXnQ}P9*4&JdStRZmC1dlJz?qbH&tG5g5dmL^Ibul2dvV(nz6nnqzbjjOhaCZ10tU@j{z09Ogn6+X^Smfkmj;T!bL4L?iprhz%2kB$S3{oeIN zR7wJa-+9dXOgdls2{-+CgqBSxC4tLo4os`~<2bi=h0+45wpsCKq(yDiOD%es-Ix{k zMzrh4(5~>6RrR)MK?K|vz-)8)Py=~*PkNJaMT4G^%8NGG)dn7DiI^0%L}reBBEd0A z6N#$xj!4fh2c_NT0D_OzvU*O<^JsWarM-2xp7~5C{g;FXV|ZzTeiHB=-UnW{WA|HK z$Y^woutr8G&e6Slbqw+-W=0Wx#^dd1T+XRQY#D-ig6 zs_W)sDlAm5RW%KbMTdGE5=uK>Utwp47Ad0ldlJIX-n4Pjcm{{DcC!H+b>=ZNn{i_6 zW~uZvxckJYmo4(NxldVZ#Ywu-Z<{)mQ+`7@9Q&X$53P5HRTT7U$#H0C6PPH6CR_7? 
zd;NsUNq+u#!hNb6v}XEwXoKa1K4g`HbpNnc=`5LH877C#%haCsz5&fYOVrx$W`e`H z*y0)jb0=#@=F@AhZ=h=rUofcJXd){R&>Zq_w*j(p z%2QDed)>5tbI+*Qu!^B{(X(%hFeeEDI3R&tq4g#5z0WGI|A;pwDB@Wkc=b3vtQRCWO8Q3Qsj!l%I zUt@&solRe7tt6-FQ3CiQSH)D)z(>5DHX#EAUM1l8dUY5{>r;IskuU332>-4FpGEnylobo zfjtlsDiS;^fv>s2fqYWUtF7lq_$+@$zCd@)0;cjB=yl@)&dS>T@|A2-U>jY#l_Hgo zdvk8R_qOCldL?&n@{P!5yGM&j=_)msSX)xrROziXL|{ayx9U}_iH6r}V^T*^Qkz(C z`C$S6(}O!FYUu|fxj<}XWCCp%`2wdOERAfdk{%d*O?<@JRA!z73>2b(~8izV}|7rz3!a&`#i^i)VP* zsGfP55VVHZf!~dkKcPPncX?lXSMIVTo}dSXdQVxafOze4oPk#+=8e@lf}cf!qbnu; zEoIJx_VoTw@fz>GX+eiZxSA919VKLTc^r9s@n)KT+fbU0zJ|}DTee$_IG56^X^o?jfmDdi$Pt+8P+d)ONRw@2w9${>NIvrXG)qooGX2= zLCACP1m@Pf+Lukg?aa^|f&7?(D7fRqi^m{4#vU5wGE&&fv zl@)F(;~?v@Bh}nV)Pvx6OAYj|aL+N5qJDki!>zCn#_S@b@?c;S*L>-WIkPm8H;Rpu z-r>6sOI`WGA@}3iZSWDPFcsQM{ z8xq%=r_%!0os_TIw>BzSI+%9<@5_h(JuAZH|M1I*kgz(G=o>+P%fh)ztQGIAJdT^s z8fA{xxWxY z4_@Vc-S+UHAHlLyJu7p`>OR@q>x;S?+DHw`Q#lmZSh)Ph~Og?$;bPqHnIZWJq<_rf=SRlXWYQ_gvhE}P%z`M^eRUuhu!NvTEU zsV&|6QC!~~8^(H6H?#A!3|#LzvABoi<^STTXQu_^b2^+{2mh5Fx~`)_t}Xci?iF%u zw2doGCJI<$Vfs04Nm`lOh!>|+XA5Uf>XW>hn^c`a?I+YQ8_pijbACq9lqdVOCR5IE zWc`)KD*M$^mR&?=S9bA!wZaGcf5fow%%@_w#ThCx=_q0DG1osur)NIQz0QmK_g8S| z*kX*a@}s8>^&(wtcwoOlG|-_OFZ2wOVJ)Wf*SNOkm!DC*!mpxf@iUC_y?#QMLfv(& zwfL9TgwIxb!gi;_sQt=IB4SDkVcoEwOf8P6I!oBsw>H0pdvVC`jTAIv09Rajk$N;6 zMOF<~bEBn2sM(H4ZCAeI)I0GHtv!H!npX{=VwPj6Zx4-v_cx&`4V`&n))F~4z(!)M za>Ua^bZ2QX)9Xi3{YLZz`KIe8ZDner%p5q48rm*l*Rg9@t?751Nq8II(Ozx&j~+$c zmN(&VTr+YF-xpVtzBhYA^`@Q|wOw0LAJ4@U{qCRWFj?o%%WO{K-)P`%9vRgzg(A zk3WT02(PD4k6q5UduO1wrfDOmw4{Xv+Urx#Y`{HevxQ@7eI0AUSZnVm+bk%m*dH3S z{{(AIpUCDfu|N6lsuaC^4;KtSL~XNz$RXytm}ys17}ztq+4Hjk;}e_D#fvi&YBOq| z248;6FUp%qsD@1!UdFv7$oOVy#v1D)D)5L$cn_6fpOX_ND)5ZGI;Uz2${e z^8ZtjDkNmF%|TyUpZ|mr;C+nN>`RgfKZ=>1>VXC;tk=p_}6MF*mjBG=FQ+D&dDgR#!aa^RQ}$8&&PB{#CoRmy+Y5{ODOF|e%C3w#5SN@2^XhSKW8A~$*BRJG5o<g^7~=5FWFwddiV`+Zzy4Ywcp+LQK9&oio1Yk-;7BAqx@nt_NAFM zN&}X1+p*CCxWje3L}Bl#LHug0ldf=icg4OUVjJQ?43K%qncUJ&dMR 
z*bnsZZx_=eAI?j$vlb71BaO_k1bMDr7`JOao=*K*i}M>>Q$VUjo&uHt&CVP*|l@k-#Eee!m2=h|Lss=$YaZS*dFbqb}%Mz-lh{ z@)m~-oI>CQvf5sN&D^p0zRZ{X94l~(B@3wiLk9vMGJ4*-PQ_j{H?EaPIaN$}dM=`( zt9nuT-Xk>sK^e|?U@ImcnMh9FM=4``RYtwgiSI!IbH~}U#!~pB3i`3ATO$A4srtY@ z`*`!Yy87-;i?B|Q#QSH>S@kY@I;rHI>a4IXdURp_^0N$wPdr4xbh&amawcdH#g!BO z?qS;~KiPMCkoujpb{00h1=ji^Lw+gw4lpst@F79#-=mnP4FVSXY4YtjzMU2r1f3!uMymX&T`!xdN?0ZJzQJk%)k_(^u)iN z&eT2bEDb%_RiKvman;|lb8M(=)~J#`wYvvfJJn>j&V{+ssih2lW6SJD1l=IQC)T40 z8E@r_(aQ{QKexVgk5zs2vwWxelU`TI;Ov)~ zotgP{r45c9ok?4FRbXJUL=VVg?jw2p=t6pOR#Ck*ou^IpmJ@W8EN1yeKm+Q)AKqBy zGY3YdNu?PEg*ed#y9fqcY3?tX+qoE3d;L_zhE~$wbx)usYo1f<<-0`3 zDj$g=PO;J^N)zJR4RDruXn#|!Pq$s5v`N}Iga`mfN++Zzue;RLJRtj^S$mo02YcKi8qP32C zAg}!1FF_z~1ECo%1?*r{^^pW*T>rpRzi*DkKP&x`)T2wk!gxw}oH%=|D zVOsCUqq8C9K~>YDW~-w;UyeY^0d6j^wzbl$n3oavH2> zNYuU9KlP$inpTsfODGgeYz}e>Zw?A+{TqWJ~_U7D&8}fPqJSBkN zIQ&N?bjYL40F>Ll=J zHE5(DX`S8?nzjX5|5z%Yc_T|46Dr&1C-7ZK`6>&0zLuA??fe;Fvz0#N_XcRyLb}4U zS6RWd>)IS)*=+}{ZSsK2`W81aPVqF=i*Kc!;KWB;rSdQVyDQU&nK_)@>jFCmM~Tx- z9t!L9i>hwSRoL@?#OW2JFcf&}HLb`{{m#-*q4>VmmFMJ?`%RdfeB5- zC6_LdrCofuyU#5K$AVA(%B^09=#q0ITDU4xgI)n=^nljO@+P-y>Y7=3iK2Qk!^a}b|DqXR^@Bkk@>bl|jx2p15x-q>s@AF(j4$sff)YXHz z!Hj;g$jLy8+=lyoT~5=Fiv78ZS6w>&cOS!30c(Qz-I>h{?a1J6LOs*<0vF+Tt8<-5 zW6P)Z^xYhQB?8BJN6il``}hG{OH*Mt^gWN==S9F@MqQZD58l91V3enMzGn;6MleA~2{lg#Y=e2J zc7jeH=tPYi9On*0rE@#SBksfA?A`yJ;wKbEXVT$Ma*)&QV3p@-ZymP z+zAJOUwg0)SrYsPZp5BTz0TqH9?GvU_)xP-*_r2g^+%hib@?P!qq2`@LE7coR^)ed zffm}NedyVetV3ex`l|l?$S;;(EU#(K@3fHiCgM3W_u)MC*-35e_X2c1*+J&|T%RbsDyj_}@SYwH6B-a(*1gz!2ihTt5f;>G7m_x_8NTcW& z<+T@0eOQCJ-^wVS4v421$XSuXs)FQ)& z3iLhRGe$&}T_}Exucl`myrHn0(QA~PFo+*SoaO3{n|aleMKZAEA2H!?b(Mu^&ifMc zJly)umhysEeG^_(D*vMJs#e??(e$hvt}&LHzwAk+E6Oo5ww?p zrY9%+l>{Fx;cJxMC-@6i83{C{d_kQEzC+<4?=8@QfX6tKrJ%ldRhEXnVXGd;xoG9l z8hD7FHrmD8-_8_uTK=NstYu1nOVp3Z?Ho#C+(xRtH=ZwwbHJS2G(EPs4joSE%StQV z2kx}w$wxlqag-UOZV(ob73OS63`q18J7NW*EWK8B44Yi z+@NuFn)yG(eTQF9Z`gmLsmQ8SvLmu-oclUv$rqJTk`UQjM%gnojM5}3DH)Mfbnfd! 
zMH7|mk&*1|nf1Hwp673PeqOI$iBF%;IoEiP>zs4nsR@*42l>Cj@1^mhn&TYZIgGq1 zP%R_*a^BW;E09Mgj5>+=?mgwbr+k?3tpYoy{Y%I5V-E5GoqizcK2>;%ERRf1Ga{gw zl5+WCC>tAk00ysm<&d8jFZ2%zoZD@i!pR3oLIV3-7UGsHZCk>2DY4#bfv&Dj`X03!^7h*fWZ>Xz3$O$XQf?)=q> zYn*zB6CQIXGb@#97%9ip^CJZh$TZX9G(SPYM0okDCHVff5V#I@6CbhYAA#uFZ@=Iu zktfx->Lhut_k5=t?KRi-29hU{1zzOrQiaxHVs3hP5$%zFc2V8TPk6#btt6oN0*~^( zUw00Kc|h70Nh7lJRec5jOC9}7p}AkWRR8#3Fp^!l_%3Fi#P@90n6|3tqGYUpJQsVM znS{ zv^LKTByLlSW|A zy(bAT#j}z&f=z!9Ub}=VOXxY&Y30kX;fnjT=5#Wn^H<0xDRlfuy;;Z6^#98H-i_Eo z7|uu!Aq-iGL7b`a}`e zob-&5P6s+CCT6#5`+KUy$814x6!-X(gbRLbMw*9HUC1zJPp#DEK?5GQI8Ex68;Mis z{tWUnAp9!f9_hP-C~BVcmGDe-UqdOUxra*nT+A$pGZrcivYS#jo4axch8#|2lUnHT zYdg=u+?1w3dJ2ElcaaXZOah;6y6kIcitx~oeC3vyr9g)U*Jv-s|M?^Ki!h=uu|`v4 z`f5pE7WDOh`KP^?sce_lRNj0!1UD8m#!EgQkndXsajs728N3M}=#maJzme5-@>`g< zs)M>Y(h|RC=fJ1@n>e8VK1ut(bMRQx82&R_4{H~cu$(VhT(|lzJGiu&dc&y)1YMhr zX$LRC!^7p^HLi)euhm^>w_Q*5>e!K(a z9UNc)fxdGnv9E<}N>8d5qnxOT^p6EF~ z8f(o$v8!b>b?A*A=t8r^+k;YA*InZw>P0Mg72EbXo1sMSOg=mvg_3e{gK+>+{7KM{z|zZPj{oAlzNJ4YykzhA(47rD@NjaM}GF zSd)mT(QY~{USgzfvMqy@$|6X}T21T9mT-fGTH-s>YawX=8fe|~BAt_=EDyF~Mu*)% z9Fv(|jTB%Psx%*5!%a&}!TzYBJR$luxAygi0`KRjKd3!U^SlpVKA*z{{(Eu9rBYba zcr9%AjD+Q^1-~;O5wAYpCb{>>X5ZX4Lg7nu70$eYdT#mrx!!xWAag14fiLtrYo(^H z@q_EHs`2W_l~e<>fN~_8+usoNhg7>5VX+Hx+-VJ4i`y{&zw3s0ww`}(QR|&uLt_yq}y%jGM+~&WHH^R0_?bUG0=IX|RT|nOz(~?+tZ*v|i{LQ(@ z=^V@Z_|wA3VPacz7NFf4DYx>TPrvww5N&lrG(CS%&m@4CzQdRiZNou{pS?$AM=`qB(n`*{GppH$Vq8umQV zWBpoB!o4T=qr9}K=nv?z%@I%Qf8+zwr!s5bZ?MtcgBdK`f`%t1W8V*7`EnKokC%E= zE~2D&-Bnz<{1FfOL(iR#(UIx6RdLKYc`htc$pn`@M`6;36FB^eInI9n8Gh7s=KI#B ziGJm47mVP|2QLu0mL52SbMu0okTR?g?Cle%jlkF%juqUN)CI2|)pwa3YFrv%j@hg*nVxgG))l)^Rb&Q9ij`L|gU{Mf0TK<^R`Qo)o zO_?x&IcZsNsuAAFqY+?e5Fc0an9t}i7l$pr4)Ugd%Foa;Slm?QPyB9S;Pp>RYQr?z z_wWR^@N?&rcizHezeaSf+u)eiexM#=S5CEog-$2Xbl)lLGsYFAb6;7J)nZh3HI-WB zHGoS#i{Vj&IMzPSKWnhml=g^B!j^l}kj@8+tUf@|b7SSufn5*4&Zk))aQp<@f0vV^AsnJ^oiu}Q-K85HO)kfeA zI5~Y~9g69>es4=GACb*A#2&`pnwVT7C)TfmZ`@@A#ta``mPEZyxLnS&ize39 
zXlGiRU}bVXqw9Y>v~xG;9{eBtcdd;~IKZm<&kz{S%a4U)h}RxQbz@)kHxvyX74vpB zneh2inZORk-+^ZJ^&+S)7N|4Ejuvr8Iv4oxGap6l2wSN?jpbEK@4?(-VSKC4VNSiQ z;)H$B=ZP-dXg&n5Za529+dWsN*sKG>0l2sNJ!rb*z=x2n-2X1%q9I3M+5C-gYe_gG z9O0MWc2tkvQO@VIr#0SO9nmD-7DS&=ov@wLe^YPU1K}U+hflvxaK@Pqux8*Z==SCS_j%g^+IKkx^G2xL^mhZf$M{KfFZext_h*#oQ^K#4_{;i- zbj((dQ9a`522ao?`vUL&$^sWw--GI+7KEoe@bTb9Ot0z&jEqZXkNRw4-zUFgBle$V z>-L?-Qp*Rd-w6}-p>`W=)ps}&K4bd79rlFfcxk66-m1C<^cv|~_#oNXlqq=&n_gD+~4nuemYYl6!#>4d$ZHUj`v+hbi zq2O zE0vB3*Vk&O>-Tm;(lfYlSrixAhj2#ySG5cOyOt<;6LRvdf=T}>j5O0%<-vy_>DhRJ zV}kQA&h@f%y4Zy6U!4k67q}`Z2Y+wN$A0Ti@{gctU= zm`VR>NScYiEnA4BpJW~XJ#g$+22>9H$tW%uo;{E!pL#)@xQD0pZ2$}W&E%DRHlm1c z=iDL?ocek<-3OxikWs&*_9#lJ4AG@fNF6Sn zy0T9kA82ekN7t{0$eA}+WeJ`q&BLiDU{J%ZoHQZsu1QsTxsO%D+%%A4z(c%Rs>Inq zdKQm5uco?)X7oF(+44#t?#Gs$mH2e4M}A9x937agYDY1y`9z zCy71+5mR~((uF~6Qfp&Y;oMhUDw_f61UR+7AK}m#eqaA6Mt54m1TPxh<$Qy551y6m z1*#RkX5bul@mqh0sct5;guo)0@%axMuN%_`DB>qrRH2A80`X+3+a87=g^JUVw1^~^c;ti%k6gSh^ZUC_Fom#9y47?y!O zOdF`AGbQ8FB(S&~tW@~N;pQ3VPu_Ino z(0;fi=S-{3y7Opcokk^X3B-MZk3_%oHvh%~aVtv8W9ZS*a=xO08Bl$arhX|6tGAmt ze>u;L*oUJc%+-`d!JIe(Eqh;#XmtM~PT}X_@7cpZTtixO4sSIh6W@eI;q2p4 zNL-+jw_wQwlOWM#I6qsso9RC(Wi}Q&N#8b>cYd~2Nn7JUFcG>DvO9HUcTY^AKCy$R zrODiVGOdeFTyQRTb6w9H0LYjtAErHM5Wb!q}bsn%VXc&@a5a$Y8dgj2u;}4k7 zY#C9sF2OWz~M%r=;G zv9^38MN=jHAoM@gkPRM9btTMs#tMT9MeWvk&>K$aq82bE-B%4bRKkQ-5T6^eX9)^@ zO*|mH8}oZohEdy`ME#LApM~Q}roiB1osy4fe^z|k53})yOfDseDrL=iaaMW^3yoP-jIa` zX~^U)Sfp796R{-kS*K|l4zMJB*^2Pmfw-(GUO!@>k_V-lZ4Ts_kYYpE-EH{e!SBK9 z`vB~_F+*BnWM8LKDBh%Xspm-R_1Jca*|)p~v)l@VjzsET&>M1?+l{2`hQv6{ckz6zv2rChxJJ3H52vESM=K|kuHZG zFHE3g=QO;mW|Lmn#U{8L0C`L%ctOg4^+xz%K84$p z?pzL{mkGy^W*P$17=5O|dem=3a1A!gxe@An| z0m9UCN<`N+xWl6{5H12?rbN1tk?#lMusVO!sbiw+Ect#^4>Uk=QyO95!UQ90y+=re6LH&*_cCOP6E@Ow-A^P z!Z%$z9*w=L4Alz-o-|t+O7q?3DtRW>cVj=pMlWmYVKXGmf`9+ob9|VqHZnaxzN%HEpOlU-t_)Ki?70XDMlHQujDc}9k@4NQ=z#M`LmWdPg#rYy!Y1~ z)5`)Qxgu4E)BH(bFRys<7>T1$@SK%rghX}0PaGIPy|tQVQ(8b+#tA1u%rBPBuZKdf zi~o=4`Wwls;hy%hfq0Z|3%SgwJ}_%aS30(G?l#jwXum8myZ9RM44MuNz%T6*Kxi4# 
z^e|$7U*R)V;Zep;ISPlR2x-v}Yl*xO@9^_d-Rou_Z&Bx^x?B4Lc{e1S;WR^2Nml{Y z6eE4g$-hem>jS_?mEov;BcvDz55PsfNvA7>=_>hgz9F_V-dJXgRI5N5o{=wN&P#4H z(g6x#4!^!45yV{Y`>uOFwC`q4x)}-ILCkgvTIvE}C(U=M_i45`i}r6{ zc7Kp6j=T1iB@?ye@p~k#HDV+BNFhx|+BydFw_gX+el*{afxIoAjj@txF2#t$8Tl`n z@KMYwz~pZ*bXPLK_4Z$de0iOJ{b3o!Nbf@Kr@mawDQVV8-aQ$qKVadq7V4`TEAZ6* z4B4!xnM~SU6~0&WikNS|iWD=kt3aQ@RxbC!)tW8v)D;Wre;NLan}EKZztF7q2Z-5E zL5K?^Ii|tSH|=?=yg=d8>Nx&|{WyW2eDb>^bd1lSFsm=bRyD!`p2fwyLU?U$(-MWW zF!`efNS*+c9s@X?AKy?E$;Eu*cGGNSdWJQMIf}m5w~FEnnGpm2^G_J z&&Rt@w!-Y$o#pWzKH)?CBCP4O4}6cEN1IU_(SUzpmXv(!HHG#g~L zRk5V!u?GFxh`I**fqsXFop^(d|Mi1KAcLNtE@+gYrl0yk7pscXw_vF z_c#z^Ud&@2^>^`40fXS_t#y2+*;D0EYHwJ#qB+){PXwgB!$;<(Xi4)!pqvwP+V`P31Zr9tx8gHh1SF#QO#_q+odso1i78VDLTwz4yeSg`aKKoXdS>dFvhQ|0a)zXBOkKp{sF+n}%#& z{eo-g5U)nWiJ;Gip$I6yLmtjdPaO4r_d2R&!rRPqlX4vz0=3Fn`sP1 z1lVQpfVCDEOj9&i`cb+^>sdpU^1+Q>8eyN{jZ&9Ory+XUZwI#pn_$2VOMJI|8>H6k zfyWM^`1SHO^#5t2_9+MkNA?Q4XmVI|(FR>+9K?@T-hoNma1_V(*lZgbuWJRf;zvW< zy+4%pI(dB4qsdIe?KqZqFkxSwg<_8}kCg$vy2?Lnu5;I8`Z(^;CdgP9Pc>>UANC7I z@xGKRXq0pW7nCl61*MY3}CDJO-Ia zH?6+G)CE2HXNN3FC+Il8{o@#0ce9QB*}(y;=su?rKki61$3mIoNVJxu+{5-fnL~lh)?#$8r@ih-V<(n*t4M{Qo-7%0L{C) zp|$5rIPuRGdv`F#E>)Rm*SL%uejf+PG0VUhQ(#(TGtrOuvDZB~(q|(q41X>0uZOsE zX*8#P#*QAr(%cht?I?-%d;DAu*V1bs?CodJGM|R6ZXJNsd|PNZ@{}3n@F^gNa^8{=MzXg<+pz0&omRf|nC0qR z0uASD*juY9xIF2jHtW2C7ggEu0aq?CiYwgJXsBv>8er+IGE8tTg%-8PrArs^`p-YR_6Ri`zG1-Bb*2pc#ek#7r>f^#qf4U z2icL$ftVxHq>`Qb#206AxzQN7et0XMcK!pOZn$8Js9cd_jOn;cNou);`hfvqC7#w> z4z13$!uXjLoMHrlFH~qd@dG%YY|7&E9&xH;Rx(Ocrq5+#GYSbujv!CJz=O|xW-rEW z1H10-gpHXzp(Go2o7>8SCm0xVitCSQKx^K#QUB#uvhRm|ImH1+PIAN}ed@^(Q}@G) zdIs_dD>JBF;y}EkI*7cT_2fV~fkHTXs@a$NMsn4JcR=}+I#;Zq z_1nfkYPAB3|DI(Z^aJ?lMNfEKjs~`OIf|sCZ&?SD#ZB|Gh>`}6zC0$2TB>=GORw@*7V>SM{4_BJZp*j^TRy8QZo z+-n3q%MrQ-3-6C-Z=dwV@XX;rHB307!|UzHz?x$XsXmV=3tQY%v=gqQeVnE2`f4|H zDBpuScIc{92ZZ+@{Hk?v^CJ>Eb;uVrOZghg1&^7gsN*>#ABZ^$NZn zn#hTZCF*l|8m+gNUbF-5Oo~D66^j_v3m-b7lXP-EVSw&s2fZ)@CH&?!w&JOd`eW_{ zPI*`Aw+zIv#Z{2@{WcH(-HSI*d%>!0I@EDy?)-Gv;kE&K(VkjE4HFc8Ess5XbBSBj 
z-u^baRm*77Qx-RD18qcL{1gcX`|ECJgv&?k@ zob_a=>2D-kR%&2T+l!=2+R3}R{07oG{Pe=dSg%P6qyAH;n{I{@@_K$D^?`U7?M@GY zyvB6DtnXFnr{Oi;r$7$~iv&J_bNEi4BiFF7#na&Q!YSY~Gyz}q-OQ-IQ1m+0sG8fo z6Htv2_oQWs+TOEN!Jv=7@R<8_ESlAc=UNTJM^>M>Z{(G<^CjDWT| z-q^Y26uKY&9;?wcg@Mu0LSJN&E(3YsM|gg+nYy;w5mr9LTqZq)KMh?mqS`>@o_K5* zetwV!PQD}SdZ|}sca^w?)h@{r*uq85o^2d0$(nN{ItE63P0zq@g^T~DOR@bC;nx^XzNu#wF?4o*UTQrdi%zc=85DJy_-=DPZa&nNk8z?uQzaDKvO8% z+7&Aoon@O~p3oj}WQ-OV6&Z*=WP9#(WTe+P@es7|R6y2z#VCK${(ZmUK?qao*VR02wXBodx8yYd1Ydt;=)bnHFEOmZI&!aMNo zC1FT?#~bY(4ocTDtk?4h8r$2V%h1*s>f9FGwri^o+P+h~f+xXk+rD)gefh_$Z0K_a zI~Fz&8rR`x-&2yuWo_>Ab89=pFQX@7GLTk8!mkT7GG0P@ZiX} zkw`jO75TNDKT)NA%`(fMMn1*{+}F2;i>ql*)5BXxyEm70hj&y3Ul7kYPz~_H{bdT( zq{xBLH0<^=z^ShW(q3>mbzDU{mT+S=mjCHPy|Nuh~WWFxy=4arJwC#LF8zZ%PZklp9Y=_`eu9Mn}ovl5Af!$JoGz~U;P4|i|(w2!Y z8Q~A=49;hiXVQB0u>H$yE2N^B{<5 z9|fctgnxhqeJA%9n*EI114f(%7jq`!$5EA#_^GX&l2E}Y-Z;%}8&JP-S6ve+=13Jk zd$vIM2+}7$NV-HNtU=*d-{kg`f!6OolzJL1Cp1w%kC&13sJyVn1ZlUV;P)W`Kt4pa zWILep^Fbt?!L}~_A5D<*+lRQ_gKzB@$gkBbSIB>;q#uOmQF1olK*Cc_*eP%m`yAF1 z7%6Q!Kns!ID28{&NL$^5 zpreN#9?vb(mt|LB6miE@AID?pgX-fB3gd*vpAlhyVw`!nh- z*68smX#1}UgclNCV7<96U%SfzQ->cCTATlyF-GVl_I=|&PT0#SW|H@}PDpx4XbmKP zqsINyRf#Wo>YOgZtAX#w1j%=MUw9M{QP=YcZYTKj@W$xAJ^|%f?r8A-JFb@u`0V;B zzd3&reva<|v#0#Wt@oD_?@SQf$J*xHXL~ITiJLy7vCken;bh5#PA$Y6ymrq%d(x4d zu!Ass4wC0qsAkxTy!z_IR-rf{9F)*{3Q#YK9IH;jJ+euYG5Q@SjUA?v?i zGhccxhT_db+him0CHdw6EFRXLekYwL)oy@TV^}( zk~e4qqLyeTp*{;fi9Px}K*D>pSK5KzwMb^r&qrtppqU9~pp|^{WeuE-8iJpGorGKE zg)Dvx-TS}Rf{_N|gh@cN3?OgID7UCHwOH^W)$LD6*D)3xhDw7P?rW4xJSKrUVK=)S zaZzX`x_$AbGo1K}4eP5Z3;u3rIUCJ~S1|8YCSpD#3H&6_NLu|nQd}jfeSW|{ zye?1etemmG{xJO4e-S(Er>By)7Civ;S_k2;NtY1b{a`5%y}{?n6QmrnwQny2;RzEy z?pmLRjOHS6+Ig+Rmfc3`m|k~*xR11I3HkbRUfHy>oZ!|St1O=LGd;(XUrS(J1`d(e z|K5zmDagY((|DcZK*z3z@armRA{=4zkP&Wxn8~=zXb-a<+~su4Fy?a^FTFJigg>La zvJ;kEqS+iL50Av@3h8!EJ_Cshk^B(#$utZ+(odb!aG$_8@}-VS)t{+&XH6Wwj(8{L zJr06@NDFb&?m*`c1^x?Aqw{cXZZHDw#3gY0Yz!=PkZWEwQz;(2u~kd*U;}{kjB0Wz z5Q?ufh2u$Gkn}s`g`UCEb0d*@Q#H{v#LXS^kubZ?e`)=mTsJeId5G}JG%s@m(W7a1 
z#?l;X0~g*kw|lBY+Fr~iXx8@z>>7`vSwS?fype!it>cBK67wZ+cd!S`$_=8P7}Y=f zckrG>-dQ5QAaIt^jD+sH*#e|-gok6A_CZp$)*#W}sQqw)N;56y6SsWo{&YJTIeklK8N%izApDi`aP9&FZ3VPL*xMIJgK*4u(yGSSquOEoMsTLRqP%} zKdPrvZm511^VcSmxtO7KnPo+M{trHmnv9eO@ZMui_r2PmryfP2(S?t`G5s%!S>^0Q zJ;~e8gs%E#EIH;2AM?gf%;4&LB?I$04x(cYQguO@Ai*B+X)kk5P$N zU}~rT@Ri4N?2^)5bqP#Ghe^gLd^PnOQ@W0l9}XMMGTYL#-UZK*;)k=w&s9H9v7j{< zt-wTc2hyAtJ1*NnGt(oSW=B9CRU)im@eB4&a}`a;@L z7E4-TNu8#vo@*nQUFeD}?pw>T?}yjTD@UC6!`ZVBQ;pRKok_lFozRJl;wvru=D}z_ zLY_4qMBK!8${!I9pC6Lc&G&_>w9yd_w*qGPzIUqb3ovZLVVy3}K zukl0ehk%$jMW!|Y>Orp1OqtB#I+_vatowo{DyN~i!DQ1u```LMga8V}xMl+IF z=yl;J3auk_1?ds~>+d$wxOY*fK{mc>{7)i1g4DnKb9^$+yiqGOluUhH=Vy)g)BPE? zPtp4u?VB7h2+7Mc@}`_{Q4%;{Z`%j2cKs{#7|%+5jJ;M4VKie9^9vD2m4D>kyYlII#@+c_k$v;lhkTnG?P1yAHTG=P9*_19=X+mlVM`5Y)|zt!{u_CXYrg#mvx*(jwPp|$hpkXr zZJCb0+_iYJ=>#w`GRx+$3b-o;%a4b?=OVx)7*TKUhJM-FWY z&ow;xh(|}@pGiGzY%m&c8|cW_?%3gnSHXC9XDPoJWDn^n54q)qP>xe(NuJ{tS>F2D$bH}L1*6D4^4EZDH_4Bpyc2gB3#WUH(`;JMEQmsZ8VZY>|F z<(oBp<*hzgKChYTIHkXOe7VY+9kPehAy3%RhW_;2bWf~^I|F?m$77tyA%6F903Ho< zz#+#bKyrRBxkq>f-srTMRmAmCrH)lV?*~ehg}fm03Y6)N7j>W%=S8rwU2C!O;a=!C zt06!1tsE|mI15WB-{7N$`r~818=sZg^2fGr&@<;V8+&&&vm5vbI>j7=O-&l(+XKV# z$(z&sr0yQ>x22GKSM-);f+xyKyib%$-Bg7qhq6J9O8AF*P2ygVYRtD ztTqKU>TgEd7hU9#z9Z18;RF67wuMZ`3wD>=kZ=vHgP0)+Y1g?OTAI zNeM)Rcb6$|QVSyu)zs2d-r719Uca3Qt+qTv=Y_ZU?QKJF)X;A%^Viw)uUo7Jv&6@u zu2`2Y+fXy_A@2M>l*j)*!v6$z1mm-1V3GP(`5gBNuP*-$6i4*`m|hp(C(WJ3cXs|< zG7e~`t*Qt6VxHq+mb1%|H*pDsvR-eorh&E^dT}ooalEvw5EDHnE9#X(wqTyUynBGQ zd{BBy`^$vzef%4^&uA+zqcsXB$2fe=I|lXdgSX)UMm?*pT^uKEoOq1)%Z_HB+hjV7 z_SBOpR@`FU9h_=i4&S<8gTVJ&;p>?u;yC$!%ic&mi$_imqB=Z@RG0YiLkk$Ve=s|2 zw*_lI7J;dA8nmAF011nbYK(gfX~{R9s9?rtM`2U5SJ-)ZdwE!SIPM7?&fI6veC3Ho1I^MAg)gU(v(Z!gx{rS+aN^opsjFb~) zNY8kIcdD~Rck!8wa-ax|TGeAVRuy%@V@~^(f%|h|=&WMcJnbC3Sz*f?g*Ru0-M!e% zyz?mf_0)4C=J0he+L??%ab5@d++xcUbY#MNm9U->_9%qm%H9`RxUm}eUYos&=*=5+>jFVsJyDaht#JKUlBzyiG=(y15vlY0D5&u2f}0K61juLy$c64nE_)a zw*cxHG%+!Ne%Z#bSvtt#J7$CZDs9mJo`%EIgTd;lGZOajszhUzupBP^43!e=+v3q# 
z9dX-%MGhBj!#LFrr5^GU#!C;*b2oFh8zCGwj zb+5yBro=;bj|Q;8qczL)_e1@-Klu2W6%QM=8btiw7FR-nPBN}Ip1>QXg(-dY`oNb_ zw@CYv=DWKacCOK9^;hKJq?tD{)oTauW}m>+(c3{CZ2)ucOhm#Tj_*b}yrO4-1a8C) zl;F=T7kr&}9dve7^HeWGTwWN5v)qE{oU~!8hO7n*f5xkdj>58>T@L#f--DFZQAoHT zi`rVaSmV~c z(^yc0t^(uPSk32rH9v=W(PcQpZVpR}p=;(JezJes3kmb<%W)=FIJfV1TDLisYHB;0 z%;`(dgKU)OGl=7sBK4~>`p7AVUjClaxTqYw+WrRE8eQPTTfA3?9!l$J6QS+5b0Dz) zMTR81s0a8Ke1k74UAa^tVbYerS+qf@a#FV?yx;1IiT4gTXf@mqBYRy((j%C?2Mw$JZ@>2@K>;r)LTsqAV{o2g(=fyoyu= zhILwKg7@1V!}yNS_$f0yRaJ$MC>ynjEm-d~>?h6D*NCI0^pn|tr$*+&P;gySHvHRF0B zd)CeeF5j*%|D3UkkC_~W<&mj4^X*8r;mu?mRgwj_j?g}g{mXzbMO|;cg7#lAk}O{) zpkq{1&=8E9ic-G(YnOaWk!1 zP7nG0E*-?FZz1*1q^$2sEB@tgH(6-rj^kE{|A%EW9;476-p>r+m-`mF=Q)TI7s}_{ zUqPcPP;En3d~(wC%Dr7IT0&~x8aMtT|>Gzf=I7cbz) zdDlrdnnA!1#=fL&q5B^VVUYE1==r6Qe9GQ|iP#aR@Z9s`fjE(W>68mP7dOy+_Xk{R z*G8TEiS|iKZYv6pHXb&@_7*o@gB_Fn~ZPVlK5kvDkiXM+>1F#eJOfIEuQs^6DJX`m!ZG| z^~x|j*^Abf4Ew>{&;2KpPC~`V3#{Yqm@+>IhShddy$#zkr?)%sMTUaUeinnls3)A_ zLz=W9)1Gvh^>WD*x(Kp*Petp35pX$w@j2oHwl1I(vo0vX{4p27gimC;1>13(RUb*z zd!yMK*t2yzNnh^6;azC%64Fj>RytC3t;yj+r#Mc@X5nXSIq@WE@Oda=l>6i@*BBJV z=r!o{){xV`LF%y#NEuU)$+T}f;TO++GM@=-BAis8t!o6uv!+X8x?TYUxyCdd_#kpgR7)T04Nz zd)4XxOSE?8%6|pW%Vh`%9cr?>P-=ViAY^%eMa{QvWYw}|CWPK zsxE#H*OQ|Y^4ajQwEpA^N5a(ZTxdMPDBedimD91|KJE{+1sL`@^qr9h#M;6PMQAp= zcbTLu#;c--*6!({k`7bftjNR>nNi%YdtaDV*%z}n+OQ8h?y({}nspiElV+U{D@T7& z$g>IE08Xc$!_I4`kq_6v9WCAZlO_V8Jy*}!1wDt_kq+}`A6p)UrQ2S@RzEj!3=%ul zn{~P7DSD5eP1+!I4<~J_l15P}9_l&G93(BwUg%5{vErZOmSEW5>-c0(2;5kC44sR& z2#n@JL#!_60v6eVA5~) zP(k|$@}nQqDfIp!>%1%`Y!R>^b=@I6m?SBi{xQ zN-m!+`37*hzpBnRkna+F$zQkD%t(HJ z8Nd8J4mfZnKlsuCnUB7FcLwKX0V9R>1gZyi)AJzKbTd=wbFxDEmLPd1UfE-as8t^A z-y2c}EySkoM!Z{yGm!rR>ThUxvjd#p8OHM#AinJR9p|PGlc}FzNKZ?kdO)G=ao|sW zX29h-O->lX$oFuqE_9zg0KTq#P5${CEbVtu(wSZ^aF=bM{Uz2H zxq;Bl)N5SlRdY2<&jr$AoN)e*i+F14ReElGjEE69(*Df8t5-7e;VK=gO0x+JYH&p?=mbBTHyUTn9b{hD=D!bUdp?G+R{_RGzdD#er0 z3;;XO+HcOqS}O6f&~}6c^VLc6Aiih$#V2vha`k7$S+r~u6!km{Hvzjxx~V^Cnmho+YJb>&>iy#7Akdiw{i`6m#|GJB90 
z8!h+>YQ}X3i*@61iBew{vz58&g^c)tt$9Y91H2n0JP)^fXUp1{4n#|-5JkU{4`rkS z#A~7CK&S(GFYt-}ipQ7BEP0upymc6-eGAj;yq&*e2vG5-faX6BH zX6E%RaMi;BP*eMtjoz>y^jc<+HmN6*Ze(|7Fx-D7SokzBJhM;uLm=&jLgm@5IB1L&)HK*C zX_bG)&uwh8$Pd7N=X`-BNE(C@*VF5?Rq=kzCj>}G3cVn-1E0D@Lmp@J7vC0779I~K zHK%86`EL2%d}qONmfq_Xz%PY z>bEPT1AgGWA!kr%6w-zQQz3HEA$#FfNkj3Qp6wawLlB=wUWP9{`5VRjj64odoP=jX z{r_&lPk2sWGs*Mn3VbAe79--$GdmQCnIEU&KNeIrM)D;>Qwtx@DBc+UpqNu$&?e0d zlcK|U)??ah^2AtZnLC?VcsQcyRXT6N$OG8r`x{2uQ4X9->vrXl9(@!DLRS(tGxA_6 z)dfsE<%c!3>-dmk_h55|4;OVX{7x(2`wsIdWfrKz)H=J)JLuC^X zx=dmheUc?H-`ep(QDS>1A$>NdSq4z=Mbye+)1lAbaN zNoz3O%$3x))>zo6C$`?d88oVAKxw^^yt%i9I5s4$#|w`w6FyhgDqqfS{rb$Bx`#=` zxqOvxeOz_+7}eQ4F=N0%$*#De>^{{wujA^YPig%xM;ENaX`0b!_dTUh-2(Ynh3ZKi zsoP&(He(7lNl4>#oHA(|p!X!-&_<@Z1v6KBp{0T5I(3?X=2}W1t#|k7nju(TXoz8( zqsjZ`aiiEab(r2Qq(CNoquKcq=DK7H5MHQ)H;Y>UkS|sYwVe3R*D}_WzvjZDiYBo&x zTEZrpdvud&9!ofHtdd3(^HvZ#fHWl|j{$REw_YgFy{5C6#Y7FJ&C;Owzk~i38U?=?ZWdh_S z`Gbue?bW)^7yGUrrxvyh#k>RCvH6taO6i{!@NVh`SgAV~{`5Zx12W<;VB!rJ;<<~~ z3%rl^bg$d}s1g7{>y(`jspMkU(&5M2chAwAe1l2I5g`4+^t!T$2M%j z;ohTozvuU*7A3USv8lw)4GqBe(ywS@&xF)vnPIXJ;Y5l+rr=CYGqv8@i^Ql9cn~2`tB0kYSSKP9kIf$wp%dTc7gai1ns@Y z(l)-$nrV3rA_k;De*JsOPuzru#%A)F6>-vlc@;>xhdr9(RKCs^-hH25_xqhQuE9v1 z=TJJUxoWC;0C&&X%c2v5Ky|NyR|#3rVN!d%c(Xm0&WeP=cA$Ky$)$Ci&MGTjq%k^9 zNSd{Z?N9y>o1QhfaBA9CT<@VLPi^9%>S=X_BOx8=-(!H{BM-fsj#spM$&rL5iw|CA zC+~EFQa+^!%otI&3Oc}iMeGM81K48Jl)2NX%1UK6JrQTha z72>3g?u7Nf{{DuXl8snwG!|l>#Nu*uN6_|);tADW=wtq!oemrVBmFia^(kLo7LE=< zrPw>L5s$Gcf!E6~@o8gbz&XD;*lw1S+$l1gC6z}(m>LN4=X*NP-%%WgPGc8XeWnG~ zw4v)(M99j{$dE+-Rphtx#v8`dB0xw()`%;Fa9WSW7N?sh$>0pI#;0% zQ_5_$Rmwlsh976Glh<;>Km4{a2Afw8;#-ts+^>5M+=(r%%m0ygla(V?jbMMY4r|

w>t75I&XmL9`c}GFYS&qj4yus{% ziu<15X928~|9g{5vGAl?90Sj2AH$b)FUG4&4(w6O-biz0Z}JUPiWl2?yA5j4{p{Vw z#Dm-B!O&OBMt-!h1fzA&@UMZrVAse3=1|=dVR$YM%iqdQb9%;I=a@9>aSZD`syDp# z8O#*QtiQ6SBiu|{4_&P?;Mk~T zl6mH7XeF({>KEx!vrcAcnH|aP23)Gc!Tak!a51;`_7QyEdRf`NaMu)^PS`=?dvm&t*rZe6j)zOh3`$u8DS3YI`9c8 z7lGCi^mHmlzd!h>UL9o@eBnlG#m{U<5qF&Xy$dg@-ir;RwN#Ne zZ#7!Of$j6y;LWY@YsMsf7FLfS=a+Re5-=?`5zR*TZQUUXa$rT&SzVrd#HA?t9bvZo1rqVip6W* zh5GLrW8d9vTI#&!wY-t>Vcfa3 z97zKStl-+2ucdoedI+3XMBT2Pm?3xp4`vrK%E>w$U)4Vv+SIRx#=4p~rTr!ld3N%` zF#H$X4d+-UOM`FuLW{X6T;!O=$~N3^{74uvdq0ioBZL(VM$5_?ex^d3B{sgshE6yF zBQ(tAr5}A|8ZVDlwbhd=EwOdu1a#Bs;R`ge}Awg zfe}*GuOL{y`iQ`JF;}2Ela{hL)-l9Y=msR*ufr3{cSe{Cd98!-j>dJ+80k*){>JD! z*eGxk_p%;Ic5Dhattt zRvxkI9~gxB$Z<c;f$Xbzm4Gz~t*`-_@j5Byibt$lOwa#wne&eK_zA4LOcAJP?v z81Y@+yEcsPej9OF0NSEMLyZN|9=P{IR*Ta>I-m_ZEG1ULA z5w)v0w%8)2-MNe1HZJFFD>O06cr^WN4&Lnk5=l=YX%&7iq&^00`U4B&55b~s^@zjf zbCECqO?=E2N59P@+>zh+_$r;TJp&@gr#@8i$*N>r^nmukr_UltXL8aUa%kiepgL5u zqXN;ttd=K4Iw)&Pvq+;Gs9o<@1MwXE?%x+S4^Bmw1>ITi8a;Kq=XwZ8xrU<;HUi3T zwsC_8#d84)9YUM|-pS47J(;s}XD!*rDRz0JMS=2*8`6m=HTc;(K^OjR?3^Jv_r zwQQ4gf{~7-Sf664Fnruw`{#WS|9sGi--NHdcz3}<2v!EdyXv;%hUdRQSW zlSvEU!gwnnZH|d{w|O|QEa+p3QST{7U_{*9EKSI$1%NUCA6@bfL{a83H=6UBlV%6XBu&T zJ(+YX5BHi6ngjRd?RE2HoeiSv{LZNF)!;vQ2KQ!b(DF|{6Taqj{i$y5k5|@VHer&` zW-93tb!Qtt5S}Bc%R!;xCE;O6H>2{(B6D{(@xm z@LH+U57!F#!9RG*fLA$Ws!ZU-wbaWJov(4n zziD-`1Cb-7$XN!B!AShkn8`}1@{4FINtg=?bkdC{9U)4A-(~j zWyy13|Kmzsui@7YL;3fAtI01mgeS%2DC+GeY9jrua>Ew{Pt5IIje0}IK=OO8G_FvOX z9{JH+9To8qCj{n8RA(T3owZ2?VX8HduBX00CipjVQ3bB0e@(0FoxFNo4;~cmBJSCQ zPsaKI`3HQ~o+0RM;zFB{=AxW3=cB$)2D&cDi}gXf&s6gJon+Fvq8GwCFN^BMVXF!_1}2xqx*n?&KkaZmFGe9f0+c#|Ii7G29Z zc_4h+U%+sfAY&lT!b;F}IRga(As z2cvkC7|$Ml-JO8^F7(JL!d{cIsc*7Q_y=gP(vPpNw+{&iIQ2ZJehjE5a0f}3uoLeu zQr%YKjM6D}uig59_NyQd((~>;P8w65(9%#9JtC1`0>7i}w#p_w#|Ukqk}g2fPB5=b zqAx|AQ_l})zHy-*z&7%|ek%2+@dw1Dr-S*=84BT&O7%*9-j=w#73fSkgB>S51JW-ba)Nvq&^)-{ z5Z|08RE2J)exP)8q#=@zrFA>c z$e+n^2Fv05*nQBu*=uDy-4}A=@GjEZ{V-?he9@z;s|nI7)W1n$*?mqh%k2`?^EEhT 
z&=*8s?AGE)CaytzqKe)F^=Dz-$j@-5G6+R}*q+@4#m?tO05>mVHAeE#|7 zP)5%m=C%Gub>A9p>FKJZ^QEbOqrtT@53+y0p?**_E_^nEk>&>99qyz}GEr#fZc;H5 z`jhk?CtZTiXZRrXGzF*04ujf4yXhC$0WXzPk68-%5Jz*M`$<3gLhzdnsHu6IH&}U- zdR5Wb?ZX-foxC2+MusD4JHc~;w;9zrQ0%$M*Y@g&*=^-p`5sD!*>r_;J*>7chodeM z&|GE0Ha0eXB#Jq^-E4+d@^#*>)lNpeES0n~bW8R@@9WFZ(n}X_n4~k}I~=E7DO2x5 z7QLs-tNe*izo2B23e*Dxq4PFXjU_?PAiDG3I6<^GE^0KLVB4~ zpM{g|WltNt0_sbY-^@y@A4 zKDX}jY_^x^ga2gi1A3Ai{wew|2+qxOCJb` zJ(|S7#Xe2q%m`EZ5_>eYrmz2xGp%Q)Zj`+sd zoFDQ2gApqpC{5|u>$L&Dp~<4|Qgr(=964hVj(TzeUw7F85i7=^ufH3HCEY|_=kdT- zhGDhmLipD16}LQehIPCdmNy{X3j&O^v3W}sx0ki!bUpU=_yn*Y-4Ex7cfi;=jabTB zEm^0i5e|sxA%7bA5W^3UXsMkW zNcb!|(M_X!Iv<;Kj1_L}1@C_uS~cu!>*K+s+DC zNz%vM4YYCWA7-5~oCSFV^A(rs!GMTdTDwd9o5>R_ZFe4(3msr~Ry!qjP89giy7wEDu1%|GDhG*B)bC+5zAg8gp>^bs3Im0XI`Xxg_IR;MPu(`WTDp132QF)$f|o0gvLEH1 z6wCIsFM}CBueBa$+-aGA+PN6VCUuq%TMbn`YU25ePtQ30oo)8ClfxdoL!!ZS(%0)o`&%e-q4@bOLGv#(RX3jqjY5(0*7C5d$S|fV@BRIXw1v=AuFAO^O zMw(iw4SM_NIkF~|O5fUn%>Sf?dNG0aj56EEPpgqEr?Y+Cn8&=nj%Uvnz^#A;sAxX|^)t6TpayBom2Yy<-1o9WP1{#Yza>i#gcYK0WL*RCLBBxqG)2GcwraO)9l_%{tGSJiFN zYw@&dC{t}nHx*ATXYKIZooC$i`BqMUhiy}4!n1{Re#YCbYDJ%Xpj?7zm)k4{x^l`N z6m>?q#wfN>`+JKNo7zFL?Y0-@F0P+vI4+uM`v_33vwqoGNY_^{&7H>hT|`T z?>OwcHCW%9$$f(8`A_W!;N#Q?pYF4kXB6*b?dvy?zwFe98O5}RX0x3rYW2~rLtMkg z3cgfj;_{}+?38UjuA)8F#r&j|=c&#@`0tHp@KNIeCTi>8qd2$~*##m7_LVkIK8>O_ zev}vSr&rvletlr#*dyXvKsX0|dwd|Q?TMkgPSs&$ztBD0N+%e@Xg6u~cpemJu0*O| zcpTS2ZoMV~_J;=2S=ZP3hcC2eW_J_pYfqm)-G9t4r#F(*km^IPDv*TI1fOy0kw z3WK^X2UFv=5`9OkodFcyVmOE=Xju>47Gc>zm*yWet zoi~W`*9j*M+|M6XcTmL|#1zwhyFZ`vbw^W_5d#8Q6T9sYlGsMzDV>&Z2WXCbn)?+b ztd?U}nZl{b6&PqNC_F%sbm?AqE&eK4tv|9bRjBxz2MXMjJV&l29&EyX?6KqQvlhk&TC)nX4tU-&8Uksb zlveF%?{G7E=I+J}%+YA7-f;TsM&F4Cj^Xc(u9(^}2t%TpqfKusbgn3e^Ie;v$nDv7 zbf!y=CKI(v^ONak?0(c_5IVskVkd~)Coaat9d==H%M^Yr*ab+pNQS#R$?Lvf;@4v0 ziA&?5+IA{?}4dLKuGj10s)js$l?E>XoqX7iC6kj88LZ zub|GpcqFo++TxFctlc;VKJ^O#uWM(axxNe9&G3a!i@)NCWxo`{1FF4f$d#Ys{)3+} zb5bhlu}~zP#VJ3z7-Is)%ahD)oHMjuV<1ocwF9$Gz5?P{_;7bSCSS6J&$Z5YqFM*H=teN1 z8EHK@jnmD+d(1jA^ppv5z 
z0~f2daMEosxK$$R+=|7=r(#LF^#-BkUu`+Tyso)J`V3?Fs)3(6?`|2}`cs>?urFdQ z3TWTBy=AcZ*l66pa}E;Th*;p=8D}7I^+mqAqc`D8p{OUt-BsqBe*6(SMtBVFlwzYk z=$?y(@%`!Zy;Qbz&ohZ?Q_?&cEc81kK9=d4&@e<-j;nc!Lmq3$qAqDY>%4*2L3@?7 z1Q#_sthBL8+>PUgZB@Qx$Ai07 z5AEltbsa%#%m^jV$QMfV_i<52rXJ1`aTF34BjuKSKdKtFH8M~dG3RH(ZC95{2*dQ^HwQ#kg~T4!ukNA^VD6w8b}xesgFzX^iO+PYGMpO z(!0Q{hJzUS8_0UH2DU~2CvuZpE>8knDlaA z2r{09CTHfVgcs85E_$lG=srEh7=Xd1_mmDFOj%9C?fmSBZAcoN%lqHq$baWVeKD#T zPW3IgP7;_#xT3y1a*o*#=;=-#RM{3n=eYQsfoWUblPB9in)L;Yec&zXM-|wemHdSA zXdSFR5eN#-y(TP)Hixg zzBmtm1nuO{qFWR0)zW=vCm3N12+W)~%K>7~WP?#z6~y=3CB{phs8QX#w(mET1%3^? zzYDJ|)d!FKP;@$Vl$+f7&4t#>`Dh?CI%dzj0Pj8ei1D+HW8#r;PK-n8Zo5h%ex-GZ zkowxsgu(QAj>|$@Ir38ijJ;gQ2=iGBdr!=BsALnzwpWwCG0J&O)rZbY7210BeUl%xJs?j*zVbQP^gaZ{A8gHyBz(58F`u)+5T}1m#TvVP%&Wka z$=j!+mCipASA1Nw7sxAd;z~CWlen5GR6iN$ok#ehwO)8@?%R>>U2C|3P2LwE)|~nw zHb{CB2Ycsm@)lIT?QuBQ#Q-@T$X9aW4Du!2&yp{Mv>WG{PRcs9ry&i$S!)G4h;Hpj=Gz<^EOH!zAfxMSAucVj}K7#N_Q}q(Wda24M4|q1b z9>zZN!cLQ(F{%N+;@)O-xSWVon^f;~56qq4tkvLoKspe%*e1YzzD!_&$bHe@?>*pfY#UIYhLLX(xr~FQ45S=@YTdj1{i=!bW@k%A z9FL3pALgWk;p+8M7%}hy-2-|8Nmm2aCuE(UN8VsPl(y?rr`v?rb9LD&JUnTw=}0`m z6Sbr9_q!RyeH++}I|+3hXFg#mihLnnA?!`VxBJVPq^XZt=ij@D`A1#QQO7s_1DDI& zK*oSjAnq3Wh9Bs_g%?8S%-%e2;4G@CL2TQU*KU-fGQ|ZquPe%3p_3%F&P#<;bg%h^ z4WnRL-X}hP^&z3_NRQKA$zO}`Ooh2heH^Ye=8oX@yeWNMe1ABa>4mWBDa#yzd{Vg4+u{OG(V*8{MEQ53|)Q%sh5NzXYv;%g0kCGa6p|8Zs#Av`z>jT0yAcC7S1L34uitwVJ0^gw*?b#jRu!DLbMMym*p;O&b-w&0k7bd-2*-0H{+D#>G zFLE1+)1dOl9>{kMLFzZ~#+A>h7nBGB7x!iV;*pdf(VKA_cZ&2x8;H4+uZS8J`s=~b51jHI$UETOW$q|?m2@4TH3xoVF9^@+SbYwb z1zmHa9;-sNfw#MDgky=GlJFFyuk(x>YN^6ch#C-mwC`VUGKwYa4Ij+j`z!HD(qwv*qrvP9+AE-dWu z4Hpk<1S|I?aHD#=`4FyP>FCC){-z2z{M8C8hq&U2Jq-ZL`^{kmJ_^mC{HJ~S-0m*}n)l+h6c#GgnwUU=S}VZU&Wc?c`l)4nT3? 
z0i91{n*bx8U5 z2y5xlTb|oSmCCg{$`muYmv<}sUOO6FEJ|m^K1FcjpB{$8ZN;p99(ega=C}5K#BZy* zVw0wO*oRlSaQt?Ab<@2Fczxg*wDP+Ei`NbWt&29$sQz_qd($m%z5NXIb%}yQ!%ISmHS%eullKrz6A?K{;KNzZ2qzL5q84R5#M#M z1RtZ#IBi8Ctli#2+BSI%Gx<>q&apq0Lnr3rj{`1hKxY?RwV@V!F8<3*+FL-6H^(vX zSUP;w3&RIb`>N|p>HhE!EqLEGMlfdIH`e8i7mWMT$!+q_LF$P)<^1}Dt|G@oF3MCd z+$?_(zPNJ*nk%vJEG`^^wW|5;Z;yDw{6@TQtPibsx-`hw8}H~ng_wtCbg$`HPO-=8 zrw*v^F`r*k?}A1BgFIu-JO1WrGu(2fy}VCr6Q`dk8HN}6%cZt(ze6@?o;p~Uzf^P3 z-Re3E;08S^k3VGlF#(f4XsQ>K#qC1ixu_g#qKX?#n%ofQ{v6&%*5N#5VI zvoYht4gM** zDyd6$qvocC?8$*u(B(}r_j>OJ)9%ecI`taQ?cdJhZr6zQM8Xr+G{gXp*HlB=wfd~I z!$kSv$7oR7CbAzI55+nnttmcI?L-aXhMfA!+BVzpVD*0V?>vYdv^N0CA#9aB8)^Lb zR;~|s3YWq0NThSJb+~hpJqAw8$N7h6qIrB4|6DQ*n%^7(gtZ|1E=ENkrZB#4WeT;8{nOz+7A_S0)| z#IbwWyj}=|m_3JMy3x!c-$hDETSpG%HW+X61bvSWJZ}Nr6ZCMG$QK|SK$FK#EPBa9 zB(9Rh`^&M_5NGZW6fb5;`(5zd?Yv$Hhw7KR7)`2Aeh$rHTH|%341m)kt@Dklp-?M@VTx(*z{XjtR&fq zft4FuVfF`m{oMoo5AA})T`}-t${{pbbdCuwAY8`jEnDGY1L`H@{$l2j_b}p1EK9#7 zJ&V(q1K0jy*0iT42KPY^?|f!$^&fUP{grgY|y{4C7QIiUE`|Bhf8a@Es!u4b{YzgK+M#@UB_2_@Aie+~5#hT;PBfOS~-kn$R z_|4t|$6@G;A9&hlA)K%BLzh4;C>!#e>g^tD)&gMSBnzPRLzGNhxO9DmK(8)V-6Ji zc%sw1MetnD8HaX$#)v!BbNid{%$07cHtoMjIK!qpl`~JPgY3kfFj0eqpGWYQ$z!M- za)!^3ApLMN4++zxIR+An_3T{vo4G`v@F5b)ObQLG= zr|U*A(hTs&G6N{@VALdMq`Fb5zSz154Y({c^h67`6_Ms&sbi5OA|}9@AHITpLpoke*zElC_kb9xD4f@R|E#O zxy?pjO+hag8+3Ivhi|L0a6#wIK)(kj9{1VCwMkUZ?|9? zhseY)c>MMec++76>Ky%nSq{6HNst~cDxC+9uV z5|1MB9;55id%cGp_L5o}ItVAdC}koR^;c`c_uBi)xzQ9mUh(y~1S*Ey^i0ZqdHFv21w zVsNUqT4=bd|4_$$@5GSADMsrZ)HAjTDP>Jh-)&c}pQvKIy&j{-lq=nW9ei7Oq zbBt%Az!3UfskYEb=mt2uQXhJmmgDz+{qg)9d&1Q*(t`iW@p`kq{IA;!_OrKXmoa6x@4w-)${ zWqQA8pQx@VFuW-EXdRv&=v6{m>N50;Hes*2a7KCu8V2o`;+7Nx>1_~Wj@i{wc`I+? 
zR7-gC@&RR5&x4c);aH;9V&fLKP-ue1Yp;kgv;C2?!1C{Rywk%(CH;Wb`sYP_K;v~o z;?)XH-U`OY)`u&>^f_bLVJzx#4wEJy<=<Bh|0Ev($vm3NuvoAA8HB?|{4&<+uY+ zyS9lfAO8}`Tk(R(M(Ul9?crXsHY_`Q(~USiSLAHoemkK-WWBM8nA58TN}~>ldWTgT z=0cOPn}iNwG*?A%-<;V2cyzi8qnHo}Td0)dX!mA3d$OefGWI0lt5?^Q53hPqJ{;nm ziwdOUUN@+2yaa{_{#VT`7BerGWY~uN**Ml*>gS!xCr(OYzZ<1uAG0#uce7j z?^sa5E-)@T4zap-SP#wFAh^CDGK$mxane7G@SUMy0^73rJCHvTIfLg<#4w@jx@Fs{ zG%p~mM)Eq?t}Fy?J~v<|ZEDEV9TQrg>To$sWg#g0ewX%U4hHf<{MVJme4bMQ>FlLI zF;P>}G+<26A?_lcyT^wE=}UO<^g>;{NY|4NKMM5@?xTA$D*)rxV%4f8Vl3czsDhD~ zs?(f=(dykDA(X>c#k!M^qyB((72OYYoY{O1!{MxpEXG;jWrf7se6yLad@XqoT-EN4 z)g2PJwV#8VsE6Mr$fmd4%Y~-M*Pf5!JxF`7)fe^y#f|*;QqVi7s|p=3v1K=UesEtd zX&mu7iSmNR_aAAT9B|f;6&?djkCvjqw%kY?EQ*^B4uLJ@jTiJlXpOevzToscO8g9n zYjfLqbY%0#uND3XA89w0iQ}lg>3SZck@%iZcN#_5%egvzv+$Hcqsfk*&SI>xMgD0> zZaW!$Q(dvCUnNh}bq9lsjnu%kD+!zD9lx0r#s?g6jpN^};CBn$ujdVVJ*PFD* z#DcwWCGI+sR^)zmM{(Qa2bi@aL^hCeVdvMOptw(|C-f1$<`Vg}aWJ7l7PkJJgk$Du zs7)nHrGEYC6z>mtgdLz6~mmv`FTbE@g+{07yGR5??ybvJW>PE>wO;UT+~x= zFN*jOH>roIVo0Osb@84Ho%B0#;&RNd>3+5+e*Ha1qkq_6Y)3yGZjWJ-cB{~EqK)~VyqK86U+gufKBCqXAImr*&M}W|V zmvT=@A@grhet$(NDFWT!U*~VXC9qTXFOvU0LSC>PYBg>mZA}M&&AQEJ8;M502Fn!Y`|0|eEQIMT%}^IcVe06X1+M#M!1f2U8K0F%m3>slMWR;2;{Mu`}-kv zy7=_UgIwr*s(&D_3=>oJ)CHY&<%+#yka`RXc^T$+T2nT&_(I&;9?3H^^7+_(7OU%< zys!O$!f&`+grIr*2I^%0FN`o+_*aSihUh~He?a$nSOIYXr}1#&PttI~>~{$O^)DFp zq+sUahNMTnF_DvRzqQ1c9);lSG7PE5Qs+;mJTXQ$&m7^~${P!_vm0_fpqWA*~4!*6~nld7sCd?dP;UO!JSSEcnf~emJ8Z5GJ4F>`Ji) zW>o7@{pd293%}E2Cm*$9K3w!N;3ChdHvsRZL_qn{+Ys+i0i*{-?W#WO9fA6GD)|V} zTTp9C-iUdjz)Zph%CC+x^?n#}qFPeZS{43( z472Nex9D$(z5%U|D!5d{J#L8?QcqFzD-^5j;kd@NR35eB0}uz0$LUG@S&p%Tds6&2 zN+EqsIe8)RcfQrX9_7~Px}J>47t-cx^57k~q>U{n4zJUQVjZb}Mg8M4iQZ45HGrX} zX@V2^{VnC9j|k+S$cs;aRXK^!?1-_vajgZCUzbP=;>gstK=r|ESCvqo+=W%@4HOy* z2F0GF{@7OPOVAmfn2pO;#Gv@ug(0i)^vZUsc;-R;5$8oTqp$z(&mY8l|L;%6@bl36 zZzS_ve3Va5KZs5_ZPfO?NC&3};dR=3cIbkic?XS-;1asezw?fBbpAjNy1(uzx!3QA z=Cw&&Q;midbB^PsK_9{Is}Y2n-)C(c`l0Tjma5aO*Q{@gDE{l^6X>`PAU8}~&5E6k 
zL5F|itBLmD5!DV~lxJg)33t#V!vG)8%)-1+O#m|rvF3q|)Fhgo1I=89Yog~etqsV% zm(l%VfBV1y`3jggkAcf4EpR*ChqlkX2HH=EVGhF$<%?BX>e4e4pkYEVPN)4|bCMt6 z@3hVk_Hhv#q8Wh;PQHPMCnsXLZaOY(^Mmhh=)uReI?pD7CH`s3V8r%Oc%SxaSf6zn zep#M|VQV&{wIgHG=ljB3=SIq5Swpo*xrXcM?DyK>2H4ZAC2A}xW25cQqCDvm+%~mA z`Z$)U-6VPFkmGP{;~&f^EE3m%i1NR%X?=YdI=2NF(tTJne_Y@zoW{cImu+R;E}wA9 zKqF;;>w}8MqCt@N?IVsI*$hYMXsDxpgpvlYFHby~rrh+{3ND@ZVw8IZ7Sz0f;eAe` z&xJnfM0$>g{-0IexCA}T67Yo441VGL1n_#4$HnVbPdo{uz3f=XT+c@7jOirQX}&%4iLqdcPR{_`S!%`I>Tt_f4F6_9GwMvnTk3zlYu- z?Q!el=6G!5YP4KE3*TiGz?x&_bddR9X!OOA4PCH;8_toWfn9I#0c|G(J~xBMheDws zz(l5T@-F9BvVYbcpq_IJ`C6Afl7U4W9?E;g7c4yuVb{}O(UU&lRY({JX6T#SAN#+z zgQq&*f!0zTUlj#*FZ3}b{161G8`;^fI?|o9FL2hBVNktm3oaae1!pC;mp1j!fo2O2 zVz^HV_R6ndmaE@DX{RHsjgKQZc1VTHsX45|+Yh&Hc_6J`ZIS<>M-QymybGK5YNYz@ z{2{p3?KjQ0AkThii@NnOKQRDWb{vM)EA~Or*<@BY>IWvpc2EOGhtv0THYVmWZ!^Q3 zEiW0w%P!XPtnJ~jDR3{gwz!8**S1J;>-%y0?p@&Hp$p3L^eLS3M@}^CrrO-y%Y#%M zW|noAtG3;57P5qtZ%&AVeV(}2$8 zZTVl>Tin@eA9^pNd$uY=@$K(j3_h4&WgXF$Pw?K0eR9ZKx15LCeK({23TH4*(^S7bHiUi4d$OI@&fqz! zx5#tmcaP3BUwo8N4dYR>#@xK&7X0u_OO`II2d!y0(dQD>0i2&p&@Uq?@dJ?G!EsrO#;=Yu6o*6PGX z4kzBKlHMah$KO~(=Pqf7V{vvmyj=Gg<~BbJ6FxoU1$uW_JwGiuv)@JK(bkoe zlYMdf_pw0n!A%`6ih084>lS>PN37K0;e0&cTLE{cT;z*qa`>yR#^+n2;8Xwh811(b z2p90m$^ne9n1uxIU`5kkV({-@eEvIkoOE$H&_Oh8?nr;2^+eOjCy;6lj+$+wIc?`! z-miGz8hW;FCg*9Mw_#L^!JJ!Nrr69ys%h?=Y>jhD*WsD!c5wBG6?~tu5wz_rWWqmo zVtNW&Jn#ne?53?=KeGecy`IAL0S{QG?f<>^cy%!YbyKl#Q@(`a9-J5nd=Eo)YdspF}bqerEh2gzU? 
zT!kC9g<$d3rm~1_`P+P{aM*63bx{ZprLB*5i*d*UyAH)ZHQn8+%nhOK2VQ-d^`h( z*DOKOE3jZ^TgV;M72UL4k?VDuJ+xQ~u-OUIBGAUfcHs+x0LI z8a(ku;xycNXb7a&ZwqJdnX4@hjYS$WryN4>n609oDW9e>sy8@OeGoSL9{|!ac;S5& z&xmSriS*od91w?r-K7IcBh%GDdXfFN*9nRaaAn1o7IpPTaZ-Pj zFNDg*|G{q0oiMom98UTGUGF|5UPuu!kUl<)XGdK}GpmTv>Y-D*f-`y7jm7-;(|WSd zEfeGN750rYiU*yO_y%-#X1Gy)fPIh_-`T4T>>E20#agcXlESw=jo_babydRnIxV+h zL}%7?|4X3p%MMG*Nyj=u{FSXRKI}SDY+!jkIv4b#%vjfG`mSQlw7VzZ@>` z;+4$^#b~v?ymVI#R&20kqL!)VxWThR{M7#l?rxYbwLR?)q{S8DaG<&o^$$H;{t}u1 z$}Y~u8KzyC(6t49>}8=vbq+3WBZ^={u{&z-JIV`v{1S_dxj{?a+RUwc#`E_!ladi10R2u_aKR*08RCNZI5 z>YH21gjIMc>;enXr?ck{X2HwEhe(>04IIR9&*Ramd94MI4#n;oeUP{YUzfU&_E;+k z&H9xc<+L8s)HAJx&WH5Zr}+Km@j}-st#$>lOS39y|Nb!I9}T#7Odl5y9Ezl0@malQ zSe-DFlg3v?{avh{f?xc1@yM~J@GT_|DOXU$y0wKrdMwBV()HNx<#%OCqeP(i6JB(e z|5>M#o?alhNPQEbspcp3Qv(xDfV}nu?wp_}7lqyc(qO1NvzU{gfTj;P>CQIz%Y8pi z>0&GR14g_uCB3T$gkiGt>>)@sR+rah&&%R6e!)k>F@rk3p={Bwg8X%VCjsxHMrgqdZV|Hn_}%ZxK3&@{lw^64iDf z4qKzE8l=DDBVNTyr0;+{1yDYzOB24phhG~&a6fq!foX87q#l$k8!7mf9VpaNNr$rT zt&b^*ANtdM&&Et^zil0ly5@c6*0Z;P&G%_M@zw-9ce!ny*3a!)4C@|b;p&Oq*$AC= z3UQ-KdPL+QE?tohB_kuqPo{CFTQjAGyV@{%Kh-JL8JquV0hA}08x{%%CI0So4TU&| z^g|L5mQtQ7b=W!e>kVk5wXo7{vZ+LrSFHFxe$Ii_J8FDnz;R`kZ?tTHxAHmUc$ zuZN{r7RAOAa%ji0D6L(mScm9kGfqQlX(;E6V?;go^-!`7=`La$=h4`767V?6b(C4cxPB$Vq?^I=D2~mHEU= ze%j#?Ey)`vJVW7kbztl+;S9#+5$U2!R`|TZoRmBUK7AmStsg?iygX5lPC_0c@D zYMzpMh33o;X6z^N0_5$}M&F58#^Mp6<&6Hy;qxwP=!&`(w2>ln3J941dfkLN&dX$Y z8D7^TjF71{dX<9C5o*Y&^NuIvZDaTF3P;7C1GuKsB>c@d1{1;hI`QtLQC!o`9lqMz z82VSe7A>psm`lBJZb9&a7hLVcJ+AOi$Bi<6=^U%w?eoEM?v^a}&46D_5#MBG4ab?$ z4gGjeSO&=$WNuMMdf~#Ylys&QG|xr@c0E>^Gg2vP!6}yfg3M+LCjj5ki)QX1_&6}6 zT`C)QbHX=>r-}Mxd29YK1#?ip!xXPrQnwzAp`rwHL(pu=obbi3-qN_U1_B98U6!I6i#l+Kjm>(Rg0{7ShX0$|?Mi)5XDQVum z8a{b@i);PhK@>cadA!3Y)3&%RA@6h1+$<{X+=rqYZ&T;&+r^VnT)<6+j_b!C4Rd*u zCP%+VFV0iFFMY04Dk~USS+WWvlbEOt-Mj#2wMP7avF+6xYuyZZFS|}`udt3}>wkeu zZtNkfy(3}3UOrUO$5o@%PJb}Bd}=d$ljIj__-qjTb}jo|F6Fb=Ua2K{_jyr79IXmv z-Qe#SZrrsw#U<|3h4ncaI#~j5$g4t$%t`NbQu1AM$=xu?_3&NWRja62JhM29!nTK5 
z;BUwZ?gsKEI9WMKoxumIIh%Z4`U+;l;5UT)$jBCwfi18@5+)FnHHX2$teSR9P4E@U zUwMdIj*KFy8`O#L1|_^>%IYZHIq4OPwtctmsXA0R5`LrHgvW64i`{$Qx9&G>M|E8- zm*0;$=q7Uq?ih`}H<4vuqtB={9m^c(56ZK&A=x|REBGJe_uitP#_p67dbZ&vffaGD zkT#a^K)2#A)YpI3rI{18K z-bpl7=2?lSlAPtXE|Q9H4ORMWW$V?HdUb?#@v5fpU;A?OWLd8y)`%%|NeTd0oDmNhxbB&uZk{6t_C*& zk9rP<{>*{=&B!1aQ(CdGZ{gY3;}e8~5csb2WtHKq6>e_&{T57P?ICpFzR4k%Z17uy zSjL`ge@5U+3cf`+ogQ;#8wsC&@$e1z+lq6&aQ4vV$Q;_ZzZ-O?lH0nc5NZ-VaJH~7 m0;>V9@wQryEN021Eqt5N{x^XvnfAZDdnxAMiwOUNpZ@>@QMl>= literal 195160 zcmbq*cQ}^e+rJ1!c7rIQBn>Gg_jTSXX=|sgp}mLpj!;&FiZqCl&_dkTdDD=}Xeruh zZw>AFJ1_71J&xb;`|tO@9mjKMJkNb!*L9xfXFbo8hQ@#Wvi^U5hME7@Uu^!*&;S1S zI`mVU9>)LuXPf{2`JX@k^=~`9Z==fY7yqxn7q8KzhZa5R(nGvnhaS50s7H_b^w6V+ zK0O-H!+;)!^f01FLwfvgf0?_Efa;+UQ08 z9IcOMLn7ee)d|>cf|k1N$~URy6n%BcCsX{Y425w+JL22iY2Z6=Bizc~hnw2$=eM%0 zvB{XWuzcZp>{Ss552tL02Zs+J)4ZEIra$rpBbM>N4HmfURwmv?Gxc4at!Q&jL#6BU zhf_?!V+O+h^Dp>qjOP1xN@!<36QdWzawiXK%-NI)ZQrJ_Z^K^T?1Bo6&K-`-x})9H+^yL-w0-B z{zB;(l`QcE?l7@&Fdo_8lx?c|#m4W;hVwh0uxHP5X~Fr>`@R9z+{i*Y6MSY6$Y?(> zFE&N8aKFIjYX|UK^M1jzJ+0Aet48je3J=&>Xeei`me6p$zI66@Uue_*92>Sg67QZ1 zh69_6A*1nFzRN5I?~mIHO`Ak8=UgA`ckVDwoM;Cb9Q(U;( zzJ)J4P5cL=`*b@s}Vxhs_t61zK;erugq+jnpa<3i}xYk;0*?PnQ)JZF4 zg;D%(Te zziBixv(v+k+qZ(|$a2_oY6DMyZwUQ|RL4#A$tbem{cXUxbnT;%z%zII|8^KG?YWC* z95RHr{hJnCo|}YY^Ws41tE~=uc8A?*JCT)NSj$EvtH1`#L%O#-uHa|4Ex${cnlKmb z|20x;Zn&`fCO@gy5~Z>mp*U-DT}U|IoSnUHtv=e;9X^cq$CnfIFe9id%*}o+b*PDh z(BbDX{MC4#JatK4vgIS}X4M_2*YM|<@o-PyLhiG)B`+>(qWs+08p>?fV`%&+nbwV6 zt4-7iMGL@xWeNw!Lo7QYI;W2lz4SaWE<~<|q7$6l=W= zXf^LM_(r>R>;9@f9B8x=FRt4F`nnR(d86JUJ8YJ1PCf1pt-dF+HyU4+w&Oo=gHhep zIp3P{?GZ4Oiz2iNQ&WtY}Hych4UqF4o3`REG zjVp$1M59Kw(t@XHINIYPIt<o*$jJSl}=hpJeMaZS|X%{AO;^E`~~?hS(~Kk+v$2D4Ki zR`TAh_95XM-O~}-fVYg!OD?=^280RJ|2y!alYyFTG7NY3?5R)>5k}TmD{fAsp0<bFOtvjUncco}dGPMM$2)h;3A>Gd7gg~q}=hqKVYNk?}2X&`kj z7$$N13`{f(V=vyO!x>f|?~X_V`Q>svT<;^)YtbC9EZKom9j|hM3u^mB{;}E|E;@zt z8HtI?-q!bFPV*Z;y~BDmIR!3ze<)5r!zJ1mjAS>M{l8eYx=RAzxn_<+Ge))B2%~Ma 
z)VsU6^2+xO%t&k@&+gt1FBBYA7JvK)0`oumJYg%ByMg=u=@1mS8r5gpz-#M9=63QX zv@N_1Tkq+JpWLTf!OV>JO1185He|<5v^aE(w|RCP>H6T2S;m}NT!2#_^=0Z2p!vjo z%O3;P4t~6ftmPZGxrL~i@RVkLHBOm3AG;ph2s1B^!<`NGQ_a0F>Z+bh7znevGpi5XQn&9TT-{$~%EQ z@YsG7r+LSV&u4+gph;jhcQ@X+ya=h5s=yQPBgc8&@fVb99cQ4xP;sp$7nWhq;D)N` zKf+o}o)(B>qRQ~p)ssq<^<(xuJ`}$>cEAw<({Y$VYdS+GXuGzb9633I6>s*#+g>^7 zcTE!f!d&Gha;syJ*q$BX0vl+aB=h%Y&??*uO0AkfO8J$%Ki&1zKixy%N}Ert^{Kin z=Tdi|e#5VJ5b} zIej93eZ3!VsO*Pzp^ez%obT{9_bDEEc?4c0Tgp2d*a(hg6`4m^!t8J~cy|-xq(Mmh z3zIVT0G%1_R~w`4>%y6|av*LZJem&cEW2S`iKZ(0WBh3YtlqUx5}M$K>o}zTmgZ*9 zQHe8f>%2BvSDol-$72VF!@7 ziO-vVyY?O@cbkfThY6#TS*xS#FzE7LOx28ZBivH^c3KMgYwd+5z)xj8Xzl^qJadLq zAHMUDf!Bce8K|$g;rdXh`ku7Wm_pF;Y#`=5?Fn$kMGqwcw-|gpPwvk0P+_muK#@RX179moU;@ad0ze z5@EqkG+g%`#B*qkuyW55(m%TDP4~fUVM3g!JDgvX21zMN>`(A+Y|*I%6>B$K)6f&w zZcgNcJ<{(h?eO@()nI;c3(fJWZm9NBA$~J6{(K46d3;2gSJplNYVVhDd@uFQ9-vx) zwcbA@3`D{{Hm}u1nloPry4)NEM=XDs1bv^@kzY;-5x52>>D~C)s^Of@oPV!hA^H$# zM&Ryl#t657^dwC7bOGW8BwXTqa<>DmRcJaqpUag8A6?X*!7=FWyj!f3Pj@Z^-4I=M z-ua#Le(rCAPvCGjho7$jz5@uM#f&frzwW(R$_gPBJq4A{yddZ zK7?)ia0OZ&{lf$nJN@`6rMn(yq_s%r?Bi3pwk$X%_uE^sc5!b;+KW$`_Jaw|OPcT( zq`QgSzOp|jUBihx`M*{NS*&(C1iaLgwWas4@lI1{+2bVPKn13|I^(FHUD-FsOo%XX zkPmDgAV29xKSS=tp&HGy2Ft%_2sAPJky&VZ}YWu(~ECJt~&{Ey^KjWf*wFAs#;u-9CJDU-QOY~6IvOhe!Pk&Hyu23K3 z!tx#Kq2@~i7~NtKt}?g^g!6PsC*>@TYMx-)?QJ96tRJHFju`Mh?V&`b;Hm#d2O})WCA`vC9R^zh-JA8il7hsmAZkWBoRRl}+$~8! 
zoWsf2fou9BCODP$zz*7a!>1+{e}^V10j!C2!S?3j} z;rgl&)MlD$k8axtuhMY!1aCQc>L*w?wLgzp`W1yXC;ufMJgKLWcEJVhP@Dk>t$y-} z9g;8R&9Zk1J_ORGTKW}rQ*58Be+G0rE9@gqfT?2fnN7h#T|Hvdpj1Y@mf5rw8FxKBL|P@&ckC;8UOz ze1B|3bDe>KjgJA%HEGmv81S)z7dcq)g(J6M`=;fXb+&}++8odQ7=wLBcu~(-%Sk>a zGT}awCPKFx?Q3-yi#C?mf-(6Jg%ReWz#RXTtyJ1CX*Ugp@KpL#b{guMbw$#aKyyy` zxk?iLmT*_HyLyw8PXNMMv~}w)&a{?uS9M4g{zBjg4!WhS5~lK@Iw#$n`^x`+e=eWO?bnFCm>CR)lOZhx7|ovwK_eht4MOKe_qhau z`-R`G9%@LMFGP4xc>Oa5J8CSf<;S+k4W&9NV#2LT{$$5V!oUvd&I4Wf(VMJxMxN?K zVZcj&p#ky@-gyJzpiI4}YJa*9RxPIzmTm=8+iz^bdLLTj+*4E-_*|memLHj?B>KBqOfG6K%6O>BVluo&4a*5LmsgJ@{Fa*6Nn{G)M5M zmPlKSL-ITZze85+bgF6twx&9SBN|AaOyD5-<~~SXor`%)u$e8+ zmybx6RcpUl68TV8Z?+TbvoyJOb`oq}LBv=$bYGQD`ZEGZ4>0#t?J&Y56{}_i@qhDA z@$l7A7-^n>pFK^1G7OctjXCbILDDubz-<<4Ht+z-aukt+G1Lq4Nuq~H+cR$8 zo}b@2l^cKVPaeL6aC?MgSY*W-DmKd3=q6}#qb_e}Hi-*eKzzpu53y-xG9w+xL;acw zorfZyGwF4OvXRz#qPL7Tzr|;TlYz1bnKV2kLmnp|hU7h&^^^1LqG1;{DWjQMy~~;l zpH6y5(*ES55~skvA%{u#+6v64d?*46qcp{OX>VtMat25HUKB|` zA!$0wIqlRRFW13T$DxpMxv$6xxtLYK0hP_SVrl41K78%RT=G89H0Ty1|1E0C%8grq zWOtIU%r9njT_Rx3J3Syj50qQM>>1C%*KPt|`FsabHie`sgr2}3FaL1zs3JE*@=oNt z8w+nrSz|Am&V&&UVM4#`TCAZnRh{c-*6KKmLmjbWR7;_uWy*87!?KaEuH0VK-|cr3 zcfp^ychM$1_D2_*x2?pt{o^rCDnrWpNS~CkRo+uT=;uxjj4zgwL2wN9h%B@(VS_?> zi||JzgX9tJstP^}p_cqA{6o(HYMW8Ayx zZ|T;2Lm2BY4J>CwGvYLi41Ug+eA*5jZ+8L8>mn`9hlX(0l>d_dSGb z*}7U@6#PqmmQw~GkvBp*KcOEb;tD89F913hg=SwqP&yt+w{oF{Ji@;Kc_KVi=MSgr zi>wu4SQS$C!E{4{^ZK-lW|e=AA^CA6Op+tU=Fz-t7C9>$IUyYdE|KryO*Ty9bp9ec zV{YiTs$JX{kn9#4(M_{ITf(d*e{190vX>CtNnG{g=h{SQZk>*yi z#)BBrqmz&_8~k-KmsR#YD?BfGzyzVsMBO>%*!=DY$J%^cWM%`eu7`hrZlg!{0Fj+i zF1-;cuZBj$Tt#mq=^^z$?;rf!nt_4O@WwHtvQBzZN+2eRH+3?@r}xBLg*WPba0UYC1ICvUFOD6C8GJKe>UV`(xTN9+?;vc{&YQ1XW6NeP$%0iE zzpyFOa*)=o1nt}l?lX-sng-#D?_qq(h9+F6l@ZXo;p-eL=6v!Z zyW8N-PNShXWSspzgNhktH@v|iqL@V-f#Mr>LK>KA65;o$#=@Ra_&`TEUoBziO z`|W6|RwW(bxwFk>-_kiaz{LdWzW0Lc_HFs(M>ew9+r5GJ`J6V>pxc)!47Cl0nRae; zzQee-UZ%9M)k0RkkEK|bY}KNv^xM$g?QElPF#TkNGehUI#%r#kos&5ZZlMKZuC3-9 
zymj#TS|fS>V~#D-#zEmpE$LT*hup8&1%E8Mz;@Z$LRY;ea__^bcz;kJehctc6J1MS z;)+7>9M>N1#I%>UpNz-zL$0%G{a?zE8Z&rfa~$3sDB`K@jKR#_QJ(y0KX$r&m=BoM zHgCoE22h@8EPvjmkGkQ#uym!f%yqXx^e#8$Ti#Bz+`Ae5x0T=@PYqnry9#nUSK#J8 zzu=AbP;9;D8yi=xg#)wKyPZsn#g%K4lN)W7t+xo_m(oM zX$gD1#}epF@!YzejCvRb1~lL{<0mN}j#v~p{oaL*%o3nnxrF0%RW`QVmT%c}i5c!b z107arp=tLgFmB;TY5TN&Y{t8}Y*=Z?7zpnby zd?FalAC8S5*1^V;-?N2TrL59u5yXX7;6?C6hw6C6N-D?uuSNi!7rr~t2hxtVk`J%Z z$DZ>UH#rc)_B>9;pUG3?Uiv3cP0?0wnV7@n#tyi^%N*8Zzrbr&BjgqB4&kc5r_icJ zJU)G3j?`OuhB--4TvL-bwmeCRm{7`HZrp&u-p%;pxoeru5?`#&Z%uvwhE276&P6ZK ztl}}%4(QC-(s6`q)D!)mHB#wJq@C}BVMe_%to3Xk^f5AqWO+HGxr4jEAEA|w8`3(V z==uwY(cA%T%Jca5cgN9qOJ_zi&QFyjvVqxuv8&xxHo8kX?7z~O&AKxS-fo%BI;;x7 z4~IX(4`>hh%c>RHJJ;HOjs0$8t)A%NhE!wt-FgK~I2{8|#@+>5lln}9@v`u+Fr2`XnahYcW8Uv58>H80`b30b! z$Ya%<>l(o$H4JNf`$Iz2TC6nsz?%-)${Lub68y0kzYu2TS*<#-U zZYsm0^kBvf6$>J-h+g3yl}+Rh+des*p%Hf1ddMDRyUA_CqTsq~7U05jxYyZ6Ju!9( zhP*B0hW_785obWfV%tyq;JXrt-1lY?9eN;vi`)=H+4nZbbsnM zFQ^lL5dG8AmFzy#eEI*ikm z@x<6qKxc)I)CMqZSZeK@_C7m>Z5r>))sSS&{m1#&Sxdq1gB#wPkq;NW*WuS$pW%>+ zQ!TC+FVVp+!F6OC!v;L=j5!d`V8Wzr(x(nbA+1 zf=jm`W>VP!&vfXcEdTnHHJH8u(zCB3ogMG|N>5x*7W4mlb{#lYaSy~=bjG$}%OX0$ zS>MUjE8}t8s9dnEN>KK{yi(gc{r?VzWoL_dU`Z}&Y&g&C`dwoV`}X0BOON@V^eime zI|yDi)nRuoUVy--h4V<4nFR6G~lAk9Xar09{arG0 z%LSw6mFz>|aN-ax=y~%!_5D@$w`wpD5A!=Q88~g21N{C{7cSh`ChCgkcG;>YT5p7T z7w$+KZtZ4%_5E=4Py;!7R~{5Z4k3<+!f)4y!n8S$c+**Dc~x*R(sg(*dowO-FfsBE zzjM@(*3%!KEJYw5mjy1!UQTG%GZghQhrrg&k*tlPA!~KpgCGzCk#NUM&sGXa& z0>Xbrn9nAhZm4Pn?}BSX=R<($6fW?*dGb-(XC*7TK9kw4eIuRqtwQ22X=slZl6{wR zV0lUgpI(k|qkeBFt9k~0xDPfaZxVgTiE|k1Qx{qfO;(cT8^{eK9-#R5P+MI&{!1sR zzGscVQ1*7` z{!xai-~m{cUc&yYYzpRopYdZ`x3YGhd#j>`%kQ^U8=n2ncCWn1Ed%V>$YTpIckn4m z=oa^xkw|NjiVeMyZ~ zshzs&#K?7U?PWIzZt9|3S~L-N6)a+1?S_(O>c<6ESNJ8e#dCl2z^R+XS@N(D&V+vI z+b*AN@qbCYuFFGo?NMO1sL`a1AQX6P-MTl9+}Q=K-c)kpQT4~@2%i6WBfJ`HgYAqC zN!NzHBTRO~l$NU7i3a=FigvAWn8!9Ctb-vn(fA-iL*8Cm4nl(np3{qMtd>T-&h4HP zioXtSt<^A!>?mEbP1xY#YER*!Vongq-5A7Rg@WQv*P^-b$2^g5|xOb=-F 
z`~rVjqDOO91ufS{vti>eN&;sLW*%bSblz~{EM|4RxlGTIYj#Y{i+r?|lQzqv>+#hy z=CLL_jzcHQKXgX*uOF~JLkh9I2{&)06_VFx~Qmto!I z`2!X)>UF{YxNSvyh1QNVuZ*-lUUA+Egpm-vf!?1Z%|jRlbJ{$GcOl_?bDuT%_m>lf zldW=N?L)#hh0c^uFqp+-=M*c{1AOVY9W*z+d3(n#IIcQ?i8+BkC$WdNu1dYh z`%b>aKNMaEnoZ>FFbjGf$Et7F6a4OWbtB=y=6vBLpu^7(OyK3esnd||Pv_kiiw$qW zx#M?{FeLA*Wg^btb-_ZzP)*MsN^>90`^9|1F?%Kp%%tafgXgjcuszRk`hGi@eJ>Io z)a@cgzi^R^w7#+~c{NC0fc9npUEPo4$#1jZ^R7_FqAQuuS1+HQb1RLS1LTWvNP{x8 zF$|M(|1o&nU<*!}))lX1HK$s)RtIlAg+kYS)0qoYdroJFgcDNET6={!fgN(3#T|4< zsypkQ!9}i*xqn(a)#JlZ@&FntXKm!?vHj$pZY|U<`z5tiQ(qim`W@@M_@Oi}h*YWH zYHMlbokgD+SO!gJd9b9QK3IR!4OsW)FzNbtZmnnEcOzZ};w1EO=#SNvlL^0CBJGjS zc(xMR*6Hhn|cN0MaCkk9>^X?rgvvdA))3I$RlW7UP!e zL3YVPU^UL2WRHZSYWEy{F>krTf3Vff<$gQjm7O>2fo7GFcY%>ko0Q53e@+^MU3-~} zudijZZQCR8-0|_aQI*8G^G|(WvZ@79oOq9|$)3qw0>_H$lh*&hb1?9_k#BUY1f0U8nbslYxAS`{}~jj5HDp zTkH>~i%hVe??! z$Zc3Pd6Ycv_N|`U^X5Wqp6n#oJEyIVx%QLy@AL|0pE(KwoBsH?;pw6%iL?alym1f? z-H$=R*VoD_Ir#?ZWLhK4UcDZUbmELKMAVMI@2Lu}fxqt9aU1#^gYXhUKOIo2fO

9`sEqpRwOSW^`#R!X-g+>G(y5A7JONOXoKB*pVg&L!o;M6`Q_XVaxyU*u& z?B1um&}A`Yh(T=f&#_AWvIQXUC+^NLOncH@%r3@sJqF}^amoB-40q_H_8dvQH}Y|w zz$oG4ez(~N0zXSv9_NHB{N>YLD7d1-L|Y_pt@_okA)Y+MjJKRbpI&L8t=FN}bG*tK zDSQMtHM%Y^0oD9`@&HzZp-a)^!wV))pTz|pQJ+$lk;sl%_~X-W!>Eo2_#DsP>V(f7 zl-)ivG0SuSw7os8mQM(K^8c)i2Tjepkln9=y6{Q{d7%YB-WLM<9bh4EJ<)Z^K%|TX z!fxlFZ9`X;d_De~`cx{e@ne(~@Qka+(PzR`5ZqB%3NrDMr~%6yae+NE?aa?jt4G?k z84_;P*5zmNBXrnjtqM&{bHuvrSKw*tDCpcQg=RYr2t(P9cas?PztGl<^uJ7c6}5(q z;(D1^2vc4F`CdqfK8#<7FlGMqYr+F6l{w04dyvUf` zGn_D2;4({zut2kMo+vUN9HpnyoN%Gx%*GWm!YZ(R@R>bdJ+s!c5I%r?mtJ|K^U>{I zYfiowcD#F7t3O5dq5sr`zp-(~`2#+%*ZkX$`NN#H4IpkwGazoQ z%?eib-J-+{c?IJaTM)J`mxaGQye1U;t;yq8r`u6B<0F%P#re*s2=}^x=sj(_7f9!> zx`wyItRKcgW0Kx&DefWBYmjsTTy}bdO^4J6$~RQvXZZN=B=7#Z6$%fM(!DtkPf6h9 zEfvDgT6_~4&kwCt!WD(GBUo)=!31XtJRz+wZ<+0cz9)}^&G?5v`(dOlIO$CEI(3lF zK7{Yd3_)fv#9gh6nkEB7F=ymeB*rd1wL^eR$A1UJ^&l}DK z$LY9VZv$4MMSa`av86$4zZREz3Atf>D86fY$CLNhbnL;FK z*GQ44Fycgf)wuu~M(Xg2#Xp$tnfrX!otJr*d+RdC#T_}}G1I@)k&|9SqxshvX<4Lf zk!RHP!k}Z7%D${@PFyW{ms}BflS=v&?hn$Iof-{?lMjw^zm_*dzDSt5S&7!D&T}1W zs#bIl<`1nKtIqM;82Lc8BJ2*3&jX>mho5g*n{Nc1H{f+H_W)5V(u8n(RVh+N%88Rv zX#Eh~gCcjtd6|hIG)-*a5ur_h_=sh8doGP=f16Q`1=E^Eh--4n88|p!X5iPm-Y9&e@OxZjt^2KB3*5yHKNlfsbG+E%2~N+OK)Ki& z(y`e>Tab<#C@1aP;HF(+rcx$@l&N9G;#KP1Cg+%rdp~&Gs;BUQlx_8tt8Z_q)!7+u zOp$WGJj!8^YN(3Z2=9Bw>={nkbrtVljYqmBlJ9aIJ-ZKFjZ@KJV-;)sa8oTdue0`) z1(y)cl8@A6 zwX;HbG+XYtfwIFsYDcOGp~c}r}>q&Ti+XP zPM+k4hqQsYr!`b`)KLqnI3Jqe#HHu1a#kZpcE9i`&KR_Vt?t!BI12uDT*T*pcr#h-*j_WhPp7<%!kcVuYflc;SDtgshF#TgZw!sZ}V#G8YdXO@R zZN4nRR7+iJdWNMn)_{DYb70zHAl>f?mh9XLOP_Azo_7m)noSbZyb=O^!=^!EotBL5 zDVOI2evJRIj6y}?cq%PoA$<| zuyJENiv7tMhAQ?N&l;CK;3q~rb<6%a1LGdIV$C#- z!FbYUb+ay@^9uv~YG5g!DtXBIt#cM}6 zg-&V{Xq*=hHkr1vZ+0JiG-wlE{L-YL#a7kr$S;ISUrRORKrAnKzkpBv7pasa#Disz z{`f)J4E8$3@VEB@nEhT?UJ+DBy;!$|yN+0ex8xjFeDfUY-+w`KV1PY(eZYa;zQf9~xubb5uiaHO+RjvnF<;--};F6tswP9miW6%+o$GV{Jk(M&`FUACH zz{5`75Hq=+)U|&l)Oq(oDe8P319khe>2rTG^SC0GeC8MKT`*nNeEATmb|`(BgLms1 
zQ2*&*gU8=t@SWMUy+ggmjThO=R0lTFEt2)pyCCi(Eh|U^i-kjZpzbBSbms=QKKq=P z?5q#eV|gB98nM={kegZUqF;Fs$PXOw7bTZk@{c6ra&6Hqx zk>&>UJhgGwRxCfS0L>|Li7L zPO>1Qo@q?<7g`qB=ve?I4PZ+JqB)gelF$JzKY z#Q{29$de|_>?w(B1~_+Dof^j~+kRgK+s*CN5$-LNd#^6RqVay5dX7>5NQ8xQ=d%uY zsU%TyN&kv`)@jtJ=Yp<-Us7*elBmDvvkv3&O8YlF=bnZ%e8m8?o75L_9z}^YkziTV_TrPz;<&svFF9%gt_`EyB7>vy1Y(nx-2sUh`kBhc*|qzD`@Njw6( zFZeS1I&SD;bc=nAwT8O;wPeB>B zEB!K==9A0P@Tod#$kyi|C%l6I_6TT(MX#{v_0@cEQ3~Gf90ODnw#e%=H*HbE4o!W> z#sxRR$9DR)=ef^25<31G#!Wl^X#LR?GX; zQ|VCf`aZkt9>ymbdSb6%X7buk^LWR%&86eBci6#jw|JIV@PySAM7aiR{5xt}-aB(Mcx)G1TPwN;+}!O6#5K8>2Nhtm2F~yqY_ZD% zfGKy5NCnrnz}A!Z_?%)}ST*o9+uSZQxBU=zEL+iq(L4anC`+?6k=2GHanI%^vc>oY za>I9>q_CbLK)6Sot_Q>k%v*0OPFv9cYi5`e9&h4=&!~Aj9U@;gkbMhYNQc4KHcG=}E42M|WFrG<>X>sLn|7v)ON>!4DdSmUhvIIz(s(gJ3fq57lX zeVTK8ku@Its+}2W63i&g#)hBHNxn)YYyC-^&i<`Jc*O~u;kKa zU(^@)wXGX7$gG0zw>8y6>pQaOZAS9A-M8`Af#0>*xM$aRCU}q5fpIUoB5@1Q9Kq`L zn=$7DeMiiAG$(x|6Ha5V*PqyeH5c3>OamD89az~c;|URN#Er#L{Ml>NTTN?cJStBc zPB!IYmO;bdJj`@(#%j$en0YpuO{#0F+Bf>62FL01?k5J9gSZauQxUWGcfdd1=p0B{)}^$D z07e)_n&BDi=_1SCM)3?CgHS8)7FypwjMLWc!IeygWx& zU3S@rb!*gJB~HUL$Fm@BOAN-J9R@vyXsGWmokH_Kf4N7mMbJw-2X;7bf|WBefH)LV zCr-eS2=WKbjD-&2SB(Ot$ST>O~|=(JN@m-Na9=6JwS$~hbMz#>(@?3m_=)~!QDFTL)Iz#9 zh@J?3{3P#X;7DBLu@`i`b~4iMkl1t;j&M(8f0-B4`@@*fvZUeVV}7}e?hSOGZY!5n zDccW>K$;<#K4UHtS3+@-F%)$;12=;=qT`ACoHT}98Z-;WoX$tnw)44}>8@H_CJqrA zgf)rDW{+L32;Ap%Prl`_#7TFld$y)xRQ`^<>7#Q&)SEa6g~r}#c}3`c*!HfOdVE1^ zxObD@H&3MRGA*mZ;1{16VGdz%BR;ZUFC6Lcp3O2?K-gm`mn1ZWhimI~+mL3A-WI43 zUZLNs{U|hf*J<&Xubn8lO3CC|8mmGh=WDlM;`+qXIHq{A(B?e;ehj19vNXp+=sNod zZu{090&K%bf5-4?p;t-oRkN@@yOog5-qOkmp>8c!>tRo~!AN|q*33Q3cl17wCC4Tz zx})+K`3GLD`3MKLIgbU;O5JEZKw3p1e8eo{IOy-=144_C-{RhS_0?-XhNF|sDbj~l zD$NuMJ^!_yo=g}CzQ?jLb)mn`Wp^*ZIha6_VHU{Dna|f_Do`(xZ`4Wzpp(X1k zxR|Z#a~rK}zQ8&E_P9tkU>ED(lU(+kgRpTA@$%?LK<5v_Z;0NaIfRHN4YA$C#XwpD zu0J!Bryh((qt*Qwojv!fK8_V@7O5o%fo)u~Rp=MQL4Wvw{&CX%Y-h|&Du6=Y+gN_% z9Y4Bj56!WrOf>-EK|7TWz*)_WWb!SdZ&jV;;Xt)>w_EU&Ep9y?{`K$y@+Sg+VBU@p 
zQ05O~^Y^C{cFd*oU>KDb$mSiL1f#3AD;x590o4OZPpZ`8$osuT;dM!u%Yv`G?IxmO zycJSCiH}yGhG_*ST@2m2n8~F3Yjq0o66@!GmJ`<3&JSrlPJIWpHEHPs<{{CdznJj9 zq`#HyFWv&*v2;l?kVb}l4P$j?r*fe!fpn%ydWa9c^9o{?maNZyoRx-$~U z3nI;vOd6xsTMukhM0F@UgDi{gM_yduZQS3XIhIB_1GRPzS&Ft;%Xuj_7T6;vA*MamVqux{m=Ds*u48%=fbKx65lXG68b79-Q6|fh(b%3xGiEjlC)0`I4 z9{MZuc3U#d+wH*X-(a5AD2@DgI3wRui@l`Dd3wcYB!9+FSH$t<$uBrzD?Y#BB)6OR zkv!>kaX)mkeuRVt-30$wwhLs`+Zb+@g6Ut|<_G;b2o-BSz~GXuD)}Rsj}3<~`uyNG z({`l6E=c0sY3^k)?@{Mk3mjv@Q;D1)Wb;k9-5~&XbluJf7a_6HOGu&5@6qeY&&}ZV zoJUHY&mOVY!w_qhmEg@CofYB*T%J4=i38c>r-Lx#S|6C;V=eo1lep)#WN6EK;4uW=rJ;iN8bC*9m;HA-4i!Fv^0X583dzApN>MO zyQL1;1wyla>EkIhB5XOVa5v))YWmkBc_AALvEa22=Iy+|T1k(U%f0qUBJUW>GqBI` zOT7P>QkIN!M7>#VcnZ92){@R+H_-Fg!eQ5t-D|{16No>H)<2NIF&2j8j&@ z9W%Uy7pcVxqs_0GUyTQLpV%2!^%*Pub9Ur9fv5R`#jQZ}?VIrvfzBE?#J%TqHvCM| zCNAd8J-{5vQ-Z&jCZoO+wUS9c%BN1~iL6LyTG`Th2j7s@2`JB!Xf9w<{mw#1h#I5l zRnjZ+oZ=*|m6n389W{Y;G;2_P9?7Sugh%w9>p9ps{|KzriU5JXA_pL!&5UN$g%m2>RMM&vX)pY=uP5%b93s4x z;6iYI5zbl;Z7A@uR-fi=&w;oXIoQF{o-(ODu&aR;oF5!p>siw)DpBl_^fPIhJGvelXjj3LO;`) za36V~LVk>QaJ(%rpU&$f?rgPKDeBOZczq!!{g=Df?<@KIt2BpoF-yOmOt}XWc?fYo zCtZkU2D+-qcpel|3xJC(2M)T~bQjP?q|5fs)#9zx)%5sSBTG1JvLF!BLhOs~ygR~)TA^-Mi zwBSre_|C{@!4k8bNHc~D=Ed+gUTrC>@)Q0KNux2LAvLrPu)-1c@G<@vl&viS$|Gwv zF3oFg9=OD;3D6v&+VBP=>;s3g{iKb@5H9tG7kO_Pc|75nS?g~*s6VV^!gFj<%ISi%urOU6}!v>cQ;N0`{{i9V&kg`goc_ZwYisbWT z(q^@@wP0RD_4Q(7nY52Y*Oj*>w}CExgA|(kyfHaea^Z-Bl(mrNO^8QRh=3E_ZGdtM zcKv-vuFSv2dbSGTq9?`Qliot18zb+n6+Vw8eXAqy9oGWMQz@j6$lDL*mR(!BiHw-~ zRUOf*lbYH)7_4+{_{NwAjIfUpCkihNQ(ip~xf{54^cP&{I=aeK{hoF`H044*q!Tyf@7A)_wKfd-h(2~PWo~#Cr>YOW-dI$_a`k0w_?$MR7?2UL>2iP zd>(jHp*n)dbI2QVk@pE*K-fw6RSKkGRN=oVS0bGo&xiw*;PQ^39aF<1>@Ntu6E$JN zlZZ^s$)qkkjP=3E50Tvr)*wya4G6>3>(3(i>rsPH_)yB|cou1nKa-vD)EQY7--8gp z#P=fp^InAbyZ_(+qp7L5$p7+}c9Q~W$u(Q~Q=>wRzIzzUtN!ozW8nJMv|5yLd%v4GePasJ8uclwW9^A`P>7#@-j|t7Rdf(l6~K?7ljQjc(Tu zhx|*CeDEYTb}3+8>uBPzc~c=q{{SvII}4rGPvJ%ZF=)0%3zr18%rmw$Q7_W_kVOlt z;jqaV$jPvm>u3$cJcBrxSg#ZB?0B8g>)4bmTYTgFkQ*%>he_JYV3^ZBZamWnezrKo 
zR%C>s^g0Kx6P%yBPVae6RatJE=BW3yntkalGgF%qt{<9$TN{Uh(qRnu8%O`Q#gqM% znJ2)Ld--@^a(ir;;DJ^X4zVr=FF~^&wyfe*27kSnJ~!5nzWcr;3ft$*bTSbD0Ym)iXMyb*fqe5u20#D zy~AZYjbM22^a{RFePImwNsGW0JkP)a->)=Kqy6mFZv(#3nVo=~uCLh1xD(iLs3#V6 za%4y7v)l6DG&p$t8Op))VPq+3pC&f)gj>1Tb zZ2sGR3j1rZgZ458*L83QS_4+hbpq-iSkw_1ApS)IY_lmB+^y%R$(|0lv1BRk-rtY6YkwGw8&smlordzRQ)aOL<{GKY z-GJ8oO&UDrJ+E`h3qyVeGPiAiv7Xf`$~mUOhqzLt{(^VEV|lY4Nl5pA0hKbauGui~ z`&w@LLyCCbARRr=VnU+g5p)U z$-X^iU22PQVI$Fck)`};iU8*lSJEV{8io7IIUr$+L^M>c!=kpx8)UD>3sb}`hS=LLs+Z% zg0ibT@A*)z_9_N^FHAo34hXk_IFG;CxR>8x zUD?X6SBNVbsI(3g&mqj?u~k#?^N)OPuidj2=ln+<#9zbqgVu(#;I*%_Og#en^t}M; zAGnuQfzRRpkoM+LJ$>K*I8l*^sLYWm2}#oPo_!@LGK4~sd5V{L3L#A*R4NUM3?U?y zMBTG5sZ2?kDRbtGnKSj>r?oz7{nqcV?{|Hy^?bj)_3ZgP_w2nN!@l>NlgTL$SmM}W z?AGdEeA>B%P__Od|LMC#Trb;f-IR3J(Ce#=hkH(zM`d-xO$p7(_XMEMOqq9@y9F;lYK^mZd(ayG zFj1>a@MDqpWb{bZR*SN{Sj&u4lv~9>Tv8j?IotKQxOS>LNOc$^6OIZ_=KOXzJ}lS{ zz9%jq@je!6^?^6_Q-F9Jqi4Ot#=5&jjZj{t0?iYw9p=aho0;GN;yg7_?=T~LhC=&q zikO$3iA_YFAdQEfTmNXcVkE6!YBG{PSzH&ZI0D*aFOdnuux#veu%G`L2=jQ= z*ZNp@zAM~J&gD(s#zCFf#u};__B?X~G+%I8TnD%{Pk?%_oY)Unu|eEPx5L=9;jUb&d~Geb4zLeYfg8`zjZFJ)v;0M*rAzoKRTEU1=>a zT?=X@4j~Pb!6SO-VtIO7wRO7{K)(a`gC~Q>j^VU+s0`>mY+j5r=b_t0J*)Gc7P3xr z_2AHoj?nYeThK}8EJP~~hjTm4-+qbcI$<&Xo4o_v>OaX9eD8ndD-gCp?CNREFs^~@ z@ai$Q9a%>OkFVl0rBz8cm`%Pb=EmM<@BbL#Ad|9OntNE%uqT>%j#ghxu!SKhg>dFg zAUv{ig`L}O37n#ND1-x7E7_E>8JeFJ%aH|U39W~7u8xKVZL8sYLQh;g<2zK^bU>j| z*83tZpSK5ZoE^YVKimU7*Q5w;R^v}c)BTyDLPud&?Qb3>ZD11i4bg$|L4v&jcOC$3t6lo$A3Q=YrBPYmhq zKp=d8EkT#S`+*)17BZR_3=T0RT*|^RIZt7HqlQ$=&b<70Yqi(p{ZOctDUpVN-rthg zZPQHR!DRe-FrATRW|y7Y%hRh~YJv-%L2CUlp}Tk;(`$^zFXjO0J9tfI6nR)PdHh*D z_0HktC_F@83peiCCRHKLg+kY!vYp2x7Odx;s*0Ij%27zbs}gaH$SXc2@~cMh)ADv5 z)t75lN|j4z!4J<&bb4HZgbzSEixHnG#ATeg42F0)Gs*>-G#pEbdBQ@dk0S7zG>*D- zUw6_rKaqTiLNNo8W2Bi-=y!^p(A46yd1KvnjC3K;ydfSfm9z}Cf%FL7uham=MUyzT zh*J&mgAY0*#g%k*D~$YcoOSH(!B)AA#R7vrc>MjjS}pp}zyJl;p49n-#fO@z9qpRY z^}bO^f5Y8T7OJ;-L)EPD4RAKNB=QIY&vZxPdG>q-<8uawlVHs_}FM;4#!QBNn`r}GkWB7gLDcIs1On5X6 
z2w$=7i#HsnjDd?Yl6hE-J^xca14;YgyaWE+`@t68W#M3+*kA;6e?1XNkE(u#8ky!! z=rI1Tl`-*eGx<#UP)__)i|Ldn^gMW1EW6T?T`qlsR$W>m=@r(*JrUw2-4|oy&2uht z(p{uMd#Pm&=1@NNLE-gi{I$7|dFU@Q-!0eHGI5z?GVY&Lx0jwggr12B+TXdB<4MQK z?Y?%BNe`h_(Nu7p=`lcP>7P|Sfpk5T+f5}saE%s!hXLU+ta-Q*33r(AWuz}ba0T%v z8xY$Kj_)fFn5Z7yeuy;jbR^HnCSCql>l+9^ahA3hj?1_vydOTc-%PmZfOSt73!f?U zRc)@D6f^j+!%*NXY<=V}oo+n~UN5kyeGln4epW9Zq@*1D@MJqDJmd1mPx$R&uD~z| zITcTR<7c=p{xtOq(~)#G|1oS8kT(V5ZzTTV0_Q@F8`0;dk;mxASVoa(ow|qpKALB7C*v(H27q&jwtRhg9C&l^|)r}ImrR~o1LRtKZ}u5thY>pmr!3p4w$)=krpQG zUPF3g7f_!7NsFq*pPQ(Jv+P|)99%3q#Hd$+vo0CeYMbGUCE4<+g*xq8H#WKYM=oIx z>512z`~xRn%JKV1XnXh|PR72BvUSoUWY-4Y=WZUo3ST0z$7e`*fXs-d4AkpI&~l zCQn=qRGVzy!k;+lvY&J+&|7$Ydfyn{d4x8+Ep7{U8#mubcDM0Fx_&Imx(_4Fec;IXGRV5)!$t37+4cRB@XiT+8{vXx17KHrpxo(X z0}ws5M=6cu^LLj*zw98in4P8G9d(rYE%SuFhGtofQPplBx2W$--%|tDQRCSCYxhBa z+DlA6mWo0z&&|{3Yq|$B(p%uOzBzu_5rrH7OAy#7AL~-g7SHezzEu+WDC(AY9M)CP zT8Ft?hyz9t)|e`1sE65LRVE`22GZ*EGd=RG1KGIc+b~^|NBFv!lV8WZBXZf6U)o4L zC2-g<34b*13#93A;kO*DaWMeW4M07GT#6?Ok0o-GJT3KnjAhC*(hj|I)5bYL$9GXk zI-hzkX_z;DAY|>dM$wNGdhw0Jd^GV_(Jyf=tc^XVP+tg7jI;kn4ea?<+PtY#~x-lROp#54CuvwQH?zdbHqnjwwM zTSgjj3sAiQc^UpX#~f0_MltfxazV;ZCi*JWBNG^+3Y|37t_NY-V43=_P~#E-_xg5W zyMEV^X^w=?Q0D}=(HQlF-o&gs-?>e@O}NmZKl)veP-t!80h+kCk^e56O8v60*fPUb zeRAwP{`CDM^gKMQb6X1hy-@fd;*|a}#ftQzfh_9i>cvq&K244M(W^Grl)u!=8o@;` zO!RY!Q+S)zmM~_)GcNKaU}hB_%G(2kdz>@?_qiO2X)OcDk3N)WPGr*Ivdu<4nR+Sg z9(sfOu1U3Cv*S5;tYbPFM9+e>1QR_O;wIF%NfK#3(kb=iiZAi(7>TqN@3S-( z!-o&R&AMZNYJ=We+t&cXcRpPAG+%8yiivt!H9Z{W>ep8(zko0hNOK}_F}9p#CljCK zlDB})p)PVs>M+rRU^Jidl^RdM^O`G7TF4#t?x6nVQ-w4XBW&=2bCzo* zUKIU6sU)?CQ?F2d34LVJd7OG){8Q5{7;G>DR@B`E9U{vaVK9cem$H>RTLNK*6ga*s zAJ?sk=#Q!Q@RjH-F^79oODYsS9eMlOHk|s73JNVGRr?~gpC$1MU50^}R3zA+uraZDz7e>k~_FpOFaN zKJw;##oFi`I{)c{4gL#dg}OdCb;%*A$)-?fFl`#H7_c9&-w%SyL$**8mdK3UKJe=f zK0tGVMe<`@RMuO~)3fLDz4JUlTOTWRqqzPAe|A5qfn0Re5e;7?;(<#|<$6F2GRrz2sCVz|{M%c$;axAoycD+4^b;&Mz?FCKgWW z#us0Cr0F-9y=xAh>0~bFTWq!y@qS(z$s@)Lfd}6tSa$Ic^D27^rbb`DVfJ)96CA{* 
zn%kh3;}bY!uP;wnTZ+q`MC4-Iq4M5y8E`vbGcdO;+@{|a?D_aE=2sZu?cNLFw2vdL ziT(p&nAv({__#}Pki--8~t|Gr7R0<)*I!yS)ip#P8cd{J3C zH%<3vt%fz>t)l#4`kvnK@WFK@yJiUYzTNWdbBDnwuATBkt!h0_De{SwBJHLyb2o&k z{a2vhjopwmvpv%(?+jg{OW?ZiVSL=^K7ZM&jk@}81bhiRK)K#l|ZZO zd3XCPnkYwLxOD{8;T04=TPQV>KXo<)i)Gquz}W}6^LFTfQ^9iacl_Nu6c@}m$&<7S z_`x%;_>jRZdHkI~JU`G}wOF!o$f&+5 za$USv%3Yr`rh1iJLf~2H74-)P%vbA(%vpD5xN97QRi=&3|W0zLpQx5 zG)kDJnx@wQ%edLN`mX^7tTvOf4;!f?T6Klk#tQ_tuywzjseZr1u>D6dw2Q6^+iyy( z_T{l%71gxf<|1o1#0mA&t#Mz&WT1cJ`InEOuh}P>x}$gi%{|jQe1nah-J5GV zQLlGc9xiY1iF6&1=GaMf`@0;Z=|woVz8S`id&CK^q)S5@C{>NR$7W4!4V4!8 z!sremut|G>EO5kg#!J{VG6P#`wdE_mr$akCZPZRpWOds6b03>(5STb<{65N&FGyqI zG)Gd>K5r=dFcA-?r{J=SUD2;~Q_1tbb>n<(1rDqA-(q;mM#gHhm@ZXXZ)Vcs3DR~DeW>OVdtv555ceGobBYqo?_ zEV$p&L43JEE4g|}8%}YOt36I2)e=5-zlsYZRVn@s^*^T+a)-rwkhw3LU4Ch)68|Za zI*vt}(_G`5hH`53O2WG;T1zzs`ktK4C@0ij*W$qN{xbe~h8Jv_^ojSo)eH2!^A!E2 z;Xs_jL=NuXsjJepuz7EDqzY8^;=TGdU6)kM3tmxcbZc?nA!ObYUi)rIn^Vy-s}N{#{wUv zcb3cG_W&2wX>tIHYsq@-r>UW51#eY#VrQZY1TW(7%3V0^;so3@(u$?ENZ^DWc;TA9 zYQ6CR54*Be3b3-0XYYE+w+{2>7yPPVcx7AsW*Cam>1DX;+n>@n@dKE6*wVQX|A6L4B_2YzxB%jstx}!#b>uL!0nldJ zan!%mMr|=Ej+f5bOnPJu&Ux2^zYMwbOxt12eY+Dlb;C>6bX2XOHd9GMpx`5AeX`Jog5RL&tRFo4(MVhuZdehB8Z+5p)05^>Wzm}%StPEJeaoyKisR~F2bI&ND8&-U-3 zdpcfJ@0%fHr6;3$moLOkMAGNH+w?y?+3uJ|(rYP?K5HRA>|BVQ#%Zg|o*39sZsMo4 zg_3vk3QqVh2QFHZtF2W+_h)9*=FIF5b+PT?y=+1w8?>u*g{d3n2#qA&pM8gNwV~`2 z(t-sw*aX!c)^whT7TdTX5N~T@;po@V==f4w_J6Iij{OiwYoor-Y$mv58%tr1Nzq6= zFFXPeZp)^oyO6XIa~l=GlfDE>G-l<6`F$Q**aNf2|5B=R!m$3#?nrqA^5u<4T!`KD zhGD<9`}qC(Yhftu(|YvS6tCe= z@5+AnZV2?bc*wOFsm73WJu~QfoYupfg`+4^2;P)ZN@9m2qGX+9UYH@brk{M_J$ zslnN`*q{@2f=B#nfj9FLpnp9(y5>M!m)wHIm>97M3nI|%&^hKE^p}x-7d%S!Zp4VM zYB8IXg%diZni(_i>6{bX$D1)(0nN+A+c#Na^>C zkKbX(e~flR$`gL|*-Vw{op3Dy^X4Z|Ef=D{ODn;VoIDBX$6!u5jiiJ5S}!wj`;f)f zJU@gkMeb;J>j^l1qrIhc>Z(E)+)o=iVAFvaAasxpt3%#t7hz`)bUK*@#m(z6;UUX9PxBp^Joky{|b4_7m_#h+?h5|b# z^qmak&*gS^kHGA|4iag9M!pssv|I$vjpmS^8pkL`#GhHU+K{v&5)Ns|w_{h|*;uIi zop(Iw0rQuB#eZYpFrocPM}Ym$Ch~&ZIk4h;ZwSn5L0ZoPg&r9;F$V?T5tm}5|7a%i 
zb=`?uDOc+(B_n>JA|bhDRVzq$nGzZ(j@ zf+Mc?<$`-iuVKQZGo180EHe*;iIR-OKN`~T66rFja9*Zx(HO#O3XqmJMf0p@}*HI4$ zy-5CQv_d*u^?Uk|Sp=MC0yl`;z`SBF5=YpLOX~<_VH?0^8^Y^7<8g1F5h`J*s3}fb zLi8HA`ZXzc-CG@1Xmav|!ox{~5rXfLIGOeW*n;||CD7}_2Oc|Bm-bY&J~Q=tfbgj@ zU8hR?A#lAGYiq*VAaNQTIkibC!T;;|co*Q(ycg z^=MkkLWh#iWE3+-9Lxw);LGQ;IR5Q_q~#ajQd&nud{24tfl&_!DwfAW`qmi1IfM-k zf>V`6uW60Iq)&`EO%nCL`pZjBJsuRkvC{YkBb;W0Q-Xg4=Hkqd3j$vhb_BqtpPp3M z;W1E7;$b>xL+~^CBi^gYbTqGe!ahAPMaOUFDR-74%?IiEg4|EE=D)k&EKax}8PVB*q$4=_8gA-M=R?}IQAHjJokyC!mha{lL}0ILCfv8V6(`+O ztAoh%Lh6gA!iTCYn>_&03n4$ipGeQq%(U@-f8!rYNpQTq|L) zR59;8Qm(LxpNkagjnwK&@*M1Thn@mAk*)#%81Dtb3fxdP1-s=_Z4US?d?(L79K)!m z#SMElk<*tQV-!b;G%EaXh+Kx~5g%}xTvs)n{DOAS*_BgO zU<>q8pJh&k(IG!M>2mbObP#b8{RYx~xUc0*!krAH9A-=7+sP$8?r1*I-Vgi7#K6Y> zO$o1j*^;UA!TxaaSFxzvKu+8%e6gm@xrW3SN4a5eo-)6YmP~cTolf>; zo$FP?yW94{C*i@S^*Ct(>L-v^4EoE5_UO&V%ONN*;n2Z!un*rF2%Z!;$(q>5G4i%j z^_)cnI05g%Myf?u3y^db_&jt*>ItE#k1IU4uCEdYfY!U7q#Ijc2iJq-^L3Ck z6MhNoEMwyaBLB&6)BPvHyGUBoA-w0t_T6fGioy?zUISqWrWSXT7q6ygxt<}~{|u)d z+Ez|^${MV3A+5Ajp`XdmLr;O0<1`e#A-7Qml#6SDVhGo*ZF$0cKP=2B;Pf5%dQ}?B zxcrEx2KQr663?RF)5+}G#Hmd5(WuV>!fzP{4}>d5rvx{N{FI1Ov8vG#%7uaQR{i!8 zc?f*c;{c<+9X;Q23WjA?BK5?O{E6s0@FQ)1G3q&qo;~%Jn#j~Y$`w6A${)0PNqdIY z@yDPBx^xzk1CaLQ)C*9>`Ummnf7V6(fA2?#pZ(vSUEQwsQ~r1VEaN&gNvgGuer@skc!{Q32_{PGbKS&UKD-pOO0 zeucP{udt@^PrhzO2mBF1XO)%3K=O}7rC#4F5Wal^xHuKya=u!6TX_{um$<`n{*$FA zwnpzlZTU@?K5Ewqv&7icXE$8=xHspZ`(@e-_VHPE-|HHn}YwL|Ga zH*CGGhdTMCj=U%N4!&65Lb{pU7hQKYfR!!-Sn2A)CaybqprH;HD^z{mnypt()mfsAtm%pd%4Ao zm%Od-3;cUN5?eT5R*DYWKyWO8xE2~C#4c&aUc|fN^>fB&*WO%(YDZf*w6~3Fxm+KE z-uf^RSIS}NP#T7>99zgaCv2hLxea__AA5KZ7(+RJ68{zUQcZ655Oa$OHXfQ;wgXx5 zA{&+FPsv}=5|4FXhBL;6D53^Le6ru~#P#dTc{v*4i=BN``VKbk>H$1sx&+2&be5OX zIoQQ+aX71WGBoPl6Z-cxr!#T__=-R;)En6p#uqy%m;Nrwbu^fcpC($#5fR_`n=YAX zU1+XEKT|NAp0)ejA%j8rbS`oT&J5;ro@&b@ZXD!^kK0Ng(kn4#Pmbb|?m+u+^{=gI znkx*|251vEn`i92CB}l=+qYNy+cW~oTlS}-8QSE};4`*8R4mfA!R@)%MBMQDgVBhx zJ1EhgV9jlrYV0eDalTl;17^8R28s#z-kM0>{0W;RJyM#HKBk=ILr(wYv&a0x4f^%u z^*dKV*|!q{cQDRj*c 
zB37cZ${n9)mvA{`3vc17$!pMDYY3QST!e|x83`+J!YW<4`OpZoKHrG)`J^V`x~Uv}u^lTa z_`q(@%fhT~30T!J8mFKC1r7ge$^)+fd(*iDUr&C^uPlzkW`PFiV>y8TTsi{Hil6h8 zpf<3;+6+3BSnwem9NGL$lVDrXJv`QU9FAJI87aS{mupL)3P$4boCG>|cQxzdbrboRocMk?VR=sXz4CQdkqV|DUj>#KWUtbZ8@i?I0+OVmBEoyGU}$4OcT zkT8_$v6glY5GJQGYHi=FMliz=o+m@b=u%we$GN zb0kE)sKcBC8lsCuL-nK2RV6084)!fL4_zlb&!xB_;Q+kP%;2Ne9%P@{e{59gAAT;f zCDZEHOpc;wL-pns0bxFh`F}W@_MxgaW9ete<7RdCOu(DTu96BTv2vY?1 z!O$56N}WZ=q15;~7B3tlQ||GeHK^Jl;iNxE1Ivq(}B(0>I<7zeP@MBKeCu33jVH%q+Gek9|oLI23bzW z$uC0~aU*}UVFAv4^Z+fF9Rc?S4P@d7aH?7l-HWy%)fN!O{bJj>QOq`)q4^T_pHmqC$tC3cOdSQ35S(m?(<;Pm%TV+$w1NohjD8? zA6(#S4|7wrS?2WCgvWtW{a-ziJ{x+Cy#PzUtrodUpPL9RdKlnPXIgg`kfg2CB)&p_-?Sts&^`cdQFF2{+K4n79RFB_a}Vw3f-81aFK3H?oDnsr zNDuYY2|fq#)s&JP;%=&^ra)Yw#)SJL>0VjHf@%+jIv;}Q8V@mFNVvr5y7|^CoSuXA z0Qy`ewDGp%T%q?eWAm4c>IM3=dIrANua z`}8>BX=pC-FM6B&Vrz0c$O0=@-`R_{qs>*{U3)oU91;#nB`r-zOAi!y4RcyP6u*xi zSEc~v3f>u!rNo@Si|d{1K^NHqi1W~@wKJY?*HRv{W-1V$FsIwoFtToYMseWczm#jd z^}1A+-S9Ldv{b;T@i7s5R^NCykj8|tl}R`%Zjf@JZ8k)(_VQ-kK+?V|2!HxB(nKOR z(KY@cdKc_R(puQXbBNFitm&QzQM;UQMm2p)&rMnxBh@bm9Vc+=LjT2ZujhNrZg7h< z{}gzBDpNCKY$wtZ<^1+YV^JIY>`OO!k~9T{A6T2H3XQ?dUu{Eu!=7;J-6^Co3N6dx z7u`j|cRK6M8mShQ%1&>Y&@O9~dPtZ_pWTO_F11BFp1AC=_@IfI1_u#4?_rb4wJ#2tnp zV|}@62Ma8eTCy!y>%y&&Ec|})4Hq74N5x$%D>D*45(Mv(uH~d{xZcR^*g5JsI^GLa z38xkAWs4Taj$nQ3xr+TBKepd0m#@9~10KyBg{0vns!yytrk70I0FQ>opyS;`nD*%w zEDPQTG%xDEg5i+D#|@xZG2OF8xuipxNAv-7nLdrtSb+Ev>iob-!$lv>Wqi(E))6;wMJjTi7Os) zlaH4KXYx>O6mf=;4cF88hz-!wvoouF7mcJ>RpKLgXX`ppci|Q0|4~aN?~QF8!||Zn z285PvwKG-tWpM8_2={JW&V^?E<^F~(ZvKFKTzOQBX@plm9s(&&RGV6yY9F^0C&97C zxAAnzO%z^6R;OeCQRg}32P6Ngj66m6c^$Om@_W`OKAY;NcKojKT}V%9RPJ?(@ZJoF zm&ji>V5IvwBHu#@Yxri#pQM=If~><@K}=^4e3i|#hf!@UbxVl zl=ocVb66x(eVPR$%_&lyz|8*1>}GNn5>K9KJ17-q+Udwv|IAe}*QE7<^d{_nct_KY z_EcW--HVY|hS9$H#M^b$D%lG4M)bsjWj9zO?^m2O4fe7-0zx<5GwdsSnI1smlG^&G z-Uy3eNy`2a2&3CaTO^YpM7pU_^6cnC$kQ|`$IXn-3h?-98od=YUS1Jb)tlyaT= zCb6W;`f|}Tpc;eKpJk99q$s32q5N*C;AVxi42yS@@Y(4dN;fu(^?h7Xt0~CK!m-b- 
zai(1e-Wfice84QBk6_i;Am-jFKbQJMgd;0RcP8P1?>VFi^u+ga(rcnWLEewnK=<57 zoOuYwXKum~dQT+6Zke6!hS3*yQMjwwL=MM!wlBWVJH_G&C9ei&1W~bX7qszJw>`+DOlzPzxo#=X*8Dj zco#oVo``~9;%(bW;+iO5kT74Oz6FTfrQb!;i|kxvJj+Npjlw@1o|A~tZB|mP_f&*d ztw}i!vD*}B{wOotRsBWiP)3*{G@9gY{}@Sc!>H1A=v4KW{Rl6{l%PbQaUyXR40oh6 zf$N{bA+ti6yVp8pRSz%Xm1gS3% zJ{4QZM@i`Y`vFwR8H}_v5+2lQ4U=2pjDDAU(qV=Btu%PLl)mfqdLXUEwHD09dxlxW zf2nj{)qkjab{PL`dX+Wq`AS3l!AZZMv;QH5^e`vg!0vrF68$Ai__K@h)kvP2+6h{o zN@di~QiuyB!dOl|7N}RK4w=;uKCEu8QlEf)*<6+I3ns3Z!hS3_R9{VTrT$4Fd+p$= z5YB_9Iz?!8(wa+=v=znmG#;IGn>+6+5PFt>yPOW>>yY|_V6$~AbD2)}J2rNMhyJA! z>01!E=wPVFy9}V`GZ)KV&*Jlh7Mw6y67hI`yDlg{+ES!V**odcmDlG$#S8Ho~b%fRU?oA~KUS#(N3*X5*PqYvi z#t7Fqd2K$t+qBwzUu5+Fi0gs8JPz!=1*oqn{i%oq(r&rbABFz&EBOB5U-*RMQ-n7L z!tq?;$uig7A`kQ)KBWH#bDo$%H`5`txk^3>{06Jsc7YR+?~}$1+Dv}8DN>9e0s?^Q zgY+_!ub-oJ7UUs4($5JGPQA-?PW=vfe~Y$)54gK-7s01Uu|t{%!Z-;je*8k6mMC7| zQ;TJ)br3$HW@|J2u{=unbMW0A1m7OjfNNTgJpL3-+>2=d?}wEHjo9rE5q2s z9-Mlee9Yv0@GdX|(U%gOzHekMYJP12>LoG37oyilxi0%O?Z{UdoZ+Ma!Cy}m9*E9P zjl}QUqZH?yX{a%B;>1b9W8j|oyJ!ujm0Ykqo0I+&y%7|j8@E`&HMdpj9~Hr?seSNb zjg2bSK8Qd6|9cVtvqzIyBcV&L2J~u3FFkrSqL)6s8q=!@y$tBpoL>LiKR#7=xoOZh z81{B7F8kXT7ujyX3ADf2oRApYb>k9l3QWh|DRHsDt#!(MtE>3uY$mU?cIC}l*n`QOD?I$#MP_ZhbY6?*sZ=-22-V`Hk^(PGSo= zI{Ghb>=(qxEE)y%Zv^6akJel*Xb5IEn!?9Q3*5VN8P;{H28SE%`5^WL)!i#J4P$a( z-@Cd{ez6h0J9hv+XO6|w0}a$Q1{*QE{Rhp={=Ud#Blz^`LHO*+Ii+%kh3wv|1+zFa z0)oHXgGAjPa{QwM=()8TyA~a0YaYeIkAthlT*J_5pP|6W7+)-ohiNtqW#%6R#>H(g zv+H;~nQbW-?`W$=)U~DaPO30|-6d}2a1q4kgf@D29@Bm}_^-EsK(@-|eX z+KBlP*8#K5)42F|moW$V{jb_EbLV*3>(OQi2ydj?^a`Ov`}Se-X;bF>!wRCdk+w9i zr*^M9#aJHxu6r1{+`FmehXo_U-!W(&F=A|8zdPdd^s;>t?^h3z&C@ z^QatrK+ovw$)j)4J$q1=;hKImQHPo;dE}C*YGo-2@a7)v>b)^5+b3@#sO_D z4ACcIE&rPK1V(8$ktwG9%C(t%MoA8A%&&ycgH7ebEvu#519$TSA(^=KUT^$mbsu^^ zdxYI)O@Q)!-?8Sj3vZNf4G*Hav+8m5f!#$kXwt&vdN2 zI$Y{|Z8w~6W=78+)t8ML%mlyL8==bNAY63$hPKtom`wed(o4Vb&>$^!@bAv-JMG1j z6cxt1{*Hx|CkHj3+~0|@!8*rT*mqAlpHWn(D0V3jKQI979fztW)iFv;-YP7*@3@7f;yY)oW_eO8(= 
z`d^r>mp@NkwvflZUy6IC?_oVUbVsT=T)X-Ldr<_H>I-+&`pmg4h~KUkQyg?cGvG6wx9h9fDZNck;| z?`kRXRi@gI=C`B`mSQ8Yu~LLoGq`(pG?d5g=HWA=8096lo-&j6JdWT&7pCAJOK;q> z;3w|Cut3ZmtG+>JC%D`2*`96qoYVEdHRT}h9_c2Dyc5e=b)T)j8BNJVUv4m(G03VJCwJ$ZXz@749-eA@+ z$cWp3VYisvxnvh}OS}xXnkVDnotHq3O62wT?VwmLg;p~>aL$)uxN>tWezz+CSl1rA z{V)T+eZ^eS&1GjSub_?QaW3UG5+ASuC#%?2X&ckksZe|(eshCYgZY$$*6?D01&{Kw zz$JALi<)DzVk+RT=TrW&;ZU~E{4!3_HNe&Q4Pq-=iWhq1-RBNBlK83NuVF-LE2O!= zC<~B72h>qB)E!dns1inV4wdKQ2@6ZW`0fgpy6im=UIFDYSUE9F^=_g11e>vlx~JH1 zgKk%z}1mAy10^3XdRKh-NZPr$% zJO_s*gRoWCrm&>0IV-=ol`pxb;euDYk87lke*9B$rDrDxS?R)hT5~928MD6=RXY}q zdpxu9>CPgnud?)B1Gv3mIqtE_WU03VD7ukU#D^?i@0Qa`Kt9 z^uKvvW`BWc9o)?JOgB~Q+vjk>4|6(e09p@{`*%&kPW#L->t$=y*1H4skG+Q>`L1%n z&RMw>Tj|>7fi#Dkm`h-D)!y+EH%yKZ`6SKFGEjp&yJJY!ZWP?M+op>=v;Qw;Y}`0_ zHNP{A(O-^#+qG89R;*)Gdm!>)q>GBF@qyU8#Z@-eD;$={A6b5gy@(f{`4ptMPYnd( zR1kPXe24B+wPcfOBY8yXPM{b`$*;rUQFu0U@3n&;3{=^TYzf`=$gu0iPMH*))v{=+vr8(e}k91W> z`ld3ohi`JM^=E^-W+CSNc+Epy%J99{SbkJWfCA@WB<#ji+pb8O1BgSJ$j2ioqm=w{ zW0Y%^ZDqmh8P)f5#aI#`jNKTz5h<@>pScTPcyFJmA)GK}gU0maGcZm}L%MFkXDob2 z9UT8nWzpX}C4pI#n~(#EK$?r6T6YH)-s`B6o?^slYHRl`@U~N3Ed8(@O1zxKd|}U5 zZmQ{vF??Z477SWYVz)T?ER@%~$y~DNjQ6+=?0W8T5LmRp=R8&zb;iM#&DHm`hFL;S zs2R2cR+R?-~`ipItmUo+rds$hQn=xbP;>(W**7! zLU($`{0&4``h)k!JE*gJFAPr)BRm^{Cn}rJe60t6wobj|^AR6=y2E8{XKX)yEquxz zst!EZS>zz-bv6LfZxZD_uXx@My8bqX_i2hX}M4E3^%-0kB8Z|ROyApb;#>E z35^T%|7F{L^9l0}&^cx-6eYjGan*gG?be4t+<>H0C7}l>4}sS^&o12?j-P_diF0b$ zs!@50(5Qq}%%jQ$t7Cg0)h~2jkOv>s3{Lq6#2L_BcH)F1eBP5#l#N}5CWpI?m|PU! 
z6a~jSo9{u=N}Rq!YV21J3$rxh`q|FaV}bMw{Z0u?m{Nm&^|~umn_Td7YqMR9@`S|% zXXK{5^@Et~Bt{ww$PY+@ABKJbT>9S|E^4!8ZD8%s$6D-#EVCI%T0#;yCa~G&ZyeTt z=mqK96F`4VL%H&M2!3Df$J2H+kv=!?!-IZ|K#}j1r?~}zSI}h6aJ-(|i*??89OguR z$nE8lE_8aXMc5fD)KIl%*EH)WwMB05B z2p-AU)>Zw!@F0-Z!SjB1aqOH|ynpmfs>2)PmyVG3sAh?Ftz>_zWV~WM5GjYCg}w(o z88;QW#azI$6@O6ZDp6;o`&IJ`HiCz6t?_^I&LJ9L0#g=g8Y=sTIzKsMd^z4J8*?yBrTGO#3MB%x*A>3F-H@O6iBD z(*2r8_@7@-xpf4cJL{8(gm*$;v*p!iMb2=cJ%tBs>rdZp{GXU}c=2E)vtD#gc(R<# zu6Jl3wm)qDg?HRCv=VoJDxA>3gjmw2aGz@E!il)HsjPe1OnaJLy?N zx-MzW@DcPJek>PWN#ujzXS(iOiX)Q#QO8{Cp&I=SWyEJN)}jN}yzUQmUO2*{W!E|3 zGW1H_N|=6@bcnIaV+~~T0pRROYlioI=K>QKG;AvGJ*%s>ju-=}w_FC0CIf*Pl=~ob z8TlT0TD>;36!HRp;akNgmA8PuuA6b1O(@JuZv#!AU1LkEje)cqjVA_^q8baYqSE`> zwQnn-t$#3z_b>2qLa(&ea!lS2h5Us|cqig16EASm{KPf=@RHAB?)By=^tlzusGgy@ zgGP)AmT%Jo!r0u`m-R^_WFzq{M5;RKpN>t`Cm|zQ_F$nCyQso!Ie~BDrtT4qxx!P-D#*@(u7A|no9m6A70(_1k%ndxbsDBx|;SkG;6D>R`&AD z$p%1vgb~N9q?eH5jTvz@#2=IS{w)Vd4q zk<3HLl#q}Rl8BU&xw$Aq)L!S7DI{c!OcgTEw|RWebsfj^-|zRne|$TR=Xmb>?z(ns zuXUcEY45#uhSgHi7kNDIT?E#NJqJ5euL_N?hO{`(4HI(|vBt#l*ze3$+AH2%tC<9E zhK4Cf8Gtg|cnf%6%LdE9B5vfo8rx^rRVgQc=mS?s*UZ2>a~iXU_TeZz|NQnXnO{)%2v!9j|X0uVuVYsSYxgv)l#VznL4qZ`qkYN;_W7I{hT?_<#KCrOf9dHw?WZ| zr0L1m=5g}nU_$Q*?|lC&GwS>RUevyYV_Foj$fp~b;0f|xSgL!2(YV!>Hzd%jF5^P4 z6XtWu8CXovEaA1#u1OccWqe`6WR1X@MjCIN=1?ebUZFZ5?Y#mun*(sdjU1r*<{91V zsTmJkCF*;ioP+Zl3lY`Is1K~NGi7W!)3CTB!K4jz# zwYo&$ncHZ(2iLQl5f88nrtLxWtjIK|R**CmEckO21Rezr@KQtax4?+YZo;!*YV(QE zI`#(nhGX!(3+bm7C0f3Fyf~aqe?JPaOJ{63uM0RODlBHQ1^=|jR2`kKM|FD}Nn;8w z;38XF6|)g9Ca)2hog!+d^35(qXpXCG?sA$-Bt1`>B7wNQR9gp>6)JR05W1K2In}Qo z{B)$}EGf$&UO=N6bRX%0IYL)yJ-o;QDaYf7POs;695&;`U6(}dBk_pze$YA~ohrf0 z5!k(^633?=5!ws8FUexXz0AOE(p2G#wK|@3DI*QWNvDv1AH)rIHCJ~ud85rxI^Pcg zt8K;f9{O?~WcGzqow4?zyR`mHc;n~kuW{i;Z?R75`V|b^wvB+xamh&e5EGhGNgYR9 z-^~)AV?&H11iqpR-G4#a6UhUJy1|2!n{(3p;8AX&Hr%rhu58IrDF49c$rd=X%9t-N zKO}7{v_Q)3_{F=WC~`5x4D`=TgT4N3amnFi>^$>!rBrD7!ezlJFrM zg{STkcm*ii<-h%Xl&I%nG;dSMH?LzNPa;f*4Fh~BC#{7-!;{{ooXG%=oLdSRrp;x_ 
zX*v075c*f=*lk9&PMXt5raHv;O|1#DY?%9(4qAQWyKyP3U%>^gD?%3wjX`}S>!ljW zl-u$P-EHL0J+dih@_``@?-x@4;w-C&*xO(Q(wYkGN;&%?6h7b3?HbbhGG=!iyBN(E z*a%xLkKmM%!2yTcBEy5F_365ybs8=j?To$sS3$$9P{?nv9Yqd!`_U=pe5Qv?`i6R` zJEQMY{&wv{^NqRSVL7j_aJ`6 zwTS=yv)G?WJRc#hLx^h?;=08D*R=>s`pp0J=VU-UDl=Qi-C8^G|9m?0?CGv}a8fp# zm{HCLZhwqAjmwpy?hi3yNnLsGS|4m_;-s$LmgC*q+=F*nIUQf4Som zmTq*0CR>hzW$j#6eNmRpi`QV!y<7R{0uwfzePstHwZ%FGlfhT-ENp1$$Y)=Rghq=T zFsV;A&ir?fum8J&Z*P-~7O$E~t-Q;@y)ch|>ky14Q})5A!M4C3{*cmI8S?W*x8c{8 zJ6NM{KzV)+W*@ALoi6rLCdkv#tGpwgJTx4(q}a*>`=)WvozdvsDF$}4X7b>T<=AXo zBo?Ssuv6oeFfTlfwfoNyzVc)kym1tU%#V|z|4fB?0Zl>w7&k$ zI>MJlAHaiszq5wJjA?%}l~-R>;m<~IICfzk=lx&8&cMFd`&29S@5GMmQ%5V+pl}^L z-c9$H8&Af9AZND9O$J{KlCRu;1P-n}c}egxu6MQp{>q7x{8~NYktW}=KJKwkbS8ZT@=2A$tNWM!7^C+dc6tz)S6iw)=3zAS_Xy^2`H2}z8V)`adErSG(wV~hpP zBe{Hwyw=-e(#(JDpw7&6*c~-ivmjs&UjEv+=s@Z#e#pI!Of{oEi`&BQ+dA;#bN7{V zdrrZAx;9!;83>gb&hRhN3miv1fW`40u=3j!$}y<8OP8(!DLK>af8paN@G_hf!(*Cp2iMaq6&pkL@6)*T-3WQ*#(zBTVjOzS>WN2R zwB~`gqmj-Dbo{gUwX_p>cTlM^xu}-x>-|ruy`+%6&fUY0Ejh+^3_MKpxd_`lxF^2L z-qU_!j;Fn-Uk6ggehQmZzQ>+xD%r1J=Rr53m>V6_#WDRG!{v$Vpz6SW&>NFNeXtgv z4?!?3+X7qaZ(<$f?rQF*e17yrCQPn(3LF>IXEmnp__*#HVNlLyPW=Io-;M#o5JvT& zF0HD8gy7-ud)o2B0rndS7h2(seTOkjXIY`n#YE_Td?!y?R>JxxQ63z%1`HpyPzT!3 zdoBIL*pcND-U*qly4Ys&M&~BroC)TbBby-|L;dO$4OGYIyfp=^bXp_TK9p|EK-UB5 zSpViHsb>FQHW;Y3M$Ci0bsk}#wv7>*#6kU=3vk7>DfnsSEqF823p+);gO7e1@V`pe zpHe+T+%B>ei#pl?X}s(}Y9NNR z?ZG^xOddzq;p8kkvBt{TnmD%dZ6xMDX~}MdFF~=MR39?c3gN~cx|Z5RjqQ>_YcLn@ zR*!`zXWjU+Z(EV-6(-zR40o&Vqy6Y`p!JYzcdCJ9(Jk?m-xefXV5xhjVA-`XNaKf~ zCMxSyd=N{UYhd-8Nzj}PmVerWqxv}wbUPcPh0kV$nb+Xil83xshoyYzY) zs2<^=)owH#U?D$_mB~kthCrh_U9n#k-J_ReuEvd2*-cku_jTth)Hl5D!xa#o zeuPQQnjmq6tl9Am2+s%yGjP=7gYc~NKKgz>`&pii=j!C4ZPNlypXEE+|K(?VqWGn0 zru=j0RoY+E9#0G~m49DPg#~7ZA>~;CEV%T5H*MY>uA8T$I6nW812gVz<%B7Uzz2u( z-im3N8S&{W!9!p)oP2D$u}nP)zj`*sHy&ARpw10wX4y%s3&wBX$Pygmv1#*@pbHf0T5Ct8B~o5+$$k zbu^X2(X*p1Pt>PpKmv;BeIXCfXvbB}ChJ6Q=C>W{ea_arY55HdjLzYe+vB-rbA8od zh?VA7P93$*UpMw*a~;|P@v=l?RQH{4iA$@da+9O}g_Bm~LwXBc-g?k5dWLj6jBPPS 
z5?mq9m1>UP33R{v5IByXOmlqWdKyVGLm ze(&uN)4hwlr?fTnTXcmHE<;{pZ!kKx3c8mS3JgH!>IvX-XC$MwMyd^Z-%bMf+uo3N z40Od84o!i_sCakl9u}1l2Eo-ESP!%Ha9T5(zt|iFy^7nZ`X^11dPt??D+WV$2wlNx zZn5#a-Dnq?j-TsSVyd?@5T0mA3#m>CS-9xce70D34`EbOnQ8!5Ml^;=FCFB>{N{Yz z_ijjaB=ilBKGYhA?XN}JL|^C&!lev+nrI0PYZ4)UR~h|oHIN2C`n$~a(_}asnhFVy zmT)ETJrqQG;6u?xj7F=(}+EZLT|Eh7t@tevn3OevqPNH1+{uPa>?$~$IV5+4F2OqLB=>4g`ijn#p2hIl3?teZ$+Fk3J^VR zIAAOBLLX5R;99S-Jfn4)=s$^k1ep0X0>XBU@0=rCa1-$bIz4-)tnTE-$#+2b@>m?s zLa_AS6oqh6p}wKdyQrj#l*UDV5IWsR&FGhm-j-wetx=;{n9U)&S2vU0N^Z+m?RQ}x z_nOK*20~jPt)Wbs z0Y0DFPrYxg3294xFghF`N4&)1s(LE%K92vdqspR;W%5Y!^tlHJcLEUYE^^XtI3#aA zW;~$%dPZlm@5kd|XVWM~^#`OMX^gAM$I-RRiUdsSIG1(2(jUhhjRevUK=Y5apEzT; zlietnIggt)hZt!%PCNiKc9rm%ziQS93F`%?Fq$t|`}_eDn)mjCC=4jstkrvjhot=s zRl-Zjq<;_&`c=vYUvGotT`*E*uon<_Xhzx|&ZL}YmEWFX z>lwB1>!ORSpHnOzb(=>0ty@5zNN(}G4jMk%&kCGxaiIhAJ55l-zrI7q*Kg2sC*|8g z-gv&qM`#k|dYC0VJlGz|M{%kHl(Tk2Uv3YMuV>@n>sd%xD+}x*y{y98T0*mT7jv;? z`~WbObYT2{d0K3^IBOfw`Lnc;vlu#kJrEyq;zHuFu0UFq5r4w+@(MJruu=W5Zb#yD z(tJrkdIJuOI)pbDZz2A9!`+@-2f|;T->C|J&H{LU=obw6+6TfWMescn6v6lWb(8w+ z(zH!*;Pfs!jyqoNbBo2Up2E7mpnZ{krgPH8D)|87A%$!G6kznecsyW5ksI!e@BY>B(@e6T+`0#;tXDUOBurk7atr}1Fc zeG;S(+7IMUQScgJ9Dm(AkCUg;{L~rDm(V?sV-o0@h=^0-+*R_yS}ipBz%@>Ki(AjK z;$ohNn;7YRmUlgj>c|GkuS)ME2bJc6w1qAs&Bq0n?ns=2Cu$V}Wf=guDL}mf*C$Ly z`hS&l6>om+ArG3l6#u!^*W$m+>>4B=4Q_Fbkots^PD0y>CMw|(-;wy1*O&$g{mw*w zkyhR5GO&_o9@&U?Jx1V0yJqO>e-nQy z-x+CjMj8MD9|cqI>;uxbeD$h@#2vqsIk|&zdm9@@7_4k)nF^!@NZ;DEIKuq4 z@~PMv_zxG6IpMtrxA}$p>+wq9T%lW#Fdxp(vxX1Wv*5+TJM8?+5O@^5h%n=sMy!$R zrzaBSG?W7`h8~wX^KIvju$q(fE`-I~wSMq??JOl@zRI`MH>EY(j=_G<`3LLQ(wkcn zY~7O}#t!pSdjicbtD^m0z6@)J)JxEZz*SS zdKXXQpSX5MbD8>)5r4s@`~jRelQOc~%qgJ_IvVa3UWGEF(K6{4_VmRK%30cI@kjHh z6p8;;!fTOJD00g%AY3G$93(un(2PL(j>lxSgseOC4m!sf68R=>tjxYn%ZzSGe-6f%-nRf$*YS_*3$XAbMW#JN-RI_zGX^Q~=c}%evzPnelf~ z_=JUB2f-g`1R@8a3=n3OT{z$kJj!dXt4ny6pqqi*FBRx{Gn%~Z<# zw75=QM;h4IiL#+3{GZ!qMtK$sIdu)EbRMhV(YxC7xo?{ym3v=jr(Y$(@9T~zu*2VW 
zI8ye=X7q_rk{uQ>!WyA9@N7*(>^$ZQw12lsx!U6@W;s3Ky3Nd6JD6~O&JQmL7AlR(D0>3<>Y<%l6Yh|l2U)!=sc%deWq@pbL06!;qh9+gGCz@(BH_AB-kEs3j_{2*)U6?R za$(r|L=oSU`xWlbt|Cu+nf5+y4*xzXlqEHnh3BRmR2{gTo{KiwhiAqw5Sj!%S5|QH z5%lh-Fh+h(^40T{?|nCtJ~f=GR{IZ8$*+mytEAUNUkX3Vg=a2XWg}x*U6H>4=_jQ_ zgI%z_-b@s_@8u3U_N$LT+KIe)8P!oZ3M>q|H3PaWnZ!wZX>}Ii3%nd~oXv4b zoh9-&c+0RU@7|~$jySWUkbJXPD^9s2j=kBGtucy6fkll+zNYNtE~h+NXmFux;cORg z=$-NzDWeb`NUqtxf{UC;a8$0pBiuZHS#Uqqq%JlnnMS!^CZ`;Z5k6@-ocL9~Zr+Oc zW)4#Q@hbPDbRMy!SIj`@-{Ny)v6c5IRXp<`e#Em4|GWMmu0{Osx`enEAwDCnNBqA% z7eT-NKmYverNYZimhx{`Q;dv7^mpHbDl5;g;Df=yGJ*;j$th@>AVHG*KXzJ zT6*t$UMhwb)Iy(XL-uUyWj1EcL>4|f9WBPBqJfJQeA4s6VV0NJitKoN(fcP`5~+)W zjui9ADMUz+Y>aI^hQONTcUaE?1(OcUW#PZ_A^z%8n783H(-b!NeDi}`xX zZ7{vzCUkTkg`NpBSUtKA-%~b+35~Dd?t{tjdPq7?UY3NLj$g-;CVOyr|3U2GhsL<| z{xhUwW9-I$@_$k5F6Q9*_+>2-T=L7vtl^5KU-dKNtvkh9IkMD12czz=6t+WG8y|Tgs4`yoD z<>>5Xd_ZqwxwP>sRyBSks>}CkJePVwV23a8!srx4c=_NW-E4ed^%g=qE`j1tx4A#Our_Q&QPK>Fmj^4k6GU;R7&&&strgv54;#O>Em*@P! zhDXfzRucd9$X)h|%b>lbu3=KA707yc!`u4qaPzAP>RTO!daHBc!?pfU750(2bhRvM zamgCugZIN=AB{TtR6cetwV~@T{qc_HH~9R0E5;angswqbc%Q%fL@hzs%~|MGy9w_7 z!&KKgCJ?jzAOGECp%k$@QwiF(6tf#mVfw}6am0?-*y6=;X`4C#eA|CwjWW*h6;dje z)%nA0&hDe%&ng(|rK@(;RB&~9bNOl2BBYwY#b-Fn=yn_}o_Chpr5R%ChpYU}>~k#Q z-V-Ha<|n>q$yB`m;0T0Dm#(~Q>v|CNJv4O;ZZ+Etu}wyS!7gKn-|7K!vz54iZwCxK5e|bFQXk3Y zcsr5y1MwD!`j;mxn%%GNzO(!Qn@7?Wd- zq5Eb6%`bf2X&@KnwvY*zfbfRaV=>HL5)1pXo?}t-I1JkMgT>cuV?HrQwR5`nw60n{ zWdOu(yuhE%SOpGqOu?h`6dZ6f0j=KA9>HUl@}*OA;rqA*)YHGJ=){|g@8QtYyYM!= zwVGj-NIhX9_p>lmpU%*gZDRsK)WGkdS*$3wwz4E;H@z>SD}FaViw;dRkly1Rj^1x2 ziQaFMSH%3chTyT@PQ3DPhBV5y7yf!wz}(FB@pQEVT=YYkM;ELvHdQ|a+~IzEp1_K1OBfvcpC<9~ zcoaQVM>mFfzCs*dGmv*Y;f5D_eWo>B3JX>mOAjY*z?psP%V!(V^QGte$qT+MN7pHn z;lko<=6u~8Y5c6@w=OKGw*%Zi9A;ZGr@(-A?=_;QS&!@3vwSp5nx4;do0-F#j1}P2 zem9;T9?wr)`-gY@%;D4dE!cRb4-j_q6D>!A_s+eTM%NFMlnwa#?0rTx#yyShu~#L- z_^kLtC>N)Twc}oK&QO%Q9|!E7fN!klVesL)a_-U@ytKVOy3zBARM&Xc^&A^zdj_6% z9w;!A|DJOJ^1~;{L0Q|d>9`k?z-<4mUy-;3#yZgR;f>m{p)+^W-0I8I{QmJ5`L99L z0*x1f^k?B~;QPM3)Ja-y88{Wa&5zh+d 
z4K{?H1riXRupMbBNcfHGhts`5+v;k0(WiPHG_*d>TClo!*U=BjLX)=HF0Eg;1_-yH z{o1a?X#?qoI{!(Ubq zMekmtT4aZ_Uvaa9hWwLT1n%*>$V&&=qt~@bie_aGg?JLee|42R$JgNJnfhGxEY%#$ z-{r(-88(;8>=UH8qvphs8CYiDm~Hu8K((Dl`-N=*;#cnQyQ$h_p&p-eu_L~3xl1Fb z*O7gE!|?a1rcn5F3OwQyFn4SzHaFe?gac4$oeZ9@0L}7Uv0u>kijSoL02ra}Ox!i!DXM>4L=2M;Z zB<%jEp<_bx<6VI2g6g=MJxO8G2fBy(`uuKCG;RoZU3-k_qm97g`B7}vpeN0vj>^64 zK&%U4Ef7XC!cFRx-YTsbn|-Z}7T8_YbaI=nR8_`J&d?*1#_c z@j0!TtsLQX8>ALpK=dDJ8ZLNgdZ#S3Thj>#hSPf?Uwde^fwf5ql1>5t3+-@U^gtdE zXpeM0V%?eGwzgNtf&K;um1?YDMMwa5n{{94M(8ko2P2G7j##9?F6X|y(AoyhK6;O{ zkCh_LpE9xS5z;l*VDPO9dxnL>1FJc#&Skov@MwRfbp3rWoAj_SG8UlgpWVueIJ!5O z=9~H)+tg|(Fo%6_RRu>s8^}+-(KVVBJEUX7xL$X#-@~E!Z1qcKRJKtS_<~?hPg4O6B81fWw=vu=hPBM!F10b0A@) z8k3WVb7wh#RlqI~J$wCeEBXH56oJ7~Si)f?`0z%YO3%wA*bjmDQMF-5<9H-JfP!mg zPH|xqO}8`gU2hW$xx^i4{c^cpi&?Dw+I+z=EX=8gOd1z9({ox?ZyzgHCeV9Q0~FS5 zf0>5#htxKBEf8+X7kul<@wE=S`F=WqdZYK_(a({ZimW_#^x6c49 z5ziN&VGy2C%{@|XBh@Sl3||pa$wUt(#=2|w(edP=L&G&JE`$fi~qRy~MF0v-E&9Hl4qktp&PXS~{q$wHCG#n7=ra!qrzPSc!bd;S(}N^| zS%gjESV%akw*Q)@l>A<$#h@!I4j|PMBfN!?#{P`P2SU#-nDJhG77E-nZ2D<;IAYRgEqE5Nc;WAi0@G7^X5AeNSk#cUZMNY z?Gp;qp2ed@i6!W)TudB!3;Z*;h;_g^&9agBUL~KUQvYD7YN68H$Y0mYCaqeaSd_#I ztR*frR{d7>#s4fjs-(@7t*bVJRbjA1_`$zUD+9tjjqnJ^ZXOfbRvJ3fPGG1+>&w{T zUc4;N5hp39IQ@N$e7c1P+_&J7K6)}?ExtdJjro&2(J1T$%c{tf9@gs$w1yxwO>5IZ z=+ZkCR&Zp1tJwFPcDUF*lhIkXDA3sgH= z3>vIAwR?O`1NeHU2U2~|_s!Lx|DxE_Gk4+2fH!Q)n=|-#@@7yA?N(UO)kC3_{+)b6kJgra`MB<^H(hhgCgHg18ji|eVPZky1;+mr=k3L z-9F^0UywWmpR&xG3!a#C_aXCKT8isa_le9yteyOOPCwNwJXEWlBCmX9!UK|5=E4t6 zQhK1kRZk;7q<+Jv-qYd4b8Ff4_DDuvn7oR+T+^?Ez;n_LrEs+4X+Gl0ejpFc`gKep zuAM;s&q#JnY$EWN#cW@R#1R-B+E|vG{o}+}f~%x^&t5UgQGouwMtH=hoi9;N;f&-D zfohMmUOZR(zhrxZ4#M-Lu4oltOS*SAj_4joexM4-I{@{I)EVkA@{k(R%a9%E!zjPO zEsvUlcbg7CSPA6M=zeft5FU=SC=eIm-&;E9`^ile9!%(V${z62g@KI5C)F7{6$u;J z1&2E*der_?kT!b}eqx&6N}=x|s=*_E`mCY4$1vF4rE5I+{7VsirS1)Cr`Ugb2RUK3 z7)tviim?mK5PCz@_VcBOfpP#G>{Cc}xskZGA$%SlC_B;qG|unaqif&uOvkag@Y2F} 
zs;|ZbpzuJ0Gd4*kf5(8xRfu!ASxvr~yX@y?R=S`8vhHuWBEkoVOm0%IeDRCPj<_pP6lmjeRMQ#@`XbpdHxwc9f1d{#&8aw+h^&}H{9n~b=Z;qDp$-_{_6^J5_r2gULK{)ju?p!;Zu$aT!k4^AN3YWqkT49*c z0T_I~4I@5e(@JT7ZdOUWu!8cMZ9x23Pm@4MK#fxrC<1!nK-exe$ zC}p=#(2p^NayzD^41XnJ`NmQLeuvmvbqMFo&BN8KB^v)BjFWj$&!Es zOcc5e37hfS)n0TzoVTcNSW55rWcrQaL+x;(1!cl*NI4bWspZQ@z8+#3~h)fPh zCuy@VG57t7IK$6+}(z1R^UW%~g28K!LA?EoH)H;Ym^C9wKk2S`vCeA)JG$ zo4di9BQX$doW_q9l?e|lyc;o_7#G|9f|}xCZgR=OM&@7uO~J zcP&DE_W$1=P1fSW|Hq$J^EN`?`2V2rh8bE{_CfPr`50MV1NG?MX6yAacr&vX^!!j8 z)usJpyF2GF;Y1czeA>X;oXN$lmXq=1Z-q{4Fcnz-SFJ$VwUnH z0z3ORl>+rz@XSOn7Tm{F(`(0C%w==I>}?Bl(>n;`=$$6E5l3*>p$SqYaig2>6rj(^ z@jRIJLVvA9H#_J)t?oZwcw-Be|2Pe{kzr7ma-XMB>mbRvjK6ZpMm@g*e7}_5OKce= zX>_MZx`tk`a~$HlU%&ammjmJ6e>JrC^(N*v+5>Z5&xLlDKlq<|m+`=_kC>TQiXH3s zWOY*R^Y;!}*v_H@whH!nN*acW~+gR>jV~NKejm6>45q!GwJ?Lnf%$~S4 zVODPE;l`-j7`c1{#1tDsQtjG!FKGj&eJ*5|OghN{mhZ6J;VEdjX&?6ZJQQyawUw8? z{>j~Qu7lyo2UtI;D^9B4O}@||5C?Y|j8h8l@w88Ic;H()uahzsS9Y6@2lj90yL{GQ zNZ*oHW?Adc})_0|{{IRWj@5E~C8+WyU)()4v+Kn$dSj(RD{>)Q$zO0kUP594_ z?!ld81@`og&TEH!m zs@ca|I5tyTpI-{b-_5bA$6~l>-UpsPXe+m~T#5tS*Km)4*|@INA!xdGFWmfli*HpU zp^NcUuM*~Z>M%NRkc*c7t^~~ITKsAT?kz-*O-M2PmorIZv$3xSOF}OZ-r}!V6I5n1a zt-TZGWdGqh{%P3K>6hdiG8a#{bmIdbXJF!y1peimvz+y&0BeoehH)n+F{*3aXg^%u z(li7k+SNgifo++-o;y-4OC9T<#`67js4hZzo&CpoOz}D_Xk!jEPhw3#XW~5m=+$~| z`Ros`-kb#N_b_<7Vkpo$@STT;u$cMR;olKmwda6IxZJHHq|-9c}I%VbQ@wtW%vskTOW&E`hm)R?%bN>66LOs$@F#*>BA5 z>4g!lx2T4OAk8VK^}$9p_WX}YIuF#_4~?W_oO+3|-`CJUcP?c1ii53|PuQ@?kF3~o zj3y3c4EotzU<8yN9|fZ3d+n$#p9`!9<=>KVRGB**dvqUmOe{l-?JZPypCm^8g;rhn zVLLUMRrUBL{#hf=QIoL__77f7&&n91gU=gV2BA`LTyBVA7LVePhqn_fGhHKttP0G@7AAI*CE z(ev7GX#Kjur{LD|uPN#Dou90F+Z2piWTaA!)4N=};b!_Se)))r`csKu1L)dupPtq1 zz>h&-(%&4`(*5*LG)4Sb)fe1+?j%e5tl_q6M&s@*OITL3k)?Uf$8mGr zmzV(c1@h|`6v8R>$(9hw`NA0_9D;9wwQy@uHuUb*1{WnYLuG{@ez@Ek97qGs9`%~7 zh}y+zt#RU1bNJdgo{1jfWi7b)-l}`aJZ`FiyuE%~M)kutIMwj6O;WkQBf>18{zidc zRP(Sqhsn=2oWvmy20_yJi4fBJ94f62z*RpJI6H4SaaULDzSlq=<>19=ZRCZY_CTZO 
zeVM3%G_Q{WpCRaXF{AUvMekiP^{Y^9G^Tl=fM%#9K2_q}G--1-UyC)eBcK2tdyIKmKz`>oIdB2xcVBh+sgGA?G&S9>L;d7?3e^HX zcF&yCI9P+8v+?7lHHtyj3EsAmt=z8REe=1kwe{L9m!56twg4trq_AXzYBpnBKL~r; z3(ua3kt{c#=ThcmaV~T}TtwlPGl>|!@dxiY;4u%|R96n|{sJbO#?pHo4RGV7COFcu zp}=6?{8l*-&nd+9tba{B@$Y-S;fj&^&Z!Iuv!Ugc-K?u$J9Wt`JJEkk{_&V}@HuUL zZ&!A(K@YdXten2s<5L#x1HnMA)+#7id4TQ*?}zj`iN?g6cB}_QrL#nTYyZZSLO(cZ ze*tFr?FZkBI&zM@3c&jb*xha>6OO^T(|!2RyAd$uL_QdM2SK-kNyKxFfX2|x13Yv7uJ%|0<43k0PrR%_!g)AY6V5u|O(e~sCi<^sf9mK7-jnTH^~S31!?tmqFq$s!cWbBdb=>m zc&NZ8&@pPr?QZX3y*^F^+DC`9RaaGT`CRY2IQw5`5cBETH@MJb&{+2D(KXh3{&aX6 zor5c)Y}GRT!_wV7Hmuv>r=0#9d)0mz5GS)glgIOX*8wmm^Db$sQnu)0Is3e@igbyd zOgs*$yYyhsKW91NL>tWbSjxSkuOM*%?ax*XcMd#bXV26VJ*6)E+#3usEReLBQXT6- z_5Tq2$_b>w)`P=hPqwo97FKvYkXn^IX91_j^5Yd3@o;qs z{%lYQ>HX5Rx~=`pXRy#K3|;e^!Um_Q0*i^)mjP*9cvqIdXpOK;&qV!b58C-0al;9e zuzXOj{e|SQ3xT+t)4D^0_`fV{(?<5bMQxepoxKff16L+2lg8Ryf>Rgl+52VHoU{pX z$vXIHF3E{?Kf{*BE@EDAUF#9p(bR@nyy{Ze=2RVAwKfU}pQQR9-@t&)4CjA40v`Ph zNF(hb&e@DK&#arlVrE_aO7W$=s~SB&4=Dp1!J}73@-^SHbnZ#;W2QuSJ6(!ev>wY0 z%Aw=5C0cx?@#EaVF>KX68!?Ak&G#^2CtJ0)4zt{3FE7{JCaj;ut*Zl(@R$khK^!0n zjaoA02wz#5i}ydC;`jTGfXR~%VB@D_u<%w_+&K=2=dQEG?`B}rwatO}i&qRdjrko? 
zu*WVdEdMo4^e&c8>|GdnV<+Ey{)r^8zdE*vlV6ZV+$zQBU=J;?9O>c)q_>n$!IT|$ za)yNbwHRaLp_0yLM>hsR_b)qO(E8OvU&FSBCn50|GSZH$)q*^*t}I5Q zbkeS`@KJAWWs#mLC0%2qwhF#2saJi~3n%Yr9)9>nzHK4r&X88?|xG?F~G0=R7vm zq(80eQ*^qx5$5gREASKl)Efje_uV(o%>is-rktC7l#wP=&jpPkZoWreGKiC=rQ@I0 z_A=EMOZ_@tV1ee$xz1wE@Zz*PDERlc*+)Lhr@s91&JHoJg45Ne>-qrvY%3FINP)h- zQnEfkP-;DyJPS}w(mUQ#_;&Ma%qTSlsJC$Ob`!jpe2CNM8I76u^#F;uKvU~rDwYrX zA$6Q|2VSG#nyp&_r)`cg;$!exqATm``{VdV7VL1;Rw-_JHb#d`kqcH`6rK*r>!}U5Y(}9& zZjE@(iPt33!mLH;VaSVlz(s~IXSlu4ZSHGVjUepH(VPp6LGq^XfcCrS8%KNBwZ6&7 z=W^0kgpmr$gAa>8aCq<7$w*3yI&s9tJ`m#-jKG30H z10()}OZ7(Mob=8-ps1U?{P8Y!*lT4*#N@SXZ**h`9PXZ7M??JB;g%wXt9%} z_iKgO9%<0{>0SO~Rwb-++@J{E>+LeAP~d?W8`ZQ_`T8lgEc4X*iL7@IdGqpRoaT$S zp4$sZPm$)gWkc_^0^x5T?3}9Ajl=_#fh2K}5u6LU3)I($%if5&=D*G63!N!^A0v$9 zq}vp;-uK9##0jk@QU3`J0Kx>pcY=S^2@MATWinh~5b18re5;GwW_`f~6It;qd7$|` zpYvV5Ly)`*BY!He1xT~2`+hdk;w!TcGJW=5l77IMg26J9p<1s~}chhKxPNVJx6 zH-m3{dTle2BQVmIAh45qUzJ9BBWZYOw(2DpnkP9o6MSvA39JyFS27-2!oHlTrIN=J z*vW}67;zYWFWRo<^^BxAB>k+CCt}3mLSv~SgCI?dGY#Ib3(6+k^QV?NVdi}xZw0YI zlg0m-;BVIn#zGt6(`XkqEg&C;MYQ6+%?kL@PW0~Oyaq_Phd)nVrg4q|s&DnGUo?bn zwG%x?^QwZoPZK0ffPFnsXhFKhMej7avJLZHON5^l9+i)^87MTonin%pt0AdwMXtfc znvh?Sgij$3$G2fweEy(fx(9bL4q3ZhtEEI{<~J`K##!l5KF|~boX0Xt$ zyJ6b#Hxiwz7Qe$c+ye823)s3z9-nr6w(4E+2r}QU;65eGAoiCoR4_nZvejt>$~;jAosS>%U;&yvUf$H~7-xzEON;bXoRHIj*Y zkTj=4Jb}U=PB+YFhr5O|(m!nSv^k9Q1cUDeK)eD1qb`~K2J(AA~NLx zTDg3G6du3-7Af;mhjvgfC*B{Wk=Lc3eXI)qn6#3q4hgd-t3@Hc+HBMP_@XhC^@8!LF)Mo#THH@+))^tN4bpIy9x6_g`VdhvMTu05n?r?O&0BAGERu#E1 z`6r;igZ^hyL@uKUy-nxEmb^KO#Eq19l}X=bJ5v_B0qMViz&pY;(ypfj|1rwCfOH+i zG)aP)2RDlhmVD(q?h~3OMK^0FrzaPRJV&cT#qTwLtWo4zA`>RRAyc-*$cG~3xvb-~ z@026%ATOFEvTYh^ob(9N+VKv`A<{t~ zXkO`lBsQ0N>Kk7s*{QjkbX4!~2+V!zCOi`))(u9&cvU>>Ab$S$z6fz0Li~v5AjCBY z@q6*Sgm^weT&ED%BmRHAFT#fY`~UT4VNz{$^PLI<)ABK_BoF)B-(X$-b%pD7bueSa zZm6~BGP(rRR)O~Ma-2zf4a-`JG$+OzXE2D10f6nbO1w=TMLpCKPPaD*L?Unp&zmj~m2euiex^ThEn{!##s z?QSa3xx?uP4shI0!>4W7gPx|r?Ajq`d7Nf6*!_CM4o<&_4~t^JJv0JMisSIL-VW~7 
zHIL5=GXtwCbIf&~M)MwqZ@%lmJ*lm{VoWw(ow^ctI}~w8y%k97!2F{VF!RkXesbbn z^bCFstM5I7o`cGnjYRkpBd&YA2P(3-y(wnVm zmyV|w9f!u=37BDjf%h9^ppHmPV|_>2V%@TAcxBQ7mL+Ub#vELO@oVbIp+*hh(t?vp z#tJ)V{K}c`4GxAz`&MGT&I^>tfJ)QN-%wR*cY45vCxm{ab`P@-) zY%qK%6<9hYvwKFH;mRt+j zE^Z#r3|H1sRJXx|fB{N0CN}|m4I`{J=BO@y#8HFf&lM(WJ-mmNW{PX+w>$WN0FOVJ3AGwV!HZ#S)bS&(;es7yJl}2s{Jr&+z4+jbx1aVy`)*G$vcYjl)aFHg z8qbXAhL?VP#^P_Z$3l7wxl-p9I~li{{r$2Drg%Jo=}|wVck!Ds$#f!8O+mt88$Q40 zDzDU;hPo4*YtQ9puQcB0!eTb|Yh5)dBMd)guT|bAWx%9kOW^fGBQgKjHPTiME*Od8 zy<7G)ka()1m~6TN8|hw{<-hMB)hMR*?@cuhj;Zy>zFg-`YH*y*R-`qWs~W^Fl-Z}thO1|{_F4+&r1a*xS>_{DWwA$%2G zQ#{^CUH8yZ-udb-?%(wsB-WCSQ=j^OGt{4^Djf!2)X+JxA2|;6o)ch{ekZ=~KOfb$ zflT8#78i7w&6S2exHkSh|IpHyN0h~4X6OC5*Z8$^<2|iIFm3@>P9KiRm!{&Nfy-c( zXC6`wYwGU456jM2qgyD4Nw*WcYWK*{VnOW}x0uhzwbC_TQ+KQBfInrO~F9nWcdXKg>)Gj@xIJV2pdT{?_M zFFOO(jMmz*`wmTaBp{t91h)Rg-OBT!bS7QL2^at;-P*w<@3r6-$`nnL5?Vuf9c0dV zfX%3iwmZfK-H&F?HrAF84BrgfSI6?&r3xc% z67xpcwlkV8ZKAeqIhRdp{2!h-xr}H04e z*_oYeZNFU@{K;6p_hvIJ?T~`QuJ(mTJ9N|*r;MRN+tEza!tBv=v8ay%mfz0sum4hT z&E4s;LR2Q+5z-17v>t9}h7tM>6L&wI!t7t;FLo%aLr5ThCayE`kPqUZvI zzu7Il?ZnZ5`@`F$wrF=p2ZW}{INA-a?VAYWT4%t2OD6#Fx_Wu&6^{{LhiQmMSi9@b zc!MZ6?ljjO#>QBwLSGQ3sf6(?x}qICG%!$Ner`bOEleFg3Fl3bFflv;1U8)tDnk3Z z4r)o$9A-T30Q{!wd4zpH+5|*DQ!lY>+HddX(z>cmT6@~}>@_aEbBP=J`Le$o_Q1D< zV`$pwh_~aPg_nP(27n5PZX8^Vu(T-3c>pSawE&5BAF{U;p9=VXBY#p-_evG_H zJZ-J|S}}~mGz@z>9Q9w+#wP>TVWs^V5cs)z?oTn77`Et@=re9Iw1FB_y$^3hQEq)@ zzhpC;{A^k)d~JD=@cuX2E^t-_ewWsrfHZyrH&7nA4N_j|%7hW}fscE&I)E^VIn6!6 z1cpysai6_@xB;kd*@iA-q05F`&|7y11kPWW7>&dOSbC*3Y)t_A++8$2E!n*eLF)0| zkB~Gh>69%wm<8Q1%ipw%s;S>Wg42AY_kMnHWhK96c48s?vC z$Ouc-Sy7JO!RuO}q2FcVyKvYyA%mWe>?1e;EDSzqNN=J`Qdd}6w~5e_e7)~CZ1dr! 
z&|Rp^vVv!nN!+>COg^&nEl{s3HJ8R<=KbqJS7~vIG^t$Arz2$E$bniTX#c2TU8v6= zDMz1KfTG`)t{-<`^!pGs-cNSv@=Dn|Zv?)X9);agwzD|PNbL0Q6mE=fCja-~G<2_= z%6O_oyiNBkY~-BqnTg{uIF=*M2cKO0%kN4{@ImHLW%tN>up#LrTpPDZ=u{~0HxY%l zrMhD6oD?9v4#ef8;XVqDf(sWlmS^sJ2REktlb%(C0o50CziuY$7gQkCztD#`Y5rLx z3Xb zSa0bJAT9&(J<erUOov}!7PI(Crpa;gD&j;!~>3O$8-s-oBOt^)@*Zg{Q1y>#HLG$J% z>XXy=q5o!m$UggnllBw382@gtgD;`2c~#+Et(U0npO2(_B&uEJ*LR%2e)j%+XTq~s zp`(y~7e+pDB!6^)S>~=mgQ4T`%JcImaGtay59?Dv9M%|WmrBsN<4DXyM@ZPP48E~ZO31h*?%pX9uK%!Ah#$C~%MgL9NSYfx3jbq5qY(~i@m64hc_Qrv z==cXCt^4BRt)sdS_Hp79C2`Lxp;Odl$xE4SM0Xf4WWN^k=-8lxZ#K8<7Y@Yld{y#c zX-KVY;PbIYLz*9d*}TU-{i8@9Zj-zNZYZ`3G9{`BPW6Vq(fM@0v#s!{5@9z6?Y<6* z|6(S59Qjk2cdd;idQ#vy%`aZ@NrJQUJ+N_~S|B)vxEM|k+{2>R-UHA4A!yp-0!AP=b~$p5HDH=Vu6FG9{~T04XAkGz`fw)i%5*IR?s4@hH%w`F^w3FQ&5^VqwR=RE4? zXc#he75R=sFpSn%6!X}}c{yk5dz&)to4j5Z+gFXbFLklnWq`Sy6eE9DVtPST!lksEMZ z)fO$@HmT85$F|SsK@+#&p}u!G9V1^fJCcsMky>qf54>MJA?#=>Fb?VSgx^K-k0?BB zm7@zb=^3cirjZTYU~oMb9C5k{?tN6i+`1(2kB&=~MVTD#pXvkh$VS*a&;w5_T#l3r z$mB7gUZcNSPf24SbT3Zv7>_xpP37@TX#Wh-AN8lnAUyW$7T%;YT50)0_aB;4fAHgN0qtqnIM>p)J{Ih|h+U1J_hV5&L!ut~T z@$%yVoHP)Uzg25B)Wfc?8o-YH%cMDWAn9;*sGC=wQ&By*OV4SK7}!pw`h}WHrJA=o zjb))BsU|Sw2kozS){$Mb?nW1;5gKjFPhZu0pe?<(M5eRCsmv_;W|u{cshA zHnL-mKVQJ|Kxe^YAo_K>$ttQHBS;81PaLsZ;2{bhFMOXm<0Bi^CXEs9$^|a6@WblW z+8Q8z2P0P&!5#M)_Q}^;Gk^46m^b?t`O0(-g^7&52pe{8=6B+g*0qoGRu3<4uXkBmtz;PbzAQj5<;@tFmN>WHt!Kp6{9cD=_4 zhqaoYybvon^B9gUIl!eoCc?YH(dGeSuGGeTB810K4?CN{*1HQN@-8U6C23d1CONdItrc4Bjgv@hL>$XYcA0HC+76YU3?_VT&^( zjZQUh1Rpfb<<{#B@yEyj@=K{8G;W8tBV@k=ouO#ZARx@Zp8s9d)LK`~eTKh>yJ^#9 z@)E-1@ra1iKzPFET|3B#lLZE%IM(umx$H~men#~`-k~0&bCR!4OXq`++y(OSD74kX zp6B>hHicD(w8K3qMx1;o{(5{|L)ivqpA8oo6s()uMy(F-$+tP4*7_O3Gi_hn9iNg* zIUduTe3kPu{uy{5ZjHnlTD{+K^=nT04oS=NS1rpZQ?QcbENiQ28FnzSM>1!oGo>T7 z9x9Y)K(9JuxyT0SSb+4k8u0x(JzIA|d_J~a=7*8TA4;7J{pE2JCv&QQiTt@#`_K-g zdLiF47OBppZH}JUY-1#T?KKBP{z81Cu5RZ@K6ML3E=WbHLDXMiE?;@zBGFueukB#c z57ub;tt(PCBXAz3`N<+fVVx8Di1Uy~6`MkTXKN(?ju8=_j4~0e_8>k`v)8DM^ePh> 
zmQ&GsiS#Uaq!!Af20xV9Z3hB*0HhofD05LsAJef8QmY&fgPdOnJ11O1;x69eCG8dO z(~*ySk}hgm_9%QK2~R`11k{-N2qEW@|Bc6wRe?ZRqkQ$|HYDu=BA1C9%#e5u?hT3r z@pI$E2_jchNUP91><7{soV-1d4iXv)AMIF49QpuA>%e`p7D8Kq;Cij5-7`dZGQt1q zzY|%~=PoNb`4CQi3O$=S({rAcAiV0uf&rX33WV<@pQwI0`~+_=8jAw=30r`?2@`tc zgC<$19k~j>+{FpXsCwwGOR>uoIt)#qQCZ4;!ky92ZJ|A9b zKET^9gMs)8TY1vDEL%UZwk^Lh%JVqoMR47{lu_?t_Pb2h`&WTN=|8K(!MWa%H+reEoG69 z^mtSR!f!N6`^VBu8#2mNIN`UFr5MmNCYwd(%!f>^gCk<5;-2LnMD0i&S5HUMKr(4W zWy;M^ZBDKnx&l3zINteYyHa}a+T(wDdnA+9(4 zZ@-BD?Q1QrM~G_>|9d_{yypMkyINe!_`g0I;YZJpjA=0wvPNXGjA^u<(R)XnP4|MF z|B?tbwfkVw@#naTN8_bQIgo@L^E=zB9addoZ|a3{_N*zU7JcUZ`rSZ(gUfLKRd*b9 z@*j&${lFq?Vxek!FIYI{6z=Hm3yba7@^3eF)Sahe(CV%|^tttr>29+IOWqUPyuF06 zlj8B-DmQjGu_JTyc*|e-UV%|ID(l=U40>F1<~zDy#-0DVLE6`Y-Xj{t!k)Ayl3%*J z+TnE+6l%hh->Gh3w?~ZqbTAk*!>bjK3nt3_%^LVXYje`MaLc*WvyT2a zP>RC4cFpN`7PHnb64;LF%{<6)6*kw@;h)o+%1@ef0(wsvxxh}&bFh&It>iepS3J@< z@I13OtSEUfybUg4o957dv%SLjulJ@fuhUKFTFaSvC?3+MIxBH|GOZi7)Ly;5b1z;r z&A`pKU7*v~AQpS5H^v5PASX6~--x@)>HOueGBeh8LJU?r&%z0G-ELu4cjhszEoRqE z=H>=_F=6X4sLaxnSEt|SYYO8uNBs%UO7q#fTRl`UKOy=G#%EVy^XyF8hY;vq5<}Rt zqX$=y72}xlSoZkmADle=0qp6OgUTLaO9Ok};9doM`qdUY{YCb}p{x4*bA7q)odxi& zT?Q=J_8dmd^aFWY9~qnN|a0e;ZCvL3wij8vB1J4kD5jp0UP=I}3O>p<80 zB=nwN#h0y4lD3w+%WGyGg2MBDupna#w%?e>dj`)!%eiqFIJJp9y;noE;cgEo(ly1< zaSOP|v_Npvp68H=W0nkJ&0@Y$9h&g`csp zjqj}&DWR5UfqIO!oVg#TeXYatpKk$|*;_RCu1n1duXoH^hFTlXZEoJjj z9pJ#341TE06R%epsN$S-E_@X4r&%<(3_{7P1vSfrAxViyuLbwb1G%V4%Q}m3hu0^z z*0~kx)zVYHbQ{kX)hfapeJthi>jP28sU7@!zX`8J^n{RxoAIpe9PE_Y9ygcj%HrSk zbMNuUflc|!gj?wL(^zibxQx%fVWcjefslP-6OY)FAzsgyK8=9g9c$71Tu_|)m$EkU zFn+INYZ~`QaOh<-%6$4WCcI25Z)zWzuk{S zt-+NgjeyZTWTKXL4PQ<*xq_}$oxorZdUmMa0Dkbkfx0qb5FfoJ0|=X_7i!_S(kWo^ zEsPE9;41%$&y;MsnzDKKrz^3hx(eYp_`cDgL4%E6=TeQaWYH+NXxbLCQ=Y=mLuIES`eqmQB;+g@w@YGm%LXv& zYyNUZEwEd1m=RYn!dPkS^~cybppc!I?+tBr(>dWD4o|&8^#%M-zH#5s24*$ZV` z+t)}nfmi0J#1*4hy>qVE%mh9u(}EMZBwL$!Nxen{0@(-F;NfuQXbLR7HXZLXPTV0<1Ujc+_Io4 zE5Coldt7ofn2$ONRKMJF=r69Hc$wS8Hxpy5DR-j#JzwqSjXM}(;QpUDLUWLCsR7XO 
zW5}t)IDGqOPPHzh)fU#@EEUSeDXja6YuH9EhWFOLu=u|_*f;hrOzbllYO=j?ZWl-F zoL0gu`32VV+z?p#hVB8MRab4USq1NIotL7THp64*tZ>7RePA9G4O1`9!=9nNr5o-q zkj?`{-P7@4!+ve?Zjq@>=fzu$ZH}~Lo=W`;_nugxo-fCh+D2+SDO=6ke z%|s7z>J79z`4FB=o(|&J9b5;Xexffv{JS4!{5%fXpWnkIlYQv+;S`s;=%CvH>VZ|p z>XrqUB%KShfiM|@W_zi`_qd|lHcni}EsY1m+O>u_@^ZFBH4cCN>;TI;)kx<9J(zn1NA2Kvt@&Q`dagH^)<=xCQvHr?;L&p*z~8B>CF4iN@(iD8{7r6< z{3f>o>a-jNuJg{JeTz%1`wS}>6te}Q3__4N1lgtrRFC5!&+I=I`7KAH+Gd+eSHnlQ z`@~Ua_}#|6)swfHYkB6?iO$e{#8h|^ZjJkf%FMpeY?bDR-#*<@wHSDxeHs==^_Hd4 zxzR`6*mV@HlbkUpwlQptJD{Cw?>pNBU#Pc7_ZR%h=Dphq)cb(O3~nAg1Vh&T2bWIv zz2si>Yz#KC)d8u?N+2$0 zmjfSzPgXN#I5Cm-fw{=n%yOdRRS83~(Q&UC)%jYy5K{(U2L?mg(Mw4B4ZAfs&Z$ns z+=<@e^naR5HzYm-ukfSr;6pMuIFYUhE!DxX0weacmB*CifD{slf3`n_^)b2Vlsp}_ zC%01t#!*cHX%!~;m$V+bOkq+M?UP431IKo^hv>N@F*A~$w@P`+NUtGbn0nC35>GvC z&68)B;p`h5pt(^zB=!mC!veR$^KFh8_velzdi&B$6LrF9T2H;g7MF$2;Ok#C!p!~52;Ajdj5^TQ}kS`i*p^ajEd7nCFSl#FC63ER9!vnD%J{dmu=SeLcu|yEv?lD4`;!#;Nvj8<3zlD z))2~uK4YZ2q*o&b3O@Koy-|wxmKIeu^{PbxF1bxC5HtUs;%&n-gq>YE{Tu`jb*+_2-0+1_ zox-I5*1_cABU%5TBcutAL)za6NV;e)A8675X861U;!V}orZL`{(n=<*fZP9qxhwu) zMivOU{mf+JTc_~+7aNGxbK$+`&&R5{yWz*(W2pbGn3LCFL-~vRcIJo zRVNP%HD8EV)k%*VC??F0V<`Gs~-P}sS)a5}qJIq1g4-kIgpz(M9rBAbbgVIMOPnYA5||z;3+Lj=y!&k&Ju*kf-DHnPU9V<;W*aJftMe z)000wEkqi3yl7Kfz3+3FefpKbXYTTco{N(~sX2jdho4kdw3`BfV|(y!YAe#QT~J`S z-!C6VJjRLtk@N%#4ymO3*55s9BGc~(U8>~~(%sN-SR{|On#YK@fxH=3S>>{eHj#K` z3E}AL`Cu|*Ax7WK+1m4Rhw9$Wr%2>`ROhp)@Nz~3tlw^`k`|=<%BBk6 z12@X=C2)UNymlVle0m^_lf1w` z3wEBa&x(dOQ*XEjE95_s&J#Np52svi8gVLTvnID7&hNtrgM@DsItB>y7<}G~RnrT^ z9D9=I#8BlC-xAS6=mEm+l`Jg07|Wwtsxd>>ak~r7w6#$*ml!CKub_;lyLG)6hnyex}UfI~`Z2ybo{o z!1@k%;IMBiRs1bQuwthKLdGJhZ*ujdcVwqIl9O|;rl z)CYMGAUz`w?2skAqTmcRXTut(qU#BS88jdNq2I6r%IROa>Na^Nifp9SoV$$tmMpTE zE@g91i`=4(`!b=w1U?Zz{3d=Z2XMO0{0)5h->JG9npdg&#I8(mIAOTJCGzG;FfXw_ z_uaD^2!}~e?*j2N;t$e|wPbl)7vlbO6txu7*A1tf?Zt?5k^BH7jn38-Hc)Ne>u9my z(xDlGml$ayzURq8n7H!^Y<;@{=Do9)b;6!0tBxJzBX8@=#s3Y#`B9~GP7b0EvDvE! 
z?A!pY-{9S>*Gn%awjmtw!!4KZQg0`V;}H3jM0kCgv;!xN2&8R+<`D^Fp&)xJ(l|;T zUhCnyo^@1_Q7yL42EuwqdR=8VW(xcOzhC+E+Sh1M_Y@Qj-wirZW^!P5k#yu;9+19P z1eXtr5q`!-XhGR4(g2COko=>((=d^de&gA{3sjMZk>_RI>pqm0H-9XpKI|(GexEKp znM^*HiCk`gZ90;^kYg|#ck5y$nltP|YdUXgbOIlaG=fa^kLOTdA4Q3yEsMLRa z=<@*Tg=HZ6h;jv{)VPD^xLceu0(do|2rCDtA&m#+K%l(0za@ARi~9GXEa*E+sYpZ8 zLp09?Nd8(o4=38`;_X56Q0PL!JK?RF(14_Gp-f3(#97|ryquC>!$BzDi*sfBEyzF;eRC3Lq?oH49e?UJ_aaUa1>FWUQaJyu)`Z zJk+S5!CYjkw>~}LCmSp!T~7N@`{;|TRN$7h+p!JYkn@DD!Nv0})cm!s!rQ6e3YCw(kUnetTlS@}zjz5MP`5)*min`;&@C-R8Ue>iJ$ z3T1?s82N0$ISQRSx?(u{eymy=_d;frgEHa-F3u&n*+6Z+GxBnK!u4SwvIo*CNMl4^ zaEJG$LvMwrl&2P5V}$cC&h;xNe23VR$#^XH42m3fvAw=bc^dz-eH4fqBYlqK31!Ot zWHC3QzG{VUpgboI34f$r6YfcbHRK<5(fuNGaqZf5O7hwNc-c5fTU+5h?#UyJJY>qk zLDVqeJ!x@$c4k%ptmxJVR>m$8J_w0JV8Gmy3iS_`KdXyP2kb!VM_iC$F0`LED;1tl zWZ9=+16w)Xp356vaLQ4{cq>IYS}o8VsY8Soss2q>!{KfVqr7r_Zt7f@u=!ecL{?T_wMOR3X~^!39-e0}gC zZr^DKC=dSf&l$dqe#WLRX%7u=%~RTEug2xHkIP=e2fVoZRXpFPmg@WIDogmXhaLYF z&FQnTWXL@{R#B#GY1S9l?xTIPZ!O^|Yn{O(zAtn?NcY0~Bq}SiH(+4tBW8Te6*@oL z4?!b$GB=|?m|eVwkMXbJ*Q6r$yvST0Yu-$~sNWuj&G6p(xtl`P59PGdf)g;JfD)d7Iw3z^ap{vF%STL4N3PzHz-No*Yrcny?ZW&{~zI zFB;G5`~QGeo5x|Hb0a)m*d2Go&Oml~J)iL*jc+gCfm!p7;Hxr3et0>7H5$3uJ3sUu zoIL0vhv)R8dws59{NV#=;5?E2_h13E{#M|fZ8!e3O>7-}@nis7-BAO>uNEmsJ?r83 z_SM8{G5jQD|Wn$z6A4@GFfa*-_sXt^V7M zzF(5ih@N2>H-9xB`8f*1RymR!CEI1P3?TF9E%cx7cvdVc4#G1TNQhuq6wu+sIB z6rP)e4;RItMbD`)$=5_$6F(4YS5~l>-EwgHql+-|v!3eSyaZzASK^!6mCU9rj$0b! 
zKguUUqUVuXu2}(t^fp1Vo>XV%-m}fHgfWxEN>CVhb3b9|}^VOWL{+ z^F_yx)18LUUD-2P*Zu}FoijZ{=gJF94djoB8=+G~9;p38G1fCkc}?qA&5*OOvV*hS z}nRmm8~YA(Q#oNf{D^yGr+U3t6bWxjb&ZF8%&z zTz4WE7ft!d>g#NPJ!9%B+g*n;Dd-NHI&COhIK2VYe$z8lJGlG4zRb*jV0FM4_7&~% z?z0opjqJ@3bfB337rP$Be6*P!4PJUNqBdFU@7u7dDaU2W5fDDSlS-cjjj!p#_1AOp z9o=K$B6UZ(q!%g=mf_%POLohS)?NOX&HmZF;4~)6i_K=L{{m|+=CP_NL;8I=V$2gI zGoYCNe5_&LLaanz0`&zte5s-}mt4K64<&jHRwutw&fb>g-HuyX!_~Rym-3Okr;55^tv z?x7w68Yf8T`b4uu?Wh)SN$24?SD``vbe??A6JPCZM(?!)9?gHpSBAV%Lc>zv`|>?_ zr-If=sH!FJHOhc@W}z^0V*t<8*{Nyz!A8A7_xUAs9nTUz4QAo`ZKORJXW@ZPHD$cd zu=5scxRkpGn!fWBHHj;mkAwKJiOS-IegdZ?nio86eG|HPjzwLJg*y?~aQ4`561^Y# z?3l+#`fR~iy8cyrdmt8Xapbei43O#w4NLEW^^!I?Z*wJn+Wwx_e9Pctvn%0FM<2fa z;#sJ=?h1ruY*j=rY%w+jI?k!19q)E)m-EgY-LP)RN~H53jo1#tih|Me^HV9L|8)M= z(o_k!*8z|FZUe6&m${zJJziO!j86}@!|zugvsn|j;+rW=Vd^y_SrgNZKW|n9bX@p4 z@E5L&To%FTl@O>VXd2#sbft&wH3Yg3ia`1p<6cf z&rnn zGi4hXKHldY*FTQdqcCO@ij&~UUqht&kvrx$@M{0Z2WOk+;!hu2we!|lQzj^@FqpR%XFz2VK|{C0wF(ua&qkulpy2oH<^nN@B|F@60XC>9ldrt2s z=9AHUAk{VM&2S@(UnzQ$H5zda2#eIj3I37 zhysiW?+h*voz%c~UfBLmJ`1=z0jNJ!szE##IguY-eNn3&{@Dey@pC)zUMt#yrGW<+ zPufGDZ7jFRC7nxi^Xp?zT)n)CjWyp5TWLL-_BE!U9!+Ph%HQCKr56ZmhXK7elW7lE zvx)$qdXoii6Yl|G5{~-l0Xfq~1L+AdZoG^45zJlmf^9IbVd_yw4C~Y!UBC>ehc&rN zJmB%0EH?OOZ`J*#AHDBWPPh&|i`Ga3J1s=QKkzSTt!55tjX2O$-Pe4zX4TDG%z43A zO=_Z^8uOqjMl{$Y(Y^M)V11(jbbfXaG~$ZDPWeG_NBIS< z$LZeE5l@b6sSdw75SKhWCNvNu?V$)AQQGDxMh{DYK`)s&H*~*Q=)Jjq7cuwTY{wRs zXBG#cXPv>ht*5+t`FvK`wg6k29z&`p3~ZMRLVJ)75F8?E3c9{3f|KvPKvZQ<416O0Kt**Gd-%vPXdIpV>hsY&EhQrunr|{L_ zv(Vpc67^UVJipOHMcTijX-X{k)o}!79*!aXUt@XwUecPLTM^pW;f3#e*+J*o*f@9@ z>`Ym}{0sKu#^`*^w@*Z>DX%8$pVNJ)FS*bl>#dSG)c}khl*|uYcZ9gVD^b^|2{FfF z+9%wF>h2z&{HwP2wq;Mj$InZq{*kF4f%GKR%xC`j=TPd^-(2v~V&_<`He7V+9`n+x z52QiBZ{TyPiAPABj_uagXtf9R4fTdSSO2-IcicB2orgRqp*_frJcPIR9v|0~WQ z{z}IK?HfRS9V51=@d-{E0cVZB0EbdLQvc^bAD3kqJZurI!Q6_Ersv$OFIV8DsTbk% z&%2zo0fgFb!5W7su)CeD>DA;5Bu!}qpZj*?aY;#dJ7f;n*#g^5_kNLnfW$gkF!1#T z;>n>%+D=|7^#zZr2tIbg14eZQL$Q&3{Ujid0$q3ZMCxJYvZ1+r 
z+ByJQhS4)P*4?TsJoe=EepYJZF4{N@Bb_0#qcnRpyZKGXBIV}IkCr(<}x zUyn#1XDP(PD(Oh^e~9(0;MbD%nQv?bkf*}t6E^~lHxe$ge0v*dacOO&-pL6zP#I|v zg>)eKr!363EtKe3v|3l_#_46-sW;LEuClP(9^?TH#Q8&Nr+-4bbD@=^^|tUsA7v0& zOWKM1e(HxmH}-OYdt_@z9kUm;-qbbZ0Y;@TD&Nsywtj|lfWF6Y96!aa*T}%6}o`1D+?BU zSxQ=WHY-14fK)@6qdASc%%<~=(LMqT=-%@pp?{#koUvdNy9zrRUt?Ql4(mdELb}rq zPV}sjghpx^))yAWtjs5$roz!T#Jo3e_iD_7?DO!$&xLxrJ)l z$`%hkcGDi;@?9Oo_;S)mJa1wWl$&g08C_^Si))q2)%&kN=~@@a8xhvG0P+>kw((&m zw5zUB5>D=&#|US+PT!{Nm^)p&t~(yyURs3*+jS-Eu0ry@=&Hn4sZHM%(mHbW z1Qu%FFaEHD+;?alxl=?J%xRK>gz?_QV~qSYX|y;NG}0PLmqC@y48Ejo7tlD35*}6g z+@~BWS9oeY#m_Fc_~D=k@)6T;&9lRz?#1}hy~9@O@aw5??zt}`??QgD21qM1IzOPg z=i>NF9cYiQf)l9UJ`z0R7HPGk|GX>s>+@Dl{f+LoY;fl8UUa=C0N<}$$$}2H7XDB3 z(t$WA+(*S zcdA$3vB547Soi+E36lQ-!Ub>gI|2*&lzJP{<^DsU?ABlT zo`Vy20O?xNr%A$t;kCEtnO)Hnks%;)ED8@Le9J7eXhwPx7hM|8PmUO<TeLtvd%N;T+NMk3mPYE(Cm9B^!M&ur%$3*7ANK@mv zPWvg#*v8lH84Z-30KGp@7Q#qR%1{2TW2F04(wUHz9}a!@HjoJ?A?Qa(BtOBZU&ZH0 zwzC!rEsSZ~UvO9d-9o?M*oOWBw-|W?Fnf{0#@huVmrntNXK6P5dWM8^AToh-l?jZF z8SN|AE0pzM*EcOy!U!JICPRErJLcJUZZp@nr&!`(3AU~1Kz^*P{G|VOiEl$1NJGs!EwY(ZC{iejC zUs_k1_qTAm@SHa5qB&LOZ9N5p60%vV=L*%%QZV;jOMP&e?M(TO zgH-sp+F#2QVBcwD!;BJo1LkUzUnv=DnZzsN_*7$Lhn4YcVD?8r3;2T{jGYR;wty#k&gXkFx`|v4_ZqcD^0r8GNqBmQQ}%}zKb*9=0_B7%d3wPS zq&>E==PgqCq2zLyl{Jtj)^UYN6~lq@0xocD^q}VIz}H67-*Y!K6Qbfc;gTfqQ)Ek{ zU@qy5o?K)@H~!r4A|IxXozzyPb7OCg2eKWTIKFpGLXpRjKI1}@P+v2Vx2?K42uK^L zuTz%clKT_Y?XGvp>t}J&?4th#Z=>t$O1^JKJ{#HPJdj_9FUGahz{k$SH}&vn)OHlO zGvUrv=CW=8X}jepG!xAU4PG9ie4Rf4r%uU3a-NaWi?C|lW$l)>Tz&s9jr z$wZBc8f)_4qu??iepL^~IjF)5QlF?IXL)F~A9_E@_YN!_4gT}`LT0VLO!V)GAG!Qx z*I>{MEJ4al)vWm&FsN#xeD(e`6nT)h&O0q%Oexg8M)|Se9JYw|1E6ujfHikTjd%zS5n4#prPb+?_|A{6;9(1R~Fvl zyBDNF?Sz9OBgS4gPGLruLP+cDM901smiN0W>RQUtc*>-4WZ^-d7i~d-cR~+sjf?Wwu zOWLq4X(12N>2bu1iP}7P6ny0y%$HLpIU8ua;9pcTp>;`j>=4-^8?9r8FT1{wC}-eb zji^pYNBusr1xf#ktWM@rNHszJ+kj)Ke{*IIsoeEmM|oh} zQp}V8;?`$#;4kgz0ksYx_nQy5myJUGCQIQ{dwZVH=CG2S-he+mdydbebvc5zAH~@X zKY_{d?b6hNaX4vOJJfqNm``f23*Bu`;oHGeaSf}j4&AkphqX<|-+wJ&o#80nCjS~g 
z(8?B$I|Ix-^_s7Y+=ENpwtC;8`#pajdIp#FonuFXPvd`|g4n+CyP#`97C)pJDCe&+ zRD-a(GuCNmXq|;jOMRC>i0(aEIIk@W5po7M2sQ)8`(t7J(L1>it3{>dmlHrrpITcG=q1KyUR_#JcM=! zvsiihZT{o*LuT>#5_-J3pd_?$;hku$n7YHmp`y+M-eu}Nw$ykE%;|I;sTQE*{8r`Y z&Cc?N|C;065r;s`pJBD3Y;gQ9{x?7-jy!`4CM<`NI4gR5*<_o$M;W*tmh zzM20@i&gqv>#M}rHkA5ZyTY#4x&nFms}U~M0{`l3EVa&d9vdyof4Wuh%Q{Uk{^N9c zanfcu{ow<+?+n6ov1RRafC1QdL$In`wSdDR`n{SH*xf+`I)V?F*@%Tve)j{1&ka79V>I$G0jlb?ajs z;QpN_e57ab%eG*by947tK)4`RCO(9j=G}3-X%&Rso9BIL#!jUFtMz}L=4DZR zaEx^d7yY#{z(QSDP!D>(@P>YMze39%3}3&MSoyU`%&-g4*24B{t>J6CTSyn(VMN?E ztQT*DDNkv=kM|3)OVM5aO>-FHTAbmnoSVs37c;cy_AAW;3q2^?YpGzYVM_I?AgCW})8dYDSnW zYse?|pmiAP58nj3XR2r&sn$4GcOgD`=Ff+uyW`X^IsD>+GraGVW<2X(J*3`;;J;&_ z?#TeT`r&CLjDhK=8S61>91eQs3Sn1o@W&bZn5dPceulF9yY2Yk+B8rn9Y!|2jRcn> zV7SRspx5P6|6!{2Bs{;cq3YCar9@b+QvHHo$wxluSr*XzGw^s zQ)gPh8MhK>9dwbmr?qwocUbG)v_^5@ek{%`Qx06b43$5RqnXKW7*aO|M2`y0uuC}$ zOJs>^t%dylPE#Np6qt^LcWS%x5umoML(gAFh`KkqYCw1nG7viW^lQ^VKnt<*DmBHhnyOBhkghJ`<7F*hC~VKn-B+Q@2y z#e}6J8I37~zZ;45-#26e^R_LTi$4dH!v2jd_@x=vobVF9ZpdKby^FUsR0zl5$lUg- z?wR?};;@F%?_l}ypFFhdTzX9kAE_qbqL2nawSqxjfzWwK5AV6nBe64{#c3@id1As! z5aX41UI8OD#9HTlEhF*Z(Lr zq~2YCgo!YX?#&Y%5Jqj$dZ|RY>EhQXhyq@UENDmh0r$s)Sj1EM)?J+9aIQ zcuEo2jA|oZ`uaK&Uav$(x&oKnW$>W$R8Cw9H`m|hY5Qg1T?3f-TDzwm>-oqW#hCW> znFSA=Xx;8#w-mxzApF4-K~q2+V^Z9GwlASTI@R_K-x8Wan&lIt{=r?()?-$WQEaa6 zPd@wLXY>v2L3pth+$JzaI)Tl(eY@AufgipO1ey@@d8mzSw_EYA7G3U0zyvuDDB#rkrK zG;An4>UAu? zsgc^|l!HutDeKR2<#et<})q1Ycp)?UjT#^Rf2ik4n>l z?uyW5FCB8A`er<2)~`d_?lvBsGnVjsyJp|`rs{>u)Yd8V!hJWc?<8fOM5~+!>ku{Wnr>|30j8_y$b;c@G*4 z&0~Tm3a4LE2Ab(|cH2Q-YFx?Ext(g6;2;sF$)u4mK57P5&(W~;HSTI==@C48A_k8A z^}@_vhhdq{8SM6IgXkxD`(#H>y)AH;b%|L6q}d23Z?d3&S)gh9h1Cs>;356rGNBif zhtitokM}YMy5>Ld=VDmmW+DHWy8&sAq;;9>u?BL*3&*e^wR<&LCY& z*j67B6PGKK+OLM?9lF!Kmc3=tPvV?J{df=Q`-tb|>jL!(68Fj8*?r;rx)NAdupUW! 
zLRiRmE_88a$5PxGrArv`n^7Mzq2bT@naIy}xj@W$FA)5uX&RjJZf^FT zdSxwtd-M!*TAYcVPm($Js{-O-*4;7=cNPxeWBb@a(<~#o@z_iZ`sd21j(MqnGRvd; ziv`xx{PF)P^SF4wn%{JPGU?+T=?n0(PLk*=el*wO*`*52}}F5FGUIYqBT?waBaPMUsb?niW z_K;y#Kgqz?$5l>;jZ8R_pW>2wOAPNzGu$a`>B~Z zL(q?0@=OiINgCElVLb5YHiKU!JcWLWaUVzIIjMLhHK!7d!G0I058=5XS@8w`$r;8< zbHcAM=E;0jx{0o|z`FWZxJ#NZ7dcdd@BDlXZDC6EtBE-h@HO=EL2+tZ;Isj(ps1vG z?9ii${wyqsD|CL?cwNLUg|&F@LHWEjL~qO0@s-3#{~;XQ>bk7#)sm}U_(VUP>znut zsW7j7{2Az6XWF;v5FMC!Py3tyB{=-51Wpoqk75S(q~-0y0Pj(<%Q09 z#`BfU1@#I?2Y}0(lhx9WVoFDA;cVBzbaOGqGChnhy{eY6!4Yg_#h^a8&ti*C$Vp}zeI&E6Y80RUL~;Gk!@1-5PVM_ zKC~@1nmpe8q1}nr)TYxj+3|Ry!1z;(Pdz7g->pR+k) zWsB-7qK{D71TW1G38N|eo`7#yFFx-(io!KOsroM zNApIbR2uQgi*1K26Y%`t=qP#V^JV(4WwI!Y{gbCG*-IzOq_P^bnpfw1TQ$Y;_tGYa zB>$$|qe(+G9%z>N48O@$yMJ6~L+O0nKyzWK6y^hIzKR+<~?-()#_}y{HT*yQn zD8ubsq-P8oG@Hba~fh<7WEbv6Y+aaGhsK!TR;jW(vdWstP zEF7no<<4%GJaB9wGfh>m~jH{vlC}hu%uV0tY4b;KC65U&RmcXN|){<{rW_T-l?bAF}%^ALA ze7mEByhDShr}EDm5I6!l!3&(SEl>Y^N%erfm@FQ(n?Znuf_`W4X;ls zz{rra@AK;B;e_0gnWB4lC*Tbs&oEVH1usXX0(~ViCRX$2+EUVpt`1z$=^1P&wQYu8 zNLG9;?5jy_-|YYfXNm)-@Z72Ek#b?>_NMZe$ZPm%-M(CMrG@^eNe{yOu-}g;XsFu2 zh!Tum0Oy<(6C<~ZzOoJ3FOH(FJC~#=pR0VZ(n-BDnh-pjXj5Jh z;vk`axNuq#?l#ktn}^hw?#(Tg#{rJ77C3gkUo24|Q>nCVP+B`Lo zLp_f2GVeOb7$y<4jPhVc?F)q!mKiZ=wM-lD64Q5tC=aGXo%U;T(&A4(vk5cgY(R+`5q?x20u3&neYa5xnhsJ z8i$7Www}F>nt22eyftlJUy>{A@g|iyA|qpDMZDhX57~6^<@bqax%}N#tnz@wx~1vy z^`1iU#EW)L$p8LQ$K#a^l~Go&ae-BqQ!~d3xc43!v=^a{X{}9`RKDnW*>L#U82K;m zRUYrn&V4ASY?w(rV}^!omQf!P+<;s(q-KhqcfT-j{rLO7upaCrSGC zO!+r&_<7l3+($9?@f!-ZSSpbplENX{F_u2EUJt}+zmXCxQ(3(PjzVknqC;MT zIrDTUQ{CI3Ug}(gIuD`FHK><57opBI{LeiQ>OKi|PD1^>Iw$cz=Ofg)3UyyZaeV#1 z?$I>e&6m#j)X^_}t;36oUSXTop?t65aqr8c_m6Hzb2!HDXg#_o;6L~OamJo)ku`O>}( zg%wES=;c?Xcl#|u;$DTF6_<#wP8+$;mty>OXd5xAa#z0Wxr};D+=e|Kw$bFvGvws# z%f^t+QFJW1Hq(@WMu!ubVqD)Jl(=`XnEHp+@5&AKrD$UE0?{lz^`kJW7`R?Y?)dzI zNBrl+|Mj^;(@PJN>#mm6R$N*`EuE`Ualem3u2|29@5S&^|0a|XzJYoyh!kr=c2m*v z=VZZHJMPw~8trnK%Ogvy=2JC#X*eGJ>L^>&wQqZP5KnUeZRgq(j?&ZQ*8FkrAqqKI 
z!kkt{=TEYY{yw6Rxn<8D`EJNKIkRW9nnzjGWs`h3?x1XTrwX~fnM9U_8tB6>cA*vN z3+0pT{#w1WjcCEMXy#de^MP+&%HIw5$Gy(1NC zZ!hgX+tKeW8);2Q&6Ee@3+g{QZKS8C{AI$>bzJ&FJC3Mdla{YugT3vpNYtOKQN}?p zxV|rK^jl0lCR|eMB<*g58h;XfILNAmKHMB)_V;)!f_p#X=MViUzVROEw`v>TEmcs> z32l46SS!3^0`^h$788>#^n#%o{JqXubquWDQyrUQxrs8h>j`T1Dja|QD8dRf<@Fy* zrUm5760;{fO%1PF(mYk;CJ$PIXEBv5gZ;_o(~i%9ynJ;Dy+V^oJYn4qiv9hIyc{la z=lfH6Plx8_jwiphdAoJt=yAj-Fg`;r{4|s2$$W-)?-Zs*7ug%n&uHnqm2$h@5)@UG zLNkxaFV7EAr5Tfq&8vEd#d-5&FR7TfIB34 z^POL|W!F2eIjrlX+Fl9N}Vy?r*z7ZqY!J(m;^6oPFqOLLgjx;bwdRo$7iqVhC};giu6$iW-;C$(y_T~OFCMzTCWp!y|k(JL7e`m3{QTz+IZbIo^0pa zNz^C%wu$7~6LrqasUvPiz0tnf6s9UCi>PBXZXPP<4NT_1{Fgbhc~hY<3UiS+_w!T! zlgP#A%&lu7?%z#_(Z$)S~_~3 zgK?Tpe>#7N(HqqC z*cN0U4^sE~Ul#*=X94@Ib?3z^xh$C~#v>uPstV4xG-y1$HrzSIya z?{p*74|#65Ae-(R$lwq$c-nUUYF%HZ4$LnxmxLY>pI;O*(Q`a^>x z3+MRGe>nu4mH*Ox^h(QD$uR>K2;e4f_-94Q`Tw%w?3{zGXwV2LqE}d%)h@RuZ~vkU zPL@wk*VR|vp2a8a>(DFrHVR|8)8&W!c33>Uun3{`*(0dk+Eme~7WQX;`dXl#Rlm@R zOC>n-*-5#2Y)vX(Zxub7)0)K8#yWag4xaLzww|}v{o*3Ug0oM-=kqCX{tKbFB%wzS z5jCJBe|`}nhD2s@7rRK+Cjz`r+j2@tV?b?%CsghJeO2>9>6QEg`|_Kfw}sp9WW|^K zU_fj0bR%Saer3%vFWPbarw%+jC?7#vh@j8eoOxqDL)Y-Q(NAc6jD_~>aY3{DkYeV6 z#L^_^F7*b+>LaUK2&E5Dr&KcGh%vH6akWQ9S zCBr}R#d9a+i+=}2v)dObc={SGcVHZYH%(}k)c6|dVxA0Qj8p0dijrTfCQo^nB{}v4 zwdt}|qEF?j|8_}WlllBZYhAs^=(vlANC^acbUH2e8(Y?!`Wwcgr+oOg)T*?{?jnoV?t+oS-y~&&^l0l_pCr#FFYH& zwPqCB+15}scRu!?(o~A$>C5warrlZWb2D@u*|faNvnEArR&JrPXe~UWwDV9hx{T6i z4fbVkZ6IygZgzZe(K)stI+v9Ejg-x)5aJRp>Am9?Vq&Qzb6A@S?Lq-8L!ws zoNsJdCZQ8dXhZXQbUDh&DrW}TE)e^pdYVdif=dPFhsHGDq4X==EAUo}EoaS-LT(A@ zmAw8wm>xsWl@dCZ(RXrxw2kJxsG&e_6Z8On*G*{xai!BT4ZqK*RSg;s*MD7<&>TEB zHk+Y22skcUdrp;^Is2*dwj%nm)9b18?@oMRbuDmnHv--_Dq6ms?te3qz?r1@F(>^1 z`HZQ`1y?2*zsX44GuugYs|Cf`vOzzF*r>E|FoZ@D#)JmHHn(mXAaG$PkxB=zDe^qwb?w%nD|;k z(@4eXO8?=#1Zp);11o)_^z>Bwe2kiwja*Wwe=SKit?l5Mh%tYZWz~v1oan?U^;?if zs}3ABV6EIdVGi#JSi*Al4dLw{4&B|9pMN;b)7^ve^x5!>Sz-w8^O*8?HK8_)_!`Yj z@7~zU;aPO!9o9=E=9Qp_*`s?S9v8COxOLxF$C~lEu$D6D$uRyk7~fPK)0U&W?Ldr(tjDPc%6=<8H4EN(Z^L&l*;}($``- 
zA4rVBxdbo0(u_|+@r^oG#W}-{Nk+ZkJRzQU^m8Fy_Ow;_Pbn?)Yta50Y7g_6PoJMM zQ_YQ3dK1UQhq7#_zsC@IQ6yelap0JIRTH^?CtL9JZ3B9ToO+$(q}R2$&Z;n6=hBt3 zYxkCW1DhMbcKVloR-je|j#24<3QgNZx!nqaKQ#izAOi_CCSAoIUVpj^=p|q{p0jsk ztgi5(_Ph%;y~a*jTzi-Hc;|BRE%l9{BcSW8%|XM5Fffo%XZ&u+JK@uJ5W%wuXhC*f zcuy)_^yQn2gvT`DGX(UD$ZbAWVWw#C>4Nf3M!(ul6mw!WPgpioQ+gK~jH{enMHcsc zly);QJ_5fJa1hrN*eOrZWD$J*uNn2(T<1raLoQ#h2z~h$PX`a#=%^WmXY}IzSbTRR zHRd(TW{c`?+f(2ViO&u+j~&f#D(zgOOc?qA&yc~MwwQa~*rpjDOfRhiKQ-vXJWU(v zYlStOMD+4YnGt`S*Qr@WP?Nk92Y4xs8ZXO)@Kx?WYj=0Ap@ z{~7$IG?p3%pBWa+9ZzmG7Ug?JjC=KZrthik}|)d$!^!1Nue0s_9}vzlo1~>zSUr!{BLg4Oxq|2IdjhgB36r+{k9& zxK!MMI+V~KbR{5`RBuB^NySxtExvP`{t2Y1X)^?rJ- zJTLK>C84o&=s)gdM3T~D?YDN~MUL*uzv3K5IrzED0-jsdhc@D67qxz}#kViO-j1Bx z{4M`Wd(S8SyppJAW7EWI%B$1*hr4C;ibd3GH?q8ag}Cgue#$H3nI+d))o?M#eZTrd6=RYdGRl45*R?w?*_4FC7eQ8D`UCoukM*-i?2RFS` z{jR)=(ztZVI+mdKq#3i1z`?1i?&93n3Ge~eC|p^G&zH`Nj>xw|&%jfs!7EjT_nhAt z7(nn@T>tt7R$4Qv>uzw~UZqVWYMT@ew&~o-4B2#rm8X`o{RqcEm4f?A)P=af!4f^G z-@egO**`@}6wOIjfJCTPU@OnCYm8x6l-J2p>|LeO#yBLsSy7g`>p+nH~?%iL~gE{{5<;Y(#hN;8|jZU)c9zMh*VFP3`5Kmp&*(2o+m zA&{>y>KMW5!@ zA7bDEgC9}r2e?@Kl?1(!n%kl^ux1ItcjV36u_JydwsY?y?+WF;fydc1wzA5HbR37V zyVgj)c(WIs9nwo%LdU?LAAku5Sm}YVl8dNjwa?;ot@3dc0~3<-~_^b1dS%O-`_`)1fkg)Q)5vfWy5a(h>aXSUfX;azFp z(iog~PUo~!EjX@lpsE2K-w8TVWrD2U|ETjTqp#(5flNa~j-&KAH?RyRTKGjm=dscu zDpOO(gkxcqS&r@3m6A)GFrfL62W-V$tVhi@gV+2I{T0gR<8ZCW5_gpqsN70G1IQ~K zdXw$EBwCU16Zytq?7?-F&}RbPo<>$(kmoU>GfdYpt5lxB$YtcJpOz*(5+A;_SLI;z z>6QyYpXsP+txbPNQ~3oo=g?9n{G94(srp%Ak#)li(nf6IlE+p853BLe1KVk9Mh{gF z+zI;&tvJvG{n?62jz)dhe|}X;$F4#df*Si*JhN{(Npk*izIYY z3Niyy>x{V;lTQrI>oZ^wXIFS*s5%56D=(yU19b6Mgx`YNT^SN zajAS5Io>~lP9kJ|Qsq0(9xun>HC(J)}e{>vhVOJO&PvTWn~ofHD8{0fd4br ze^`r*yqm&KBj(g@h8NXQ*9tGVWt9kOS;a9A-{3I?Fe)#*J0HJao~#+H!ISWT$hpuQ zt5{`{&{`6?g!iz0-?bC({>ozQ8nWV2%!5$bnexZ&)N0T}iq0T+1^A75z&PS;859{A-g1TSz!xSxA2pE|p8`qyB7yWNMquQkvk z>}GM7M&J0`>ltJbk;8!vOUuH?+tA6jrtDnt79DwmYaP6#tdsek4@yeHuZ+FzKqv}z<(hGS|>MCR3+9hQF_r5Gv!cULN zmh7^qFS)K+ZH(;{&fB7Vc~Aixnvif>^!D|UF;kAya+`Nz 
za(d9t26NS#Xb!PkjWJI5pvyJ4v;bJ0n^Ia&=V1}&AN z{xs!)_x9%beAqJt&r6>Cc^2Ka+e;Izk&}0L!)+fulhHAQd1ALLRa@fY!fO1z<96CG zS*=6i&bharU<>Z@#;XEw_XVht74#^3ziObEr54#_XY5R%K~_rPcKI z6810K|Id*3!pSz;PFvb}oyaV{jtj59AnW&br`$>F>G3~H!ZC=jR^8RwP=^J3DP{wq z4;S%^&qG*-t>h?%(%&Sah}jon_SY-?7h*X4qsjJYI1nHzO3fG;C~xs{O#IY;rV#k?K;zlT_JK} z*a+IR`MU7GdtLs%<)8ygaPR9AzK^{dx(~d-!+%)lH4dMr$xZBZjE$Bb9Yq5&ees;) zX`FdxlXqJ@XDK(v8MygQn;$$!)e9~J&lC#hT0^5f?s`8pd+sRN`DZ^-b2jc*yt`2r zy=BUob(ZnGiMg`rpg2)wN{);iyTiCL%SIoOZKLX0-gKRS{redZO0VehvNIl$_dPS+&+I6Z>FNptjZ$<^u#_;v|r{vQi zYw(=hQwFezO0^nA8$#|&;0J$maA3X45_)txi%J#f##d%75d|hrBjA=^@n?0O)4(T> z6VOivjvMvfn5uN@#|@%Sh=M+A=s$5zae{nGWu;fYk>qcc%@fY1iyfJZxs`QgirSUV zeh13xE`1+r#Wp>m8TH1B!CAXS_LgCyU7Md=vS$Waw`MIc#)E-n!Ztb-cxh4{zpivJ z_7s&4v>;Wp_+05R!;a9?BHyTf0(B_$LqGUyza{c?qg)=bs}RSWXruw#q9tveZ_TGq zYCbU!r>AVLpyq~9uiCI*!x&>RYTRy(`R>nZev;#Hj)>B^qaVFdnoUH zF;M1|$FJ}NE89ZVF!QT^NN9}2U;S4m=exvu=ptB~p8z9z5tQUBC? zMho&=?kXERZY+VD1Y8x0uL^mk@TX^~6z7JG;KBc4-}>U3^UdN{_58Z(9aZDE`XtKv zyCVsGNn4u55^7amJsQV13UwC)YcBw1r!sIu_`6@kd~{&w4Efx;g@M{v8U}mf-qUvW zc4G97fOcU1TuYXd4b8&G&v0N77xPBl&W!nBr|#pV*X{v|cez8oqg*|%7FWp`O~2en z)9d`@%$#oy=E?#>03XGKneFufUpmT{=N8eE5J$aTZa%5BN5tyNX0Z}C6nCLcj*^wJ zHxEyM1q$=UG0!tp*0Z|iWb;GdbEMK*=o_=(l`2@fJEHxla8^A0s@HTX|07duwpuK( zmNa0SpYi9t2WJG=p}A>2%{%Q{>vNxVrND+U^hZymTUo^!-(}Zl>-oc&<=ieMzdrf% zPuxSXh)aL_%-(Bb>EJ5^HQJ5iy12@s)sy(ngmm%a<`J#ds+|&=&qV(KZ(pjK9iwJ$Umeqxts zl@LZj9h=A!ACuHE^Whc4DWYIB`f?qihN$BB7@pE&C!zkxqTF+ygFRvD%9~Jc#eW*cr*E0tfSwGWP20!# zi6X^+0~eO@%X3zm!pp&}%7d3rQCQYPnl`EpgOB)Gx5lLObMdYP>E+h3#^q++2(_i5 zMs?_0R&&(vejUcxr2XJY3ImL8`+deQlZ_{9EGK=xqD7 z?36J;>?#zeUA0+FhYt;s)=`JV5vQU8{4IfHqEde;m3D4*Z#x0QsYv5ux|(Mkr|2;) zP@?`A7%qZ!BlY|G@^a;aH01@j+jRo>4Is5XKJ%_Juz^z(`*D|r!J_5aLT2XMSX>`@ zEcNK{*5<9Cco|$GEwx#VRuXz#iyZ65Wp>Qv9SbMR`zOB=w1S)!wE_3H-r^pwZ;8%J zaPR)4G9vnUZw{TahhOKql2x4wCa{>ZH!YB7Cq~lE$GxD#4~rvR-KYWUw6cKATYqh@ zcV3TvYT$`$1N}LFz%2o;{-dNe=I1?=GFmgo4_#=cVNmz}+%sE~sxJFPh{0xo>l%6u-OA9+(8d@z%vet7ZJ 
z+@oA=Utx1uMuzks@dUcKn67lFOT%Z-Teb`>i{t$aEW)*o)-B8_AEN2~lL7R3Vq564 zt)gi428J3Z=1wo)rj@EiR`^}lWjcJpd$DwdC)KZ8N7bnTJ(j1t;j8kz#JFnCra2GW zFF-RCDL#jeoQ`4CyL^%pOl5jig5G&0RBb%! z1NTx~mjw8~G5V>i7Z^SYy)c66x_sA^X2Lk~bPM#XS|5hiN^RRK$3QR1u~Q0Dmz@hS zr%Us+nbHJ%`dw)pG}+r!SiQ@qA^$9XiFZZUl+Z`C;{0X}HAWx5ua!qK{vzAYm797D z;H)DjmA+R0#?@XY(v|KZ1l};}eJP=zUedD(cwRtLQF4vFdD_$Y4)#@n4m>>cHS|uZ zS~HpxaDsbwTt-dyM^RsoZCY5?AOhxc54TCC>zd-Gm^zRZf1{2``A6tw9=)uytTm#x zt}q;4LGw7Wh4d;mdb^aR>Sv4@+xrW67O}K*DS}qy2+yLz^+||!=9v}n;i(+Zdmwa2 zE3LXWvg>aS!tUl{R{je-tkv9D0Oz4H8U8D;=kNZyq&zJ=R5H%@84C9p`p{6m16+_- zYyDfaB-KY=SNhPcK9w0fD&Rfwxk{6V!V|7xcyH`2IEk^QT)$8QaU=R6_|rmP(>C~M zsnJjQOsC%hYfXJ^cGI$qwtD=uve;X_AT^CYB9tc`vcr@7guB`Fv?ta3vIe@eGNt`^ z!}q8DV~1C`M#U|Fo$r(+)&KpDCQ)vcDmwHxH+~uk9BshLH)2eBt8S%r<^RW=$Y=Un z`KBn|QNDUkgR@Fc5qg^7W%%-ifuwLZB*}&`2H<}mYMyjOag128GQ@y3(fkJET8!m> z(AKv4h0t;An^wUDo-zEpHXt+G_;q)Rg!iWd*W#4drp1m6Z*H9Xp*LOOj z^a_1*2vgpjvrFHkh}DVECXH!GP(x#N!z|fnRW8k~Jd%GL&`jhA5}HkR_wK6dLL@EA zC(+~bN}VqP{fGS>t5J*Lca<(Mp@&7s!bC>uEP{7I7I~5XX4E|fO-f!@ZWFYdsdS@n z+D7GfgwkrMJx4Hb$3T8TL!Y)~;Ho*)u9dtzqLYTYGae^>6bcW4IaK=FWSW0vC3>@h zS-SZWfy_ZbQ;Wd9s|DsYRbe`?n{f<0xAIGFekW7m5iPw{P1pY{C#6y6wjU}iaxWoE zxgpmT-KX#o^?gb4C2FCjXzO`TgC3*o(%38XgELp@^uO~BWEBKlm&k==*@AmCcq)QM zp$_#L$wMR;*N-5Z+%XE&_LmxYwb`Df(*|3oD*L9))%9XAB8C!b(4(egutko< zoQQ8;A$;{{d2z4dLWb{_s#lBY+4Lf;tNM2{Nn4?Ts~FlORb?cf(~dDbpgwt6aO$ea za{`sJ3ogo*iBqJ~^@{&~-wKmwI?XXrw|aBScF@C>xoklX<;TP#@j%1e zE3IfKPk1y*il4s23I2suE{Y5^kI#GF{-wA?ZfRDI;Qa~ng&eCWE8hy-*Pq`zlULIN z=igy)BgG}m=Jo%&5cIF)tw$xiv2e?G+)(=O;sI;&`|GIBbG0VucY+q9<+Hk>W)_-0 zTRRJ6P<(Oua<=Pu9U0wge%0$cG(*nA8jbCWnDB-wtK&{<+MCD^kco}a z9u6XTy8mtQ@p3GET!{VS$2pRd%?$otbq|Ga4Ps~pGq*zp4#2*y7$1iwZfE2>=&eJ% ze`$4aa3z7^YB6fAZem9P!RS zTf$!`?FgSXiJUTWMc?ifNo7&$b5#!bETJ|pi7lcs2tx15wfpC*oLFgO?C~}ny7HRn z@!H=+c4{h5;oE*+qsNvn7`{YY>0XGNH+?9o1s7%1bY8B2OpsK*)_cYYJh%CVfQ}SK z>QAgkIE_14Sx0uIJ59I7g~XkJPc@PMt(3^&IPr+R4&SY^D}GdCh*sdSWuAwAU2Clz zIq4ujiod2SpMV<6>!YQ&wsFtNixiHkj7PvjDh(G) z 
z*l2pRdNyxZ-%mPvSn4M$HkQltd+@?<^JRC77@A#qKcCrN%3S>REY&H0S)9gsrSnY+ zuvJc+j98rQ%!{nZ8C<2yMRN7|#xVu@u;cXy{H?-NeC|5BV%?C3tv*kd zXB{}|WJTUr@eTbuSc3o7*`$R;x8mVjbHw(5Ci?z*Klw^jCl24VM0$_BFMq}2yk+t; z`MOnSww~fow&U&S*}}qn{`phdl3{TBanrd~`B-xFu1as)ddtVZcS+wBZH$#|X7Z=p z>SotjDU|Kj~y8CiiHam~Q&5}|x1Ng4wOx$0ME25Yq@EEO@M zUFZl-br(-t!ht(ma$j?m$Y0=|(R{7sEc>^lKJU9t9rEdThrh2rD_ieLqQEk{X?y1l zTAbxdI#jL#&xq^H(SB9+URi(f+|1&lOr#&?@T=j_q940>1rp|fFaG!}Py_Vf_A0Hx zmGS&elt?>JI6~h4Fp|D+8fA8ySD9vn?-t%azRC7p7P?Q<6!z{?O7F3EungPhrel0^ zbftXe7P*#}T+N65`=-;dS=R81K{T^iMP}=f{Gv`z`M1whscN#0RS3?_uN1S>MhT2d zVqBWV$trsL-2R+9j76U_XGFa<_WZ9|9o?#(jhN7LH-G-nkT713U8(67$@$DF{etMn ztbBT~Wpn;83i~mQStl=KR?t7(Va!tz4(ij2Upj}$Zu1hQ^O!Ew(|R#sZh7wIAL2Ya z>JO%!7oWy86Sjpj#AS>2qSqF8QfrLbVmqgJQF3lSt;`5ZF7VS^ChjU|T>5>V+f*z- zkH;1x)W2Nax+K3m*q-|T_kq5CNuYuY%JBTN>6CofUJqHei9%!Q$fvQsJkw)@oD%(m z9?fbJfy-5dNR*dcMn)e?%y-`XPF&zw4s%L>p(g4Zp#F5s_9Fv(qbq3 zITtgFlq{&L-`|}5iJF8B?Yt7wB&-dnPu?tG-1Fr;A`Q`ESYiBbWkh@F7tog(%%Wh?TsH1fM z-j)}%^J2eaAMosLOLN)RI0JYfjEtqMf3885HZL@8*Tep(dxmjmn`fwvO#*WxAJy%;n7NO zs&;j(sr1~~nsYX;(hsHFX7Gr@E>Ua8T>7uUJD$17R^M?sTr7R!pm)9yDtdeAzX= zBpqMXzyw#As&2Oj9OKeK8Qj6Ah-vk-8Cwh(s5hwLC_i>}&>h=kD83<|0~=+Syg)bi z9F!aP`;p`Ifi$f05Vqsr^4i)IuKmvic)eB5X<({#SGAyLO*|+)-i6bo;i;T|#wp|0 zfmk+QRpJ9xZ*sLZ#R+5AQ1jCLRaL&RAyNDYt|Di9cB50popoRq-S=4`cm8@Ji=>aB z{G;(4q?|GeL*&;lu5wEL2fTlIS?c3nn4Rx=$%sQv^xofxPFJmLcIg`|v!}*#$8%N; z45xGFlEkp!>Z%__;RrYJ?)*M3x8^vGr7mUkc%;ofbXI%5cL4hx%rxS8CVeR~f`F~` z@y&T|;xdnb0lGg<1IT^pAbRe&g?0?6!8cY}7$-KiU@Jd2r9G%Zxy{sTZLmZ=iqrMZs2VpF zZk(cOdWKWR&f;vB*nP7nKg-nG7AwoZ8#-#`s^s^#mTJpgyOtH*yAD z4jas#k(CsdiuWD2a6%7fcQXxQrZ`2WQ+vcYot*fmU`Cm392Ce-bS`OlGf zUwLU@B0*az{iHRU86p+;k3CUcrcbcqOOHz7zN$^&*~wz^bRSyfeS?MtZzrYKW>>Bw z`j5(C`{*duKU6u>j{}~37q5>rG^d3&qh*Veac|5Y+WrdvE@B86MyLyZH++=*({2RU z9J7H3{t1xi6|Ld!rS$MeQ~mPiYBaXKgZV71Hvg+1N+lmw$hDxt6roTTuJ{%X!i_20;v-g4(d z7jyO@A3A$w3_;)NgY%swaH)*j{aSmyBT@qwp5>SXXZHtF~3KR!d=BEeK+KW0t#ZB)Y)2Q$HgR)hlM})rAMfXKCb({l3dvW2# zHZ*5z6%#+lNv?&|d*}+w6VjhZ;2Z8?_VES>m>jrQZVPV8bWt1V8wgR 
zW3m`J*?aJ$eFC}-ds}^!uW&71&!}=bcnf>!1_*_9SZArU@BErJI@X01e=g6zhm;qI zbMQ1##~OOtxHs+#eVbN-u|^`J$6!&fUOQFq1kaOqEI4k3zg)4_0bmcFG4rDZr|+1> z(P!;UM~{UJJ`p%Z^!hKJcr$@(Z@A66MJ9vyX#AOCJb2g&4z@flZ<%TI=yfdN|3%Km zBJey;Vsxb-I%eAg`vllS*BPSv(AG--$@W)-8S^$uW~5Y;O7DdHw~WeHz0HFjJDd13 z%c)&eEgQL$2JjpAWu)}&)Nx_t)@w6?D}~bA;B#*rmsxw&Q4!s4A-y|a8@2G5N3|Oz zE4td74@Yhn<9wUZx*2$eCiYF4_NO|qKbkF0Zh;5*&T6gtUy7Cay6FV(BcSnde6A+6 z3zc|x3i@`jEb(iENaE8HxF#8UTmi%LxL3{hjC@W6+{C@X3sa#J`C9!cgyUh=6E$sn z@tuCdy#qvP6XQ+=#^~-#A4>GRC?;`_We0cOy(*A_%RFc67BS{v3_r9PV(uEUS^%5i zKPwWBgA0}Oqa@c5S!7#92^>=1N9jm@vTit?*m#0*oYeIDYGIXAo*LgiO{fKPXtC?m zpk=Qnk7kFEOdY`zX`c&aQ<$HKtn6EbT3y=m&s5oc9gotd3)&?!asm^2&{*AiEqiGjx!R*H zBKh!2fjK~bM5ZXusr)MNRe$bSg%)L%kkLaj7?}dJ(prXpCx!d%bMLEOqNDd8ic<$J z3+Ob4&k}nhyOZU$ddlB(&xEElq3tpno7JAJ`#7=cYjCenno#v<4eK}Dr)RF{vih&E z{ac05gVcUd4~93@pa;adyEfP_ehu$W9K>JdUt;J6Z^e)B#EJ{0i`NSc+6uaRn?&Bh zN=HDyfyc@#4`;$Nv)ku>ylI<@T2JVOpYRyP%_Est6<0GfnkX}3Vd~YH1x#o+4V)}b zCDx)p3AT#Awcj2X*qNlo<9dSLw{Tqo4jZLSTjubxre-*!QlV?j6Zb{&- ziTs5vCye7dMfXzcwi9Si(f-_gtX*XvSjKdeXT9ngwg1J-U3Qa|_E+`Juhv>?K^JP8hkZT^e5VP6FNv31wv+OSC(X&S za{6J;%r1mFnnxp6L>h7NZS(XX@*x5LqAwcVO<@wlx6!Ok-YR>6CXW;Fw#s)id?(Jq z4xm}9OQ_E@KmPaDZ0>wU0*|=kur$R-gsh0B&o9r7n-tPNe>jqd3#z7-#+p3+tkPl< z-jB;z9plVdBc;mHfTaRjk_L`zDJs`3YvR4Un=Cvb{v<5pt-ZRl;`cA}JJYx$iv_Yi zH3yV6bd?<2><}e&=r4A@YmPi+f1ansoMK+pJp}d`TN`2ZX$UJXw0?OOtNZ}GBEdJ( ze}}XFY~cznHDoPU?)#pRuTXp~?60&0puv-B(|+6ssdNMMi9lbe@#30PH*$J5P8Mr_ zn1RW1+1K(ip~_Bh=nJvjDT}^_-xU)V{8Ji?RsP}e>#&%!F36ldKaP-vP^nLKgg!Ka z;Y$^6kn*y1iY}PEjYP1M#_?pn;z_$Fr)-~S9bn^Vg#V*e9yvw-9LToJe zYO}!nH-6J-H);1h89Au5ud)Z)>JjSAN>t>wgnWrAHEAN?u`s8Tm1g4x+sm^5j%abM z{ujD8$X9&-v4WKsK<^UPUS^IxBW~oq8B-wuj*ZDPV>noiH& z5&Sdbz4_hEY%X7_5%045EE=6_q~2SeUK2uo$0jgpQ(nb0T5)Vz`$5<6Zy!_+qcR1F zypV>MNGD_-rgr}TbsW}IaREJZ@nPkMu7x<8D#N6Cy{JVcXUQ+KEqE3WZ@Rf5IBbkShI1j6EmOQw(09E!XcUFReo}V9wVmjN=W3PxFSq z6?Mfstv5PCkDjO4a#wi)MRD|}fjY)0;Ga36N)1zG7VpFwDthV)|EN5W;hP0~3!$fx zoh(!sNi!?pdZh|D-*z(_8G+8o9%=W}<#|2~+#zz$EuhV(x|r&I2=!9;JE(gd)ZdHY 
z%NbuK@b&-WIS_c8|D#v^mRbDD>mr>S_&^M9?#pXm!e(!qCa$bGB0o&($nJ;yxk}R~ zd}ii#^6YzxUl-fNvnJ1_%RNHmNdIag=65jlI@eW1WWN9|PU7dg9`f2&{iVa;z5Jq7 zN1ifmDPMEAA$m*+kdNCZK2|G<0wv@ zOG`bx9ed7YM{BN~66tdJ|3%z&xb^%+{}w7FX{nT*v?!xK_q>Zp_Lh(xGO~%R5K<(g z5>b>WEvxaq=iM-hkP*ICDmyDXBmBRUS`_q&Cv1=oyXMomV8)!FXWmHhR}gcAacHeV&@#e`y71;#|92Uom>z8 zq@5Z_!)C$C!6jI_EEQ^IgtPD|%h2$=57HRqyAzC5{%<*-`7#TptY3mlx6Q#L!@oj( zaXViBCmLyd@(cfLc>l5*2M;h)o^3n{yLKt;&iyLHqyO*l61i)Y&);dq35bG?wh(2hFslN&bygZv%>4CU|^;$R2I* zTi|&4_10e4KQ5EqecM&Kp?eiq=61*26>oU=MG<`HcSp&o*+d?F?+dTi4u>sh0liP^ zv11$ZMT~Lm$&py_JB9XTw}Orjnlnv3AMC4tDKFD}9*%bErtEKf1_nlJDcdauV&aVx zAjT!;dT;G()i3%!!8xcu|JHIN%dQ^;Yi+GTGvPgN*X}u6HWRQct_p_Pcfw}z*OA^^ z>egul#5cRb8y`3ihkTtI<}bL-YnY+y%iJ5a;6I|qgKNi380GzlA86u+%yAaKIaU{q z4yR-I>?YEif8O|f+ybbvHC8%rPvm=R{o(ZDN?4F#&P09`weE@!^ZFp=m6R8HmU3bn ze>cb&XJj(Y9AEG~<|iO_#uRKjtOL+k*aC~j;F#lw9gdn7oX`Ho)5=0&;;WI0_oSEX zTc4+l-RaL8e|rjD``2Q7p9dm`SYBukiDJe?oF8ij<07Ze*u=d(RL!)-6pzD@5OhXm zNqg$Qe>olc)s0XMc=8YnoZ@MHsy&+Z91gG6sNv3#ZM=(X9;oF#6*1s${D$l&FG2tI z-KA5rQurvVmw5YnD%+tNh;V;V1kEnT-klUu!c^O|O`a7uRxvSOKmK|hst`VOdUy+D4`Ybg90 zwh^y-(Z1T1(eU+QxJu-a>-#)8X3ZFkZcck>dQ5}U8Y`fQb~-=%X)m^-dy}@(x>fQ1 zQ`Udthdx{u*8?YcKBE}%?&?W6aQqr>--P%ua6EgmHUW#456=6k%7L^pZFoLejm|&V z%sZ`oAp32u=e}i+AVdEwP#xg(q(<_x1E=_eruE`E_~c9!e?Rsb&SuFh^1v5Z7rPsN zo@l33eJiBez5*>~*eg>f7IXh~x6s67I4-zubllcTz^AQ> zSnb{5JipE4#fBwv?dKc8=U6BF@be_>AAdro+Qg!h?yNRC5VcpvpumrVX@|I&>*2RU zz*F}CoH25Tk{$8TF2q#ny*!rtEbPVyMl8npeNN$=szy>+k+mY?SNEeSIG@TBwZlY= zC^vA>yi2^9|9x1WRKgxjY0EE6x{IUd9#d(zno0Qap6@C?4aO-mVVRwmYSz?S5I3j> z53cBX~_NY8U`<*1IJ~p25PrWIO7lIJa{s2> zJMkCOI@6oaUTO{^zX?NGn<5vfXn85j4@`!nhf}c3VI|%@dk(*kv4iay75rgdA-H|M zsB%i)3{(${<`pM(kK`|Y4S{^`QdmR#icPLQ2LA0&!Pf=t@Z9)$@NA&^o_$bOuAgd|vgfkJRqXLQaptiwpOl|16nm-->y4F;kwr z(N&sWZ6*zOd4!45UWC;hm5?egbPJg%Szh~)=NL?TR9PeX$)DI`O*IyA#$Rv*PoA_F zeptU}>GQ|pz^S_!epC$wI-O&SXB&LzO+Q*G+gY7Nbf1; zQTc9>lN)F7Ro-;BrQBhDA_nej3{<0h&*(!i{I-^~!N7^)dY<(GEu8m#32cp=jO!l( z5JvG~2V3A$pT^3c`RaL;_cSNpn3z|Z$A)?&ei0nce;GGp7B6Ok)0|&yU*{*BcuTeD 
zcfF`-y!kSoI4w2T`E>@foV}mv*3$mPQ_iahr{Qft-v*q*;s#jni9pDyV=MMH85MNJMov1YJHLy>4Ixa^DmZ}bjEpNnrUSF=yU z+exY`5qTk7tOdScX|DG>pvK1)njT@zP3FlT3OuUmjcS17TZgH8$*$ioZ`H7|}OlZHEFS~)Y zgC@+n&e-JCEbMZ1I8?i4hKIwf`a(d9(?g;j|d8lnn;0GdX}2DQI-}DEyf66tBeG zg)QxuVc+$RaIHfhS>*H26Gu>BMys(4DJN)tzNkUkt_}F@b|7(}{Md z7q!-UWcD&2iC@bs8Br|XnRvx8*37zhvG^wobz`k(6g1~zM+3qq3{Fa3Z*mn_!v zLW;9;+Vu^qb5w_*>Q|)EoKbqE2V35*gQGYFy6tr(O&o!>hr=6qDC(+$SYd-lw#DA?h2SiF7u*ztCf&TW0zXd}kG5?uLP%8! z#2UX8d)$YM)G$$8!XqLm=g8BxI zsEJjhx-UkGrR(xr$sz}V@LlpdzeUtDq#wP)1{jUR==k-Z-=w)hnn`YQD~D06xmLTQ z*nU?TSKIU+?*|$IX<}Mm@kDqR#upVpn4^KfETqp+2Gu`g0<$RAoOCggr^BelZ4|0; z=wzM;#JRAu(wY(Pt0-o9D>ouf$>RI}zTEaO4GOqzuB^bjN;f!|+Hucl)ruDUQsAuBvse}BD;D)_=OU)Mid@<-h|Vmcxh5@xEGdkO@9*h;xb=Xe=rvAz#R z$fc?Aobrmt4Vnuz69eHulYCS*Z3p^H!dwSVG2{Zb@-Oxfm`=Lr8I~{ARUFbBxWJJWW{LKMwsqy;PBgV{LxWelN{)adkZ4=-;xaaekrWG&IOLO1gG3()F>?PFX@j zGl4PRGlp{EWxA_(MAC)I{!0dU*hL%vYF}qTYg&rAN4t7crKoi(BhO5_)0b{OkLC23 zLV8E;ad9(m9l4Mh9kzGfu|(oS3sm`pu??{!O{@?u0eKHj?;~=Pkx!7x8zNyP6P{@0 zMxZ<$fkJatc%FnWe|y2MKgW5Gi<%N?T`^A*`Fr{-4=8!%j`}xaWU38z>DWFb-yl3V z5;u`H*GCZ(q2nnQ>|;t1&22aeo*rU0NqA54Nb1tsuib@?2lr{2jQT!&idQ`h4fT{d zZ|{KQ<4Hd|q0k1xE6mV+%nASC_S8b0Ft`z0eXl#c_iPj%)$r$a82j`RFN>Q7qBlYP zpaw6q#gL*7(MRW+N@d1b5CYiA2(+ zaAxRPfoasYn$E2!KNmiYc~6=RRAaL1q$HuWU}02eAWp~ZBPon@9Z=pv3tJc5oN^ne z|0Sozw36=YoFq>%gf*|(!HFM`uvGNAxP`?8$?*Jr^q3fe0{5Ke1i`$6qZHCQN>dFD z9Ft(iNLPTxm@>kd$1o$4b8ovw62%`yFT%QZHBQ`-g_j=d@a5VTxT^bTOz$yO_#OFu zcwe9%7AL#}iUX(qgF>1?9yz2FJR97LdJ&db&Z6*G%RA8N`2`idcSDbgcwbK3e}=fp zi^J+93~gz^vbNvBmPtNPHtQ{v!Z}PztdXhy_~3Odq~21J(EpM_a5_K!V+xYLkPbfE zgEq$#QSBq0hgcL%x~?gvAEot$pY{rGNgSyTj-i`D)RcX!pWr)o{c=1fABBA&gpoF4 zndkPBF31%AnMKF{RH8;j-Se&cWx?rZJcnJmoDoHhT$`j^bwLrmuD|M2qD9rP~?X2ta}Ta(EVgb zVd}&IRM!WDpA$VK8- z)VEPlJQWcip)H+*E{a|wJ8>vq^mU-iVh2wB0fl;!*m}Q)s_Cx)g>))?3!MonwGd9) z2hR-cA(^*K1mQgnmmXq^=FWk-up^9eN2wfH&C{k?O4O5uZHd+>d_MUd;oBPW?ec|v zNIITX9cT-r6vlG2~SP^4bck`{z+x;)?fUeyh^!>dRR<& zSF^-iX13r0I^Fd{@=rix;I?gPAK(C5NAt)QY5u66i0EPdo6Y$FRq|3y7ZoY1es*xDP`7Xwt9v_y2weA^!G%{ZrC3gKb^+ 
zlIMB1!l!GG;!mBU@~7?!J9hg9v*;HL-;AgSI;pL)(JvDo`Xlbx9}Y`Hr?T5?nsWZ% zD3<5F7mi%ni+@iUL1&xKe6_6x?k$qQz4#qYnKK#>E`5(u>n=ECQ?YAPtzVcnB^MIb znb10yZ}893{{q^;Y{cDv{wr4llgZXdOaKs;F-a5%Z%hrT^uBf4IBA_lQwwV zb0a?axdJ}irM;wocE=Zn2Wd}M1$!S-LEVxRi0^(-6)pe88>HO*vw|S+i9TGdZI4Sc z_rXo=iTqdgY*qZXSeRUShxcrBo*zzW!-nnY3uY&`@nydwaiM<=(DkdD^mfPoQBi#M z8W&vN_CBQ1USyp|T1(!a6VWgy2AeD^#Fs@=;bq)@e2o#XtD6?sytZPR$y;FPfJ=PG z5^oVJ81W$&vIwuX+-#>fUb+h!8*Z}PvM45ES^WHmye6#=cD`(bZf><~|1l?4S920s z@=BS12#3$>T$SYoEzoF#>m)b_Tmdn@x*A(JMf(C&?_S6?qIDr5&y*!j zZzfw^%!iubo!l?c@u5QVl{ zd@NtaUp4o|(I;0yP5)*P9vaD4G~FS0IB^E1+Kz(7YBS{QU*^2+ojho6o5_zw59jom z62*~T^8>EM&B3R+iIB6YHS^Jk$Mp8SF>Ad8UNN@;{T4~E^6Mx`FVI$^IYHOxH4yUR zJp^7J0+f^RK4CMP)t22TH&L8IG!>D1 zx5nP1y&mGBO6L=A^r(a%9M%~rPxHF=hy$uKX8XeoxB0{a)icj?pCD0O(5<2~@4x;L z(`Zr#UuOT~$v?G}m^M9Czmm157? zD8B4Kat^+WgVM8H61(kN#j(9GedOAI{UqtH!4uwgDA0Wrr93iM zUDPWr-W-9YTD7VI=PQh!$2Pr7Lru$2W;b_=$Q6F;+I>!V$Qoq_^9_+bDW~p28 zKk}**C(zi>7fZY{d0C5C-nad8_IhbRiL)htglTR~lB)yzwqWau7m+k$N&kVF@;k<(xf-7XYem@w(O)x<5Dp0ZU-E>lJ27? zT&5lO@Zm>SEdK2zZJxXs)o+JG2K6SN?`VS-RfFK&;}H5>2c=8VJb3eIBs8DXjm|2d zeLlAhWCPAMg~ekILHN)X(yePggv}qI(~;$P`|%hsv$%?9{w>IRU{MH`tIjil12jKM z_EJmfN~JYWZDLII8AhK8nU&vNXU$hQ;U!LsXab|ppGJB=-s#9!6tNWZz1Ou6Cf?hR zFX?RMe?|I?pVpO#V?;gVUHKM>AU`MDPRdjK(QvTmB9|Er=tC}eQ>HY6(ZVh7xf1Yk9Wb- zBQ5o`=jZZ_ZY5$ghIDip1;>(W4(6QSo(3%$lZf=X<)_i>s*Y9B3 zQXx!4;wk3%PM4Kz&LU2V<(J2&QvNnpUiWCJC?~ICe1HYMxZfI2HBMvd&SUYyOqq9| zTt@5dy28xzi)_MhbISWknn&}x&7>>xke?32#@7AmzHt?vZQ)P6-yA{;J1cV%Hh{Hj01~Gv4yzx? 
zrvn$Euamx%?)6uu+`+SN+A5RH@A2cwgJk8^HQcuaVC2edmbd1itAEK^ethF5Xg%;2 z_I|*qx8V*0JKw-~vufs@P5WCnwrjwUum=^~xWoWTwH9Hw*c(V3A?gO;`bi+}R(x+v z5V}p$JCjCn48yVf3tYUj4u}WjpPq@#{e3%pXSx;sy6)x5-^P?{4iqbM=~%k9bp78U z)r@VIar?&@PJE8GKMvrwsyOsbHBjD4!-TF8`G9cU7XQqOMaS)TR6^qtKSI9VCNOf3 zBJ7zDFaAse-5b3n;vAXgSu$BZUg%5tmqQI!J-6YM_n^`52B#Q-tKMG-O)@9F`HK^0 zAZb2^}>J?IT<9%Khn_ zatI4ITp|q4gQ~VCi6@%j>A+g5jSrkS8Jq2>;}jFxhr5yVZ7cODNyoYuFA!Xe>-xJA zcYfi-`2z1r*U>qBBblhcfz+qQIiW0|E)E@lQj&3ruF9{hE5@y zk>*FCx?(~vZ$E2{$xjC(+nC9>1k6Y0lu{=A2aTQ6ynveCPN6%|G~_n?%6^FzHNSv# zx+2C-*GB8y^Re;U^NjLIA#Fq2$X(LNLg(cV)w=%(1bjtAw{P{`Gd;gi2duoi~5r7uav#4N31c;HaMAHzXg!4@f7m zpVL(0wqv+4Aq&g9jO0|qxLEBe6EPU2>M3#!+nl{XXTtoaa&D%s{C4w0F-O%4=o|o- zAxPRv*){GekT+q(Mf~qQUnHId(qziL!ht|u4s!C%Xg}U~MtDbF$&)SmIGbItb|sv8 z287?VZgf>cK0m*F7&WgBRhB$5g8-j}Ky`yPwBG;4$T%P$g0T<3vY`9L!lS@Mg9AAG ziwf2u1FAh$M(WuHjYK*h`rV2q9i0Ftbb{T8hq$5XG`v@M83@;r^r1xWhl(nc_=EQI zIad$6XBCSYf)1?%A?(m&74bMDor!4&T$S+j|4`M_S;PnCwmJnn?(7iw&bL_{W?R0! zhJAxLJJKkMSsNzf;)_+}B{u=pG$%}>xp#*N%brqyA`i(!2wx1OHxWCOvCZ|?vhYEh zT~at*kBYET_)5CRBO9V_xj?F$hf<}p1m-0C6Z%a#aOni6+yvrJ;@7S`_EQGlc~=JH zmn8DtFlloI`R@gIEV@kolqciN7R9iv_Z~WTAQ*mKPvA6{D7dV+;1t~K{|HH^pmX=j z_~^j~!ClaG!c3Vom`d;h=@Za1TmjJ&vVlA^BwG&U#AQhN!bH7HmYYk0(|?=N$UqmQ-}JSDdgF4VZWnT z-K{e;OFTvMqOExRiNhH~N@UOK-GtNSJa)Amj5wXc2>X~%<}=Q20&q&RDwR z9g@!nbLU1_w(THka&5)HDqi+Y-NWL$o1?WMot?pk0{ISpF6IT_a`OnkzPK7mx#&?pe_n=TEh7=&7_CTg}-<$9y|&I_KRe5`>@8r#|w_ zj?o}Id)qs%{AufVNc`7Oca#U*HN9sZ)eQ#lj;xl}Sy2B3!Y=N{<(nfQB;*)$&)P1L z|0SI;5(XIGVHx9B!^yCGI`czKAv9Tb{J@>jzHy1Z(RpNENut_`gJQgYD2gzrEN0B*wzt;mO zrfg{0c^G-{4}ZG0J-&{jz3x_Kt5z%+%fIgIfuyUMYx+5ss?itTUejUyoGRJTQM)l` zzNh$$0nNQavEx)zFroMmMm*of zj-Gzd;0*}J72*^a*Y6V(y^vP(&4Kg~k}hZDRbc<bJo0jienm+(hy-3gwf~>L7f5MNJSRZk57o?qhqMdM5fpyB0K(YzFzG zh;zumBrKhCjA~~o`}Q{w2Dj8yEpWDm7x!aP;2U|A1~2z9zzxWMBU8GG`6aEVtB{va zZ8*19)S~FUu?fp&fN9|vpj)YU*R;b>JGmbUt@`@MF4g-4O=-o6yExu00jYN&coj%D zqI`6hz*LcEUcLUBN+pyh41-2ge30ncurfpQGp%$yBGf 
zRKEhquXF7JRLQd=DBmw*!jnoD^6%XK+(R6w9*UQ5*s;p}_CR`oG?kG=n1jKW0@75zeS0e8WWl4eX9pvY=@fnM+MCPWm3GmgG0~w|h~qOnPSX%2nj;Po4IX z=wCiaQed}@Cz7wBzNb0$6UPDR34FD94<4)0QLJ6x3#~%e2Y4gK7%s#!ARgkEe1FrN z=_wNr#Gsh7yREb&$|adRBNA>=5BC;O?SmXX4!%9eLh1>!lC~o*=t+v6FVz_%>|g?a zsi!J>UwnMf5mwmmB5tlf&iYR<#88Kc ziv)IwIe=rgN3g1m%WzC|8Wa71@6$85=zRzeOS+B;3?R;gMXUUjIqtm$*2}_6DUej4bu{NhE!h|as4`Z zvvuIHa2orebP^s}7PuCuMfbpyH>Q5HEb5*5RibA={UHaSyyL5P3wOLOuW; zR!>mGJrLqY+!GAkHz(ozv{<#}lBtOQ+GrLGjA3ftcybVMhE?+mQ1MW)*cHMlsvv{0&O=zR^kJ!xodw<8eO`*`?U@lY+_E5I&`%jKd z7z{o)lKHtxI`i-M0bB~y|JivKX*}Sno-VE#rWd!u0cmkCyJ9&z{zGQIr~bns9dp>* zkk)u%%1^#%9<9jho1#R^`n1`R&Km>HlE0ULKZTOhhrp2Q~TZ z^4l$VKs`jRTC^9H12IrOv>bfzoyaNbQ-wj!VK9D*o-`z`iSl#JO)SXm1{2H|Lc!Pl zpzXUD2K2b5GOP>66#M!7!hveq3xj-BtTjHnvX7pd1v8)9Dbc?+vX?ibDc&d8?vx4$ zQQgC=(tE5rsUKe29fHSZ+){0L)eQZ*edYI>MBwYbnRMUSJeb>dDvqcfiDTq9Tuvm- zUe`{#9e*26zpmva8i!!q4#0fccRDxK4gzRzZ=V~Tm4*9POOxKaW3Yt;0oK)Edj0?# zKP3Xf>i1w{xACxh&j6)w4-L5T{5sGzfUUO#_U@fwP{ef+Z&+ZmiACMbVm-DSNTz=` zvGR)@aPtQ}T=gK7WtDnE#`+d;=GJ{xe`9AUJu2C??~m4CyXP2>d)9+%@4pYckFhGV zu{*vkRF^0=;9^Sm;Ly6rcLAF)UdqPPgKU7}&OU`aLwomRh|7P*XFgBMqsREUIUaD^ zq=Qnhw~_@6b;7i53ovli2u6RCF74J*>HU0!wq?y>{^xEif3q6&{SnJ4KcG%G*X67G zVK)Ayf~W557gT)g1L?Q)B+5Nye9=3;jLyZYkF8-Hrrv~;tdnwV^=laQCrtIO`vKPf z({vco__b_XwiEl0Hpjz}=lHUrYvqjU_n=k`Xq>bWZCpA^nmX1v>W{9{H+T?pvkzu_ zb4;Y_s*^muM*w^A%3iwkb%v4}ktDb4n}driLzr9S6sc~LwnB4-chZ}I`$IMG+%`m7 z;I@kS>#M<*fstTSPwN|8;=p3mHB7%1A>B~g;Zg0IJa=J#mF9Ifn7R22mhHa@FV6eQ zd3&Sy?_o#fUw*EVmGe5J9OeG|9hDv}&m+wx)q5s(e6<0se1`MGkv*jkPY&?OUVRl^ z2M-kaz0)iWvTHv<)G&2^v1w349(x-uko@nMNR-FeO}CI$p5McM4{I;owLHedQ}$xF z0bS6lSrdt3&FXtK6}8QKJ5ECFjeV7(BeC%J%RJ2RILH#O=kP3e}3UTu1^`Pl=pB@A#~V4}g==53=|H9?AvL@{I+y`GA( zDTEK;P%;iQelL)zb{W-(q_p?KYi_iLk?=;-vjw=F%LK!Im-&$v+RBeTukcgAHDp%B zu>5voPPK&H9b3{K9rGv;>UquVV5Z+B8k_g*j~@$%u+y(;Uw}QY*u33!c%(%rnygv{ zlM7lZbggucU|Utql_~P|;=k~AV0U=A&w=i#U^w3)mVecx{YoCJ;U!0A0@aza?_CL< zCo>kB4Bd@US(Wmgo(SRHFTmQQqe$}rOII~wgu_@HZVrt;?-H-)S+&P`8&yx$q{|-! 
zW-@SL*icxe=)SqXxwt(w#zh&$%0qa!tUVSoEVn&x)b{MUWo}A!{Nsl z+VjS1Ka84I17FuP#w^=N2pD>rS#Dj+ViQUmu7hwyCVYmZxjIU)PbWTiqbU@RvjEGf zjlpWsED#vla#aRX&+H`ThOIx(r1gIj(Q$GC5FSW*-@oS(yb}mdi;=zsvw`I?b z9wT8J+y^&>^pA@07KW|Qh23Qt_-%QaEcAfmWOo!f;S#xz&HtRmDd!o@9j#G&&oq9& z6Pkw8d=qAk1+L`)bUoM?Pa$C-4r+WDOLgLb;=rhG;Dec_LcD`Jy!R1rY9R3ublarQ zwvRBtEwApeA79$SiRup6G2{rutCy)dsQxi$zrG@$IdK~dO?%0S>!3W8$y2@-u`}Oo zRHeJiT#0Lx)wf5&`*qb&FuslroA8SF?6nu~pVgE6ed+r?%i+-U^84}k+p2Sk zUGa>Q0YsG8p~$8GT-9P$&9;f_hgef{)#|O=0s7M(XGdBKERwYq8^W+g z5PxnfCU>rZp|^5q4|7u~*7P9WS<+GHE{e646j74Kh7JnFfQy^J#OQ!xd2$$zXzWiu z$6D!UKNvryoJCO|52Fj9OG!BG52PmP%bKJK)xD`l#CO%LQ+VTz%6@odkpf9BlavNjqWPfVL2w9}6QAw>RL>jNWPJ(SJI> z+X~+R}8J&(K6ah4{vi@a!8q zq}^Ca9-_hzPt0)K$xh!o>Ok}3%|{9nA{e#4KIM^i2ydOqN~*A!Eg4tYzq+X z;e`Sxs<&`_GxahMcf6~Jvx?a3qdKaL9dY1ML{L!V=7}a&_ zT#UNgjZvNR*+IsN<*RdYZP%yx?VY6}JPFkciuwIm?~DcAHZq|RvJLxT^0xLuQ!&za z=qg3Qg(<6XX!8UleGT-!u*=00P6jroIn@z0k2GF|bQNCt?Z@)=8Yuz?Ru66mr6*os z*B2cX;vFPSjD5;4b8lBk<>uS$a>lO6?zZ+Z2hyqx{~ki+SVI`5=$R_{XsvPCk6gNJB8|Cr2AJzcM8g zeA*a5$%^j=+{Nb3h5Tgh3I%tdHZSkB0!!4Jpn zd{OclC@DUREG8NPSKYuMo`<`#jNttHX)rdioaNkq$jjAsz``Y(N~4&)A6hNS7` z+LFz9{@xWvIm9oXILKljJ2BJg)A`ig2PiO{{14>6oPwnb)?r=Q2)EmhWIpfuR4`C` z0^{bqZSW0xv|j{agsNbCAii2>0vXi-^gV$EkOqOGKGRrqerwYFx(L^bQDc7uT5he! 
zubRDJ@aPv%NqgrR`?ppC?X>x~<-;X07uP;FQ7Gq`>0jF4rt@V+K1Kf2ZVQ&szQGf7 zTS!Uv?eO!SL(DvB2u#e~g{_^PiGL4KoT^~d78yx{;GE22boz1vMZ8ALp+4ln7Rvhb z99y5%lzI$H#V+BApno-;h4%H~172$>ReBR~;hpI~oB|gYoB<2FHIQ^}BzNf2Lb5h% zip!%9gC`4Vzz_04l%GlbZHFo_YEnge`&4nEZ%C7nuBrjE4Zkq-=x)50Z3WpQPtsb5 zIJO|ZyY#!IOf}mTCe6}U0zX;_426yZU6rtd6NGn_O$QIAv!j?oSglm}??#VmjRt-j z_FbEOHX9~=_C5z2P9`y~l>s3g3Xt?93hfqFm`d^2pnkzC^gU3BLIZ7H=|(V0V|%)!m^>;*=#ol?EkP7?mk5>@R~fMGBj;Aes#A5%6(4PBNOKHK0{hd6&~)t=XOodM(vB*GD->*Dplj^+`!xZZS-N!OSt#4F_Idcv;d z=}106cvIqDXJ+O6haVn0jFZ1(UQt`9H)4xtq81D9kH>o?LO=UbB(6{iJ=~{7cSz`L zrR-gk$;0Xm8gy$^>}70SSIV2kKV-tU2tGCFS4;JKyLQ~x)%BX4y(6K;6kA`CNPz6biK zKGvUMqNi}{^DX+O<%`hRSodN!rghX4ezbwRYUuoF$}iGt^?AfkvbW(oT#>vEX{_=` z7YkV4^Mz`p;ZaV$4ulV?)0S#Zt5EW>5IqV!X=x*knd*?rnhLwC#8J}n2gdu z(om|wfxV=T1M^fuix7`8;!x^0{N$vuRHSK0bN2;Zhx2%~^lG8bisfHKY?p zgWk_cK={f9FVZyt`A&uWE$?r92BQZI!v3GCNu$3Ky%oHa8`O}C^j<*y8nk}WUU*u< zB;y9%*0yRLoS=0FmopFH-!E@qwni}o@7oCiAIY!eKHHhb^(GpzH~*O`e%FnJ_UEK` zByA;y^bxpxFx$+?r$M!aq0pz)Us97SU-iUi{*Q!);Q5<_nfr`xge~(?eYntRvr%ie zw?sV^q*!z634l*3?XBFjo5*8@^e&M9ZMc4iEH{j-f6S;hvGjT`NFE{!od<$1opK7H zL#yp7^39YRvsF4epX4LubU)RvqYb$9?24f@sIes&Zs?}W$sB;BOQ_HGk1uwZ2||Mj zZ;`w3BEQ;tJyP$2w0c{YquL{(*~OEzhjAFi`nPMB@D4!zG4#CPEE6XPKEtet4n$7&oNt=SP-6=+SpI7m*5PetFg5U0R{_pV^{IO?_LOP3Z&jR{lcTO4v zM~)1G7{}Ap>uHXAyr+P8KJ_y>&7~{hFcwv8q`u`2?AmT8o?^93^sGL2_hSY%1LShG zW{k!m5th3O-ljeYlBSU+u07rmd(h5gq?42l&UfJB@(|uc-$o%X#Yk%bJ)cv(NpDtG z@*)3T$dk1G1Hx6-;h+og;Uk%Nm-NST;a^C9gXo`7Ovg8HpN0Q3F>joDa0>O{8nh+# z^*s!Zt>J)Dvircq#)!rdq4+T6U5KpRt6d zV_HenACU;(sMi$2MgLp$I4v4AqIw!23EZN7G7_&tZTLZf1L$`>3P;SL{UGTcYO8(O z4gI$Nt{#&nn12u+7LQuJg6tiwL~j9{a{7oq5Y8HRiN;+kFcPU3Og)n|NIgeRx(q4a z09{e#AM{VsEB4;y?e#Kl%o-FtxA80<^%IWNbu+799G@f(RPj&<2CWZXv&{#J1p%KL8-QvC0mV(26NA{@a8eDV14Xtyt zG0Eb&>PXrv)iE6}HoHd^JZk?02Zgr4va$hebIU|_^m7xa&9qe}4DEnN?H6;Ot7hCb zg~ODWYnXfGW2n720hfNg#cZW&=>AQQH!qz7Z=}Pjo_duKvgai5))|;pyOe7NeU|C( z+}zJf8d(#G@6*k|=js8hx#5IUUj9~L?oQ;ee1AI#;M!Ks>J!pT+opr;R?+rShXpVvOps5vpclp6zP4_~bVo&VZu3nWh 
z@++e;!m`jn_P%c+&ayfUOnn88nitCxGk$QZb&FtipH52WCyVhR-8Z7;9){08mBPxK zT`0zxDl^M}{Cm=9)YA`zcXkG#Ugv{j?uTHTF?*Q-d}AKgdRVu+Cr@qhOAhkrr3$?5 zDy_{r0=7oRxVumr++u3z-}J=%Z(2xmzz18LgN2&Il%NcKX=BGiy!^ll9!KOtw39KOTKpS3 z4wUIL&#=nBXIatD&Z>BuGMF~@72nvgkNjj^8s-``WBSSYaEaC#CZyhjh*kfA$}*A#c{d!>m;aJkbpLH zpGn9b+q^fkYS^j*3pgHl+I8>UUee@S!`R!=*O>pV40JRf#QG*TqP3!-@J!2}e{2&2 zMG12;{q0TI&~qr-<<~GVmxCN{v)61V%irEr$?q0`MYA>{%?ZmHxf~Wg+QKqd??-K!7pxnc4?$(OOT6 zF$CS4#Hr?_9|rk2)!h~L%=LxVVG~?>BrJoMQ?t48*4`L5ED&<`X-cLRAJAmk0a-I> zDaI6Y{B#P5&$jZA4SIC{Lnh4IT>vF}{{ekIH~bh(Yt?8!vWx;;dGiDoIQ?a>XWnOq zA<0M>!)ApyQ#R$k0IFFG-BJYoP8h?LN8Mna!9WmkTiT?S_Z>D6)}N;P{z3x<{&948 zEL-RHlBCOrWHD#|zLvoJcxpURYX0|`{KhL)~v+G1lwOq z0E!!onXr>;iq2B>-ij}Wwg##-`kpcb&Ru^9*RP%9t6t5)s*PpvEvJ@p#z`uzC}Po0 z(QLJSC?r4IA>znPs~d5P+hOo*v4_fI#&;+;-414!8!$oB5W}W60m319{FYx3dvp&_ zj0BePFXx`%!?R1F^VKm8`IflVp6~poKy92Ji_05@Q}VOq9y{-`Umx}n=7tMg=ha5b zF@3x(WI1ngxjth^B0SKNq>uh^;)jeYM|FqO z+2)dn5k1D#7jD8<#tiN4#^DHmZQkaU3$x3gD_eKmjg8NDkorCQ3(n2x zyuW^SB2Q?n8q%}JarphuF1)Be8;T7AbE4j@K*hWOQ^x3^>%W=2)8yAG_ex(z+>0N) z7lPZM&NBVF&b;`HZ|k~D?l2%lp2SRW_^&t&j?m#>cDBmPTsV()>UjvM_62rG@9)0g z>jwB>KtMF=xsJr7pZ%1NuDSCtkE+zl^Kcf)neJxCi|_-!Z}hT(WgGOsKjH1~&J(sm>^%ng*CYy*9~Qb2p=RfE7^OGrpicCC<&33WHh|*dEKH= zhz3a{nI$uO`@K%T@89tKdOSQlP~CgZdA*+NeeXGC=9gk)fkFJV9$M_N32E9eiBG5c3vq?!bJWgSo5-34)V0&w=W?GjxV zs#bkr;+m=L_rYy?_Dihq(y99$4Ls*#vGZ&cSo~OPF!Sp>g)sr3J}x23j9T0tgFAdhZb#(zeY zK-I`oVtmk0GYP-!w1Lc?o7l3v`cP^=4+T!W>6Z+iUWIJxn0r)brcmjXgX#J6AtA&G zs%Cnykw06i;#$-v&|%sSq&{Mi^WQ>1vTVR*^0>gAoTuBCQX5QJzHO`G-}jR^t2QL`HlnFr8-^k5m7B9)d4EO%nAg z(HfCHohs@Eg0!0Bf!qI}<X=Fx-jXo$Dl2X62FY)|Kxcr3Ok=A3xgBpk z$DwDggQmG9CoL*R+Udwz`8x!^bGO6Ol$vdI)!b|T%7yZmQjYHo*y48(1kMt^B5|v9 zWO+EH*+D#@{b+7>U=^}!+oA64D+1HG zQG82C%GQxh%^yLV1v8;U-WY$F?}gl8bGQ9iFB zgx1rrvM>k!N9Qgy+(o*q?<1l6cvDl(2tSEq=v_Cj7MSfFA>H3Bkw(GFTZdWZxs~W=-5p6YqHFdyCOm@o zI^SCSuH$aX+DsgbJD>LuS{cIJx6?WtqjRQ7Xdu!)iPyyo9dJ zj|)wrF06y-@@y0D+V7(jT(_f2_`wOEaKMv=vf!S4=`dg4JsLd~W5S5Ws^I?9ujm1} 
zq&tji=RB>AT)Q2AVT2a7B~xmXSu!BN8{IxRCd{!QRJ)sl-)m!`U{NVfPl>^|pl98SSyV znFDmNX-xQZ6iNH? zlZhkv$s)=J&8Gl)S0*$t^$EDGH9?z{NLW}0lx9oyfqp)WY@mqsSB0*uxi*i^TC$ew z-RTbY1~Q-cX*T{@dPZP8bi4GEzrEC#6aPY~wlH5R(;q0e(mjN!zK1CsEpgon!A z%VoHC$Q35=j<6aQyxAck}C49Cbydystb%J{JBX9ls z0V8eA$*Z7JkM@#uH&TfYSis*P%vzU>SE^!!-{8c_vbeW#@3Vn?4ha48`vdLMWqy>j zYirdocf3qBChCD7+nO&CkD%Z%;(5}pM$Tk8LPX-OK22MW5wHU8C-vm%KV0{Q>L7(TdRvm%g`y=J2)#n$+s-3 zE7QGWGo577b4Z+oq+f)u#b!%G;rC=M(Np3+7;!N5UPmwqY_InDH2^6);5{rKfYrP) z==f$ZCu)Yz-PsLk4dvZgX1JikD!$)=_Ez-p6ukkhe$Nq}myu`3)_=cI-4DcJJ)WWa zt47#i+FEYd>l_HJa&k_4wPu^Ys5$kJ={aV%Vv91TjSGC=Js2Lp-U}ZFJr%wU$S=}E z6MeAFz!tLb$Mmyn@hL$+2Sd%gMaCoigF)XTw2M9w>fD~9CCRM z`iGl~F$>=;_>T>8x444Ci|9=9II+&8as)@;F?Zk*Tpm zL_Pz;*Hae8kM4hnCmOth&8_25XmH|rB%V`*PdYs>mlO7i+C^G-;{KgT>x3Hz-zQF7 zEx1{DFkbD>DD$|jNg5c2D`v=Y%Ei-s_}13gEvN~$v-V|_DTrK9Aq^@Ek4$<4v{n^p zD7O7B(EQ4b1vM0x(Pg)mHiA7vcz z{BCq(S>#+6vkvmdubZP+OfX3EGq}jx2#+XR`@u{;hZjiYC<4y-|3@ zL#90-c_Kg#?O98D8E!d*vjrW#)@sT72d_w!U&_Qiq?Lwn;$+I9KLFuA49LqQJz)ru zF4ih}d!#wxe{}`dcSG0xr}4qzVn*vM4?iG_TpWGV{(}K-xeDb_5bk|b)4l$FMmhzB ze<8+7PT@wtR}PeSa~)SP~|qTm0Uzvi1yrm^pX zkKseG`}<0M?n7T}?Hs^w1RUXC4sOM}#&oupQxAM~o^p(ByC7?fEk>r+L;5=Cql8HJ z-7iDr;YC=_JexVP9IVjI!!A{O`OyP<>fUFM*lYWb_;+Kgi?&1PjM;^a)HbmfA>o_K zGwbbx=I#?|f5$Y8>G6TRJuJfv$i=IX(fBL%fP~Oojw;)M#&0fwwdry2dvQ%k+&x1x zzcLw@xSwEjO;~(n2X9hYfs-!2f??xV!;Q32xNLO~HSf`Vo@ZmhUs^uqm!40BlF85E z#gZ#b{A`Dqe2H~Gs<>SHBK-`Dgk!ez&^mt>pBky9TFzMjQq>cD7iEs8x*D)%Uyty% zUl&5nv(xOvnho-#4Z5AS@CNSX9TqrSXxNoP60 z$Q>@<=Ui`RIn7rL6kf_?H`4NfeimEaq7z=e&)EHdx7ht`3;e5n7AE!dz;FHQ${x=L z!QQjyaOSHSVh(uepNG&jw;Y_N{NZk|hl8=zbfE8t)BiPv)+Z(*tqZ@tb1qA3xuG z0Q7INjqAHNCQ(q%T_m z#&-qNYAv}>e{*&6<7M*Qagp$FS|D^@H$tot>Zf_*Q2#qTeA+)gjq9tH>-=Hv8+w+< zsJ`lCTL`-uJwtf^4VDgU1^;Xv@#*^@xYGVMA3rmgrvz<>eI<&bU1|vA8tun*Jd5g}Qu;cg9T*k5$)5M)ae~WjcCV9z{I}0!9FUnHee3Tp8PVQ3RMWU` zN**8mpcn`By3CS`3z6m?o^S9L^#yLT6^Rd3z@+XFcI6PV4}F?|nL|?)gChn@h>-R!n zQQbXC%$oD?0gv!`?f+qW+5`DWSqW^DuXC@Vuld@PTgunXquIulc0;mKzb6h z@1LR*5(a)ATt_98SFmk+wyKS 
zrh4$!b3NBQSOg6#!tuheg&2KvFKqk@(C0@9(%4v)PBgw>`2;^^FK2%9J%;VushmF9mwF;S<#qn`7ryv4*u&{1)^uOg2va@cITw;)X)C>A;s4zQiEU85l7{1BBUdY2Fe%>~GCnSDjUwH#WeA6^-P|ZMJyFD;=T@ zDxvr0SCVB?H=x?X(Ad#nIANMXy@_M?*GKAidgfp{EoeRb^RF&(Ge$p`8c;x_|~ z`J1hgwTGgp@uXg!_`AE29BW3;?2c{(&rBP_^1?YVXH#D!u3*}gd+_$T-~7TVw!sRVaLUKzJ^wlBWO6sOijVomlc8qnm(F_@N9K*IfWQ`Xt!+T%1LBhs- zT>P|*x$Jhuk^5e<@MD*`$2l|gs(EKlTnb{XqYb>V#{ z=Pr26E8)F2`{0|;){xrsI8IxUgZ+;71JOG+maDPRjzmstj-LW;IPoBqo?68k4ff&C z){mc7=7af*uF!jg%ATIcW(KQCYZ#;f;US)SnIlmT0OX-AVoQHv+naHdI#( z9|sdwt&>8mpK*DZfl57v??!s4-TT;L#JU&6on7$6Aa|Uw>L=g5$QgVyR4`cGLJqj4 zh5L?%@%fniuJV1$D2-KaBnS<3vVI+& zw!Mtsz8V2*yCEKC6^#0g3oa|}{gC*u0&cF>LI;x!7Mz=eoA;)$kKs8I)i=%=ZU~}R z!W>qirp-C@$#YddydMQQy?4R9OYQmN4mzNDSC6>A4+z{pt-PkSoS@0d+lQS>?{d%k z*68r0vD~S3a~!_Q6j#|dMCwmGyUG=z;r!Y)qxk{q8@}@Ud%hw0Cze-a;nIOYxMFxQ zjdd+F>r{Y*@6gizlH|QC5@_t4dI|}P701i>#TqMweavgJA8a$Ql#MmIavbfhP8^9Y z54NF4-_962;vR}x+K}By75(e+{3>ZnJ$9%#Sko-|2~h8$!z5oIt-yb-`>P~;lWW&J zD)*cCI+^$jWTWtlT<(Bve2i4Ou9z!)+;lPRF>Q!9 zXU=1!Teyz#A<1w4eb!RTL>?D^4|a_$13Oz?_OV1??sB*op}<03XB~~H`!*}2(an4l(7d!*mr>Ch42fV;vEE%bGl#NRgmBxM{=rm=c=3uWzyx(X~78+Y9 zS@e4b<6gCZ9f>A9XT@|pT9CpcE+5A#hpXJ9Zz4~fJyt$?AO!UEZvp8BM)S=U)is43 z?X*?WbNsz?Z~1DZnOqiemeI9Xhs_7s9rG`esCUw@eA2(p>e~+a3TaU3sP+@~&sL*c z`P4>UU2zfzHgEvLn)*VA!B*4$z_U6M7wW6evrme(!J<#EHF=LbxXyq)i23Wn)9>&q)0s6W4ItTjz+=Lpfm>%gSpZAE)#FX#E)BJzsOT zKAf)84da$S7W~GElZe+J!&z)>X zqgboK9pi9dfBKBF>MZF&f1IBZfZ=bop63<73Il_W;kBHrNOLWJDe42QowYgD2Pa=86HlR*m8nczp+3HA z1>_H)INzV!CthVEt~iqxbH$a*%30(LE5a&Y;_Xst^WI)SnuvNZ5lJIs()w5~bp4C! 
z^Z4Y~3DJ*8JkKr|wnF+hC^NF+>rU8m;x6LGofx>zLJeF;dt6v06V}&3;ho+dR-sw4 z7hW|_1c4!6JccSeJd(N6%otl8-o}JxRA(OIy$;3$^&r&5&chc+mH=Uu+*lfn<5~_P zk9831j1l_RTWu2gixCbBeNNoFf%LPEoW0gwV3Hzq8tE75;FD9*j>d19XOIjvrW4^8 z>n`**QhyPDzZ6_YcnZ9M~O3xWl?{o_o@{sHB};PQXdxn ziw5syCh)2HH%14J!|cJqaB6xQX(w+a zox@F+L_q98CKG-s#IaB$o#92g{=BiD9ttgH*})Pu?*ch-KN~WtgV0v|uwDw1HWm1y z3NJtC?O(?R-|umOQ{<6g`rM;}-)ptD`%hz)JUtMuGgjt=v<{;7k-PxTnpp@dBAT4uj!ZNYBH)R7aLL;UY>7Rp^YYSi z8ebl#z|@}}kT)R*Nv{Lt3Zl26?xhAQd1atE&^*#O!i6i7fV_-m_vbqbU0>7&`kgmt zqBr;1wkBR!fTFH!Z}kAGPoCTCz8E9?Flo=N{EEtd5Z^N|1p@f z=w9LDr3GGeMtNoiaY#B2DZU8e`^ksNq@Tb#|2bQ{b2YTu@r5TBj|S>@dggDDOxl?* zyCP~3H5P@Aa2obCS1R#C(p9+Ck&Ve#Cteudp4WKLy`DUZZ!B6 zPq@+;Mdn1lO!OosKCt1wI==L7+)*U{$1aOH$t|@arTjM*YCUro{1MZaYNVP6^!tss zuQpLnW(w>wK;k4gvF0}?9l?AZP6P1JhWK*UVYH12;^g0$^UE_xJ{kWsyaSJS zOpzu3P^A1rCcn)|A2|rzObIf=3sVC`&yl{NXCb!Gp7i{4h~bK)jI~N)}|fU z*AWTQ;v)@Y%KRW^P<_6sI-0mMfRl%ze92cKp5wb(+o`Lpevtm2UaO&%I^Tinsy4^G z`DYgd|C$J&%S*X_0D}Od0+B}uO^EdKG52X_m3%tx?USvXne>cJP!YehYbjse^@Asm zx&*|HR3{!>^uC!`+T5vRcPKp}?e zawJ`cv-bR9=jI&~o{v9^Jw$WtBCmUR4@N#dLE0`vq0AA8BSp@j3T;RlQ2rEn6dt{q zjl_%6+%c0RfrqrtC_MeCmqUau;zFBr?4m&K(?lll^j5tY!dus7CX@w}55CE_jO<6( z>4)Kf`vPm%xA9j~@8tX#XG)rl@C4uTPma^83JgT80k={VDCzJRk z_Gc2GMTpNK#OD*@GYN5?bxZpFzxAtQRa?jRd3y4%MoqEhOOA=fQ5cjz1?E*9V$H%2 zV0?%lfB1c{daK|zSV!on5zlY&A6?o(Ql23?>PGO3_cp+=_P#j7!v`$ARzb6(Ox~i$ zT)2KA7k3=)i^C5~=sGu^ee#_o&ADl=mZcqKvu9NDq0UX@3DSEEp4tX$l2_6foq@mS z9^tq8&0ym7p33;l6i4&ABiZh6#mvYh6klj-aBfy3)EaPDnRxv?T-}r9Sl>Ac2i&h# zK0FSm1MTm#HI1U-{O@SY@E^>*Yi#lO&F0{?tTlI{z1l+G%+&NY*TH6^%JFZ9Fe>Kd{!VtxnZ1Z;EknRTZqhJH|+b1Jc z{Mqz$cmB|=hHtis;uUvJabq7diSC*5etT)pyW#AJ#~es9?k4B`tHSvG`s(e&DLkm1 zKARcg59ZD2{gV8daN6A+lZ?+O_GZVRVi%q3`+?pi$P42OKem$3`A$`7{5(GevD>|! 
zxH~=(=DXkF4HD^d=V=z``fm|VN;(7?B}-U`=1%Ik(_PrG^|bf3Lvy)fF+G>PJPbX9 z!3lq^a}=x|okPzaFQCqUC)xfLI&x6EP|U1e!|LEe^k0$+v^Mgw zi#yp$v$itT3=2Hz#+z-7;=`Yn@jsun;DPjt4SZT9NnPV%${joONVDaaY$EV%wjZ|X zR0KNLCMtE?HDt#Ubmb`>CxPLzwxDUW2bWiD#Wt4@@}rl{cx7dK`Ki}J-0}4Zq@7)W zMOstf?#|Y*O?8EKAwO}Tg9)vp2RKd!wAJX#IxThNv&r9i&!PR<@UZhNNuy0;J4>~@ zjeEGQ<~N;<FHb~?^HL0%t6V6+oX%3^thk@%{YdL^4as#cG z5Vt9S<*C=;N43AGLAGXgHg2||cl7hNIijwS`aOFonq;OUT}yqPTvrx9>v`}Wq#gqw z+iaH6wWVASbtlLyORXtCe)St zwzZKzd~CrJDo=7-<6h7qYd6fQ>;ksNDd_MyRBoK=h!gi=cx=DwEl^cBA(8lUv9WFhYBFblp|cfg~j1L01F1TRbnFsfs9R)T>{_yTT0C-9T+ zL$0?z2`@CX#^KK<^NzOHIQ0{zZk-G#_iun_k516FC*p>#R&e$88%A@ER9mW|%)`su z2g0LZ+J|xEY>WgyV57PW-lqW|UVf*B5NVtN{4L$gJ^Sv-Vq#kM-nX)diUy!;v`Sf&ydI!&I8ToKfP_`l z|J#A?L0(m&g#oYMGa3UMR&bY(^4SBfO}=47;UKn1=eR_DFDJ(?;pX$}z`5 z^L)@lM&pJi53eif*(>l^LNHFg)KTq2`zr^YyoLMA(t)rLUv|0<7I!W%b#aEMAta39 z^No9mF+r;kU6y@yGI|Yq!mc&m2>UkfgwlESVEjA-nAs_xaIn3+;!_ixvddV0=kA6Z ztJiZQyE;HP0mZaG<}U`Dt`6vXXsTD zfd5KpKjv6%h;jQ21%XGA~o$IZyor6p0XR^W;9gScYSRqhy9E}2RnX;W;JI zqsAM{b`4L6`=t8%2tWKrDd+Bmg8ihSxXik%djIq>@YB@&s*7q>`Oj(EHjyoxkOsI19VF0G~WhzOf#ykd^9 z-%*pIt_nwh$gSbAy%H-=rghrb3q?BTzpq73Y*&!^M7QIV)KO#2=j2 ziV2+{aEb1Lp9uNP?R~j@t7D=m!_2D~4?7*R?1|r6J;EOZHU^(&~ zC#{1;loFXOt=IzPEP%a6Aj0qd*M*Z{Oibdn&|T76?B$_ zo|aO*e=T&BZ-c2DJxjZ}l39*i1W^yOm7>I+u(A4=Lf2&FN#=6alvC2;d)tYlRmHgJ zA|xz>#?yByq|Ly)<`iEYIRzqf8O$&pf)VXMVDd)`_@#IQVK%RcT`2SzR@)C@Ha9KV zCjNwVtU7|JpBk#fU%c4hA;!i;iSOqT8!Rv<{{fiA?8ha$eXzRbC>I)U4n50vXk-<$ z{E>~+n<}fO{iLa;=pLkq4f`4O0PgZ>3JIM?fMt3cPI?1FmeTp19}hDc8xCC82RzFp z5cSbT$5!Bq&=2hD9%qc(q^Hl`V#^DWnOe?{+yfv3D>2d8;ys0zG_nf{xuHwN!IuA~}o ztq^{yk<<0@mSbHAY~sgnkDx5;Uk&!IHbix?lUy}wA|q^)M?ZJR&Y|xhC*KoX=vi~Z zGB$8s8r7|xq!ZW|CK?taX%ifk=)k(a+RHZgJpk0x@)zslC@mfc)%$<&hK0kqt2%=F zUwtfvmQEs`S^!kn=xy)D={Fwxc@7@D$be|+4U2!&nQ0HVBD|x|-QEp{uUktU|9dx5 z7Cah#?NZ$g0x9l8iApr52Ihu`K84XoUjO9 zmA51;ibU!W=HlIr%~`paZCihw2`wyiyJyTkI5p9eQ;)FY4J4dT*)?fkd0&U;NHyn3 zyvV8VSQ0%aM)Qsh+pog!lQNO?JSR=Wa&}wiYqd6whmp@cCEu^Hy9vx8Gv4LIp3 
zYtjBzsx=7xPksU(mGwfpHX}?Gyw}&Hjwg=FxWwq^$}NJMsz3X6MX9Sb3Qjp$bDT?8 z7BQMXF0^fBLLjPFs$g~lm66Y3RFC|VlOu@xk<87JI0&P^U+1g3bS8b)QFU+s2~sM8 z*@lwkwb#wpuY}43+OLbWruY8Ml4i_B+}30TBMgTBwzNd@FHk*r6u!D?A}8ARr!_gi zh|86{xv@xmq7pVJPt*6&`!tQ!VeK~KnA9}LI+(|<(%B(x?sh=IHA08^=>X)n4Y!An3oiT(4>))20$LgO7W zx`y}b6!KP9|B3#WPBp*Ch4u(f?5&dCN1@r=H^c$?MmW3h9(FoLYuKU-ADCYOUW2@F zhIT)swdQRT8pzb+64e4ZDoayBRcbd*7&m4tCK8;{+W)JzW z*9TaX{EPT+5)N|=;n_>hqi31IUfyk`GXFeoMSG#KlCQYPvk-c$ejo`Q68h#DPq9A5 z$yYP~ZoAp}SG#y(l_eUaT!pz4r+{~?mVBes1G;sV$nUry`E6Wzb}Z|186yuC+I@fzN3XMtq$7lWQ#e~mkSa_4wn|hp4O_Np}1T(9|LF(%1D2bZb&0+ z?Fr9TeBr5|b|U!?p+5+Vs*%=`5k@k?Z-IG2NAirQ+puENAS4~;NE!rIF1m%niyk_< zTj|l;0fjE5HA2#)wf80bI%NvjC3cn6fzFO1&8_yEa+a{CI}@J7v&Zb(x}=OGgVq6N zT#jYG*9=liGtUSgBlJ9c8`>Aj-y%36NKkMGjE5qAn{4il=_3PQ=GLT;b(lVTS4&kpZCr^O8 z9lx{a@V)%*^BaQWgjW>WgL2gp+S4(I^u}(uv!MkL<_I5wX72~VZnrv=cQm5=+|5b1 z!>7*YIO%WZcYXr;ESQayy-0*lI4fZ*P_HY~jIJZ~29OVv3A4E1$;Qj~;?{rjMW0FQ zbsG|&j1<|0ENc9z*IV*-_sLJJ0GfARcKvj1{-QnnASdq(yE0^KxvQ~E8jJ~#ojs%z?ByubiYeD)7FW&o2 zdUFx4|K=6m*U9Zc5oY|D71y(dg0e=^Rd1MkTec# z@V|~{+N6;-JP8i2DL~^VPdG@~eA|aI41ExKs^1hl>X)wy=>*W5n}DR{qzzpU0_k2x zx|ze+^C+-*#I$`Phd}Gf?jrj_p=AiyfNv{c#4mi|V^iU`3HK!R=H5QwAE1&B8s&KV zYq`|J+K$_w{RqF?TJU!oE%@WwS*6UAk=9ZvH2aZLS&FqSjjM>Vnwhm8e;A#=CwQB9k%nyJ^o|x<^iP!)rW;%Y@hDhgYbik5rmRwkY-!`rof4 zZD|Em8%Q`t9{LvbfFI>d{*ZSn1L>Y2ebEh0{G#@06f1CD);%?Z#;u}G_XZ-z#Zc#+ z@NY3=8Brqzcc8#;;oH6U|D*i#f+KlGx}KHD5P@o#^*cC*lSUDk1edp8VT}!*Gl8Ee z6}mw4r4Vidi@M9>nib%uS78|V_Zz273rJVOk9$8P>SdhV><(M_F^-2iH)Nz&Ibj6( za4VJkH4Mz3&M4o7G1Iq8RF|qw_Z^gx?nlx_;NpFTe6q2~%~0eBf@i%47YnZWpU=L+gL9WA zfudJXaD8*H01%nyUB@qsazGS5NqD^lZ8U7*5Lc;HNHAqm8n(pO3q(F8@+R}7wwmE1 z!ug14EikSAKEmMBoHAX)%LzDmsts;&*oCCSu|@Fi+DwzQm?Hc?;i@Y3V-mmW(r@v( zg!uFS>en(uC*GX(l-I5J!AIPA$#G>@Z{j==C_vjN8XxtEfo$GqApzJ!n04`BH->iPnt8 zZh^qZZPY0nYFiAljvw>c2Uvt>+FU<3bkf zU|sRt;HA8iUotG{mj(K_E0ie)hEQDe8tmLUqq+PLmyPLx1M-#16{9TRwKI zTh#!1=Xop!W&fw9`A^~HE35Hz`yy~X*4!bl`$C+0d>GRAv-h9q9ZQR*@~?d-vDMlk zuqb~tJW8sE4HhYNLVj1K%1J+>wH&*!rqi^F=+IVDHf8>ebB|aB1sR 
z+IH^~Bo%dI)308F5$2v~;;4^5^kVRUgE7pi(-o8qSMWSPn%2t%7fo^Co{zrq>#n=N zCvZD!w>^!QKI;M6uJ>Wws@YT%@zUwh-+8o46b~BBnZ>KV+HpR@rgWD|Ax`wK88TpulTbkrSjapxRVvo%n~-VG^w$ub8XAZ-Z<`wE(SNS4ed~83NUas2P-YyyEtIa&W(w zA)@X}{GFW-zee}rB}U0;Kk6>jgA>?);%tnYnZkZQj%7Z9M)F990^0A>8M?gO2W>mJ z)y}W&r(A3@Si{~e8!r1E=`Eeon~eRtWx}YMGWKJgy*#j=4pP6N_0|URI@dcebDX>A zFt!+%tR3`e6d54b!C_p9~*n??i93&=y-Ih)!@ID3;P|1FI<@En{#o2bp50|Ez3==C?5gLX zQx{Vhxj!7!E3dIL19KHIUvG!BQ#*z|hP9`gf=A>e%o#rb6ArtgR&g^`+tq{Jet4bL zSyqhWx-SE&4T*Y1Nl;DI>nr-w8oh!c1I}RTguafKwhqF;g&~r=raq=&Yv{f21Ku^L zFHh}#4&}IH)_<50@0}t`f77fWuU-z%Y*)r6Hj5DRkL%$G+;~}lXZ!Bs*TVxr^ib-A zbkVawKacHgcMy2n3xq2svj3YlCUN4wi0@27>u z$FG7;fCc^E45{597Zf!yY3dm+*5ZNjeO|ID7hUY;vQH34`Za?G=vm3uzuU_lYOLf1 z=OzK+IEp!VR`#1Erd&klx&GkQ*ApV68lv-v-mtb+5+u@jF5W8_W0M*61vW!Um&PFO zGjwn-)hOW^@2?%rnp@45>3hXGLG0ff7&GP_s|XC1MqW!ps(+Y&i{1;Ik&VySYN?O$ z5lbpMgESXR;L`CmBUpnJ^7S3F9eso6(mRjAXnVSt?aQM53u~-6VTY1GS&OhZjK9bq zL4A+~nl@WuxS=1I`nQv(nmy;Aystp(ayz!R#1RD^_>XELH#S;@=Y3Ngk7s7m*^58m zV?bkYA3qqkeQYH6dFQKAEz_wohhWLu9FSkci#3E9&-&oYkW%WgTQK}e1N>{W0L8t= z{v9S$zp!pi=i)ZUKv>d?fxwZ*Px9I1f*gS{;3J#Ct1+k9>f%OnPn&L7^5Kc{`LM3q zf8}|4x3&O!&ToT#r^jHqRUDhGXt1J94huiD2NS9gwkIBdwu}19&FF}Dm(Pag}LZtVWYH@jJLI~U30=zptWHqdQ232g~Ydn z<7M2=EfERJIdL^^@ODM-q)-Sl$i;hOxdgFEw?vDru*1U|_<;al)CrOA#iv~N)`qh5s`YYLSZ!}TbzN;~`}q_TOyztcM^ ze{U!Sx6Neg0qU2&VE3>ww=C0@iZ>i&oyLbU3%f+TIb5OpiKCi1%by=ThP0OQw2ppo z%gF%5+CAN3#}~bh!%*8w9z4*HFwTO<9w8 zY3_Nif~{D+v50!~EN;E%2Cjv&bke31uIc;;pHAAUJgn2Pw(j0Np9P)#y7R-cAHlDM z0h01+F-!gY6-Rj8;?&z0tzBXuBEBD(9kP%`eSXeM;`1l%gpW4@k+=rlTzkjQS=@A_ z{+Bx+Y)5OS!z?ly%l4!GgUB*}I!mx22u^ADHy;QyIn|EnVRm7bH_-hfX$pS7?>MzA z&Kv$KO@P8j%h*fz0OI8&_GW*s6nUvPoP3lDT6FF`@eopPuyNf#;^p7vf}de(c5{e2 zTE)G0At(Go-!-()V2Kk_FT8=3B1QuLu{!(zp~3N zWAMy2hOvX3@Y|ZL>|odfTwNT_mLB_0dc7pR7Aw!k`eA_mAi**G_P0HRXH__3)Jq!z`rdV2BHLA7M6kI`QbOdQk66 ziZZdk10Hj0kEA`N@?irNPn-F&b?OBsv;lD{7quYnW4p47`+RMO?&Gg%hA#L|7Gp?X znS??=%-@oVQMA|NkbD!-19-D_BqyCF>w3lk@p|ojHrz(fc#{CALVz5p04C!Yhd1>J~ekRR}eLRy68oQaqBlI5OG%u?0WYX&skZ=u% 
z)3N#WeR$#HEhTKu9oDOW_T8+c(mI=O`1U zJ)O2`b9uQwr~5|V$%Nd{-K@6CBSb z-i}jlymXhzTj2NG_W+|tAZbM)Y*2lxPjKr(U(v6s_KHnR=%WPB@ha(eb>x4R?48zm zNQpaDtILFbrukx2GeT3cK~bae+mW;Edfis&BS)j?nF=`))Bz{C=xfpE(%`R3>%A%t z+d2%(x{QQPJ|8h~-x($}C*8LsFyrlz1aa>?vaGJ0SG5*NOCo(Ar!iB$beIV|otnKD zXD&U!+L>Fb#~&K0q$7EJV~Gpz@aDf_(jE4Adh{9$iJ5@qE9c?$@Or9^S2*o|Y{p0* zkUw$8?RBbIcx5PDI@UxDcvHxW-(BSH2Q*0d$%IBCoMxoMNyqOM8ca#OULbIj_TjtW z*naaCFx;~hn=XCK?-V$*1>Z-CHI_(UGV&)HvzwP|HFwm=mPqTV6!gEqbc3#N-^p3< zu+9loR^LNWZxfp}K`qTt>5Hd{D)6233qHJ8hxd7>#T-wj!6f?-^bM|J^fMeYLRzXq zuaPDK($%zP{@}d}fbJPuXU(kjkyL{!;f!i7Pf~>k(~LNa#5F2m0h;>L`{i3h=)MgI zv-Z;G$KKK*zf%I^W%8cz?A&;P*FsOpgazQcrlGoB`O4S1j&+Kf@`VZRAwA2xb?>cO zZ`}c7Ulv2z@&SB_elzt@t|e6JH=;g@ByHm>uo{M37{bUO@t%(qT%6Sg|6To#MfTc^ z`8vu%*Df>6P=p3tc>vPXD0~`uJiOH6H0+xDn}=CdaQ#eO zxTdLtMhP?7XlX63Td|fmtMeYIPg#eWgPe3Txb$f91RSVs5S{d%P5DEN^n5$5t` zW6tqIQePnd;&ATn1W5h7iUn=(f@KxWWtv~|#E%rpE%4PCdT#5}P=366Hm~=k7pem~ zLFv+c@VBEzXmM6NeGJMktOXB~U!}8lr=El#dwgrXWZ=GT;=1tqZ-^qa4AmgB&$qMliM#N{*9G3xMtkBU|k(`89?T*5d_1$=cjuyJSqR;9ua*3OLs)H2A^f`VIvM#pAvS#i5GMm^6TUC2 z9O7ZR-23rN;ddxQ(vcsGyGirwOXp)95@Qj$6s-F10d6=$XL3>>QSKwJ3m*(^^h@}e z=~0a4L+N&SB$nnpS4qFg>6L>)_|6rbC@Y<2CH+6*zQZr4H~hbphDv0Gc2s1qbnfev z5s@8HiOQC}qG6;^DorUOt3f0r_1xFV3MHeg?2#QZ!pHW#?q1)2;rIP{y?R;C^PF?; z`?}uiIp@Bezl31PwiVbkwKs-@r@(_3_fUAAoom&3D^tM5A$iM+jQUYmain`nCOO5S;OWvG(F*xW zpgc@?Hy%{Kk&$j>Wzur~rG+J@zG8ygG@GwS!cTbhevmjHFx&rL5*+T@@hQ+T1z*GN z`g2U{bp+e)zL50zXCST9kc)~OMffPvxa5fj!y50lSUj=^*WS}y=n!0L8_l|08UbNf zCv(axV2E`Qk8QRIc9{$mo|15{r$YJ+h8y>67<=+g7!x;^bV4;JKab>d@iolsnB?W3rhX~(Hdqh0}nnQ&$9%b=Z9p!xg z5kT4#uO&}ZTmtMR=ki-pyCYvfQDA=?Jwb98R>3OPjbxd!En)j zhsegbz+~zz_?hX&DIb-IFGU90z;$O!BgylpNxJ(~Feu3mo)aVXhbQ2^@bRqV z_#nz=-H|*U^$k6L|2GzP9?T|xk`6+5i}#WqpuBh^KQt(VE%tp129L8)zqcE{I%^_} zY?b;P375I>{ZtoBWX-=Hn{biUQ?|w_17^ep4c`6j$yPG?8NmTa??vJU{%QFaJU8T* zN@U;RZB_|yp@_AZ#4oW%Q#1Mz&qf&1Ph0x=zkDWw4yX2gYAg9wp_+X5cp4h}w#3=- zl~`gM32QFQLWlmef1Oir9B;`bmE(PQt!1wSOd5yXl8?cV3AMO?%Ogo!i`F;1a)dcu 
zm;*B}xkBBuM|^O>R<_GlSLtl>oHbjt5?5Lu1jpr9VZl{fr8esygt>nw?KKk41zD-O zxK0nH~Dz*eiHcsQxh zal?c;@K9|sF45Cg#5j%J+6k-lA}~R3BfisVuZZIt!PGVuyXdxs@jeIPMy@9l=R5H=NA9OBHx4mY zGM&4K*D4QI48-BucCu#Ned%_pfjsxJgOc!~isyvt$y=vbD3=B$VVz4OoN615-bax6 z!ce61#hT-(u;D~Bo^=nDlb1|q!zSM2<)NE#vUHojY)d(b>3tQx-Hpz6no65LtDv*V z45Z_OTjD^b}6&;&F$c4)pg; zLZ=I_f$Ce9{PeJClSCZo{u7&9oM&Se{^2xE&^RPj)B@iX9SAR+J1aM;^r=3!g3Ad5 z4mQc)nOBN~xA=3bAX~Z5p<7s{bx4edDm6_TFJ0~xbn3Em{XF}8D&y*0>=!9jQ0D^%mM=zQ70*Q_Qi##FuS;`UzfWGI2c2bBVxTwOKr_ z$rgyz>INYzN{gM~BG$w{=Y3we$oAQPpeRjC89t~D_V&qjTyQlT`dJ@>M}ftdp?-@~ zkHcK;Y!$9J%c-yU*hMS&yX^n?uOlbfhig0d?wEx@Sb$U2t6^4S2RUxxFRq+_hfQd& zmdk_n@Tn}JU(83xQ2#J?Z+W?7^l}vR?)M+)YInx1Q+C6DOFbQb`!!aqKWxAw*Jyo- z$$9)&#}qvNvWzD$d4}uk8p)%tSwQZU!$s}$`|(hJb?6v(kC%Q}$75>dV&sz_5U_d! zr+N`MB=80|hP34I-bMJ|>q1PvzLC$obP=b2(2%Fo`oZn<3qjP|w<)G@?OHmf=O4r3 z4i;F{{~7zF7cOd`n0lOrtZ)I}_esppE}X5`7z*CCxu~ms5EouE2Qj`>>(Ye@v!ut- z%W>sv72o9HhfDkZQMsImWiy=fxQlalso9HL%Sy>+^{SK~aJ{vn*OCI@Qjv|+P}Kd;OAzzQAa@nj!6`196` zeevlC-G`_tH+^i7<@Z%OPoh1Oo`j%c(+u`G6(IG6s?xgEPdH@i3G1CssA7{GphK`Z_#W#ZA35-b2@LjMb(nW+l7QXKFEhvO z>x-7?Z9)^9R!H>)2G3qVPzNhcJcU$O(EY#yC982?upbV9}pIR+VKqdRGFlTzS0c~ zwxvK$Xf#kQO7tGQq}@T$9n_W+4igT{!8?DJ!c$qr_XX`^35)9C>k>bp9)_xePvDB@ zP@o#%z54#+mvu}O;u+<_B^Mf3+W*MHg$EyD@a{@3v~_&K1)hY-GAzrzk0U+Zn8%HE zG+sM!`SndI(et9l7Cpa*U$1N6u@p00_VN;hzO{mXlrhjbIIg?+2)4WD=}fwGHK(b3phN1*5Xln0-wD5N>o$yUBnTd zI60qk?R&7}-*btu#Ib785{YmFEMo)Uw^W6KqY1xp$G;hHnDYCmiFX7J7ZZQuHtPd0 zZ}1i{jsD3`9v%*@Zmj3_rVom*K6{KO4voX#rAuLZ?E&fudnnaJMi>q=+Xe&S7HEey zgFG*1Tnp#nVu+Ef=Xn|OpWlIT7K2gflelYU;Af{HyO}&=R1+|(-BR%Qy#@;UE#q}A z>v3d~BV4X|g=+E{e7@xYr2hE=M-M(_;<(c^T=9e1Xnx+=9@p4zR+Vmfh&N;uH|HNG3% zM2Xc+fx4VUu;kz~_>t#~q(|863eJdYp{xQhH?aiMSDeFZUQT?zsW&tp& z8|tUGB|xXk#gOk!_22e4e^z!wm7C}U|2e$j_Z+?P!9u)%H2~eLABhiq5tPy>8?->OOc`aaC+Pt{2N$=?f$)HD_=`&N}x>GJp&F6 z)+bEtinAJ@R8fE9tN}K%^RY}2yqln>1q*byA!#d4W20Es#S*5rh7p4e<)MWeNK?GV z<9&a@6m}J3T^du}eP+t}`RJOr>+F zdCQg@oURgjka!JxXYQ6{?nQJ;`dnkK@IJ7_P~wBa|L!`n>0O@q4_AKabpRu 
zHPI)WHBcnKXn_YPI9~9o&)a>hpkF<>#*af@x*T4$tCOf6aRfaRTK?oe7O};bJE?mz zorw?7y^8Kzcrk`kUy9xYZ$Em*;>l{ECE4vMyI4!BzRKdtZS3&XOdwpvdD$UcXsq1zP@{?$Rgh9lJ;~@g@XFQwq(9PtG&Qb1zmR?J?+^7=da}@kv)X-T7LBS%53S~D zf9Jz)`d*N{17^kD7QL*H9+yrZ+XLoy8;YZKcA?Oa={5;SJNOZ%g*3-AGKVC~Vp=;ghq&n^4=+d(o&!mH zpiR8HYEJtAAU%LD+Xk|Z19FhO4-XiWf~`_pC_=Yfw>X7bg`0R29V^l}E0M-p5&cWN zUo6ITRo*#9eT1Zud9SMGeEFgAP^G7d#1RepGeWZjNvkL)r?vsoQh2uPCXk+kXHLDy zZ*1b?nC<-NUi#o&II}H1Yjh%({phd@ra2X=R;x{ArqRKSG!Y%IvrHJH5Fd(h2EoZ| z)o-&9zbFW+YXb4!{ju*9Yn=S=CC@Hg4>U)R=GzRNz?D&tGEvV%CIn#E@%F?M8^A8X z4T%>8ClFVjr~6p9u!3!^74EkQh}*ftvWXaPUe4)!naXi8SOsNch;$1y%H1VVSA>)7 zYrjt?zaF{yq0ne257QbSS2Jc@%t~(aoIAT1I%{dZZ_e1Bp%h9$0msjePJF$`*tiJElqvbo_Z9C|LO&1VfEo*Kz^NV@^}KIq2S_x zeOS2E8_0`ss;LH@Np*YjjQ3>9V6@e$ijv(J>8#FUXF+ig!EEv&N*K47^GmFT~yA zQQEYYz3IdTzT%}{xRIs8DMu(GA0l4Et|XnnV=pS9xy2CDOKK40_>iS_*n>{CF>vg~ zSR`*HWqbH4_vyJXdJl6?*@MKLu+EO23B0Z;TQnNikOc^gxEPX1@3mBh7B*JG+*dI} zZ+h`QYNto8}w?O9*_paC+`iJ(1~O9 zB0+qXUBV?MJPGl>Lf%?>XcGq_AE2>8@)by2TvFg;D_5GkLd|hwg)|HFc~wedMSDFw zs{!+iPo);un<%7x7;ioRlKvP=-*``nngs*97bpl1+ zKz+?=JfzJ}m#{_8y^(Y?(senCTt{Fo`3oQ&4>z~9l1U#?W)h6#UC6iDVy~(MF8c1` z`)g_$WqWg6S zyO^kB(n@m0q|sEPWn9!P^&t8l+s}nABQFXI=$<mMB173YO&JQMMu;RVcBx3!+E%`AW_t&DpD&C$d7^e_pD3j+2%o8NqfpF%2ZU; zkDN3)-x8-f1?<><0C19uvnU-r|%O2`wTU|6PpPAxS8_O1}9pSn=vCd5&Gw0}CAICDOX4 zUS~*Oy(#{BJsda1k3r%Rq^yDyN08TSBB$n^b^P}(r6F5rv#c702hI%Mf!|VNB)j(S zxL(Zy%8mL9-^2C=S*!l{T5%Aql{+NP3(221WDDsw&2iCzc`#{eJJO|Xh<}>mv$FB1 zdBTwD+Yv~MNmL{J^NXugcMAos0bLJ#q-@voO>d;WRdiQJ!jz+jfOua;zF0b{*BJB6 zD#;tuy;kaX_)fJc7&Xxog!U(1(UYCi2T#ZEKaNWx{zNY>?lZ-p1%jV}UeT z14g`E9frbV)9Xb)Bl#>gm+k}eKb}thsgW!+R(S&5v-UoLyv7G`TdhSsp{F@Nfl$=Jy8j7o4(4 zAil(HQ;lJHzZJyQ3pr^TMtm$&&IJJ`HH>sLI;1sWq#q^9-6Sig{)*59b0&I`&u^}x zxt6bam5}#3APJ339E2hR95=ZaCU0v4=2pWybio;6MGEsMN_G$ha*K>HOJ zp;5s$9{yoH5ME)*()(*5ZT=T9SW$R;M_c5ym9ZWt&HheyXG}0XU zyj{c<8#rkcCbY7^U(tJ%DT;ug@V}(LR10$I zh38fgCIWfkqE6bKFw`ayDW4Pdie0_V@n$bP1W)kbj@!{#dQ|&m^9U5dS9j zu-2m=b^0-&AMyYHulJ9r)LaE$JG_I%bw5>RXItQdNJJ^J8cvLx#obq2#RD6Sa8UOQ 
z_GgWW98PN}?f1^YQ7J)iW9<$6eI*+8^5%kiwkKLd<^lVt1`}d>u(0jFSeEJo4^`-S z{?+sx``3xyP`L?sf=N>-kjk+Pws$=%K^5fDhFEyFt~B`EWAkQ2-ljZ z$(ys2G2ebY+_%}mKWuM`-{dT89l;@tap@+Ch00TWC)&Kaum;4oyA8PX0kZ% zp_8M*q&eNQ&|yBC*}tc}a7$1AH2*uFank|pI@RItisLLNLc+cAliAmJYLNo@TN} zK55*)sp+Xl8N1k_k%#GATPrV~By$>bKKf`h>)ERshWyu@_7zwHdxxZR<@;fXnO6-( zbf1<};SS7N)I-_ya5!AQT#Ipi_k!-|-AJ{MSxEaIm%hYRThC(F0#9C9+#d>y4Z-(^ z5myd4v9gxsOne577jK{26R&)D&&`%~kSpg~qADbqZ@bqVu2;6hRm1G%3fmB_o92tu zUo=l4zqT3;^71YaI`eif!L@6BopIDGxTc zgyY~-q}d^cSJ?hSan4j{aOB(_sGPqCU52XjS#Q%|t<+1RUV}3hwWv39J@wKRXjy!a zfA;Hy4>r>}SmnCtRajd5{1HGmoxw1uyE&}39*ep=4P{ZMH1~|^0O>rL-Kr_LZ0|O zp3+nd@BCrj6<1-Zi@U%RKEHE+!mxC-&WOhge`cZ0%v{R0a_~cHAzK4Y@Z-;Fd>`Hg zqV<|81^YM9dBs6Wr!XYU67va}x{o2Q=VwRtFCU~G<6H61Q^(=Yf+|i}0N2~!L+V9{ z`lg}We6gRfWCQcE!pfSA7{$p1iFLfG^gga2;V+*pol{`2p6%&7< zcn=VU@^3L)fyt?o=IKQ6a8Bb{i}E4-=oIV`ZU%-Ax?;@fps1C~NOJ$wIStY@*Q z!3_v#)lDw(zJ{Ou4shqWnsUe4w3hk-CwLax9`WY}2>Ne2Qa{Kzcmd4)T@PhOOQ1*l z6>!R+hz;4l6pzO4gM+Vo!)uQ@7|>=T9#}sO*WC1j^E-wJZjirL-vqOg>HLJ|7z|zN z40{rfL;8t+;HmL|w{z`}ABVdV_RYeWMfAL6=OSjbcbJsyTZn_!H04$1P2}gngW=1H z;}FrR8kVX|@uaCP(CcWUmUY0%B_*|B&vF~;IBPiv-N7DxOGM|W#Pc>z*F>L?$JIluzVEz(KwBL_6g$d%L3tqM{Aa~ z;EotaBrJkUtIh(=3s7AEeGYz}T1B}1iSVKlerlZIZNf|$;lH?k2yeRyYn24NyP^r? 
zwY0!?!{d?WnD#My0QD`5uz2-#mCg+{8fy)I?f>wKM@iEC4(HHl|889Lxidz__o6Y5Ctk8+^Mh;^lLL40xKn|Y zzx)kf5U(o}_h711H0(dnN+!Mr(k1wM?>~H^ewwv1?Vu2c0c1D|oNUlU4_HSaOoyg{ zJ`!O$qdFCRf}In#!H7P0RECc>5dJwq)E9erJeIPsCHq+C0hM%*|8?f#(n%pL1A^PW z-j85a0mJFpvi(RLrYtjSgZGzsb5R2oar?z*0_hTI^e7|Dp=Z6amPF#X7Kd5gtB&-( zavXD{w;c4}GpPQy1_;aL+|iAd$!i=yV9!;LQw=&s&)*$HuPFyg&}8a%){W;PaTFtc zz$4?2A!!1M^b$;RAHiJ)jZ*|iF1Y(q!o&wi+Jr?W5-c`=c1GTgY=qZ%_#Tu%97iyyh!WJ^K#p zGO3qBpT*)|)AK{u4fs=IEBU{=Lhhu|n=SgXp7p5g3&AtB7Bvvl}^SnIysS&}T$*_IPwP5WFKT;k@?wXY#8jKcEsUV=Xx@J(pyO^HoV)U63+4vRBeD5Q6 z;ZaOA4Wxyc(6DVTKakpgAI2ZBOyM_#ZUnD(PC}o;wMpBM_!K76^Ey;BRO?-t^~g^s zbZQ{&F)K9On8Wws2d%SCbH}!r4FIjG`C#~{F^r_=WJ#yv8raN;*dX*AgN$NEo`;d& zL5I?0wjky%+wXamQBQGEPa7_6L-&^xwsqJME8anm*U;obuXrm&3-jROb90fpd>MpwC}bbmW|; zYCuItBrU|vN}fq?^usW@=^{Mgk;0#7(EhJYKj8a*Tk+nqUZO9+cS;O@->(%MKJ<-Y zq7SCQaXxXvKDg@P48(B_IBF5ujR(HeRmj`%Uxf!y?YlX5w4?7SK9pjs6F+GVlaP3? z!6z&&Tmb(b&Eyj^Y8cfbj2@MUGdAsR&?r_ri=@1ZI-r(pqZ0G$Zg)~TWzfI>>19!l z3D3RRp2u#A*|PmoM5{@-FIyqci2U>-FWBtak>|SIQg!vXhue2|;e?%de#d#b{tVLg zCwZp_6A@}x!OoLgKw$l=oK4U#*W0P-xnraDr6yvPte;vNwJ%q7y|Do)@m)Ep*6S|6r zoSKL(rVWg4uwW{cev4a#M=@wM-1ufgVj1zrm+s%o?P zJ<{hoQ0*zk&zu_O(C2xiO6WQL)-4)z-tFI73e_6ReR&`9d>>PtAHYf-`VMW)7RQ_$ zofz?;tW#^#e`(Qq9=d!R(6u*cfJ8R4~LPWvkzy{Cfq-^*CDxD?@UxKHR~ zBuo%kOFA)I^eHUSJ^_;&7l8BZLvW>jC(YvvNR7HAr7S;K&dI02z|elGu~X;caP!gB?tFL)YU_0zF^?ok!x0TN-GGIdZel3!&`$KC?f8@K=3Oqo*Cqa-CT6@(vY zsWnLO6a+p$3Lc-;fw%?^?l3`tTcTEN%J(B_4W-rmBrf!;dM@pg=+p;dP3E9m*a!Id z`v`cBt#9xgq!VEjJue#RJP!zqfp{H#zt*78(W2(ai(pr+d8|6unRMbac=4oVgB}*1 zP($}83{$!Y-v;3)Vpa23UZZDOeV90xgtQ$n)ay9$)(0l?8kz?b*ZDH`D^R|{C-<~K z8e=YU3aWL`y5XV{*%0+LVNPpB1s3le zT*}Wzwm_;mmZ^7=aIOW+?0;0?DA({l!_)>pCJlcaJd4uB__M9eTv?%(E!%qT2>Ns{ z9UT)B$sA%zLwLHrMs*Tuo+iC6fiY3lN*R|ABX zgLVDAk&ewmm!9I}OE_@|>bGQ^vJlFYW@7M^6XLU2z2P*ZzE{XIIcn$zGmpA)Eacoq z9R2$eN-^3p>0P14fi#!&Yqm=G#^M1LnRs$e85g)H^g4Mz@}u45oLlOO?#pj{!9xq` 
zhxt^8{}KKq!QdogpkoN%+mQc}*2OkGZVUe9_Vq=WQTAH!0ZNEOHXk7E#!>$qo>hCb&cB5>X2RqZhkPt@064T~M~N0X`iuhQ0Ck+3p@jd{1&TOBOz84Buij)xmX$?I7DV=gX9moh7$5*6IQ<6T&&EJGDF_T{n>
qEU@SB2ER?&87E%|q&oVvL~7sV_9UtBCp!>_!9Q) zr%iajRd_xwv;yVGXn60I(C_%D_90(j=g04!F2{BK@8K|hQx-WOVYW)>UvbUFYYzhD zhG=SN&)%9J=MNo6G1A@4xK|#McO>sn%PF@(k$;m1=chbZGLf@f$=Zlhlg@~I5$>KI zPV>6TajkTW@{_j0mxC0u2^LS>FY2m6pOBsx`a<9?f9cW`VNXJlf76|mfptc!+H2qw z>I*p$Dj5037)cAD&^w|=DQ{wg*Nk{mWQdA*{z3c_-$7{5PkZr~|4+Xz(=(v0XKBj! zvaa%kp?C4_&MEi;udrYGRq*hJ27Z2?hkHz&mH9zCa8j)^O3)j>mT!kS>#u-K;V_83 zeF*;kJd7K|l418i7wp|K0gnZK;UUYl9M@Xw;n2vou=sNk40qTC11=H|&MxK#r;qag z(mO!?q7PV+J00!mo+57I%6>SyV&sHfaI3>itlq_M1K0X(D@!GR3i@row}fH9R}xiZrO>A9mGy z4xe+TBec~|#U-nQRGW{@foo5raMVjHNLx9d_dC~0X=TRo>D7;_{(eQ=ci3w%F7XYk_DNIbkH#;n{8&(GG|Vn4 zVEY^8LWg@-AnbHkS-$Imdg%-JG&N^1-0a3D@9hg)4isX?!;>I$U^uU-_LnNVE(P;# z+ZgF})>IZwlg24q?u_6>qOATa$Z^rOXBZJO85MHO2@s&D3 z?Ddo8n02dyJo!G-J%u&=wN{Ag+^JZv3;!PX-kZ3@dA*5%P!2z!sh%o}B# zY6%ibqoDP%cf5$6*La*2i$&_o`S(TL;LH+VDX(M=TU7l5Cb%4e*liO~T=Q8MT8sSS zW!|opt|I>3Y|Ln-+PMXMUzEq~f2={>f0tnG{WEa7V-e#QlISAOrddI{uZZNo}v1*m;IEdER2S+X2t2Y1a$$Ru~eQI6a2n;a)2z32Q z+uJ>$aojFEWcdKb#PpL3XH?~B1W&TM31R;>AoYdRy?Z7b>fTE( zI-w`i>-n(!-;P^PeWUSiFTWk@ggc_HqUi1Ys;6R3+1G#S$|VbH=9r_7hMO<3Ubp(Q zZDVxhL{H9JzrKN;XI_D`E^lDmu2h=0Ykc&FB%ac&C#o65Q*T>i>bqS0r(uq`Kpk{- zQ2LR#kc0`yYRCJY#Z5-#naW=g$WGNki}W z@ep-jrOLGJ4n3*HmtI zK$37l7~Q*UL;bOp&mB1npK5yIkhOMDpE(hJw;DwF*dC2m+47F|k|dSww4G_Ih`w~&)^G3cEsI!Q~2&%UtuYZqj7RmHEDhr zGYU17i4$oJxi{AUSHx`%ZCG&U!?1a@_=fp)vg;gx}oz$xZ4{h{hX zv2s zR9qA10h8~T$afx`Kx^$%J~^#aI`bipP1Rk3srSkSPw*G#ogjvmuIZKH$9#vCLyHkT zL3__Sd~v=6ru{bxw#wNc=B4RBT_tB}H8X4Wjc*#!6|6#O<1np}K<5CW@8>V>EUh@0 z2UM5XFGAtg+AdhSbsv8{jrI>ue*q(guj4`Fn;b^p=Dy=3U?I_>PND8(5DU1m38EdF z(D!L>0_$-~K^s`*TE((orAR}EpMhR_7I3d_TLW%*j|)MXb15RTtwNZ}mKo?54;?-b zlb1C`!cbN^aRi_4+(z&iURYu#dtFc!(`)6sJ0G$ygMx@(BS8OsHf}!lnlL*ML=Q4U zs<&g=xb9mr_Ib6PdUYlLbL=^+c+!dowYyI^a1i>!ay}+}2pj#O1vY8kReqnki96LU-Q**t&-)v#>A6(d>6aUw&?v zLTg$!mGVNgk+_|wS8W4J*&e9Qr7gRMP%k~f>ALw`m9PF8Vw{ebdU9aGoC6DpD?M&6Qem3m*+L^>J!{Dv!D6Gug%U5pf z#P;T_#mR5#Q{4lz^ic*w0pdVzufo|mK=WqZ#Q;?0e{*G{Qzw~ zm13`yCs5N`LWj#*%Kk?7_%1F5tJWN1=~Zp;>K$Vs++`PgU0~FsO3B%KX#VUU5{EGN 
zY+bDDdl2*QRv~E$G}g#w6E1`x)lBic*^anCI)^D`uaNkkb!~Ya!p&Aemfs3kvib+< zqER?#@(ftH@c>~~7_K`B0_$0FpMEeSc`;TGizr^{^h8Cvf`x_MLNm`(g1@ z&&$Hs0bw3=x~DIbHj_Wl8lzuUJwd8-F0gM{x5JDyhDy}!`{#>9pL6ePxo|H040cOw zL|=Nkf=76S6u7zwiq`IjK63+@z~`IOjcHwJ7g%U;g`R6`hjWv=^YgW#80-I@^)_uJ zFEfb2+U4hMxW_Zh{xN>TOOO@nz3QTnQGjvNe=(hV=Li;p;EiL z5A%Bvj6v-tbJ8jB?}~$b_Pd&Fe8pMrJ~M{9ce)D#=Tm!+-h=Az zU3bM!I~;G-d8&x7(S)0T;N%()Bl%W`0qj`daz?lXq%qmKm}9)}$7mQk>JgI8!DxOL zPIjy1YH^QMc_n{YopqTsq+}h^pLt14J?>g*&mCg(BK!@ytXHlUQBRCwQd)Ax^53;qD3~g7<5|9F{ANq(D99%FU0Mq`ZCf` zoG=JlO=%&Qwpqpl-)PbEqH##Q&1u|8m+LCRgZz0tLXs`b6~V=cZKpHx8~ATS9b8Kr zfITDr@)5(;Kw~w?e(i7Y+U0|RG%?GJod={z6w*2DnRzbSRqYUZ8fZ*-iuP&T*49cQ z%`9czDgfC2N7M}JmQM%bF4ceO`&ljObD_G2=v-J;zh_XN=c-C5)n&ais!-M9HtDiZ;R__<3wZMDB^0TDLE|f@ zp!|*(SOn~W_2rbaci#gvPw3q=f_i^2j{jW)d4F!RW#y(e@rzViAA+nz~+ zpE>n2S53}f#qQm3)8@tkbA@JB2@m#mV`hW?&7S+Rm~>;&`1(&kUWl#x))G?h4ae)7 zc2e)#A>lvQem_Umu2mG>7wjhb8>v^B-S~OrH@oqymIl3e0|#A|Gt+9{Z_;uOvfdcX;n zoz`Z0)3X`%K;sKXw1(lyz~jX~M;rjcKv;KUGrHWxV!Ay z9QZM)2q*77fh|THW#oB;ucN(g3pwc)>eb6kXvJQ7yE#-oCtPlVdYO+v-#U&9{uAeX z_eE9%|B}99qz@T+P&|`;fqZQlR@5KjOQrP^XUI63ShIcuG4$F@Fl-Gp*YsbZUG=X7hSI;{5$ayJRP|QTqj!taS0dP z*>bb4a=5pX?73zMi22jNWV()iLW6KRFU&c51e@8uFAiMpO1k_%p=Y22efN-g?mG95 zzX56n&4hpB&8E=z%gKuwX*Q(eOJaBRWr)4Og-xyJJtd4E;n zwj`J`|0;2Up3=$Z0TVn!bwagnuMlrCtH0Ngu$58vfaLX{`%&5p>#m1F{!K;L2%Gdh zIMoq<->w?TSCJAUCBwVW%y4*7x8eTBJ2(7=$;RG&AY_8Yw7Mk9yBs5%vAs ztT%rZHwgqblb%tiZdv$sLm(Uj(xM7w9Ss@8AKMN{oX89(JX4L^WsH_zp8{bt5`Hl1 z0e)`OJ#M)yQE(PM={Z{HQV5^m3*&!J!%=fS1D_EOlnt@xp9>_)V>tPB5FDKKC{xw6 z+zLx>qR35djH3&3??*-$PzC8e$?PS z2%m(ehl|06T=WBRrsyT1t>DsLTKm*Hmu2+7LHWin6rQq5EflB*kmL7dVm^BE7JiY41^!eT{0sbIm7d zrOpr3Jl`0J_ras-5cqQX0Jh-|f&3oq`{4-WUFd%A`i6P@F{nQl54bG)%#pGeJT$i> ziad{wgQN+;A+`bIaq+=LI4jE?eR8unaTwY~xkJRFUee+x69r!3|hkWdbMja1miqD>FHGz z-k!ho6?4JA?QEd~uhi#{qYv?A$F7@Dvdjt^f0DcS>=(2=CmiQfnC zlC$1Q_gD8DdVEqRV@NxEquBM~QZQ>fn-ea8=J^dw=#my;sZ4nAk|rG#k=u|qWl6gu 
zh^L~^FD)FYE{nD2rIB`tBCV{B3rA*(Ip;2c4s^`Z0xS8S*A+~R7hSU?UPssg~$$_gmE+$yp|Ch~|V+Z-TGEQ>5zfTcZ_Gc^k-T|C)NJjGrB5(Gr&j;%&A2xsSL7>l}tZfvg3{B-i%L|?}->svx z``_;!#P<;T^dtWMf4w%-|DJ+c$oMkux1qV*`ulMFk@E_5avS42(hPt8(slcC>V{m-* z$>Qqz{#dbeC_MU@2};NHFy+)C+!Umb2Q8Z5(Z&97{FA=odB}~{oD0NhuQmAam%C{1 z^OI1t!U%U?y$`!RzVPVn<`7q9i-TSK&{FMIaeK=>c%{cH+NWhM8janB>4(cCE1fi$ ze!Z#WoZd$sHgg#sTh$Tn4JhOXv#r1+B8hFUTFBM&QX!v|s2!Gwl1J8XVl+5Z<}q^5rdY z^3v|oxdYnR$9kjczo31X{YD*rT3&-=Z_3!+i=$yoz<&7j)l?S8IlMp{$4=-Do~8@= zf|a&Vm{x=XHabJgrp{=)H;I4LYXS5BI`g(A-SEQf9DFO+!ZoXY3cVI*>uytRd6h2B z|9gtR-s&XAkvXRqKz)a4=(j_QbtErZ`#ql<4{^ocEq38&sTwv-zQI2v+JjzAEP5Hr zaIT%bOxMZEyBgs3IfmfcDL|FZ`YB_3NoQU znTjn;KC&-eY*am*U!(oq9W2f*4xi>-fbw1Wus}-_^EXb!E^(y)gU8{YKX%}nX32(F zNPK1NGf0O_ICcIFX9+3Ti0=3P6BsXf_+G)v9Xlbgc$9J`?H{A-!qL+7W3PX z0GF>UXN{-o;FTGzPQ{K5#h|8!vSn*GrSIKoaQES0yr#KY)Ss+XT!^i9wwHfxFT@A2 zy+wV1sFU&04(J`Z9}iD5l3H%9#-_Ww$zM0!1hqD+FtkvTkN%TIZ}3LV_2lxdu`uUh z7ihV*27|OlKvGy6$lPeCgbiCFY4_U242GDLNUa*kAEqM&CU&er1 z)=f0|+7A;BhoPCY7{0bX%B{oJf!y;i&>V<=Lyh^xNOJ@4yE~$T_ZGPBMf-d_+shyM zmt*yo!^lG#@>b{m@aNTWW_0EZmae#q|J<_p?&p=N)kE5_@1q>yY!3^? 
zqw*u}bsfwjw${PGRwkHl@Igx4rVnqI7|UHU-$O)~IBqaRjYq8g0s5DX<%NTTFgK(~ zoFg+Hr6(ItR|EQgWvg=$J(n4)db=c*H6P~9x4Yejz+neDojbXB1?+M;y)UgCoz>^)i2&5o>gQ!MUl{2YcPJOoh_`>l7P=p%tC zUHV)qRt~kp8~%-GPoAdom>=tKmii5L{#yh-Toeq8?xht^dmT>iF$K?#z2L=w)mUzL zlaKy{^t|*fJf_`Q?CIVc}>)~$NOJ!F^ z8`K&``@yCEk{rMTJyvUR>d6L-$XM1@+CC=^J+*&`{(z`ads)AyS^V4NS}Y15{6D;X ziC0hE_kWS13?ZeYl1Q0T^uA}`j2TlhB&1L>WqQnW(nM0CND75el0tRQzQ|N$o@GcX zGGv~|-#)GNS?l)~e4n*i>yfwjJ$IkIU&FrloP%b)j8T5lobL;df&RA;XkK8)0|)7{ z&keS&UnrY#a5cU+*n;$V<-*e@cr1Me)sUgwyRR+?tdDQ=57WoDgO79SA@9)x3p@D0 zoXI(8_s$RqC(&urFLuJ}1`o|mCw$j|#Colv^@u8ltxrOd<78>Pc0*iP)r-+M)CpG} zOSg}`;h$`tv7X^{Mxzh1mo$x)zMpgb}c-^m=e+M)5N zrI51L7cNZeAYv=7Dewq-wCV@$!HHu2;bfCWH0~%Q4uj(^`Z%HDCJT#@Rgu#a&w}>Z z=kU5wJ$yCeHp{(ri~V~Lf)1(Uk!nxgIdX@xb(XcP9qY$ZHF2;s!yG)XBw(9}8Wi|P zcm|K6b7>Ct5$9~fvYT&!IEek3a~V=S^yr+!a9H;KPA&iT&aq>oX0K*R{0H2ao6GZm z+e?JKoUj)D%yHup6Gmas+zNie%2R%OlJ?%xJaE1e5YG>;f5|r*^ru{`!mX7JRH{`R z*}+wCFsuC4A4f0hhg8Qv`~tzW7j)CW<~V%QC90zh*x^qh=oC!@n@3rYy|V;Go}|?a z5cL6Pf73h0x>H2HL7nYwpMPo`Kxo0#q}F8C=sxa*0SN8I@33b4(_ zxNON=s=*d?A7PkSFPYKjIAIIbrz4w{))YujV840vP7HBb%i&S{m)#aTIg{gAvka6L zJmy{2M-fi;#WpcV6~c5}@GBGRRqe+7C_V7%Q2}SSkj7rQ3#hIjqs|TfYvn4SdB~(GQGo628Ig2`B$R&ZZqgYq2Nq+u_zRhmmj)+s-n;29u{CX`EVK-Mdpq{l|xJdp@O> zr&fGV#L)OTNEjd!M<^|f*FlMPny5wPGY?^RoLZ`+6M65PgS?<`B^Q`PdPKfd@e(Ff zjFj(~_zBE_<~GOR!qXTa9H;wf0rZ_fct9N2SZF+%bR6`1c@&=CYb|nFT3m1i2)~tr z;@>!_p*`L{7NmCTJskyp9XLJ;SK2;-W%0J^;n@-Rvp1dFP&5s@Ru+I;4|CEU?I`Eg zvqF~}O5Gq0)v6)vxoe52L77}qK_+J_-VkDCW5gaAAh-j}mA4hy?(@G{xXZkbRL+d#yEMZ3= zEcn$)Hhwl*UAS;7wsbAx$?dbHy*opp!<;IKJb=IzPWQr{-i_s?LuFA@1G>Csq{*@2 z_L+RPZYV3hWTwV#+laYSx?x^3D^+Mk!Wu>BXfb~S)IsQ{6IdG$!ZVq4KPN5$(wGWA zL(h&3ZPb|Dw*_mzgk$FCU6``=iO{sfJBN|w8Mlo0PZ<)a7jiL51dD?wwT~;Vv z`r#`Pc3^1ZyS)7l4{Uj2CYv+a0p8rEvxEW*u}*^`C_k}568>%Xn1#y1syyKZkPkV4 zJvL6oJ^9bUZ%aSk_uqcJdXS#Iyy$_CtIK)gUyoq$?=3j(PBqs$*9c!obY7tOdw$L@ zfFYcJRIaAIDD#QL)p7s zj?nISdEg`RR40M-pU}c+JflWva6b9|3b=5_iw|5e9nwpR>3rDwD&e+D-cY^o{sM_> z1@1G8evVk^Lg$Or*HSa|<0(Hb3%`cVb7MJaKZxC@!rWmh&O5i4JkwDKwJ9MTzJ@>W 
z%D~=juZWyw6}wvy7oLQR2|*B_{SZkj5jPaF`tgQ9dB}}vEf8rq_R=N^a`Zyr?)DAX z#594t*Em{&Vt_6o#>ghv-1+y^H--I~2wMx1Nb2gblzj;H^&)QO5vv2|%vnm{k|M9es zzWm6X_P8564sY-0;)4(igcVh)nqV4?byYs?y(;A%gL*d zcDM*}T5oDSHTgA+TKOE`Tpmce{yM_cPSA7SIv6qf2rh~ChnRjPq$9f%p6=%4Gf~Vf zc|9)p?wfxXXyz~#4?vL6;o_bIm!W0y6lPYp8jk1{xSoBNtgV8RDeyrq>V6n-(1sRB># z2kqwLBW=jbeiZkQdTE7hqI&~a-|MiLZ_3rp5Px+m%$FOY>*k4Y-6)JFos}_8D+s7Q zXm5+gNcG2$ep|*)ou7e2#`fc+708orgJTa(pzF^rc&pk>&i$~lRtpnWvV!MM{Hc>4 z5T7fgi4>t>?`XfmB*${vw`YzddO|M6w0GRNbWZpSZH(!hU9XN@aBH2WQ9uXI;HQk` z*ndhQ3Y}3|b(I(OSPqmEIMez$#(k9V+p7x#3t`3lwS=>41V=IAG`8a0>)QSbOwE)Y zr5)fxw`Xa_iTgr>bE83H_*x-+kRDu%gAJyiMSM}{tF(1(0__4e^=K-CEXxXPV}rBk|i*c`e@fcU<>&=B%Z6CbLx{(Ppdf)hVp=v zNeby0W@+;aNPEy)orzLp*Oy5B3(E73BHlpQgXA$`nQjTq$55E`)scMD3cjXlHHdou z9a@MjBK9j=E7zmR1dv28=Z$L?){l?PPh72ol{InvIPjj-UuHw_iN zAWmauH=fddXb<%y~efDm)w21jkL~g3AlWrI@Pivj<><>gBBK&n!{v#+n|da7<)wYZJenlIO#_aeujELNLm)H_U#w<4ulbm zd?s{?`7XSO8a?7SCmaNO{tUiOm?```P`o+mEP48;HZqM-O`m6`c3E$L#JM~n-%`{L z>jaHO?g8~8fqFL1b|T>?Yv~^h5w54DP`w)ZJ$T(&e1xw` zzp;q}U6JmKJ<)AN8srzhb+es1mf6bGvt`s<;Sa7glj~Jki5@KBfvH@4X$QNuWgJrf z4agsG>R+Mg6H}i?UGd!%w)p#^=ns;v5pk5x?m3J10v^dfvpdQHOGQ6-T|XUF^kWt~ zI011q`Pu@-zrO?O+LsDVL2DW|QomSN6*}oa`z6AUt6PS@gzy+`ier(c;Q4V$=mODy zNNY8Y`eMP<_kOBS4MN$BXdHMXfqKpnDC*aFfGPDPd#ZH*=19iu;VYi8w zLZPyzS*_*}b>M$&418%k8*5H@@{xIacze4}NaGfLdi8(nAoS>gfIj_*eVWAY#6C^p zTx+pEQ)~M9zxc0p6rBsSxQ^^|AR40wcEItto6gRe4*iFW!6@fcbTP>0%fH9sz93V1 z;o7~ZedHlVrPMH|TiY=DcoOe6xFvM)>4?=Y??LFUol;`&H;Ps4ELL*y6K2}xbFU`# z`Q5UQaN6jZQhw?@rk?Y~@DI0fcimj@42{InUw?Vg;TJgk2laK2rK7UHH58p_g5&27 zWRu-i^LOUW=MHAW zx`VkZ4e(u5m}F&d#V;j}MjiJ#aA#RM<=Utz-wx9vi2eY!b@L`dtR2pywDho@1F+E-ro)iurm`d}f~j zoW8~x=eBfV6bIS1%7v$ndChJ`D`2<(6|(~`YLN$Ta#$9Ur4t=Kg92aDAkEw2iiqL~}f z9p*pV%me4=@K>9g$rK;73%x<-rD<5}!~1C*^jx!hGA>gC%QgzWu$vex8m86&Ax{+tlEtA$J^lrA_H2ZS z-9BQ?!h5h)y9j4jr$9*LTlV$1i99Nr?2 zg0mfCrLtSznB--4Dm6G0zgP8?J8BP8qmQ>Lu=p9wO>=v&lyHqKa#ly345{ay@Tip8 zV!S}}1&jYC^ZOB<)vS<~%&5aLyt<_b=sHsGR-$V>HN=6zly_20G#T{u3 zu45DhIY`tr%{t&8uSv+T2F@$Zb$jPp<1Z5 
zXAE6SUmm((8e6dRH&2||8ImV_XAb9^%9d@udM|5wOK|&M-2?s zLAy`q@J}Zl%9C%fG&!3!y7iPTe7_B}XW6knfw360ECY($t)p`%@^I|rKB`GiiI*DLi` z7H*y(?w?CdWpzbHPepsyL7e!8&RJg^!jnw~!rij}U`@{x=U9 zHw|8f6hCm?dmKbe1xGxY9nFJ3t^p(T55*I^u z8+w)+`-Jr?!c^Q7jPKkfHJHt|D0=(cevj?tZj$zL)tA)oU)FHESP;c@+6)GcXgzR1Q=K``Jgnm$!A^k0O@D zMSQxUEB0TQAb1qS*t_RN@ei?t2_s$Df1Y8|rY&`_TkCtur5+71y!jmXbny@$uXPJ5 zr)jApzmq07W=mK+gIyY9jS<(l;AlK_$OZlTHpkstd!wt<87R$j!`727<3;;&Tn_!& zgdKVi{IRb5v-lVLl)f37ls^ZfZRtRqtk8YnzcX`i-=B6!d;rJSzT~-$TFR!>1L!pG zB+RAv28b`&tH2w~_kJe~N;;?6>PlR1Hycdv&@dsfG0=w zB<=K#H}Gtrmg?Bb6hGp`Xy{+jK<@3m0j=CLg0Hc`gPMY`KZXG19XtuB!i>_#LK}gT z$xv*tAXO3^Mm2?T7ImOwTRMxPB8)eFR9_XmPneFr=4FCcdETRoLQ^PN9}Hx|5=~W* zFMLciAe~u_MdlN5|NL*z$-Nzx>ZD_d)(XW!?w?=Gn`J04bY%?TLo&N|k~FDy2#8uBeFma_=Y8Ev z?`|E0g5)o(PJUfjv(8W!JXrSS1CS1-Tnffjf8Me~fei&WV&g}VOz1PJ4b8`Yclr7` zR|V$E=}+lw!x7OiQDa7DsT=~UIX6M*3c^&OtpxWm@jdie@VT@f;bt|Xd*xIU(#OZL zysASsn0(YzX`H;^l}BxCgr-SyT#KJqUWeY^`e0RGPxe|g3V-!_$L4*tL2-Z8J6F+p z0A1nFvFci0ojGL{GXF2I#@mDHD_CepaOn7$>6X0UpN;FG(Cx{$wm|M_RnY`@1DBnM z^>vP5=QE2TaNvBPI>II+#xwo2zL5Q|sZ6<}3Vb83|a?NsREAX|WARIL_(&rCOaz z@e$giszWARf18f!JDs8R;r2p9A?1Zi7^5C!!-S@VrRC2g%AEqC?F5#J{G=FgsyC@L zK9^HI3;m^5tT$!Cm)I^ICKLbDxjT*2#F>YAf{!g?;^}=!i>bvliy^+4W6^nA$il0x;%W?u-a#f8Yk7$)jB>U9x5iD@qr z{~+-)asMW6cchtIJj00xdL;6L69f2J+==eP`U?Dm#0R_CqFX-D_-`Av;MFx&+^#N? 
z_QWx}ja1hQ?)c87Bkfz3fm&-op0FbV!b2tlOPEo3C^@AVTl_raEBN6GGP}|4bgZc82?0rls78zm@+EkUo964uk|Emze3)V2`x%G z6%EIQI1`8C*L&Ig`{J`K@s|dD3jYc{!1s4{1@ZtY@hB@h+S`TnAFJ-^06+GGLX#1t zLNkN=>{2keS_h_IZl-2cR{-Tb;qU-x@@f;1hGO*nVBKgczPd9&6?I732nkDI=I^7> zrTZ(i>tk7~a~$v`)D#}YI;A_2e6l3`%bMlYjQ9%4Q?ZnOdO#dZT0~{92RM^AcIG1Q zOy{muNgE#B!b90 z${n~pBof2-E=Tg+?9aRtqV8q)N5g?|53}2J1fL1Hm_2nOCq5RM4=EmC;nD+3^De{e z3-wTF@f#D9@a_d|q&QRWK2#D~kT{D`odMMg`EpzNQM1EPUE~5yeV=M*Tu7Y6EX>W+ zi}sU&a7p+;ATC9mb`iTYxq&X7FG`QoG#cylL?B%ZtG1;P|HM#j26Mt_EIK?5y|Z?Z zr+o(hN{k^S@D5tPF5v<{Nk3wzP8&JtJuu$a85UdR;n1#5N=4JZio4<{>J&sggf{uE zHv&o@R0++4|9RHK^Np!*K{!gZ<3J3aur z8yyADs$CA&lUhOm}jxXd){A!#FOOX>D@}|-}tqQVB~pdJw^uT zTv!k2O4!%_G(@c&ja${NwLKa7Gxzt;1j0p_cRLDa4cQ2%vl67|4?V%x+z*LIkn#;P z>+J&JEd?hM-m>csrKDeHN!P2^@C9BKZ12rV4QT+ru-Q@OFnlp_(p;R(Kk)6>m%^Z_ z<(&8wPgeF*cDHjw!c(Z}wvX=~G!=APlICiEx}ZhnS_rslDKGHy$MRFI(C)_#_|?Y* z3|AykO?DJmD6|jg9&|_X`^(uONVq7GKjH>{G2%OsJO!^m^CuhnIs~Y9qMn~{1b&8I ztnFVN{I(s*V~E}Y_^i6DkRC$vfsopi_9dvlj{Ig_mGmM!FUW_%v8lYeXATZn_8EM3 zO%>i)cyUhnFOyfmHAQDI+@&j=T;!o>NB5IS2TPP|n0@=Yk{z)asJDa`*G{R_yTJ65 zp28baErtmU2GZzA9tMV_9H`CXUL%^K=;tJE9LO&mSdSv_o-fh`(iIpNIvhoRsQTqt zpt`8_lUvIUlWtzl<8Ih0u@3Rl(3MS8`n*g!gFGbdDM>vO8WX3!g1{EaVPl2ni24Cb zncb%WwLK;3mmxfi)rj7pn4eX;+mJLSkX969LY-wtK;VV&6@m|rHTz0EiE5mFIZp9A z(OmANwiG-F#8RCLo5DAjm4MiHgZv?Lhll%u(T`Tc&pDO@V?$d)9#070ri%Y zLxCG%`ovwe+$3}xaU*s;^U3#O-t4=i^WE6&PB%29`!sjPHpXQQ_X=(V(F6N8Y6T;| z0_i*Rq1V>4Ao?|e=lS|6>ZrI)O!Nb89l3=92Zat7I+XM+aoc!KzEdXtFCYw7g^nPP zhURq6gi?G0a#Qw_HjEH;#NDnI)p||x>u~mo6H;!})_HcsSM==F1*vxceI{0G&dfa{ zyfS5ymk)D&<`LHP;8d5WgzCuD*W<$bh`t8lQ>|7KK2G@5?5{JR(riB(G&>H2 zZ5rYmrhmQ^&+nQH)MHSn-vg$B50qI$8mftdjYuD?MA7$3{?ZAASEss=?tk6Q$v>!d z%YHJ^|1&NcsFFrhU3LOYwx5dR*#rhN>Yw1&wv#|`RLZ5-ETeQHBM#()Z{Mb#ND%3l zJ+<1ttuxi?*BP8}21tLx)#G%|BlU_YCjsO1Ve&as*5FyZ(Ddq1&#SyL_yew6_>fzC z@aAhxSKyh=4+YQ3frN$Bc-XWn9t=OvyoYbJU5?m)eG$+FE-B+taCCk@n-XOy#&(B48@QIAsKjzpS{ zbo*OAIEShKTLU51MHtbKIK%pXzx%&<{_y`_;`#9JF0q}hhY3|iS^jN4x_7*;;A<~S=kFKZ*Gtd#K*#fkCx2s 
z#UZ}A(J@SyVtAW28Eiq{c4%b074z5sgunOVSm)C_kglOjnH#~Ehm8fd?z(i}PoNQB zimit1#wkX+>b0h0@zT~ST;F9lJ}vBk#aH%XGuuaS=}H~NJKT)To?#|G*mVXx`srg* z(k?va7>u`9Dbl|E-ayyp8)zT%tOsWb+`nfrF|O#bx0Ibz24iKtRp{%x4~!p!TB5O+ znqPKSZ?5ves}pX6dxJYt??=rcYj!JqWH1|}hLy%&BO zZe&jk+JkTKd~Wdh8n$T;t2}tNg3s2&g`khwW1w z;q&<6vXQOCCcpU#yHs20)u9%A$clZqt;z+e6F0+qTT3w6J5}j7I1z%GF+bk=3w|Dw zj`>}F@YaSG#XYGj!;Ztg{rBO*l?%|me=gi;>cr?IQrs8XP}xoUDbyGzf!DTY^nP3n zcw}|Qb?HgW*x3bcu6o3$1{U(^m{rimg=CMukmmaP!(^z?<3O(G>$3k7XFb4Y#HdRyY4zqf_-&RadCGj= zeA4FHJQ4Zl|7Nr%VveWy)ea{hmdUg&x(MWXwM|Dvop zzm?10&fkect?Hz9{519|X7e&tCxw`?;ULgqXN z%5iA^_z8YBypDUXHjw{T=d$bFQ66l9?T%XdnGo#T*@?YzZvBt zCoI7RpFc=r7A1g5h$%myozJQMREo8-F1-~NnWVz1rzf!eky4nuJcS$DuEyx;%NQHH z1{xXvWND#Y!0tzLJT~nk!Xqa*O7G>23W&oOF0mly^k&crVC{36&dnL}ifax)xyMr$ zT;^Z4gy7_YP&C`ypKx&p7xM3UCwM47&A+=Z(NxdDa8^t5*sF*_(bsymUn?(;i40V6*p!vOq zCr9|Pt`Yrl*ot*HvcYC>R_OiNQ(MH}LwTepw;vr3uIJC;(vcCE(e|12r1M7}vuqXN zvn70^HKNypyyZD}3UTX(GB(RQ0Vsw_i}@}1nsh%r(Ci62=Gq?nAL}EBt$8K+6by#C z=F?EBStU^(BGtUWVfZ-YB6mGzfrS|-X?^c%h_8A|c(V;&evVVFHN6L27q0gQ4!o{!gFRDpq`*OrvcRT2p(EI?Ol@rWct?Ix zrYk0FkmZsNp-3?XkK`y2cwWO3(d)X2`t+td^odJ=jiq7u#N3Ssos;p*7gKma`gl}8 zGx^o!$&BWcKiEWTDGaZJRC1J2Y zmM`fjZ{Lsv>QY0wQDiu%JA9P(DaYWIhY{8|zd)+5+FGHS;#3>lV^lXz*e?+#Vu$`s z;nIKFYK3`yd8el}d$6k?gw{#Mb0z~|W6oDD^5lMxBz}IfC3cV4BT??dst-QcNq;+E zda{c=J1UUtM|MyPDp!d6W>-(#!6o17VSJw{X#2y7Ik|UW&m$Z08$}PW{*t}C-MuK* z`k%EtzuO)-WAYq#+j*dG@CTk2e;fP?_JeMqm0Z`LFMZYqrWl1I)jLuR;D@(^`1PT* zKFYQNvR+l=v7=X5W#Sl|J!1zn;den&3?R~GA9#hMboA9g{57kQy6RdiJTAP%2r~-` zN2t)CqJ#;oJnA(ECK_E5oWhQu`Gh{cMQm{QOAzg(2TcxT!`HWUdGnCRU~Ib`HpNCl zqexx!JD>$rXN>d#pE1K1Z&*Jh9MT1I;wVHCFui0^~ue7%xZicX?0k0C^*vvjjuc6 z`aK_!w1UVvYumvXGu2A6lD9hy@_y-J_G(Lfnq3Emc0T|ePXoc)ncxg|Wr#v+{3Q2E(Tfa`Y@bG0!tlFv6Pn?Hd;Q*8)0xN-dR3`l? 
zQ~j~rh97|R883JFD%t5PykX}s*y)){80?0fTDeH>FCy8Cdi30}q>1eKOpD^ZhKIQA zfHxtPoUoL+HyFbR^Hhork}eXu73p*G$=(|ER#6_`?imP`iFfc%n^<;Y+6Z><=TzvS z%*Q_tJw@I~R6{_ThOJ+-pEPSbm9!=o!PtX02uSD~9ckgie5 zDK>m%!UQhU*^5KY2P4(9O7kOlifekU;5oFX9_5X^F^BpR<0g|%ea$i-R6^9)FVbKu z8@Y$Pm+3F511~&I(7gBIBg`Hk)ud8xZcmylnu++Vus=q%5RRl}v2@INdfu*uZ4B#y z&@lHlxxgcXVj%s+DCaqGjH0__AX0oa&FA+*-NHR^f6R-53iC)tIRP=t=uGip&4I23 z{(pW-#0f&H0C7Fccnu%@Yy+gl;EBP0_Fq&gBRmoMR(eryf=qbM8+#mqleA~d z^$9s*{J7jd3iT>xNCYeBsXhSh%C58Z-7NrnZb=cC98s55M<3=#D@3^2;R5 zUCRq6SFrr`%f%e9zD+7&*hV|%);%0g?NC^J)g%sU9nr9!1MhKgSU$xYqxM`U&bf|b zJDLi9QiCTxV?XcR)%ahkuL|$8_x&C0ey~PhycFeR$BsCjldPj}g8AM(P#Jjyw%y*2 z0$WFX-zX6lpw$IC^>zIX`(9?M<1+K02o{zlR?c!V6AZ zz^QHtza|33Q4$*Fj7cEI=H&2}Z~GLG2SL`phZ@*(6A-4Mh^eS~_wQYJmrnh_cWwzQ z4-6A}0tDYvyjfnu!y@N7VWX&75ICcA^CkAKDd5CUF623Jv3O^&-YVjf`s(yS&E!LQ z3g|4EPPKah$yFaLN&lWQIB!LTf=RW6+>W!Aur6$n!-eD{?c%{&HOBTo5!QS^{;Pby>xZ2GZx^AC= z&ksFdKV}_Kj2|P^9kUaKz9QUK?H8A$!>7SOzD^07XrXGoOkhH{I3Ltg1=a>OUIT5b zZ-M<(j)s$RkvI)KPK~9xZYKZhuQ0-5wf*P^?8T%!Uw5Nz_*;f4NUVF?a?XtM&K2Ycx1)<3S|6 zR!RHGRg?Y##ad`e^54tZ_O50~c~n5!R`^-r>qYN?&&bu#8m;;;@{|*7{Cz_Sx>F$$ zf8lLw4SBMyOn6w*@l5WOhc7BN)n1D>$48ncwNs}`@z|Ke`q0+K4%*)YGcW}t4O+mdIa_~S9ciUDto+SE?Te5grMAQIBj!`GBx#HZ5^!3 zFXYr~pq{?F(7F<7A|$WPNK@ic|HGQU)y|B(E7mi2#p&M8D%4Cs@*9G8A!D}8iF=VR zbvQ}o%Uz2aGSBrZTS4NmF-Mar+DGRiP*gYGuDR;ziC1 z&CEhRv{cC(v4v4uD*1Qj|K>KI{AMw?)%Owo14i{Kr`mOv?H5nRHkSK;ybAfAzgV_pxac47%@*T9 zGp`%%_mWI8n8hydyMgZ>pJhTLxZMqv+qPNHiK`Xjcxl(wD@%1B?EiR&stlaiNG59~jI;Z8O9-gXywoVrKr zP4(2MJtM%+=m4MC?mZuLK9%tJ2km9~MtDQ&JM03R=x(Itb{yOGG>%BHN;rbp6gm0#P$w(kghoW~tnnV>Eu*SA8 z@!kw*{Vx^LLbEvKvU0-9o*f*vg;PvONA$s>|I+B}g&@A-T8>1$H0Y_*5y-1UvwDW4 z9m3()tms-Du&8fcS?H#789Om2CJlscAnxG8Z;tJlh?ILMx8ID!6OgsKFOJNL0rEX~ z?cE0ItvRw|yKSX>uSg`%fD8KWqdwAcAdZzf-k5?>vG&5hL6Tk(&x)A_jXk=exVFfV zHFe54VTpRg^%YRxO4Noz{T+Vc%0Rw4Yzq*tU^Slzj-T3tU)?(DD0*i@6SuOqpGkTN z$9*hR2=9S3HhH8XAioDDTYB^1S+_XVFr%IqIL%v#F$1HxUPTS&zS_&kdn0)$5ieD+_PlM(zA85NR${k-tS924m-l zD!8|+Qeccs-bv^t>OFN(kNyfm(q`0;_zl$S5?&khZWgha>Gy!}f>B=+NDJed4@y4O 
zAJVu~@-;YR&{7DVI7P%rXa?civ77rF+UG17D4xP6En){-^qELPEPjB0)0)MD(fG`S%C0}FYO=|0ba2Hy>Jx%rM3KVNb z`OnF#B6%B`^Z=v&k9W=>IUQ zwm(eT0fqk@=R1S99>15cq*x|jDZ8XS;pE?IwHtW|D1I_jcr{MCk@{p)sONSJx7}`8 z+s|2>u8LYxsSk|glQFRIKk1-Pfik{eCQ{D`8klQyw^29whz7Z^D|0Pz%T-1_Ab47_ z5QKKON$~>VNk|9C2l9M`-r&>=;li&}AL^y-f1gVl;-~NkK)p2LqRX|KihMantg?Xkob+SMEED3&&nmMm?gv3W|zgg-s*Zm*lCF5tbIG4ut&|pXwu4r$3#i$7 z6XF$Hm>5j&R}b%t%|@H4lNXn>VK=IjP2K2Bj^XiO=b@$QTlk@Az)LiBvj_8oKA4_K z>uc5^F8}dX$(y90QTRPp=hp+w-m;5X*QYaeXU1dp-BGMGB9p^lTlhOjhGusAfc_n@ zkI0wPo(T~n-owi3ZyIe&J^AvshU$KYxd4OH>7C;-IAJ!ur|d}kGnF4jv3~qxtM1t0&?i3RQfsI;>kOX$uNVf7cf$7ijlfv1m;B*9hqTd; zu}c!iv^66|{NU?}O{{+LXO?f{4*q3p`R*pPetBSf*(*{@J?!3zYgNQ>9n-+Y5-|G&-l?8qh#g=R4KUS`E>xdf)V)5;k25R)*x@!Hf(@lK;K3LX1IiKF|L$ZT8f!l6 zI7{nrb@gTRtOK3y*|Np=J1{5hFMPYnc=(rW3$x~B!-AKVvd57nypx#^ODR|b+YFn@ z_8o6Y%NNi&07-v&@yz>>WSR&fKASCv;_nL+(JL|-KZL}wf5)FgX+$U0&LbGkx2xi# z_4YvHVOyAN^HbuPUvU1lHZcF%9^CJ>Q{)ZT)IA0R682z1)lMwzY6QWrPJm`$wPsOo zeVFc*17@TCVP{&9XQc=*^D*Q{)WIUxsN=oj*VS1x16 z=0@yx2gY|>?`HH_WtvtoZ^vK3izl7cb>nv_6sKOIcH9Ur-B&4))5%4x?6?_RF6v5M zKeUtAoa&1-7M}HDDEl~YKlGxIQ5~r zz-4&=_zio5-b)(DtFGtsRlho;x7${hUv7$Ju{~tVj3hc6b}H&Vc&&VaCH&`%j*LEw zA}=GIkK%60k0m~=Vcnl*@$LqD@k)3LI506le$+!-wme&krnyn@$GjM#USENcpI^bM zDKY%Tlf$^@??-stWHb&)XoJ6B>En6lRxq{cDekarxI|-w0+0RzE7V}`r@%880o5V9 z5+0AMt~*O14|)rX;PZbbNDXGMMUzF7;Qm?aMV{FP`?MU;s3rCA97`GHH$RjVgIg*l zBE=p%QQvmImpPr^prwvpbcs=2aH=bDEquJ{4{M*+5vwL%Wi(dq6>y7hs0>4qe}Qo+ z;N5}NW;NN&E}vP8Sz|scOF!<10yPNxwde2s4#8dxLSF!6x?LZ8R2b3e!wex1ix5$g`0sci#I~%1>>~ixCBAv#$ZEzsVQ=rM0`qaiHfPL89ZPXv1%Ysk#VvA# zOMR_z)X@{DEO&;S9n;`uj5%w(xIT(pAnZlL8hVD;OCc^ni!t4CY|LQ1f3_ud8x)JF z+(nEV=sQ8+J^daiPucZ~A|Sj~i67vQV-`C`dle+k9zgSP6|5I8fv3+~%aYT42rV8A zBB!qQT0n92gw&-QV3sS#pbIC9L`yZl?I|DL zJPDe2o2~5f-6-9g-36bHF2b7uwkYs&RR3tIuOJxSq$!HrH)lQAfn0ZZV?P5LHmU|# zu+y0^2^^MP!Q57Z(0p@iS)q4=dxhKA9F~S8f%{7oSuDBuP6MudCFAE>i zfzIOy$6kkr@cgS4v>(SCoN{aEm^%D~_2>;x9z$nd_>=AZj)zs3op zx#Q1H;Mgved;CqLdTxOkerY`IP!Tq4G+lbLR|}e~x(TLdC*K4-gb35%Z6aoIjGhp(16T;Y&Fl^IN2>YbMz~mXqvZ(XSbAp=~4~9*i 
z2K4;}xvh$rb&s>O7h)(9XTzy=r`T`rV?3wRNv`Q)2m8;RN9+1mIq4OFb(k{jE)aHd zx8LI#&7Hc9MQEBx=4#X`8%1E>%MEqJyyEDgWANHgJ5D%-l^tm>OTAv0Qt%JMEP4{Vv6Cs+Sc0>;m(9d+tU^i;6W%9322Ptl zfpLZhfbty)rzv;!kT_f#>O6?foYPeWUJy@FO-DneUO+)7S5G)Iem(Q}YYNvFrSSE= zmoZUubvy^Cgt0)nRcIiXyLmAW;I}d4!6|m8I7jF~rBkK@VZ?ftFlwMm$jMAPMaz>PxpnV|M;oHuRY>n{TRjnF;MTs(&NsyLAeuc;sj8HC9{q z`-OMjxC3cY=V$$|3q1jCcI?FyOP7I&cZ)}bnDjOcjQY2Q4Ymp^&DjHDUdORyh43FQ zpV0=NF-b5$XBabEVhMK=0>E(UW=PJ-V;5I7hJM~(VMW7zO8VNSSQXiya+~@olV9MU ztQ35_=MZ;)T_HtY(vwXbIx(T2X&wbWz@UvE`SJNjxrjY!XE0N4LDa~uKzsq$qaVY( zo^_G%i;r9FiMG$nq#@6I`0lB<*^ccRrk?63FY3JkYh+8p!Y6#fZBDvc!P}#=~g4#T%@nNM`I3rEUsIKLu<01uaGtyoBK;$?{q4%06@43s$ z*8ua`?xBiW(;c-Ay%iNqsGmB2dLK^nf|S$De|SgDX>(s3xB3Q8NLUUAyH28iY#9

`Q$P($gD5gIMD1bAr!h;XMe4gnmHd#At|}Z~`fYY--cqNEoaVue0<_U0iZ( z1y(scp!bxg2_9FjOt8X+mvY6t^6tkqKyjh(p2&%VF(|c0Aq-{Yd4%@IYe)5k2B4g9 z!=I;G@FuH9P|nA4${p2R(gkltM)Do_g-%HvnEVnyt{;zSWs zOwQ?r>edrT`kmQ)y9?(Q>cQ*}O<-0XJ<^O4lFpX%ulgftHSANN1MSo3+!^(tL>>uD zW1fgu0(Ws^0}2xq(o}5NrtWg1-|z8z^S(IQ*P0Q}(^)kED0C8OF-DxM2B(McN-te` z&B|!F8ITTy*SOkc3chkTR_O=##no4AuUbLJ=3C*(HzRe%#Gi0#Rxq4NmdW=psZ(YS zw9(du=U;=s-}(fryY)M!zf+{a54Uo!HkLr&%hb+e$jdB%8JRn|N+C>!b3@`l#OUuJ6@*8K%ddt>^_^9b1Hz*gk2IBqJ|IqHSqMqbMM)64eg8S|uC%rG{|F|zah0yk-m72Uk!p;zb0v$ZX^7(tJFtc4Kk~ps6Uh@cre`arNPdGfc`j_x3|2{}aq@f6;m`tn z)9jFvwKsw1jP1c@`)P!?7TSl-gSHm;!Og0QiIdjAh;8`=B6oIdF8~XB#OkU=5Zq@E zF8zPaU3pMdRTvjg(;@*i&?HAe6iA10L6-A9ZOAQ`lu;N`l*M#R(*RLJBMnkf5D?sV z3~|ydO$>PFyX5!)1;r&WFavkf+^}&8sp$9Jnf~p+W_mNsFavY$x%YhEZ@c%NcXA-f zJOpPRteDKtCk`ObrcRcm=t+AR4q>JL@^YF`WRou<_) z4eqvfIF-m%_X3Qv#MPF7LzM*GuHgS!^fIMiR?zzD%B)i1YFcpY3mw|68+3XOVXc+; zKxirjEu>LX44qL@9=Khfa(J#@d~ne80vA8^BHOv8{KVgnZ7~-Z_^+JrCGpYnpA|eX zG{_`q6c3s@yR$i_Pn$;Z?%;7h;XFPvA{jN(nc7cwFq+CM2ppxQzo92Nd(}ZD_1(R< zKe)4v5x8+J);EDab5EpI*y~JcE5R#+q4V|7Wf|;zuUX?S zYidEiAybiAe6RBUN}$WV#?glT@dP~Pg?3jYgW&2hbNSo5YskrIf3Wy{_gXv${w$ZF z>){h`Yvc;Ft|pU;zsmVw&kX6q1^7RYCU;@y(P8nF+J+=ZjJRKr<-^#33>ybQ8XT- z*BR@eq4{WTor|iqx{zoMm+bZi#!#p?Xh%sQ$WXNBjAEAK zVGJ!i>9@9w#pAD!{Y%bMav>g+AHtiwFQ|g8lSG?a<^OFN71ZUx=T`BHf%T;nmlUAq z*{`OWd5<(Q897h#$tkpAz05T(adfgQsJ_mwW%g!2Lt|JXZ6l1jkP^eM`gh>nX^o7> zB3Te>l(zY1GqN9Qy?R-Tr+j2L*chFWppX;j$o5IxWx114R-_mj#Tu92AKYKb;5-_X ze*$%0f&3<&bBgC^=ycT*w~+cjx-HqAY92O}>|5Rw9?aF_}8S%aOr>YLZWu zaEKQCPfcjV*h-8r~-_`KaC3`%2 z(Uw-+Yuz=4XGG2<$;IqL%ar&<;0Qz0(2r9;MlbYJz(0z?eXRzkTDI>y5%smfymsJj zW1(FXr6=dp*I|i#a`+9#n&{~ARI|S;x-P(6zk)7*%~*N3qXOPqC7ZmQ`Gm{yt{u3X z>JG#4A^JMSq^Ad5xElQ@p`h4>Ghq?}3bQtmGL%(6OS?gB$XEt-x`G zYoSo1Qtz~+u!SamXrtU44WdyPxMe^$Sim#E(r@tV9MF_(2zS4JYmBM+!=|TGQu;{g z6C>`GPJHppZE{@0zVA8sy$#S(9@s|`?{lP-f~yS`J$QgZPNc8i-YH(0fv5b#s@>ESck9MW_nA< zr`~4946m-?`8dPXM$zRC#;T%F(F~?fi8(V4T0U8F6LMR0RnK02M_oAG!SwkL4vs-T zOe68**IaVB)>~8f`;(2A+}w<(XD1MRh!u6GaSklLBGOi-p!c-+|L1**mGp_^V(4$d 
zc;(P3TOs$fKz}fNq8{GzwuZKsOu*_JehTkQjb&VCC3#{&_zoG1yEm=?enoIpLkI!e z;J-IP3vRSbe;8!>Eq=S1nV?Tavr+izA!c5CKPg>_S4%6H%IKxQyK7U-F@Q;oY!3O- zeaV}I`^cpx*a{pZWN{=MDr*vuGKDRDmh$n@HKrdrc5*w5W{{jnum?U8oLMNfqC+AU z)3{z8MB7WwKs~BEQek}#38sh+eVo$Su=U)@q9Y&X;$6TM&g^q%7x(;^8x14yH#_{u Reog;}&p-2#cQ^iv&)+A|7i|Cl diff --git a/.autodoc/docs/json/src/cli/commands/estimate/index.json b/.autodoc/docs/json/src/cli/commands/estimate/index.json index 01fb65d..c388997 100644 --- a/.autodoc/docs/json/src/cli/commands/estimate/index.json +++ b/.autodoc/docs/json/src/cli/commands/estimate/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/cli/commands/estimate/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts", - "summary": "The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.", - "questions": "1. **What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. **What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate." 
+ "filePath": "src\\cli\\commands\\estimate\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts", + "summary": "The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. 
The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. **What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. **How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. 
The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.", + "checksum": "2b0b3903432ae423bbc597d04b052ecb" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/estimate/summary.json b/.autodoc/docs/json/src/cli/commands/estimate/summary.json index 2a640a1..e88bd06 100644 --- a/.autodoc/docs/json/src/cli/commands/estimate/summary.json +++ b/.autodoc/docs/json/src/cli/commands/estimate/summary.json @@ -1,17 +1,19 @@ { "folderName": "estimate", - "folderPath": ".autodoc/docs/json/src/cli/commands/estimate", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\estimate", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\estimate", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/estimate/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts", - "summary": "The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. 
The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.", - "questions": "1. **What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. 
**What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate." + "filePath": "src\\cli\\commands\\estimate\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts", + "summary": "The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. 
The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. **What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. **How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. 
The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.", + "checksum": "2b0b3903432ae423bbc597d04b052ecb" } ], "folders": [], - "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe main steps involved in the `estimate` function are:\n\n1. Setting the output path for the JSON files generated during the process.\n2. Updating the spinner text to display \"Estimating cost...\".\n3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stopping the spinner once the dry run is complete.\n5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Displaying the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository.\n\nBy providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively.", - "questions": "" + "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. 
The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost.\n\nUpon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/convertJsonToMarkdown.json b/.autodoc/docs/json/src/cli/commands/index/convertJsonToMarkdown.json index 8d9d783..e0676ac 100644 --- a/.autodoc/docs/json/src/cli/commands/index/convertJsonToMarkdown.json +++ b/.autodoc/docs/json/src/cli/commands/index/convertJsonToMarkdown.json @@ -1,7 +1,8 @@ { "fileName": "convertJsonToMarkdown.ts", - "filePath": 
"src/cli/commands/index/convertJsonToMarkdown.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", - "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. 
Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.", - "questions": "1. **What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. 
It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file." + "filePath": "src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. 
Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.", + "questions": "1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. 
The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.", + "checksum": "79c860becf47b9882441682f0213d534" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/createVectorStore.json b/.autodoc/docs/json/src/cli/commands/index/createVectorStore.json index ab4f379..300717a 100644 --- a/.autodoc/docs/json/src/cli/commands/index/createVectorStore.json +++ b/.autodoc/docs/json/src/cli/commands/index/createVectorStore.json @@ -1,7 +1,8 @@ { "fileName": "createVectorStore.ts", - "filePath": "src/cli/commands/index/createVectorStore.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", - "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. 
It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.", - "questions": "1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. 
It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path." + "filePath": "src\\cli\\commands\\index\\createVectorStore.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\createVectorStore.ts", + "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. 
It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```", + "questions": "1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. 
The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.", + "checksum": "a3409c4340753a867c72eebef7626fb9" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/index.json b/.autodoc/docs/json/src/cli/commands/index/index.json index c524105..46652c2 100644 --- a/.autodoc/docs/json/src/cli/commands/index/index.json +++ b/.autodoc/docs/json/src/cli/commands/index/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/cli/commands/index/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", - "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. 
Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.", - "questions": "1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. **What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. 
It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files." + "filePath": "src\\cli\\commands\\index\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\index.ts", + "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. 
The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.", + "checksum": "4060b1affae5a6c385cda308b3cd1750" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/processRepository.json b/.autodoc/docs/json/src/cli/commands/index/processRepository.json index 5ff7f39..339cc2a 100644 --- a/.autodoc/docs/json/src/cli/commands/index/processRepository.json +++ b/.autodoc/docs/json/src/cli/commands/index/processRepository.json @@ -1,7 +1,8 @@ { "fileName": "processRepository.ts", - "filePath": "src/cli/commands/index/processRepository.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", - "summary": "The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. 
It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.", - "questions": "1. 
**Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. **Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively." + "filePath": "src\\cli\\commands\\index\\processRepository.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\processRepository.ts", + "summary": "The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. 
It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.", + "questions": "1. 
**Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. **Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. 
It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.", + "checksum": "5b3ae9ffad1d4b4a22c6f7fd66bbde6f" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/prompts.json b/.autodoc/docs/json/src/cli/commands/index/prompts.json index 3c89ae9..4b43d3c 100644 --- a/.autodoc/docs/json/src/cli/commands/index/prompts.json +++ b/.autodoc/docs/json/src/cli/commands/index/prompts.json @@ -1,7 +1,8 @@ { "fileName": "prompts.ts", - "filePath": "src/cli/commands/index/prompts.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts", - "summary": "The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. 
For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.", - "questions": "1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt." + "filePath": "src\\cli\\commands\\index\\prompts.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts", + "summary": "This code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. 
`folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.", + "questions": "1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.", + "checksum": "e44b82bf4912be69149685a997b6bde3" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/index/summary.json b/.autodoc/docs/json/src/cli/commands/index/summary.json index 587234e..93bc582 100644 --- a/.autodoc/docs/json/src/cli/commands/index/summary.json +++ b/.autodoc/docs/json/src/cli/commands/index/summary.json @@ -1,45 +1,51 @@ { "folderName": "index", - "folderPath": ".autodoc/docs/json/src/cli/commands/index", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\index", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\index", "files": [ { "fileName": "convertJsonToMarkdown.ts", - "filePath": "src/cli/commands/index/convertJsonToMarkdown.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", - "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. 
The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.", - "questions": "1. 
**What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file." + "filePath": "src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. 
This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.", + "questions": "1. 
**Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.", + "checksum": "79c860becf47b9882441682f0213d534" }, { "fileName": "createVectorStore.ts", - "filePath": "src/cli/commands/index/createVectorStore.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", - "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. 
It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.", - "questions": "1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. 
If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path." + "filePath": "src\\cli\\commands\\index\\createVectorStore.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\createVectorStore.ts", + "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. 
If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```", + "questions": "1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. 
It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.", + "checksum": "a3409c4340753a867c72eebef7626fb9" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/index/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", - "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. 
The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.", - "questions": "1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. **What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. 
It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files." + "filePath": "src\\cli\\commands\\index\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\index.ts", + "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. 
The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.", + "checksum": "4060b1affae5a6c385cda308b3cd1750" }, { "fileName": "processRepository.ts", - "filePath": "src/cli/commands/index/processRepository.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", - "summary": "The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. 
The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.", - "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. 
**Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively." + "filePath": "src\\cli\\commands\\index\\processRepository.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\processRepository.ts", + "summary": "The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. 
If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.", + "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. 
The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. **Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.", + "checksum": "5b3ae9ffad1d4b4a22c6f7fd66bbde6f" }, { "fileName": "prompts.ts", - "filePath": "src/cli/commands/index/prompts.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts", - "summary": "The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.", - "questions": "1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt." + "filePath": "src\\cli\\commands\\index\\prompts.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts", + "summary": "This code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. 
`folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.", + "questions": "1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.", + "checksum": "e44b82bf4912be69149685a997b6bde3" } ], "folders": [], - "summary": "The code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\nFor example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory.\n\nThe `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility.\n\nThe `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. 
It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n\nIn summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating documentation in various formats, and creating vector files for the documentation. This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process.\n\nThe main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks:\n\n1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMS (Language Learning Management System), and create JSON files with the results. 
These JSON files are stored in the `output/docs/json/` directory.\n\n2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory.\n\n3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. These vector files are stored in the `output/docs/data/` directory.\n\nThroughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions.\n\nHere's an example of how this code might be used:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory.\n\nThe `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project.\n\nIn summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. 
The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "", + "checksum": "376f96417f8cbea6a5ab2463268fe4af" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/init/index.json b/.autodoc/docs/json/src/cli/commands/init/index.json index 884e632..9f3af03 100644 --- a/.autodoc/docs/json/src/cli/commands/init/index.json +++ b/.autodoc/docs/json/src/cli/commands/init/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/cli/commands/init/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts", - "summary": "This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. 
These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation." + "filePath": "src\\cli\\commands\\init\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts", + "summary": "This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. 
The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.", + "checksum": "b93831ff1f4023ab61c3bea963a8a112" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/init/summary.json b/.autodoc/docs/json/src/cli/commands/init/summary.json index 285900b..49e0a77 100644 --- a/.autodoc/docs/json/src/cli/commands/init/summary.json +++ b/.autodoc/docs/json/src/cli/commands/init/summary.json @@ -1,17 +1,19 @@ { "folderName": "init", - "folderPath": ".autodoc/docs/json/src/cli/commands/init", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\init", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\init", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/init/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts", - "summary": "This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. 
If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. 
**Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation." + "filePath": "src\\cli\\commands\\init\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts", + "summary": "This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. 
The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. 
The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.", + "checksum": "b93831ff1f4023ab61c3bea963a8a112" } ], "folders": [], - "summary": "The `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. 
The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences.", - "questions": "" + "summary": "The `index.ts` file in the `.autodoc\\docs\\json\\src\\cli\\commands\\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\nThis code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/query/createChatChain.json b/.autodoc/docs/json/src/cli/commands/query/createChatChain.json index 82bf9d0..f9e4f02 100644 --- a/.autodoc/docs/json/src/cli/commands/query/createChatChain.json +++ b/.autodoc/docs/json/src/cli/commands/query/createChatChain.json @@ -1,7 +1,8 @@ { "fileName": "createChatChain.ts", - "filePath": "src/cli/commands/query/createChatChain.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts", - "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. 
The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.", - "questions": "1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. 
The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time." + "filePath": "src\\cli\\commands\\query\\createChatChain.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts", + "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. 
The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.", + "questions": "1. 
**Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process.", + "checksum": "6869048a06de62499933b14c37cddc1d" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/query/index.json b/.autodoc/docs/json/src/cli/commands/query/index.json index 2b7acb4..9497900 100644 --- a/.autodoc/docs/json/src/cli/commands/query/index.json +++ b/.autodoc/docs/json/src/cli/commands/query/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/cli/commands/query/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts", - "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. 
The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. 
It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. **How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions." + "filePath": "src\\cli\\commands\\query\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts", + "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. 
The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.", + "questions": "1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. 
**What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.", + "checksum": "19807a33957666422f31136970c37245" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/query/summary.json b/.autodoc/docs/json/src/cli/commands/query/summary.json index ee82d66..273457f 100644 --- a/.autodoc/docs/json/src/cli/commands/query/summary.json +++ b/.autodoc/docs/json/src/cli/commands/query/summary.json @@ -1,24 +1,27 @@ { "folderName": "query", - "folderPath": ".autodoc/docs/json/src/cli/commands/query", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\query", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\query", "files": [ { "fileName": "createChatChain.ts", - "filePath": "src/cli/commands/query/createChatChain.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts", - "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. 
The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.", - "questions": "1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. 
The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time." + "filePath": "src\\cli\\commands\\query\\createChatChain.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts", + "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. 
The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.", + "questions": "1. 
**Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process.", + "checksum": "6869048a06de62499933b14c37cddc1d" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/query/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts", - "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. 
It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. 
**How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions." + "filePath": "src\\cli\\commands\\query\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts", + "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. 
The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.", + "questions": "1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. **What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. 
It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.", + "checksum": "19807a33957666422f31136970c37245" } ], "folders": [], - "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nIn `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input.\n\nExample usage of `makeChain`:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nIn `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. 
This chain object is responsible for generating responses based on the user's input.\n\nThe main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nExample usage of the chatbot interface:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "" + "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate.\n\nThe main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. 
The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n\nThe `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project.\n\nIn summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. 
The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user.", + "questions": "", + "checksum": "9e0d0f111bf588e2df66862dce9db288" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/summary.json b/.autodoc/docs/json/src/cli/commands/summary.json index 32cd3e8..fd42bfc 100644 --- a/.autodoc/docs/json/src/cli/commands/summary.json +++ b/.autodoc/docs/json/src/cli/commands/summary.json @@ -1,130 +1,146 @@ { "folderName": "commands", - "folderPath": ".autodoc/docs/json/src/cli/commands", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands", "files": [], "folders": [ { "folderName": "estimate", - "folderPath": ".autodoc/docs/json/src/cli/commands/estimate", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\estimate", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\estimate", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/estimate/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts", - "summary": "The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. 
This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.", - "questions": "1. 
**What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. **What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate." + "filePath": "src\\cli\\commands\\estimate\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts", + "summary": "The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. 
This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. 
**What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. **How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.", + "checksum": "2b0b3903432ae423bbc597d04b052ecb" } ], "folders": [], - "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe main steps involved in the `estimate` function are:\n\n1. Setting the output path for the JSON files generated during the process.\n2. Updating the spinner text to display \"Estimating cost...\".\n3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stopping the spinner once the dry run is complete.\n5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Displaying the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository.\n\nBy providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively.", - "questions": "" + "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. 
The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost.\n\nUpon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "index", - "folderPath": ".autodoc/docs/json/src/cli/commands/index", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\index", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\index", "files": [ { "fileName": "convertJsonToMarkdown.ts", - "filePath": "src/cli/commands/index/convertJsonToMarkdown.ts", - 
"url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", - "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. 
Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.", - "questions": "1. **What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. 
It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file." + "filePath": "src/cli/commands/index/convertJsonToMarkdown.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", + "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. 
Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.", + "questions": "1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. 
The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.", + "checksum": "79c860becf47b9882441682f0213d534" }, { "fileName": "createVectorStore.ts", - "filePath": "src/cli/commands/index/createVectorStore.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", - "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. 
It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.", - "questions": "1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path." 
+ "filePath": "src/cli/commands/index/createVectorStore.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", + "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. 
Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```", + "questions": "1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. 
The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.", + "checksum": "a3409c4340753a867c72eebef7626fb9" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/index/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", - "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.", - "questions": "1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. **What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files." 
+ "filePath": "src/cli/commands/index/index.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", + "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. 
The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.", + "checksum": "4060b1affae5a6c385cda308b3cd1750" }, { "fileName": "processRepository.ts", - "filePath": "src/cli/commands/index/processRepository.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", - "summary": "The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. 
The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.", - "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. 
**Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively." + "filePath": "src/cli/commands/index/processRepository.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", + "summary": "The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. 
If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.", + "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. 
The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. **Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.", + "checksum": "5b3ae9ffad1d4b4a22c6f7fd66bbde6f" }, { "fileName": "prompts.ts", - "filePath": "src/cli/commands/index/prompts.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts", - "summary": "The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.", - "questions": "1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt." + "filePath": "src\\cli\\commands\\index\\prompts.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts", + "summary": "This code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. 
`folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.", + "questions": "1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.", + "checksum": "e44b82bf4912be69149685a997b6bde3" } ], "folders": [], - "summary": "The code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\nFor example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory.\n\nThe `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility.\n\nThe `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. 
It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n\nIn summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating documentation in various formats, and creating vector files for the documentation. This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process.\n\nThe main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks:\n\n1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMS (Language Learning Management System), and create JSON files with the results. 
These JSON files are stored in the `output/docs/json/` directory.\n\n2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory.\n\n3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. These vector files are stored in the `output/docs/data/` directory.\n\nThroughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions.\n\nHere's an example of how this code might be used:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory.\n\nThe `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project.\n\nIn summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. 
The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "", + "checksum": "376f96417f8cbea6a5ab2463268fe4af" }, { "folderName": "init", - "folderPath": ".autodoc/docs/json/src/cli/commands/init", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\init", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\init", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/init/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts", - "summary": "This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. 
These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation." + "filePath": "src\\cli\\commands\\init\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts", + "summary": "This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.", + "checksum": "b93831ff1f4023ab61c3bea963a8a112" } ], "folders": [], - "summary": "The `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. 
It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. 
It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences.", - "questions": "" + "summary": "The `index.ts` file in the `.autodoc\\docs\\json\\src\\cli\\commands\\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\nThis code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "query", - "folderPath": ".autodoc/docs/json/src/cli/commands/query", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\query", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\query", "files": [ { "fileName": "createChatChain.ts", - "filePath": "src/cli/commands/query/createChatChain.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts", - "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. 
The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.", - "questions": "1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. 
The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time." + "filePath": "src\\cli\\commands\\query\\createChatChain.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts", + "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. 
The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.", + "questions": "1. 
**Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process.", + "checksum": "6869048a06de62499933b14c37cddc1d" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/query/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts", - "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. 
It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. 
**How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions." + "filePath": "src\\cli\\commands\\query\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts", + "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. 
The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.", + "questions": "1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. **What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. 
It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.", + "checksum": "19807a33957666422f31136970c37245" } ], "folders": [], - "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nIn `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input.\n\nExample usage of `makeChain`:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nIn `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. 
This chain object is responsible for generating responses based on the user's input.\n\nThe main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nExample usage of the chatbot interface:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "" + "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate.\n\nThe main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. 
The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n\nThe `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project.\n\nIn summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. 
The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user.", + "questions": "", + "checksum": "9e0d0f111bf588e2df66862dce9db288" }, { "folderName": "user", - "folderPath": ".autodoc/docs/json/src/cli/commands/user", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\user", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\user", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/user/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts", - "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. 
GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object." + "filePath": "src\\cli\\commands\\user\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts", + "summary": "This code is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```", + "questions": "1. 
**What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.", + "checksum": "76bc1e6d5d61e24907832c4cac443225" } ], "folders": [], - "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\n```typescript\nfunction makeConfigTemplate(llms: string[]): ConfigTemplate {\n // ...\n}\n```\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\n```typescript\nconst configTemplate = makeConfigTemplate(selectedLLMs);\nawait fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2));\n```\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n\nThis code is essential for setting up the user's environment and preferences for the Autodoc project. It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query.\n\nFor example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. 
This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage.\n\nIn summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs.", - "questions": "" + "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). 
The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nThis code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } ], - "summary": "The code in the `src/cli/commands` folder is responsible for handling various command-line tasks in the Autodoc project. It contains several subfolders, each dedicated to a specific command or functionality, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations.\n\nFor instance, the `estimate` subfolder contains a function that allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input and performs a dry run of the `processRepository` function. 
It then calculates the total estimated cost and displays it to the user. This helps users make informed decisions about whether to proceed with the indexing process or not.\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n // ...configuration options...\n};\n\nestimate(config);\n```\n\nThe `index` subfolder contains code for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n // ...configuration options...\n};\n\nautodoc.index(config);\n```\n\nThe `init` subfolder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values.\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThe `query` subfolder contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThe `user` subfolder is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs).\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIn summary, the code in the `src/cli/commands` folder plays a crucial role in the Autodoc project by providing various command-line functionalities, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. These functionalities help developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in the `.autodoc\\docs\\json\\src\\cli\\commands` folder is responsible for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. The folder contains several subfolders, each with a specific purpose.\n\n### estimate\n\nThe `estimate` function provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input and performs a dry run of the repository processing to calculate the estimated cost. Example usage:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\n### index\n\nThe code in this folder processes a given repository and generates documentation in JSON, Markdown, and vector formats. 
It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository, creating Markdown files, and creating vector files. Example usage:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\n### init\n\nThe `init` function initializes the configuration of the Autodoc project. It prompts the user to input necessary information to set up the project and creates the `autodoc.config.json` file in the project root. Example usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\n### query\n\nThe `query` folder contains code for creating a chatbot that can answer questions about a specific software project. The main entry point is the `query` function, which takes an `AutodocRepoConfig` object and an `AutodocUserConfig` object as input. Example usage:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\n### user\n\nThe `user` folder manages the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs). 
Example usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the code in this folder is essential for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project.", + "questions": "", + "checksum": "d11f941351fb51140313ada9b52bbf1a" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/user/index.json b/.autodoc/docs/json/src/cli/commands/user/index.json index bd017c2..cfc49a8 100644 --- a/.autodoc/docs/json/src/cli/commands/user/index.json +++ b/.autodoc/docs/json/src/cli/commands/user/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/cli/commands/user/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts", - "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. 
If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object." + "filePath": "src\\cli\\commands\\user\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts", + "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. 
A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.", + "checksum": "76bc1e6d5d61e24907832c4cac443225" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/commands/user/summary.json b/.autodoc/docs/json/src/cli/commands/user/summary.json index 79bbbbe..c33f9ce 100644 --- a/.autodoc/docs/json/src/cli/commands/user/summary.json +++ b/.autodoc/docs/json/src/cli/commands/user/summary.json @@ -1,17 +1,19 @@ { "folderName": "user", - "folderPath": ".autodoc/docs/json/src/cli/commands/user", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\user", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\user", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/user/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts", - "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. 
If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object." + "filePath": "src\\cli\\commands\\user\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts", + "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. 
A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.", + "checksum": "76bc1e6d5d61e24907832c4cac443225" } ], "folders": [], - "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\n```typescript\nfunction makeConfigTemplate(llms: string[]): ConfigTemplate {\n // ...\n}\n```\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\n```typescript\nconst configTemplate = makeConfigTemplate(selectedLLMs);\nawait fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2));\n```\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n\nThis code is essential for setting up the user's environment and preferences for the Autodoc project. 
It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query.\n\nFor example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage.\n\nIn summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs.", - "questions": "" + "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. 
If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nThis code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. 
This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/spinner.json b/.autodoc/docs/json/src/cli/spinner.json index 144eef6..7508112 100644 --- a/.autodoc/docs/json/src/cli/spinner.json +++ b/.autodoc/docs/json/src/cli/spinner.json @@ -1,7 +1,8 @@ { "fileName": "spinner.ts", - "filePath": "src/cli/spinner.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/spinner.ts", - "summary": "This code provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. 
These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.", - "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the terminal, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **What are the different states of the spinner and how are they updated?**\n\n The spinner can have different states such as spinning, stopped, failed, succeeded, and displaying information. The functions `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` are used to update the spinner's state and text accordingly.\n\n3. **How does the `updateSpinnerText` function work and when should it be used?**\n\n The `updateSpinnerText` function updates the spinner's text with the provided message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. This function should be used when you want to change the spinner's text while it is spinning or start it with a new message." + "filePath": "src\\cli\\spinner.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\spinner.ts", + "summary": "This code is responsible for managing a spinner, which is a visual element that indicates a process is running in the background. 
The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time.\n\nThere are several functions exported by this module to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: This function stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. 
By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.", + "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **How does the `updateSpinnerText` function work?**\n\n The `updateSpinnerText` function takes a message as an input and updates the spinner's text with the given message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message.\n\n3. **What are the differences between `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions?**\n\n These functions are used to update the spinner's state and message based on the outcome of a process. `spinnerError` is called when there is an error, and it stops the spinner with a failure message. `spinnerSuccess` is called when the process is successful, and it stops the spinner with a success message. 
`spinnerInfo` is used to display an informational message without stopping the spinner.", + "checksum": "d93ad7e714ce5446916bb1d63cbb6031" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/summary.json b/.autodoc/docs/json/src/cli/summary.json index 01f91a8..84cfdca 100644 --- a/.autodoc/docs/json/src/cli/summary.json +++ b/.autodoc/docs/json/src/cli/summary.json @@ -1,193 +1,217 @@ { "folderName": "cli", - "folderPath": ".autodoc/docs/json/src/cli", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli", + "folderPath": ".autodoc\\docs\\json\\src\\cli", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli", "files": [ { "fileName": "spinner.ts", - "filePath": "src/cli/spinner.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/spinner.ts", - "summary": "This code provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. 
For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.", - "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the terminal, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **What are the different states of the spinner and how are they updated?**\n\n The spinner can have different states such as spinning, stopped, failed, succeeded, and displaying information. The functions `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` are used to update the spinner's state and text accordingly.\n\n3. **How does the `updateSpinnerText` function work and when should it be used?**\n\n The `updateSpinnerText` function updates the spinner's text with the provided message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. This function should be used when you want to change the spinner's text while it is spinning or start it with a new message." 
+ "filePath": "src\\cli\\spinner.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\spinner.ts", + "summary": "This code is responsible for managing a spinner, which is a visual element that indicates a process is running in the background. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time.\n\nThere are several functions exported by this module to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: This function stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. 
`spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.", + "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **How does the `updateSpinnerText` function work?**\n\n The `updateSpinnerText` function takes a message as an input and updates the spinner's text with the given message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message.\n\n3. **What are the differences between `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions?**\n\n These functions are used to update the spinner's state and message based on the outcome of a process. `spinnerError` is called when there is an error, and it stops the spinner with a failure message. `spinnerSuccess` is called when the process is successful, and it stops the spinner with a success message. 
`spinnerInfo` is used to display an informational message without stopping the spinner.", + "checksum": "d93ad7e714ce5446916bb1d63cbb6031" } ], "folders": [ { "folderName": "commands", - "folderPath": ".autodoc/docs/json/src/cli/commands", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands", "files": [], "folders": [ { "folderName": "estimate", - "folderPath": ".autodoc/docs/json/src/cli/commands/estimate", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\estimate", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\estimate", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/estimate/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts", - "summary": "The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. 
The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.", - "questions": "1. **What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. 
**What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate." + "filePath": "src\\cli\\commands\\estimate\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts", + "summary": "The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. 
The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. **What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. **How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. 
The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.", + "checksum": "2b0b3903432ae423bbc597d04b052ecb" } ], "folders": [], - "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe main steps involved in the `estimate` function are:\n\n1. Setting the output path for the JSON files generated during the process.\n2. Updating the spinner text to display \"Estimating cost...\".\n3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stopping the spinner once the dry run is complete.\n5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Displaying the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository.\n\nBy providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively.", - "questions": "" + "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. 
The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost.\n\nUpon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "index", - "folderPath": ".autodoc/docs/json/src/cli/commands/index", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\index", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\index", "files": [ { "fileName": "convertJsonToMarkdown.ts", - "filePath": "src/cli/commands/index/convertJsonToMarkdown.ts", - 
"url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", - "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. 
Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.", - "questions": "1. **What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. 
It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file." + "filePath": "src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. 
Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.", + "questions": "1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. 
The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.", + "checksum": "79c860becf47b9882441682f0213d534" }, { "fileName": "createVectorStore.ts", - "filePath": "src/cli/commands/index/createVectorStore.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", - "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. 
It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.", - "questions": "1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path." 
+ "filePath": "src\\cli\\commands\\index\\createVectorStore.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\createVectorStore.ts", + "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. 
Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```", + "questions": "1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. 
The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.", + "checksum": "a3409c4340753a867c72eebef7626fb9" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/index/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", - "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.", - "questions": "1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. **What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files." 
+ "filePath": "src\\cli\\commands\\index\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\index.ts", + "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. 
The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.", + "checksum": "4060b1affae5a6c385cda308b3cd1750" }, { "fileName": "processRepository.ts", - "filePath": "src/cli/commands/index/processRepository.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", - "summary": "The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. 
The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.", - "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. 
**Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively." + "filePath": "src\\cli\\commands\\index\\processRepository.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\processRepository.ts", + "summary": "The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. 
If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.", + "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. 
The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. **Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.", + "checksum": "5b3ae9ffad1d4b4a22c6f7fd66bbde6f" }, { "fileName": "prompts.ts", - "filePath": "src/cli/commands/index/prompts.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts", - "summary": "The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.", - "questions": "1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt." + "filePath": "src\\cli\\commands\\index\\prompts.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts", + "summary": "This code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. 
`folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.", + "questions": "1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.", + "checksum": "e44b82bf4912be69149685a997b6bde3" } ], "folders": [], - "summary": "The code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\nFor example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory.\n\nThe `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility.\n\nThe `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. 
It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n\nIn summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating documentation in various formats, and creating vector files for the documentation. This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process.\n\nThe main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks:\n\n1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMS (Language Learning Management System), and create JSON files with the results. 
These JSON files are stored in the `output/docs/json/` directory.\n\n2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory.\n\n3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. These vector files are stored in the `output/docs/data/` directory.\n\nThroughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions.\n\nHere's an example of how this code might be used:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory.\n\nThe `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project.\n\nIn summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. 
The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "", + "checksum": "376f96417f8cbea6a5ab2463268fe4af" }, { "folderName": "init", - "folderPath": ".autodoc/docs/json/src/cli/commands/init", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\init", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\init", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/init/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts", - "summary": "This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. 
These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation." + "filePath": "src\\cli\\commands\\init\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts", + "summary": "This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.", + "checksum": "b93831ff1f4023ab61c3bea963a8a112" } ], "folders": [], - "summary": "The `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. 
It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. 
It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences.", - "questions": "" + "summary": "The `index.ts` file in the `.autodoc\\docs\\json\\src\\cli\\commands\\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\nThis code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "query", - "folderPath": ".autodoc/docs/json/src/cli/commands/query", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\query", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\query", "files": [ { "fileName": "createChatChain.ts", - "filePath": "src/cli/commands/query/createChatChain.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts", - "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. 
The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.", - "questions": "1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. 
The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time." + "filePath": "src\\cli\\commands\\query\\createChatChain.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts", + "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. 
The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.", + "questions": "1. 
**Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process.", + "checksum": "6869048a06de62499933b14c37cddc1d" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/query/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts", - "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. 
It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. 
**How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions." + "filePath": "src\\cli\\commands\\query\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts", + "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. 
The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.", + "questions": "1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. **What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. 
It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.", + "checksum": "19807a33957666422f31136970c37245" } ], "folders": [], - "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nIn `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input.\n\nExample usage of `makeChain`:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nIn `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. 
This chain object is responsible for generating responses based on the user's input.\n\nThe main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nExample usage of the chatbot interface:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "" + "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate.\n\nThe main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. 
The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n\nThe `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project.\n\nIn summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. 
The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user.", + "questions": "", + "checksum": "9e0d0f111bf588e2df66862dce9db288" }, { "folderName": "user", - "folderPath": ".autodoc/docs/json/src/cli/commands/user", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\user", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\user", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/user/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts", - "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. 
GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object." + "filePath": "src\\cli\\commands\\user\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts", + "summary": "This code is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```", + "questions": "1. 
**What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.", + "checksum": "76bc1e6d5d61e24907832c4cac443225" } ], "folders": [], - "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\n```typescript\nfunction makeConfigTemplate(llms: string[]): ConfigTemplate {\n // ...\n}\n```\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\n```typescript\nconst configTemplate = makeConfigTemplate(selectedLLMs);\nawait fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2));\n```\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n\nThis code is essential for setting up the user's environment and preferences for the Autodoc project. It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query.\n\nFor example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. 
This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage.\n\nIn summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs.", - "questions": "" + "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). 
The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nThis code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } ], - "summary": "The code in the `src/cli/commands` folder is responsible for handling various command-line tasks in the Autodoc project. It contains several subfolders, each dedicated to a specific command or functionality, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations.\n\nFor instance, the `estimate` subfolder contains a function that allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input and performs a dry run of the `processRepository` function. 
It then calculates the total estimated cost and displays it to the user. This helps users make informed decisions about whether to proceed with the indexing process or not.\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n // ...configuration options...\n};\n\nestimate(config);\n```\n\nThe `index` subfolder contains code for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n // ...configuration options...\n};\n\nautodoc.index(config);\n```\n\nThe `init` subfolder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values.\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThe `query` subfolder contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThe `user` subfolder is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs).\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIn summary, the code in the `src/cli/commands` folder plays a crucial role in the Autodoc project by providing various command-line functionalities, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. These functionalities help developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in the `.autodoc\\docs\\json\\src\\cli\\commands` folder is responsible for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. The folder contains several subfolders, each with a specific purpose.\n\n### estimate\n\nThe `estimate` function provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input and performs a dry run of the repository processing to calculate the estimated cost. Example usage:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\n### index\n\nThe code in this folder processes a given repository and generates documentation in JSON, Markdown, and vector formats. 
It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository, creating Markdown files, and creating vector files. Example usage:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\n### init\n\nThe `init` function initializes the configuration of the Autodoc project. It prompts the user to input necessary information to set up the project and creates the `autodoc.config.json` file in the project root. Example usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\n### query\n\nThe `query` folder contains code for creating a chatbot that can answer questions about a specific software project. The main entry point is the `query` function, which takes an `AutodocRepoConfig` object and an `AutodocUserConfig` object as input. Example usage:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\n### user\n\nThe `user` folder manages the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs). 
Example usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the code in this folder is essential for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project.", + "questions": "", + "checksum": "d11f941351fb51140313ada9b52bbf1a" }, { "folderName": "utils", - "folderPath": ".autodoc/docs/json/src/cli/utils", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/utils", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\utils", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\utils", "files": [ { "fileName": "APIRateLimit.ts", - "filePath": "src/cli/utils/APIRateLimit.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts", - "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. 
The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails.\n\nWhen `callApi` is called, it adds the `executeCall` function to the `queue`. The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. If there are available slots, it calls the `dequeueAndExecute` method.\n\nThe `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id) {\n // Simulate an API call\n return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000));\n}\n\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\n\n// Usage\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call.", - "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. 
It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method work?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed." + "filePath": "src/cli/utils/APIRateLimit.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts", + "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once.\n\nThe class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit.\n\nWhen `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit.
If both conditions are met, it dequeues the next API call and executes it.\n\nThe `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchSomeData(id) {\n // Call the API using the rate limiter\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once.", + "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How can the maximum number of concurrent calls be configured?**\n\n The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. 
If no value is provided, the default value is set to 50.", + "checksum": "8862552c9cfd8b6db454d45e565081ef" }, { "fileName": "FileUtil.ts", - "filePath": "src/cli/utils/FileUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts", - "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders.\n\n1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example:\n\n ```javascript\n getFileName(\"example.txt\"); // returns \"example.md\"\n getFileName(\"example\"); // returns \"example.md\"\n ```\n\n2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. For example:\n\n ```javascript\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", false); // returns \"https://github.com/user/repo/blob/master/example.md\"\n ```\n\n3. 
`githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example:\n\n ```javascript\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", true); // returns \"https://github.com/user/repo/folder\"\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", false); // returns \"https://github.com/user/repo/tree/master/folder\"\n ```\n\nThese utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure.", - "questions": "1. **What does the `getFileName` function do?**\n\n The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists.\n\n2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository.\n\n3. 
**Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root." + "filePath": "src/cli/utils/FileUtil.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts", + "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files.\n\n1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension.\n\n Example usage:\n\n ```\n getFileName('example.txt'); // returns 'example.md'\n getFileName('example', '_', '.html'); // returns 'example.html'\n ```\n\n2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. It returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true);\n // returns 'https://github.com/user/repo/example.md'\n ```\n\n3. 
`githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true);\n // returns 'https://github.com/user/repo/folder'\n ```\n\nThese utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project.", + "questions": "1. **What is the purpose of the `getFileName` function?**\n\n The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended.\n\n2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders.\n\n3. 
**What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). Depending on the value of `linkHosted`, the functions will return different URL structures.", + "checksum": "d1f26fc674b4a9b4a2053642771871c8" }, { "fileName": "LLMUtil.ts", - "filePath": "src/cli/utils/LLMUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts", - "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0.\n\n```javascript\n{\n name: LLMModels.GPT3,\n inputCostPer1KTokens: 0.002,\n outputCostPer1KTokens: 0.002,\n maxLength: 3050,\n llm: new OpenAIChat({ ... }),\n inputTokens: 0,\n outputTokens: 0,\n succeeded: 0,\n failed: 0,\n total: 0,\n}\n```\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost for all models in the input array. 
It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.", - "questions": "1. **Question**: What is the purpose of the `models` object and what are the different models available?\n **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model.\n\n2. **Question**: How does the `printModelDetails` function work and what information does it display?\n **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table.\n\n3. **Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost?\n **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model." + "filePath": "src/cli/utils/LLMUtil.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts", + "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. 
It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed.\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nprintModelDetails(Object.values(models));\n```\n\nAnd the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models:\n\n```javascript\nimport { models, totalIndexCostEstimate } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```", + "questions": "1. 
**Question:** What is the purpose of the `models` object and how are the different GPT models being used?\n **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration.\n\n2. **Question:** How does the `printModelDetails` function work and what information does it display?\n **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost.\n\n3. **Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost?\n **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens.", + "checksum": "f4464cf197f4af827ac0eac950d568fc" }, { - "fileName": "WaitUtil.ts", - "filePath": "src/cli/utils/WaitUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts", - "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. 
The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed.\n\nExample usage:\n\n```javascript\n// Wait for 2 seconds and then log \"Hello, world!\"\nwait(2000, \"Hello, world!\").then(console.log);\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. If the function does not return `true` after 200 checks, the promise is rejected.\n\nThe function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. If the function has been called 200 times without returning `true`, the promise is rejected.\n\nExample usage:\n\n```javascript\n// Check if a certain element is visible on the page\nconst isElementVisible = () => document.querySelector(\"#my-element\").offsetParent !== null;\n\n// Wait for the element to become visible, then log \"Element is visible!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\"));\n```\n\nIn summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner.", - "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. 
It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code.\n\n3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?**\n\n Currently, there is no error handling or customization options for the `forTrue` function. The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. To add customization, additional parameters could be added to the function signature and used in the implementation." + "fileName": "traverseFileSystem.ts", + "filePath": "src/cli/utils/traverseFileSystem.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts", + "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. 
These properties include:\n\n- `inputPath`: The root path to start the traversal from.\n- `projectName`: The name of the project being processed.\n- `processFile`: An optional callback function to process a file.\n- `processFolder`: An optional callback function to process a folder.\n- `ignore`: An array of patterns to ignore during traversal.\n- `filePrompt`, `folderPrompt`: Optional prompts for user interaction.\n- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing.\n\nThe function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items.\n\nFor each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided.\n\nThe traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => {\n // Process file logic here\n },\n processFolder: (params) => {\n // Process folder logic here\n },\n ignore: ['node_modules/**', '.git/**'],\n});\n```", + "questions": "1. 
**What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found.", + "checksum": "b9e957c10ee6c009864c90aa2fa93763" }, { - "fileName": "traverseFileSystem.ts", - "filePath": "src/cli/utils/traverseFileSystem.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts", - "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used in the larger project for processing and generating documentation for a given project.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties:\n\n- `inputPath`: The root folder path to start traversing.\n- `projectName`: The name of the project being documented.\n- `processFile`: An optional callback function to process files.\n- `processFolder`: An optional callback function to process folders.\n- `ignore`: An array of patterns to ignore files and folders.\n- `filePrompt`: An optional prompt for processing files.\n- `folderPrompt`: An optional prompt for processing folders.\n- `contentType`: The type of content being processed.\n- `targetAudience`: The target audience for the documentation.\n- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version.\n\nThe function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. 
If an entry is a file and is a text file, it calls the `processFile` callback if provided.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nimport { traverseFileSystem } from './autodoc';\n\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\n\ntraverseFileSystem(params);\n```\n\nThis example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions.", - "questions": "1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory." 
+ "fileName": "WaitUtil.ts", + "filePath": "src/cli/utils/WaitUtil.ts", + "url": "https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts", + "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax.\n\n### wait\n\nThe `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\ndelayedEcho(); // Output: Start -> (1 second delay) -> End\n```\n\n### forTrue\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). If `fn` does not return `true` within this time, the `Promise` is rejected.\n\nThis function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nlet condition = false;\n\nsetTimeout(() => {\n condition = true;\n}, 3000);\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n\nwaitForCondition(); // Output: Waiting for condition... 
-> (3 second delay) -> Condition met!\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution.", + "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved.\n\n2. **How does the `forTrue` function work?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected.\n\n3. **What is the use case for the `forTrue` function?**\n\n The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing.", + "checksum": "bf4acebb6c2736274af75a8c8441c9d2" } ], "folders": [], - "summary": "The code in the `.autodoc/docs/json/src/cli/utils` folder provides utility functions and classes that help manage various aspects of the autodoc project, such as rate-limiting API calls, handling file and folder paths, managing language models, and traversing file systems.\n\n`APIRateLimit.ts` contains the `APIRateLimit` class, which is designed to manage and limit the number of concurrent API calls made by the application. This is useful when the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. 
For example:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\n`FileUtil.ts` provides utility functions for handling file and folder paths, such as generating file names and GitHub URLs for files and folders. These functions can be used to manage and navigate the documentation structure. For example:\n\n```javascript\ngetFileName(\"example.txt\"); // returns \"example.md\"\ngithubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project. It provides functions like `printModelDetails` and `totalIndexCostEstimate` to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations in the larger project. They can be used in various parts of the project to handle timing and conditional logic in an asynchronous manner. For example:\n\n```javascript\nwait(2000, \"Hello, world!\").then(console.log); // Waits for 2 seconds and then logs \"Hello, world!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\")); // Waits for an element to become visible, then logs \"Element is visible!\"\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used for processing and generating documentation for a given project. For example:\n\n```javascript\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\ntraverseFileSystem(params);\n```\n\nIn summary, the code in this folder provides various utility functions and classes that help manage different aspects of the autodoc project, making it easier to handle tasks such as rate-limiting, file and folder management, language model management, asynchronous operations, and file system traversal.", - "questions": "" + "summary": "The `.autodoc\\docs\\json\\src\\cli\\utils` folder contains utility functions and classes that assist in managing API rate limits, handling file and folder paths, managing language models, traversing file systems, and controlling asynchronous operations. These utilities can be used throughout the autodoc project to ensure consistent behavior and improve code organization.\n\n`APIRateLimit.ts` provides the `APIRateLimit` class, which manages and limits the number of concurrent API calls made by the application. This is useful when working with rate-limited APIs or preventing server overload. Example usage:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function fetchSomeData(id) {\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\n`FileUtil.ts` offers utility functions for generating file names and GitHub URLs for documentation files. These functions ensure consistent naming and URL generation across the project. 
Example usage:\n\n```javascript\ngetFileName('example.txt'); // returns 'example.md'\ngithubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); // returns 'https://github.com/user/repo/example.md'\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project utilizing OpenAI's GPT models. Functions like `printModelDetails` and `totalIndexCostEstimate` can be used to manage and analyze the usage and costs of different LLMs. Example usage:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\nprintModelDetails(Object.values(models));\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processing files and folders based on provided parameters. This is useful for generating documentation or performing tasks that require processing files and folders in a directory structure. Example usage:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => { /* Process file logic */ },\n processFolder: (params) => { /* Process folder logic */ },\n ignore: ['node_modules/**', '.git/**'],\n});\n```\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used to control the flow of asynchronous code execution. 
Example usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n```\n\nIn summary, the utilities in this folder enhance the autodoc project by providing consistent behavior, improving code organization, and managing various aspects of the project, such as API rate limits, file and folder paths, language models, file system traversal, and asynchronous operations.", + "questions": "", + "checksum": "a4b7088863601cd326edbec7726eefe7" } ], - "summary": "The `spinner.ts` file in the `.autodoc/docs/json/src/cli` folder provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. 
These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.", - "questions": "" + "summary": "The code in the `spinner.ts` file, located in the `.autodoc\\docs\\json\\src\\cli` folder, is responsible for managing a spinner, a visual element that indicates a background process is running. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe module exports several functions to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: Updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: Stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: Stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: Stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. 
`spinnerInfo(message: string)`: Displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.", + "questions": "", + "checksum": "e9d728bc3244f1081af08994f5fb1cd0" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/APIRateLimit.json b/.autodoc/docs/json/src/cli/utils/APIRateLimit.json index 1beed52..0e5e81c 100644 --- a/.autodoc/docs/json/src/cli/utils/APIRateLimit.json +++ b/.autodoc/docs/json/src/cli/utils/APIRateLimit.json @@ -1,7 +1,8 @@ { "fileName": "APIRateLimit.ts", - "filePath": "src/cli/utils/APIRateLimit.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts", - "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. 
The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails.\n\nWhen `callApi` is called, it adds the `executeCall` function to the `queue`. The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. If there are available slots, it calls the `dequeueAndExecute` method.\n\nThe `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id) {\n // Simulate an API call\n return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000));\n}\n\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\n\n// Usage\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call.", - "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. 
It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method work?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed." + "filePath": "src\\cli\\utils\\APIRateLimit.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\APIRateLimit.ts", + "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once.\n\nThe class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit.\n\nWhen `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit. 
If both conditions are met, it dequeues the next API call and executes it.\n\nThe `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchSomeData(id) {\n // Call the API using the rate limiter\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once.", + "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How can the maximum number of concurrent calls be configured?**\n\n The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. 
If no value is provided, the default value is set to 50.", + "checksum": "8862552c9cfd8b6db454d45e565081ef" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/FileUtil.json b/.autodoc/docs/json/src/cli/utils/FileUtil.json index a4178d9..e9ed28b 100644 --- a/.autodoc/docs/json/src/cli/utils/FileUtil.json +++ b/.autodoc/docs/json/src/cli/utils/FileUtil.json @@ -1,7 +1,8 @@ { "fileName": "FileUtil.ts", - "filePath": "src/cli/utils/FileUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts", - "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders.\n\n1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example:\n\n ```javascript\n getFileName(\"example.txt\"); // returns \"example.md\"\n getFileName(\"example\"); // returns \"example.md\"\n ```\n\n2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. 
For example:\n\n ```javascript\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", false); // returns \"https://github.com/user/repo/blob/master/example.md\"\n ```\n\n3. `githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example:\n\n ```javascript\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", true); // returns \"https://github.com/user/repo/folder\"\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", false); // returns \"https://github.com/user/repo/tree/master/folder\"\n ```\n\nThese utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure.", - "questions": "1. **What does the `getFileName` function do?**\n\n The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists.\n\n2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. 
If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository.\n\n3. **Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root." + "filePath": "src\\cli\\utils\\FileUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\FileUtil.ts", + "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files.\n\n1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension.\n\n Example usage:\n\n ```\n getFileName('example.txt'); // returns 'example.md'\n getFileName('example', '_', '.html'); // returns 'example.html'\n ```\n\n2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. 
It returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true);\n // returns 'https://github.com/user/repo/example.md'\n ```\n\n3. `githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true);\n // returns 'https://github.com/user/repo/folder'\n ```\n\nThese utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project.", + "questions": "1. **What is the purpose of the `getFileName` function?**\n\n The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended.\n\n2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders.\n\n3. 
**What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). Depending on the value of `linkHosted`, the functions will return different URL structures.", + "checksum": "d1f26fc674b4a9b4a2053642771871c8" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/LLMUtil.json b/.autodoc/docs/json/src/cli/utils/LLMUtil.json index 02cdb15..cfa3bba 100644 --- a/.autodoc/docs/json/src/cli/utils/LLMUtil.json +++ b/.autodoc/docs/json/src/cli/utils/LLMUtil.json @@ -1,7 +1,8 @@ { "fileName": "LLMUtil.ts", - "filePath": "src/cli/utils/LLMUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts", - "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0.\n\n```javascript\n{\n name: LLMModels.GPT3,\n inputCostPer1KTokens: 0.002,\n outputCostPer1KTokens: 0.002,\n maxLength: 3050,\n llm: new OpenAIChat({ ... }),\n inputTokens: 0,\n outputTokens: 0,\n succeeded: 0,\n failed: 0,\n total: 0,\n}\n```\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. 
It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost for all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.", - "questions": "1. **Question**: What is the purpose of the `models` object and what are the different models available?\n **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model.\n\n2. **Question**: How does the `printModelDetails` function work and what information does it display?\n **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table.\n\n3. **Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost?\n **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model." 
+ "filePath": "src\\cli\\utils\\LLMUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\LLMUtil.ts", + "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed.\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. 
For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nprintModelDetails(Object.values(models));\n```\n\nAnd the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models:\n\n```javascript\nimport { models, totalIndexCostEstimate } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```", + "questions": "1. **Question:** What is the purpose of the `models` object and how are the different GPT models being used?\n **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration.\n\n2. **Question:** How does the `printModelDetails` function work and what information does it display?\n **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost.\n\n3. 
**Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost?\n **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens.", + "checksum": "f4464cf197f4af827ac0eac950d568fc" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/WaitUtil.json b/.autodoc/docs/json/src/cli/utils/WaitUtil.json index e476c3e..da72db7 100644 --- a/.autodoc/docs/json/src/cli/utils/WaitUtil.json +++ b/.autodoc/docs/json/src/cli/utils/WaitUtil.json @@ -1,7 +1,8 @@ { "fileName": "WaitUtil.ts", - "filePath": "src/cli/utils/WaitUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts", - "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed.\n\nExample usage:\n\n```javascript\n// Wait for 2 seconds and then log \"Hello, world!\"\nwait(2000, \"Hello, world!\").then(console.log);\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. 
If the function does not return `true` after 200 checks, the promise is rejected.\n\nThe function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. If the function has been called 200 times without returning `true`, the promise is rejected.\n\nExample usage:\n\n```javascript\n// Check if a certain element is visible on the page\nconst isElementVisible = () => document.querySelector(\"#my-element\").offsetParent !== null;\n\n// Wait for the element to become visible, then log \"Element is visible!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\"));\n```\n\nIn summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner.", - "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code.\n\n3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?**\n\n Currently, there is no error handling or customization options for the `forTrue` function. 
The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. To add customization, additional parameters could be added to the function signature and used in the implementation." + "filePath": "src\\cli\\utils\\WaitUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\WaitUtil.ts", + "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax.\n\n### wait\n\nThe `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\ndelayedEcho(); // Output: Start -> (1 second delay) -> End\n```\n\n### forTrue\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). 
If `fn` does not return `true` within this time, the `Promise` is rejected.\n\nThis function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nlet condition = false;\n\nsetTimeout(() => {\n condition = true;\n}, 3000);\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n\nwaitForCondition(); // Output: Waiting for condition... -> (3 second delay) -> Condition met!\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution.", + "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved.\n\n2. **How does the `forTrue` function work?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected.\n\n3. **What is the use case for the `forTrue` function?**\n\n The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. 
This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing.", + "checksum": "bf4acebb6c2736274af75a8c8441c9d2" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/summary.json b/.autodoc/docs/json/src/cli/utils/summary.json index 2025542..eda89bb 100644 --- a/.autodoc/docs/json/src/cli/utils/summary.json +++ b/.autodoc/docs/json/src/cli/utils/summary.json @@ -1,45 +1,51 @@ { "folderName": "utils", - "folderPath": ".autodoc/docs/json/src/cli/utils", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/utils", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\utils", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\utils", "files": [ { "fileName": "APIRateLimit.ts", - "filePath": "src/cli/utils/APIRateLimit.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts", - "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails.\n\nWhen `callApi` is called, it adds the `executeCall` function to the `queue`. 
The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. If there are available slots, it calls the `dequeueAndExecute` method.\n\nThe `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id) {\n // Simulate an API call\n return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000));\n}\n\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\n\n// Usage\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call.", - "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. 
The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method work?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed." + "filePath": "src\\cli\\utils\\APIRateLimit.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\APIRateLimit.ts", + "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once.\n\nThe class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit.\n\nWhen `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit. 
If both conditions are met, it dequeues the next API call and executes it.\n\nThe `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchSomeData(id) {\n // Call the API using the rate limiter\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once.", + "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How can the maximum number of concurrent calls be configured?**\n\n The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. 
If no value is provided, the default value is set to 50.", + "checksum": "8862552c9cfd8b6db454d45e565081ef" }, { "fileName": "FileUtil.ts", - "filePath": "src/cli/utils/FileUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts", - "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders.\n\n1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example:\n\n ```javascript\n getFileName(\"example.txt\"); // returns \"example.md\"\n getFileName(\"example\"); // returns \"example.md\"\n ```\n\n2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. For example:\n\n ```javascript\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", false); // returns \"https://github.com/user/repo/blob/master/example.md\"\n ```\n\n3. 
`githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example:\n\n ```javascript\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", true); // returns \"https://github.com/user/repo/folder\"\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", false); // returns \"https://github.com/user/repo/tree/master/folder\"\n ```\n\nThese utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure.", - "questions": "1. **What does the `getFileName` function do?**\n\n The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists.\n\n2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository.\n\n3. 
**Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root." + "filePath": "src\\cli\\utils\\FileUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\FileUtil.ts", + "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files.\n\n1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension.\n\n Example usage:\n\n ```\n getFileName('example.txt'); // returns 'example.md'\n getFileName('example', '_', '.html'); // returns 'example.html'\n ```\n\n2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. It returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true);\n // returns 'https://github.com/user/repo/example.md'\n ```\n\n3. 
`githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true);\n // returns 'https://github.com/user/repo/folder'\n ```\n\nThese utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project.", + "questions": "1. **What is the purpose of the `getFileName` function?**\n\n The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended.\n\n2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders.\n\n3. 
**What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). Depending on the value of `linkHosted`, the functions will return different URL structures.", + "checksum": "d1f26fc674b4a9b4a2053642771871c8" }, { "fileName": "LLMUtil.ts", - "filePath": "src/cli/utils/LLMUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts", - "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0.\n\n```javascript\n{\n name: LLMModels.GPT3,\n inputCostPer1KTokens: 0.002,\n outputCostPer1KTokens: 0.002,\n maxLength: 3050,\n llm: new OpenAIChat({ ... }),\n inputTokens: 0,\n outputTokens: 0,\n succeeded: 0,\n failed: 0,\n total: 0,\n}\n```\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost for all models in the input array. 
It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.", - "questions": "1. **Question**: What is the purpose of the `models` object and what are the different models available?\n **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model.\n\n2. **Question**: How does the `printModelDetails` function work and what information does it display?\n **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table.\n\n3. **Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost?\n **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model." + "filePath": "src\\cli\\utils\\LLMUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\LLMUtil.ts", + "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. 
It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed.\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nprintModelDetails(Object.values(models));\n```\n\nAnd the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models:\n\n```javascript\nimport { models, totalIndexCostEstimate } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```", + "questions": "1. 
**Question:** What is the purpose of the `models` object and how are the different GPT models being used?\n **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration.\n\n2. **Question:** How does the `printModelDetails` function work and what information does it display?\n **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost.\n\n3. **Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost?\n **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens.", + "checksum": "f4464cf197f4af827ac0eac950d568fc" }, { - "fileName": "WaitUtil.ts", - "filePath": "src/cli/utils/WaitUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts", - "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. 
The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed.\n\nExample usage:\n\n```javascript\n// Wait for 2 seconds and then log \"Hello, world!\"\nwait(2000, \"Hello, world!\").then(console.log);\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. If the function does not return `true` after 200 checks, the promise is rejected.\n\nThe function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. If the function has been called 200 times without returning `true`, the promise is rejected.\n\nExample usage:\n\n```javascript\n// Check if a certain element is visible on the page\nconst isElementVisible = () => document.querySelector(\"#my-element\").offsetParent !== null;\n\n// Wait for the element to become visible, then log \"Element is visible!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\"));\n```\n\nIn summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner.", - "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. 
It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code.\n\n3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?**\n\n Currently, there is no error handling or customization options for the `forTrue` function. The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. To add customization, additional parameters could be added to the function signature and used in the implementation." + "fileName": "traverseFileSystem.ts", + "filePath": "src\\cli\\utils\\traverseFileSystem.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\traverseFileSystem.ts", + "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. 
These properties include:\n\n- `inputPath`: The root path to start the traversal from.\n- `projectName`: The name of the project being processed.\n- `processFile`: An optional callback function to process a file.\n- `processFolder`: An optional callback function to process a folder.\n- `ignore`: An array of patterns to ignore during traversal.\n- `filePrompt`, `folderPrompt`: Optional prompts for user interaction.\n- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing.\n\nThe function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items.\n\nFor each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided.\n\nThe traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => {\n // Process file logic here\n },\n processFolder: (params) => {\n // Process folder logic here\n },\n ignore: ['node_modules/**', '.git/**'],\n});\n```", + "questions": "1. 
**What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found.", + "checksum": "b9e957c10ee6c009864c90aa2fa93763" }, { - "fileName": "traverseFileSystem.ts", - "filePath": "src/cli/utils/traverseFileSystem.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts", - "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used in the larger project for processing and generating documentation for a given project.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties:\n\n- `inputPath`: The root folder path to start traversing.\n- `projectName`: The name of the project being documented.\n- `processFile`: An optional callback function to process files.\n- `processFolder`: An optional callback function to process folders.\n- `ignore`: An array of patterns to ignore files and folders.\n- `filePrompt`: An optional prompt for processing files.\n- `folderPrompt`: An optional prompt for processing folders.\n- `contentType`: The type of content being processed.\n- `targetAudience`: The target audience for the documentation.\n- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version.\n\nThe function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. 
If an entry is a file and is a text file, it calls the `processFile` callback if provided.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nimport { traverseFileSystem } from './autodoc';\n\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\n\ntraverseFileSystem(params);\n```\n\nThis example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions.", - "questions": "1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory." 
+ "fileName": "WaitUtil.ts", + "filePath": "src\\cli\\utils\\WaitUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\WaitUtil.ts", + "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax.\n\n### wait\n\nThe `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\ndelayedEcho(); // Output: Start -> (1 second delay) -> End\n```\n\n### forTrue\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). If `fn` does not return `true` within this time, the `Promise` is rejected.\n\nThis function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nlet condition = false;\n\nsetTimeout(() => {\n condition = true;\n}, 3000);\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n\nwaitForCondition(); // Output: Waiting for condition... 
-> (3 second delay) -> Condition met!\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution.", + "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved.\n\n2. **How does the `forTrue` function work?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected.\n\n3. **What is the use case for the `forTrue` function?**\n\n The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing.", + "checksum": "bf4acebb6c2736274af75a8c8441c9d2" } ], "folders": [], - "summary": "The code in the `.autodoc/docs/json/src/cli/utils` folder provides utility functions and classes that help manage various aspects of the autodoc project, such as rate-limiting API calls, handling file and folder paths, managing language models, and traversing file systems.\n\n`APIRateLimit.ts` contains the `APIRateLimit` class, which is designed to manage and limit the number of concurrent API calls made by the application. This is useful when the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. 
For example:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\n`FileUtil.ts` provides utility functions for handling file and folder paths, such as generating file names and GitHub URLs for files and folders. These functions can be used to manage and navigate the documentation structure. For example:\n\n```javascript\ngetFileName(\"example.txt\"); // returns \"example.md\"\ngithubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project. It provides functions like `printModelDetails` and `totalIndexCostEstimate` to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations in the larger project. They can be used in various parts of the project to handle timing and conditional logic in an asynchronous manner. For example:\n\n```javascript\nwait(2000, \"Hello, world!\").then(console.log); // Waits for 2 seconds and then logs \"Hello, world!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\")); // Waits for an element to become visible, then logs \"Element is visible!\"\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used for processing and generating documentation for a given project. For example:\n\n```javascript\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\ntraverseFileSystem(params);\n```\n\nIn summary, the code in this folder provides various utility functions and classes that help manage different aspects of the autodoc project, making it easier to handle tasks such as rate-limiting, file and folder management, language model management, asynchronous operations, and file system traversal.", - "questions": "" + "summary": "The `.autodoc\\docs\\json\\src\\cli\\utils` folder contains utility functions and classes that assist in managing API rate limits, handling file and folder paths, managing language models, traversing file systems, and controlling asynchronous operations. These utilities can be used throughout the autodoc project to ensure consistent behavior and improve code organization.\n\n`APIRateLimit.ts` provides the `APIRateLimit` class, which manages and limits the number of concurrent API calls made by the application. This is useful when working with rate-limited APIs or preventing server overload. Example usage:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function fetchSomeData(id) {\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\n`FileUtil.ts` offers utility functions for generating file names and GitHub URLs for documentation files. These functions ensure consistent naming and URL generation across the project. 
Example usage:\n\n```javascript\ngetFileName('example.txt'); // returns 'example.md'\ngithubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); // returns 'https://github.com/user/repo/example.md'\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project utilizing OpenAI's GPT models. Functions like `printModelDetails` and `totalIndexCostEstimate` can be used to manage and analyze the usage and costs of different LLMs. Example usage:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\nprintModelDetails(Object.values(models));\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processing files and folders based on provided parameters. This is useful for generating documentation or performing tasks that require processing files and folders in a directory structure. Example usage:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => { /* Process file logic */ },\n processFolder: (params) => { /* Process folder logic */ },\n ignore: ['node_modules/**', '.git/**'],\n});\n```\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used to control the flow of asynchronous code execution. 
Example usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n```\n\nIn summary, the utilities in this folder enhance the autodoc project by providing consistent behavior, improving code organization, and managing various aspects of the project, such as API rate limits, file and folder paths, language models, file system traversal, and asynchronous operations.", + "questions": "", + "checksum": "a4b7088863601cd326edbec7726eefe7" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/cli/utils/traverseFileSystem.json b/.autodoc/docs/json/src/cli/utils/traverseFileSystem.json index 62be6e8..159296e 100644 --- a/.autodoc/docs/json/src/cli/utils/traverseFileSystem.json +++ b/.autodoc/docs/json/src/cli/utils/traverseFileSystem.json @@ -1,7 +1,8 @@ { "fileName": "traverseFileSystem.ts", - "filePath": "src/cli/utils/traverseFileSystem.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts", - "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used in the larger project for processing and generating documentation for a given project.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties:\n\n- `inputPath`: The root folder path to start traversing.\n- `projectName`: The name of the project being documented.\n- `processFile`: An optional callback function to process files.\n- `processFolder`: An optional callback function to process folders.\n- `ignore`: An array of patterns to ignore files and folders.\n- `filePrompt`: An optional prompt for processing files.\n- `folderPrompt`: An optional prompt for processing folders.\n- `contentType`: The type of content being processed.\n- `targetAudience`: The target audience for the documentation.\n- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version.\n\nThe function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. 
If an entry is a file and is a text file, it calls the `processFile` callback if provided.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nimport { traverseFileSystem } from './autodoc';\n\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\n\ntraverseFileSystem(params);\n```\n\nThis example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions.", - "questions": "1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory." 
+ "filePath": "src\\cli\\utils\\traverseFileSystem.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\traverseFileSystem.ts", + "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. These properties include:\n\n- `inputPath`: The root path to start the traversal from.\n- `projectName`: The name of the project being processed.\n- `processFile`: An optional callback function to process a file.\n- `processFolder`: An optional callback function to process a folder.\n- `ignore`: An array of patterns to ignore during traversal.\n- `filePrompt`, `folderPrompt`: Optional prompts for user interaction.\n- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing.\n\nThe function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items.\n\nFor each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. 
If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided.\n\nThe traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => {\n // Process file logic here\n },\n processFolder: (params) => {\n // Process folder logic here\n },\n ignore: ['node_modules/**', '.git/**'],\n});\n```", + "questions": "1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. 
It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found.", + "checksum": "b9e957c10ee6c009864c90aa2fa93763" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/const.json b/.autodoc/docs/json/src/const.json index 6e9c89e..e37d363 100644 --- a/.autodoc/docs/json/src/const.json +++ b/.autodoc/docs/json/src/const.json @@ -1,7 +1,8 @@ { "fileName": "const.ts", - "filePath": "src/const.ts", - "url": "https://github.com/context-labs/autodoc/src/const.ts", - "summary": "The code in this file is responsible for managing the user configuration file for the Autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`. This constant represents the name of the user configuration file that will be used by the Autodoc project.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which resolves a sequence of paths into an absolute path. It takes three arguments:\n\n1. `os.homedir()`: This function returns the current user's home directory. It ensures that the user configuration file is stored in the user's home directory, making it user-specific.\n2. `'./.config/autodoc/'`: This string specifies the subdirectory within the user's home directory where the configuration file will be stored. The `.config` directory is a common location for storing configuration files on Unix-based systems, and the `autodoc` subdirectory is used to keep the Autodoc configuration files organized.\n3. 
`userConfigFileName`: This constant is used as the file name for the user configuration file.\n\nThe `userConfigFilePath` constant will store the absolute path to the user configuration file, which can be used by other parts of the Autodoc project to read or write user-specific settings.\n\nIn summary, this code is responsible for defining the location and name of the user configuration file for the Autodoc project. It ensures that the configuration file is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences.", - "questions": "1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as the `path.resolve()` function used to construct the `userConfigFilePath`. The `node:os` module is imported to provide operating system-related utility methods, such as `os.homedir()` which returns the current user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. The `os.homedir()` function from the `node:os` module returns the correct home directory path for the current user, regardless of the operating system. Additionally, the `path.resolve()` function from the `node:path` module handles path separators and other OS-specific details, ensuring the correct file path is generated." 
+ "filePath": "src\\const.ts", + "url": "https://github.com/context-labs/autodoc/src\\const.ts", + "summary": "The code in this file is responsible for managing the user configuration file for the autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`, which represents the name of the user configuration file. This file is expected to store user-specific settings for the autodoc project in JSON format.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which combines the provided arguments into an absolute file path. The `os.homedir()` function is used to get the current user's home directory, and `./.config/autodoc/` is appended to it as the folder where the user configuration file should be stored. Finally, the `userConfigFileName` constant is appended to the path, resulting in the complete file path for the user configuration file.\n\nBy exporting both `userConfigFileName` and `userConfigFilePath`, other parts of the autodoc project can easily access and use these constants to read or write user-specific settings. 
For example, when the autodoc application starts, it can read the user configuration file from the specified path, and apply the settings accordingly.\n\nHere's a code example of how these constants might be used in another part of the autodoc project:\n\n```javascript\nimport { userConfigFilePath } from './path/to/this/file';\n\n// Read user configuration from the file\nconst userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8'));\n\n// Apply user settings\napplyUserSettings(userConfig);\n```\n\nIn summary, this code is responsible for defining the name and file path of the user configuration file for the autodoc project, allowing other parts of the project to easily access and manage user-specific settings.", + "questions": "1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules being imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as resolving the absolute path to the user configuration file. The `node:os` module is imported to provide operating system-related utility methods, such as getting the user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. 
The `os.homedir()` method returns the home directory of the current user, which is platform-specific, and the `path.resolve()` method takes care of handling the correct path separators for the current operating system.", + "checksum": "ce40980fffc58e17b13690b9e37a6015" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/index.json b/.autodoc/docs/json/src/index.json index dff5d2c..ba5c65f 100644 --- a/.autodoc/docs/json/src/index.json +++ b/.autodoc/docs/json/src/index.json @@ -1,7 +1,8 @@ { "fileName": "index.ts", - "filePath": "src/index.ts", - "url": "https://github.com/context-labs/autodoc/src/index.ts", - "summary": "The code is a CLI (Command Line Interface) tool for the Autodoc project, which helps in generating documentation for a codebase. It uses the `commander` package to define and manage commands, and `inquirer` for interactive prompts. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`.\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM (Language Model), and creates a locally stored index. It prompts the user to confirm before starting the indexing process.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration; otherwise, it creates a new one.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. 
It requires both `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also handles unhandled promise rejections by logging the error stack, showing an error spinner, stopping the spinner, and exiting with an error code.\n\nOverall, this CLI tool simplifies the process of generating documentation for a codebase by providing an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities.", - "questions": "1. **Question:** What is the purpose of the `autodoc.config.json` file and how is it used in the code?\n **Answer:** The `autodoc.config.json` file is used to store the configuration for the Autodoc repository. It is read and parsed in various commands like `init`, `estimate`, `index`, and `q` to provide the necessary configuration for each command's execution.\n\n2. **Question:** How does the `estimate` command work and what does it do?\n **Answer:** The `estimate` command reads the `autodoc.config.json` file, parses it into a configuration object, and then calls the `estimate` function with the configuration. The purpose of this command is to estimate the cost of running the `index` command on the repository.\n\n3. **Question:** What is the purpose of the `user` command and how does it handle user configuration?\n **Answer:** The `user` command is used to set the Autodoc user configuration. It reads the user configuration file specified by `userConfigFilePath`, parses it into a configuration object, and then calls the `user` function with the configuration. If the configuration file is not found, it calls the `user` function without any configuration, allowing the user to set up their configuration." 
+ "filePath": "src\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\index.ts", + "summary": "This code is the main entry point for the Autodoc CLI tool, which provides a set of commands to help developers automatically generate documentation for their codebase. The tool uses the `commander` library to define and handle commands, and `inquirer` for interactive prompts.\n\nThe available commands are:\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM, and creates a locally stored index. Before starting the indexing process, it prompts the user for confirmation. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. It requires both the `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also listens for unhandled promise rejections and handles them gracefully by showing an error spinner, stopping the spinner, and exiting with an error code.\n\nIn the larger project, this CLI tool serves as the primary interface for users to interact with Autodoc, allowing them to easily generate and manage documentation for their codebase.", + "questions": "1. 
**What is the purpose of the Autodoc CLI Tool?**\n\n The Autodoc CLI Tool is designed to help developers automatically generate documentation for their codebase by traversing the code, writing docs via LLM, and creating a locally stored index.\n\n2. **How does the `estimate` command work and what does it return?**\n\n The `estimate` command reads the `autodoc.config.json` file and estimates the cost of running the `index` command on the repository. It provides an estimation of the resources required to generate the documentation.\n\n3. **What is the role of the `user` command and how does it interact with the user configuration file?**\n\n The `user` command is responsible for setting the Autodoc user configuration. It reads the user configuration file (if it exists) and allows the user to update or create a new configuration. This configuration is then used in other commands, such as the `query` command, to interact with the Autodoc index.", + "checksum": "7bc160e4c4ef027d4968e3650a305a7d" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/langchain/hnswlib.json b/.autodoc/docs/json/src/langchain/hnswlib.json index 029a66b..12d9cdb 100644 --- a/.autodoc/docs/json/src/langchain/hnswlib.json +++ b/.autodoc/docs/json/src/langchain/hnswlib.json @@ -1,7 +1,8 @@ { "fileName": "hnswlib.ts", - "filePath": "src/langchain/hnswlib.ts", - "url": "https://github.com/context-labs/autodoc/src/langchain/hnswlib.ts", - "summary": "The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. 
The `Embeddings` object is used to convert text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method takes an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method is responsible for initializing the index, resizing it if necessary, and adding the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, this class can be used to efficiently store and search for similar documents based on their embeddings, which can be useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.", - "questions": "1. 
**Question:** What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer:** The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question:** How does the `addDocuments` method work and what is its purpose?\n **Answer:** The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and the corresponding documents to the HNSW index and the `docstore` respectively.\n\n3. **Question:** How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer:** The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input. It checks if the query vector has the same length as the number of dimensions and if `k` is not greater than the number of elements in the index. It then performs a k-nearest neighbors search on the HNSW index using the query vector and returns an array of `[Document, number]` tuples, where each tuple contains a document from the `docstore` and its corresponding distance score to the query vector." + "filePath": "src\\langchain\\hnswlib.ts", + "url": "https://github.com/context-labs/autodoc/src\\langchain\\hnswlib.ts", + "summary": "The `HNSWLib` class in this code is a specialized vector store that uses the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. It is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. 
The main purpose of this class is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe constructor of the `HNSWLib` class takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow for persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn the larger project, the `HNSWLib` class can be used to efficiently store and search for documents based on their content similarity, which can be useful for tasks such as document clustering, recommendation systems, or information retrieval.", + "questions": "1. 
**Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them using the provided `Embeddings` instance. It then adds the resulting vectors and documents to the HNSW index and the `InMemoryDocstore`, respectively.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` most similar vectors in the HNSW index. 
It returns an array of tuples, where each tuple contains a `Document` object and its corresponding similarity score to the query vector.", + "checksum": "4725f6bfddda88355b55a980a1eae582" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/langchain/summary.json b/.autodoc/docs/json/src/langchain/summary.json index 4ec7faf..d88bc7c 100644 --- a/.autodoc/docs/json/src/langchain/summary.json +++ b/.autodoc/docs/json/src/langchain/summary.json @@ -1,17 +1,19 @@ { "folderName": "langchain", - "folderPath": ".autodoc/docs/json/src/langchain", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/langchain", + "folderPath": ".autodoc\\docs\\json\\src\\langchain", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\langchain", "files": [ { "fileName": "hnswlib.ts", - "filePath": "src/langchain/hnswlib.ts", - "url": "https://github.com/context-labs/autodoc/src/langchain/hnswlib.ts", - "summary": "The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method takes an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. 
The `addVectors` method is responsible for initializing the index, resizing it if necessary, and adding the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, this class can be used to efficiently store and search for similar documents based on their embeddings, which can be useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.", - "questions": "1. **Question:** What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer:** The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. 
**Question:** How does the `addDocuments` method work and what is its purpose?\n **Answer:** The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and the corresponding documents to the HNSW index and the `docstore` respectively.\n\n3. **Question:** How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer:** The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input. It checks if the query vector has the same length as the number of dimensions and if `k` is not greater than the number of elements in the index. It then performs a k-nearest neighbors search on the HNSW index using the query vector and returns an array of `[Document, number]` tuples, where each tuple contains a document from the `docstore` and its corresponding distance score to the query vector." + "filePath": "src\\langchain\\hnswlib.ts", + "url": "https://github.com/context-labs/autodoc/src\\langchain\\hnswlib.ts", + "summary": "The `HNSWLib` class in this code is a specialized vector store that uses the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. It is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. The main purpose of this class is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe constructor of the `HNSWLib` class takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. 
The `Embeddings` object is used to convert documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow for persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn the larger project, the `HNSWLib` class can be used to efficiently store and search for documents based on their content similarity, which can be useful for tasks such as document clustering, recommendation systems, or information retrieval.", + "questions": "1. **Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. 
It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them using the provided `Embeddings` instance. It then adds the resulting vectors and documents to the HNSW index and the `InMemoryDocstore`, respectively.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` most similar vectors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding similarity score to the query vector.", + "checksum": "4725f6bfddda88355b55a980a1eae582" } ], "folders": [], - "summary": "The `hnswlib.ts` file in the `.autodoc/docs/json/src/langchain` folder contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.\n\nThe `HNSWLib` class extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index. It takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. 
The `Embeddings` object is responsible for converting text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method initializes the index, resizes it if necessary, and adds the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nHere's an example of how this code might be used:\n\n```javascript\nconst embeddings = new Embeddings(/* ... 
*/);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, the `HNSWLib` class can be integrated with other components to build efficient and scalable systems for document similarity search, clustering, and recommendations based on text embeddings.", - "questions": "" + "summary": "The `hnswlib.ts` file in the `.autodoc\\docs\\json\\src\\langchain` folder contains the `HNSWLib` class, which is a specialized vector store utilizing the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. This class is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. Its primary purpose is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe `HNSWLib` class constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is responsible for converting documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods enable persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. 
The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nIn the larger project, the `HNSWLib` class can be employed to efficiently store and search for documents based on their content similarity, which can be beneficial for tasks such as document clustering, recommendation systems, or information retrieval.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nThis code snippet demonstrates how to create an `HNSWLib` instance, add documents to the index, and perform a similarity search. The results can then be used for various purposes, such as finding related documents or generating recommendations based on content similarity.", + "questions": "", + "checksum": "ccbe47bddb9d048f35d29fb2d8c04d7f" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/summary.json b/.autodoc/docs/json/src/summary.json index 0e74775..7b33b05 100644 --- a/.autodoc/docs/json/src/summary.json +++ b/.autodoc/docs/json/src/summary.json @@ -1,242 +1,272 @@ { "folderName": "src", - "folderPath": ".autodoc/docs/json/src", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src", + "folderPath": ".autodoc\\docs\\json\\src", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src", "files": [ { "fileName": "const.ts", - "filePath": "src/const.ts", - "url": "https://github.com/context-labs/autodoc/src/const.ts", - "summary": "The code in this file is responsible for managing the user configuration file for the Autodoc project. 
It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`. This constant represents the name of the user configuration file that will be used by the Autodoc project.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which resolves a sequence of paths into an absolute path. It takes three arguments:\n\n1. `os.homedir()`: This function returns the current user's home directory. It ensures that the user configuration file is stored in the user's home directory, making it user-specific.\n2. `'./.config/autodoc/'`: This string specifies the subdirectory within the user's home directory where the configuration file will be stored. The `.config` directory is a common location for storing configuration files on Unix-based systems, and the `autodoc` subdirectory is used to keep the Autodoc configuration files organized.\n3. `userConfigFileName`: This constant is used as the file name for the user configuration file.\n\nThe `userConfigFilePath` constant will store the absolute path to the user configuration file, which can be used by other parts of the Autodoc project to read or write user-specific settings.\n\nIn summary, this code is responsible for defining the location and name of the user configuration file for the Autodoc project. It ensures that the configuration file is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences.", - "questions": "1. 
**What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as the `path.resolve()` function used to construct the `userConfigFilePath`. The `node:os` module is imported to provide operating system-related utility methods, such as `os.homedir()` which returns the current user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. The `os.homedir()` function from the `node:os` module returns the correct home directory path for the current user, regardless of the operating system. Additionally, the `path.resolve()` function from the `node:path` module handles path separators and other OS-specific details, ensuring the correct file path is generated." + "filePath": "src\\const.ts", + "url": "https://github.com/context-labs/autodoc/src\\const.ts", + "summary": "The code in this file is responsible for managing the user configuration file for the autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively.\n\nThe `userConfigFileName` constant is defined as `'autodoc.user.json'`, which represents the name of the user configuration file. This file is expected to store user-specific settings for the autodoc project in JSON format.\n\nThe `userConfigFilePath` constant is created using the `path.resolve()` function, which combines the provided arguments into an absolute file path. 
The `os.homedir()` function is used to get the current user's home directory, and `./.config/autodoc/` is appended to it as the folder where the user configuration file should be stored. Finally, the `userConfigFileName` constant is appended to the path, resulting in the complete file path for the user configuration file.\n\nBy exporting both `userConfigFileName` and `userConfigFilePath`, other parts of the autodoc project can easily access and use these constants to read or write user-specific settings. For example, when the autodoc application starts, it can read the user configuration file from the specified path, and apply the settings accordingly.\n\nHere's a code example of how these constants might be used in another part of the autodoc project:\n\n```javascript\nimport { userConfigFilePath } from './path/to/this/file';\n\n// Read user configuration from the file\nconst userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8'));\n\n// Apply user settings\napplyUserSettings(userConfig);\n```\n\nIn summary, this code is responsible for defining the name and file path of the user configuration file for the autodoc project, allowing other parts of the project to easily access and manage user-specific settings.", + "questions": "1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?**\n\n The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder.\n\n2. **Why are the `node:path` and `node:os` modules being imported?**\n\n The `node:path` module is imported to provide utilities for working with file and directory paths, such as resolving the absolute path to the user configuration file. 
The `node:os` module is imported to provide operating system-related utility methods, such as getting the user's home directory.\n\n3. **Is this code compatible with different operating systems?**\n\n Yes, this code is compatible with different operating systems. The `os.homedir()` method returns the home directory of the current user, which is platform-specific, and the `path.resolve()` method takes care of handling the correct path separators for the current operating system.", + "checksum": "ce40980fffc58e17b13690b9e37a6015" }, { "fileName": "index.ts", - "filePath": "src/index.ts", - "url": "https://github.com/context-labs/autodoc/src/index.ts", - "summary": "The code is a CLI (Command Line Interface) tool for the Autodoc project, which helps in generating documentation for a codebase. It uses the `commander` package to define and manage commands, and `inquirer` for interactive prompts. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`.\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM (Language Model), and creates a locally stored index. It prompts the user to confirm before starting the indexing process.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration; otherwise, it creates a new one.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. 
It requires both `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also handles unhandled promise rejections by logging the error stack, showing an error spinner, stopping the spinner, and exiting with an error code.\n\nOverall, this CLI tool simplifies the process of generating documentation for a codebase by providing an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities.", - "questions": "1. **Question:** What is the purpose of the `autodoc.config.json` file and how is it used in the code?\n **Answer:** The `autodoc.config.json` file is used to store the configuration for the Autodoc repository. It is read and parsed in various commands like `init`, `estimate`, `index`, and `q` to provide the necessary configuration for each command's execution.\n\n2. **Question:** How does the `estimate` command work and what does it do?\n **Answer:** The `estimate` command reads the `autodoc.config.json` file, parses it into a configuration object, and then calls the `estimate` function with the configuration. The purpose of this command is to estimate the cost of running the `index` command on the repository.\n\n3. **Question:** What is the purpose of the `user` command and how does it handle user configuration?\n **Answer:** The `user` command is used to set the Autodoc user configuration. It reads the user configuration file specified by `userConfigFilePath`, parses it into a configuration object, and then calls the `user` function with the configuration. If the configuration file is not found, it calls the `user` function without any configuration, allowing the user to set up their configuration." 
+ "filePath": "src\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\index.ts", + "summary": "This code is the main entry point for the Autodoc CLI tool, which provides a set of commands to help developers automatically generate documentation for their codebase. The tool uses the `commander` library to define and handle commands, and `inquirer` for interactive prompts.\n\nThe available commands are:\n\n1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration.\n ```bash\n autodoc init\n ```\n\n2. `estimate`: Estimates the cost of running the `index` command on the repository. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc estimate\n ```\n\n3. `index`: Traverses the codebase, writes documentation using LLM, and creates a locally stored index. Before starting the indexing process, it prompts the user for confirmation. It requires the `autodoc.config.json` file to be present.\n ```bash\n autodoc index\n ```\n\n4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration.\n ```bash\n autodoc user\n ```\n\n5. `q`: Queries an Autodoc index. It requires both the `autodoc.config.json` and user configuration files to be present.\n ```bash\n autodoc q\n ```\n\nThe code also listens for unhandled promise rejections and handles them gracefully by showing an error spinner, stopping the spinner, and exiting with an error code.\n\nIn the larger project, this CLI tool serves as the primary interface for users to interact with Autodoc, allowing them to easily generate and manage documentation for their codebase.", + "questions": "1. 
**What is the purpose of the Autodoc CLI Tool?**\n\n The Autodoc CLI Tool is designed to help developers automatically generate documentation for their codebase by traversing the code, writing docs via LLM, and creating a locally stored index.\n\n2. **How does the `estimate` command work and what does it return?**\n\n The `estimate` command reads the `autodoc.config.json` file and estimates the cost of running the `index` command on the repository. It provides an estimation of the resources required to generate the documentation.\n\n3. **What is the role of the `user` command and how does it interact with the user configuration file?**\n\n The `user` command is responsible for setting the Autodoc user configuration. It reads the user configuration file (if it exists) and allows the user to update or create a new configuration. This configuration is then used in other commands, such as the `query` command, to interact with the Autodoc index.", + "checksum": "7bc160e4c4ef027d4968e3650a305a7d" }, { "fileName": "types.ts", - "filePath": "src/types.ts", - "url": "https://github.com/context-labs/autodoc/src/types.ts", - "summary": "This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders within the repository.\n\nThe code starts by importing `OpenAIChat` from the `langchain/llms` package. 
It then defines several types and interfaces that are used throughout the project:\n\n- `AutodocUserConfig`: Represents the user configuration for the autodoc project, including the LLM models to be used.\n- `AutodocRepoConfig`: Represents the configuration for a specific repository, including its name, URL, root directory, output directory, LLM models, and other settings.\n- `FileSummary` and `FolderSummary`: Represent the summaries and questions generated for files and folders, respectively.\n- `ProcessFileParams`, `ProcessFolderParams`, and `TraverseFileSystemParams`: Define the parameters for processing files, folders, and traversing the file system, respectively.\n- `ProcessFile` and `ProcessFolder`: Define the function types for processing files and folders, respectively.\n- `LLMModels`: Enumerates the available LLM models, such as GPT-3.5-turbo, GPT-4, and GPT-4-32k.\n- `LLMModelDetails`: Represents the details of an LLM model, including its name, cost per 1K tokens, maximum length, and other statistics.\n\nFor example, when using this code in the larger project, you might define a `ProcessFile` function that takes a `ProcessFileParams` object as input and generates a summary and questions for the file using the specified LLM model. Similarly, you could define a `ProcessFolder` function that processes all files and subfolders within a folder, generating summaries and questions for each.\n\nThe `TraverseFileSystemParams` type allows you to configure how the file system is traversed, including specifying which files and folders to ignore, and what prompts to use for generating summaries and questions.\n\nOverall, this code provides the foundation for the `autodoc` project by defining the types and interfaces needed to process code repositories and generate documentation using OpenAI's language models.", - "questions": "1. 
**Question:** What is the purpose of the `LLMModels` enum and how is it used in the code?\n **Answer:** The `LLMModels` enum defines the available language models for the autodoc project. It is used in the `AutodocUserConfig` and `AutodocRepoConfig` types to specify which language models should be used for processing files and folders.\n\n2. **Question:** What are the `ProcessFile` and `ProcessFolder` types and how are they used in the code?\n **Answer:** `ProcessFile` and `ProcessFolder` are types for functions that process a file or a folder, respectively. They are used as optional parameters in the `TraverseFileSystemParams` type, allowing developers to provide custom processing functions when traversing the file system.\n\n3. **Question:** What is the purpose of the `TraverseFileSystemParams` type and how is it used in the code?\n **Answer:** The `TraverseFileSystemParams` type defines the parameters required for traversing the file system. It is used to pass configuration options, such as input path, project name, custom processing functions, and other settings, to a function that will traverse the file system and process files and folders accordingly." + "filePath": "src\\types.ts", + "url": "https://github.com/context-labs/autodoc/src\\types.ts", + "summary": "This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders in the repository.\n\nThe `AutodocUserConfig` and `AutodocRepoConfig` types define the configuration options for the user and repository, respectively. These include settings such as the LLM models to use, repository URL, output directory, and content type.\n\n`FileSummary` and `FolderSummary` types represent the generated summaries for files and folders, including their paths, URLs, and checksums. 
The `ProcessFileParams` and `ProcessFolderParams` types define the parameters required for processing files and folders, such as the file or folder name, path, and content type.\n\n`ProcessFile` and `ProcessFolder` are function types that take the respective parameters and return a promise. These functions are responsible for processing the files and folders, generating summaries, and updating the documentation.\n\n`TraverseFileSystemParams` type defines the parameters for traversing the file system, including the input path, project name, and optional `processFile` and `processFolder` functions. It also includes settings for ignoring certain files or folders and content type preferences.\n\nThe `LLMModels` enum lists the available language models, such as GPT-3.5 Turbo, GPT-4, and GPT-4 32k. The `LLMModelDetails` type provides information about each model, including the cost per 1K tokens, maximum length, and success/failure statistics.\n\nIn the larger project, these types and interfaces would be used to configure and run the `autodoc` tool, allowing users to automatically generate documentation for their code repositories using OpenAI's language models. For example, a user could provide an `AutodocRepoConfig` object to configure the tool, and then use the `TraverseFileSystem` function to process the repository and generate the documentation.", + "questions": "1. **What is the purpose of the `AutodocUserConfig` and `AutodocRepoConfig` types?**\n\n The `AutodocUserConfig` type is used to define the user configuration for the autodoc project, which includes an array of LLMModels. The `AutodocRepoConfig` type is used to define the repository configuration for the autodoc project, which includes various properties such as name, repository URL, root, output, LLMModels, and more.\n\n2. **What are the different LLMModels available in the `LLMModels` enum?**\n\n The `LLMModels` enum lists the available language models for the autodoc project. 
Currently, there are three models: GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k).\n\n3. **What is the purpose of the `ProcessFile` and `ProcessFolder` types?**\n\n The `ProcessFile` type is a function type that takes a `ProcessFileParams` object as input and returns a Promise. It is used to process a single file in the autodoc project. The `ProcessFolder` type is a function type that takes a `ProcessFolderParams` object as input and returns a Promise. It is used to process a folder in the autodoc project.", + "checksum": "796822d4da09cce719cb86b540d2fb66" } ], "folders": [ { "folderName": "cli", - "folderPath": ".autodoc/docs/json/src/cli", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli", + "folderPath": ".autodoc\\docs\\json\\src\\cli", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli", "files": [ { "fileName": "spinner.ts", - "filePath": "src/cli/spinner.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/spinner.ts", - "summary": "This code provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. 
For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.", - "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the terminal, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **What are the different states of the spinner and how are they updated?**\n\n The spinner can have different states such as spinning, stopped, failed, succeeded, and displaying information. The functions `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` are used to update the spinner's state and text accordingly.\n\n3. **How does the `updateSpinnerText` function work and when should it be used?**\n\n The `updateSpinnerText` function updates the spinner's text with the provided message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. This function should be used when you want to change the spinner's text while it is spinning or start it with a new message." 
+ "filePath": "src\\cli\\spinner.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\spinner.ts", + "summary": "This code is responsible for managing a spinner, which is a visual element that indicates a process is running in the background. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time.\n\nThere are several functions exported by this module to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: This function stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. 
`spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.", + "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **How does the `updateSpinnerText` function work?**\n\n The `updateSpinnerText` function takes a message as an input and updates the spinner's text with the given message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message.\n\n3. **What are the differences between `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions?**\n\n These functions are used to update the spinner's state and message based on the outcome of a process. `spinnerError` is called when there is an error, and it stops the spinner with a failure message. `spinnerSuccess` is called when the process is successful, and it stops the spinner with a success message. 
`spinnerInfo` is used to display an informational message without stopping the spinner.", + "checksum": "d93ad7e714ce5446916bb1d63cbb6031" } ], "folders": [ { "folderName": "commands", - "folderPath": ".autodoc/docs/json/src/cli/commands", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands", "files": [], "folders": [ { "folderName": "estimate", - "folderPath": ".autodoc/docs/json/src/cli/commands/estimate", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\estimate", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\estimate", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/estimate/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts", - "summary": "The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe main steps involved in the function are:\n\n1. Set the output path for the JSON files generated during the process.\n2. Update the spinner text to display \"Estimating cost...\".\n3. Perform a dry run of the `processRepository` function with the given configuration options. 
The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stop the spinner once the dry run is complete.\n5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. Display the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository.", - "questions": "1. **What is the purpose of the `estimate` function and what parameters does it accept?**\n\n The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings.\n\n2. **How does the `estimate` function calculate the cost estimate?**\n\n The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details.\n\n3. 
**What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?**\n\n The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate." + "filePath": "src\\cli\\commands\\estimate\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\estimate\\index.ts", + "summary": "The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress.\n\nNext, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository.\n\nOnce the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. 
The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "1. **What is the purpose of the `estimate` function?**\n\n The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost.\n\n2. **What are the parameters passed to the `processRepository` function?**\n\n The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run.\n\n3. **How is the total estimated cost calculated and displayed?**\n\n The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. 
The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary.", + "checksum": "2b0b3903432ae423bbc597d04b052ecb" } ], "folders": [], - "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe main steps involved in the `estimate` function are:\n\n1. Setting the output path for the JSON files generated during the process.\n2. Updating the spinner text to display \"Estimating cost...\".\n3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed.\n4. Stopping the spinner once the dry run is complete.\n5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function.\n6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function.\n7. 
Displaying the estimated cost in a user-friendly format using the `chalk` library.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output/',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository.\n\nBy providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively.", - "questions": "" + "summary": "The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository.\n\nThe function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. 
The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost.\n\nUpon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input.\n\nFinally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges.\n\nHere's an example of how the `estimate` function might be used in the larger project:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\nThis example would estimate the cost of processing the \"my-repo\" repository with the specified configuration options.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "index", - "folderPath": ".autodoc/docs/json/src/cli/commands/index", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\index", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\index", "files": [ { "fileName": "convertJsonToMarkdown.ts", - "filePath": "src/cli/commands/index/convertJsonToMarkdown.ts", - 
"url": "https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts", - "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project.\n\nFirst, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile: () => {\n files++;\n return Promise.resolve();\n },\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nNext, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name.\n\nThe function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. 
Finally, it writes the Markdown content to the output file with the `.md` extension.\n\n```javascript\nconst outputPath = getFileName(markdownFilePath, '.', '.md');\nawait fs.writeFile(outputPath, markdown, 'utf-8');\n```\n\nThe `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project.\n\n```javascript\nawait traverseFileSystem({\n inputPath: inputRoot,\n projectName,\n processFile,\n ignore: [],\n filePrompt,\n folderPrompt,\n contentType,\n targetAudience,\n linkHosted,\n});\n```\n\nIn summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories.", - "questions": "1. **What is the purpose of the `convertJsonToMarkdown` function?**\n\n The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information.\n\n2. **How does the `traverseFileSystem` function work and what are its parameters?**\n\n The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders.\n\n3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?**\n\n The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. 
It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file." + "filePath": "src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\convertJsonToMarkdown.ts", + "summary": "The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories.\n\nThe function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process.\n\nThe code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function.\n\nNext, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory.\n\nFinally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. 
Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function.\n\nExample usage:\n\n```javascript\nconvertJsonToMarkdown({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory.", + "questions": "1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs?\n **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted.\n\n2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code?\n **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project.\n\n3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content?\n **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. 
The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist.", + "checksum": "79c860becf47b9882441682f0213d534" }, { "fileName": "createVectorStore.ts", - "filePath": "src/cli/commands/index/createVectorStore.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts", - "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings.\n\nThe `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata.\n\nThe `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. 
It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```\n\nThis code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file.", - "questions": "1. **Question:** What is the purpose of the `processFile` function and how does it handle errors?\n **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. If there is an error while reading the file, it rejects the promise with the error.\n\n2. **Question:** How does the `processDirectory` function handle nested directories and files?\n **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class?\n **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path." 
+ "filePath": "src\\cli\\commands\\index\\createVectorStore.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\createVectorStore.ts", + "summary": "The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project.\n\nThe `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document.\n\nThe `processDirectory` function is a recursive function that processes a directory and its subdirectories. It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects.\n\nThe `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects.\n\nThe `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. 
Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path.\n\nExample usage:\n\n```javascript\nconst config = {\n root: './data/documents',\n output: './data/vector_store',\n};\n\ncreateVectorStore(config).then(() => {\n console.log('Vector store created successfully');\n});\n```", + "questions": "1. **Question:** What is the purpose of the `processFile` function and what does it return?\n **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object.\n\n2. **Question:** How does the `processDirectory` function work and what does it return?\n **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories.\n\n3. **Question:** What is the purpose of the `createVectorStore` function and how does it work?\n **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. 
The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file.", + "checksum": "a3409c4340753a867c72eebef7626fb9" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/index/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts", - "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository.\n\nThe `index` function performs the following steps:\n\n1. Define the paths for JSON, Markdown, and data output directories within the `output` folder.\n\n2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step.\n\n3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\n4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The spinner text is updated to show the progress of this step, and a success message is displayed upon completion.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.", - "questions": "1. **What is the purpose of the `index` function in this code?**\n\n The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options.\n\n2. **What are the different steps involved in processing the repository?**\n\n The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files.\n\n3. **What is the role of the `AutodocRepoConfig` type?**\n\n The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files." 
+ "filePath": "src\\cli\\commands\\index\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\index.ts", + "summary": "The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository.\n\nThe `index` function performs three main tasks:\n\n1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory.\n\n ```javascript\n updateSpinnerText('Processing repository...');\n await processRepository({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory.\n\n ```javascript\n updateSpinnerText('Creating markdown files...');\n await convertJsonToMarkdown({ /* configuration options */ });\n spinnerSuccess();\n ```\n\n3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. 
The vector files are stored in the `output/docs/data/` directory.\n\n ```javascript\n updateSpinnerText('Create vector files...');\n await createVectorStore({ /* configuration options */ });\n spinnerSuccess();\n ```\n\nThroughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks.\n\nIn the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "1. **What does the `index` function do in this code?**\n\n The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files.\n\n2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?**\n\n The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files.\n\n3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?**\n\n These prompts are likely used to interact with the user during the processing of the repository. 
The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing.", + "checksum": "4060b1affae5a6c385cda308b3cd1750" }, { "fileName": "processRepository.ts", - "filePath": "src/cli/commands/index/processRepository.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts", - "summary": "The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings.\n\nThe function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders.\n\nThe `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory.\n\nThe `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. 
The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory.\n\nThe main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing.\n\nExample usage of the `processRepository` function:\n\n```javascript\nconst autodocConfig = {\n name: 'myProject',\n repositoryUrl: 'https://github.com/user/myProject',\n root: 'src',\n output: 'output',\n llms: [LLMModels.GPT3, LLMModels.GPT4],\n ignore: ['.git', 'node_modules'],\n filePrompt: 'Explain this code file',\n folderPrompt: 'Summarize this folder',\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nprocessRepository(autodocConfig).then((models) => {\n console.log('Processing complete');\n});\n```\n\nThis code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory.", - "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its input parameters?\n **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk.\n\n2. 
**Question:** How does the code determine the best language model to use for generating summaries and questions?\n **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration.\n\n3. **Question:** How does the code handle traversing the file system and processing files and folders?\n **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively." + "filePath": "src\\cli\\commands\\index\\processRepository.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\processRepository.ts", + "summary": "The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing.\n\nThe function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. 
If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory.\n\nThe `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder.\n\nThe main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics.\n\nThe `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder.", + "questions": "1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs?\n **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing.\n\n2. **Question:** How does the `calculateChecksum` function work and what is its purpose?\n **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. 
The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed.\n\n3. **Question:** How does the `reindexCheck` function work and when is it used?\n **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents.", + "checksum": "5b3ae9ffad1d4b4a22c6f7fd66bbde6f" }, { "fileName": "prompts.ts", - "filePath": "src/cli/commands/index/prompts.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts", - "summary": "The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders.\n\n1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example:\n\n```javascript\ncreateCodeFileSummary('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'Write a detailed technical explanation of what this code does.');\n```\n\n2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example:\n\n```javascript\ncreateCodeQuestions('src/example.js', 'autodoc', 'console.log(\"Hello, World!\");', 'JavaScript', 'beginner');\n```\n\n3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example:\n\n```javascript\nfolderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.');\n```\n\nThese functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders.", - "questions": "1. **Question:** What is the purpose of the `createCodeFileSummary` function?\n **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?\n **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take?\n **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt." + "filePath": "src\\cli\\commands\\index\\prompts.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\index\\prompts.ts", + "summary": "This code defines three utility functions that generate prompts for documentation experts working on a project. These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert.\n\n1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.');\n```\n\n2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers.\n\nExample usage:\n```javascript\nconst prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner');\n```\n\n3. 
`folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert.\n\nExample usage:\n```javascript\nconst prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.');\n```\n\nThese functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project.", + "questions": "1. **What is the purpose of the `createCodeFileSummary` function?**\n\n The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt.\n\n2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?**\n\n The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt.\n\n3. 
**What is the role of the `folderSummaryPrompt` function?**\n\n The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt.", + "checksum": "e44b82bf4912be69149685a997b6bde3" } ], "folders": [], - "summary": "The code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\nFor example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory.\n\nThe `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility.\n\nThe `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. 
It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path.\n\nHere's an example of how this code might be used in the larger project:\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n root: './src',\n output: './output',\n llms: 'https://llms.example.com',\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'text',\n targetAudience: 'developers',\n linkHosted: 'https://myproject-docs.example.com',\n};\n\nautodoc.index(config);\n```\n\nThis example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text.\n\nIn summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating documentation in various formats, and creating vector files for the documentation. This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process.\n\nThe main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks:\n\n1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMS (Language Learning Management System), and create JSON files with the results. 
These JSON files are stored in the `output/docs/json/` directory.\n\n2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory.\n\n3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. These vector files are stored in the `output/docs/data/` directory.\n\nThroughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions.\n\nHere's an example of how this code might be used:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\nThis will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory.\n\nThe `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project.\n\nIn summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. 
The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights.", + "questions": "", + "checksum": "376f96417f8cbea6a5ab2463268fe4af" }, { "folderName": "init", - "folderPath": ".autodoc/docs/json/src/cli/commands/init", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\init", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\init", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/init/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts", - "summary": "This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. 
These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available.\n\n2. **Question:** How does the `init` function work and what does it do with the user's input?\n **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`.\n\n3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration?\n **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation." + "filePath": "src\\cli\\commands\\init\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\init\\index.ts", + "summary": "This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```", + "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties.\n\n2. **How does the `init` function work and when is it called?**\n\n The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project.\n\n3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?**\n\n The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. The input is then used to create a new configuration object and write it to the `autodoc.config.json` file.", + "checksum": "b93831ff1f4023ab61c3bea963a8a112" } ], "folders": [], - "summary": "The `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. 
It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements.\n\nThe `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation.\n\nThe `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started.\n\nHere's an example of how the `init` function is used:\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThis code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. 
It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences.", - "questions": "" + "summary": "The `index.ts` file in the `.autodoc\\docs\\json\\src\\cli\\commands\\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument.\n\nThe `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided.\n\nThe `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nNext, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access).\n\nAfter the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. 
The new configuration is then written to the `autodoc.config.json` file in the project root.\n\nFinally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\nThis code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" }, { "folderName": "query", - "folderPath": ".autodoc/docs/json/src/cli/commands/query", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\query", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\query", "files": [ { "fileName": "createChatChain.ts", - "filePath": "src/cli/commands/query/createChatChain.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts", - "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain.\n\nThe question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. 
The `CONDENSE_PROMPT` template is used to format the input for the language model.\n\nThe document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate.\n\nThe `makeChain` function takes the following parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's GitHub repository.\n- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation).\n- `chatPrompt`: Additional instructions for answering questions about the content.\n- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users).\n- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors.\n- `llms`: An array of language models (e.g., GPT-3, GPT-4).\n- `onTokenStream`: An optional callback function to handle streaming tokens.\n\nExample usage:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nThis creates a chatbot that can answer questions about the \"autodoc\" project, using the provided language models and vector store.", - "questions": "1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. 
The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code?\n **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` callback function work and when is it used?\n **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time." + "filePath": "src\\cli\\commands\\query\\createChatChain.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\createChatChain.ts", + "summary": "This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. 
The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters:\n\n- `projectName`: The name of the software project.\n- `repositoryUrl`: The URL of the project's repository.\n- `contentType`: The type of content the chatbot is trained on.\n- `chatPrompt`: Additional instructions for answering questions about the content type.\n- `targetAudience`: The intended audience for the chatbot's answers.\n- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search.\n- `llms`: An array of LLMModels, which are language models used for generating answers.\n- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model.\n\nThe `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code.\n\nNext, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate.\n\nFinally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model.", + "questions": "1. 
**Question:** What is the purpose of the `makeChain` function and what are its input parameters?\n\n **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function.\n\n2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code?\n\n **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively.\n\n3. **Question:** How does the `onTokenStream` function work and when is it used?\n\n **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process.", + "checksum": "6869048a06de62499933b14c37cddc1d" }, { "fileName": "index.ts", - "filePath": "src/cli/commands/query/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts", - "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nThe code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. 
It then defines a `chatHistory` array to store the conversation history between the user and the chatbot.\n\nThe `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top.\n\nThe main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input.\n\nThe `getQuestion` function uses the `inquirer` library to prompt the user for a question. The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nIf an error occurs during the process, the chatbot displays an error message and prompts the user for another question.\n\nExample usage:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "1. **What is the purpose of the `query` function and what are its input parameters?**\n\n The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration.\n\n2. 
**How does the `vectorStore` work and what is its role in the code?**\n\n The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions.\n\n3. **How does the chat history work and what is its purpose?**\n\n The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions." + "filePath": "src\\cli\\commands\\query\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\query\\index.ts", + "summary": "This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks.\n\nThe `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function.\n\nThe chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. 
The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal.\n\nIf an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the \"MyProject\" codebase.", + "questions": "1. **What is the purpose of the `query` function in this code?**\n\n The `query` function is responsible for handling user interactions with the chatbot. It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results.\n\n2. **How does the code handle rendering Markdown text in the terminal?**\n\n The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`.\n\n3. **What is the purpose of the `chatHistory` variable and how is it used?**\n\n The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. 
It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array.", + "checksum": "19807a33957666422f31136970c37245" } ], "folders": [], - "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\nIn `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input.\n\nExample usage of `makeChain`:\n\n```javascript\nconst chatbot = makeChain(\n \"autodoc\",\n \"https://github.com/autodoc/autodoc\",\n \"code\",\n \"\",\n \"developer\",\n vectorstore,\n [gpt3, gpt4],\n (token) => console.log(token)\n);\n```\n\nIn `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. 
This chain object is responsible for generating responses based on the user's input.\n\nThe main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library.\n\nExample usage of the chatbot interface:\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThis chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers.", - "questions": "" + "summary": "The `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate.\n\nThe main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. 
The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`.\n\nHere's an example of how the `query` function might be used:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\nThis example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the \"MyProject\" codebase.\n\nThe `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter.\n\nThe `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project.\n\nIn summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. 
The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user.", + "questions": "", + "checksum": "9e0d0f111bf588e2df66862dce9db288" }, { "folderName": "user", - "folderPath": ".autodoc/docs/json/src/cli/commands/user", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\commands\\user", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\commands\\user", "files": [ { "fileName": "index.ts", - "filePath": "src/cli/commands/user/index.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts", - "summary": "This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. 
GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.", - "questions": "1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return?\n **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`.\n\n2. **Question:** How does the `user` function handle existing user configuration files?\n **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration.\n\n3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration?\n **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object." + "filePath": "src\\cli\\commands\\user\\index.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\commands\\user\\index.ts", + "summary": "This code is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```", + "questions": "1. 
**What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models.\n\n2. **How does the `user` function handle existing user configuration files?**\n\n The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0.\n\n3. **What are the available choices for LLM models in the `user` function?**\n\n The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object.", + "checksum": "76bc1e6d5d61e24907832c4cac443225" } ], "folders": [], - "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K.\n\nThe `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user.\n\n```typescript\nfunction makeConfigTemplate(llms: string[]): ConfigTemplate {\n // ...\n}\n```\n\nThe `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIf the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options:\n\n1. GPT-3.5 Turbo\n2. GPT-3.5 Turbo, GPT-4 8K (Early Access)\n3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)\n\nAfter the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format.\n\n```typescript\nconst configTemplate = makeConfigTemplate(selectedLLMs);\nawait fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2));\n```\n\nFinally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command.\n\nThis code is essential for setting up the user's environment and preferences for the Autodoc project. It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query.\n\nFor example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. 
This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage.\n\nIn summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs.", - "questions": "" + "summary": "The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K.\n\nThe `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed.\n\nThe main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\nIf the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code.\n\nNext, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). 
The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command.\n\nThis code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly.\n\nExample usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs.", + "questions": "", + "checksum": "4b8fd2b2abaec4959873fc3396c414d8" } ], - "summary": "The code in the `src/cli/commands` folder is responsible for handling various command-line tasks in the Autodoc project. It contains several subfolders, each dedicated to a specific command or functionality, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations.\n\nFor instance, the `estimate` subfolder contains a function that allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input and performs a dry run of the `processRepository` function. 
It then calculates the total estimated cost and displays it to the user. This helps users make informed decisions about whether to proceed with the indexing process or not.\n\n```javascript\nimport { estimate } from './autodoc/estimate';\n\nconst config = {\n // ...configuration options...\n};\n\nestimate(config);\n```\n\nThe `index` subfolder contains code for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown.\n\n```javascript\nimport autodoc from './autodoc';\n\nconst config = {\n // ...configuration options...\n};\n\nautodoc.index(config);\n```\n\nThe `init` subfolder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values.\n\n```javascript\nimport { init } from './autodoc';\n\n(async () => {\n await init();\n})();\n```\n\nThe `query` subfolder contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation.\n\n```javascript\nquery(repoConfig, userConfig);\n```\n\nThe `user` subfolder is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs).\n\n```typescript\nasync function user(): Promise {\n // ...\n}\n```\n\nIn summary, the code in the `src/cli/commands` folder plays a crucial role in the Autodoc project by providing various command-line functionalities, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. These functionalities help developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users.", - "questions": "" + "summary": "The code in the `.autodoc\\docs\\json\\src\\cli\\commands` folder is responsible for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. The folder contains several subfolders, each with a specific purpose.\n\n### estimate\n\nThe `estimate` function provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input and performs a dry run of the repository processing to calculate the estimated cost. Example usage:\n\n```javascript\nimport { estimate } from './path/to/this/file';\n\nconst config = {\n name: 'my-repo',\n repositoryUrl: 'https://github.com/user/my-repo.git',\n root: './',\n output: './output',\n llms: ['en'],\n ignore: ['.git', 'node_modules'],\n filePrompt: true,\n folderPrompt: true,\n chatPrompt: true,\n contentType: 'code',\n targetAudience: 'developers',\n linkHosted: true,\n};\n\nestimate(config);\n```\n\n### index\n\nThe code in this folder processes a given repository and generates documentation in JSON, Markdown, and vector formats. 
It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository, creating Markdown files, and creating vector files. Example usage:\n\n```javascript\nindex({\n name: \"myProject\",\n root: \"./input\",\n output: \"./output\",\n filePrompt: true,\n folderPrompt: true,\n contentType: \"code\",\n targetAudience: \"developers\",\n linkHosted: \"https://github.com/user/myProject\",\n});\n```\n\n### init\n\nThe `init` function initializes the configuration of the Autodoc project. It prompts the user to input necessary information to set up the project and creates the `autodoc.config.json` file in the project root. Example usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize the configuration with default values\nawait init();\n\n// Initialize the configuration with custom values\nawait init({\n name: 'My Custom Repository',\n repositoryUrl: 'https://github.com/user/repo',\n});\n```\n\n### query\n\nThe `query` folder contains code for creating a chatbot that can answer questions about a specific software project. The main entry point is the `query` function, which takes an `AutodocRepoConfig` object and an `AutodocUserConfig` object as input. Example usage:\n\n```javascript\nimport { query } from './autodoc';\n\nconst repoConfig = {\n name: 'MyProject',\n repositoryUrl: 'https://github.com/user/myproject',\n output: 'path/to/output',\n contentType: 'code',\n chatPrompt: 'Ask me anything about MyProject',\n targetAudience: 'developers',\n};\n\nconst userConfig = {\n llms: 'path/to/llms',\n};\n\nquery(repoConfig, userConfig);\n```\n\n### user\n\nThe `user` folder manages the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs). 
Example usage:\n\n```javascript\nimport { user } from './path/to/this/file';\n\n// Create a new user configuration with default settings\nawait user();\n\n// Update the user configuration with a custom config object\nawait user({ llms: [LLMModels.GPT3, LLMModels.GPT4] });\n```\n\nIn summary, the code in this folder is essential for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project.", + "questions": "", + "checksum": "d11f941351fb51140313ada9b52bbf1a" }, { "folderName": "utils", - "folderPath": ".autodoc/docs/json/src/cli/utils", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/utils", + "folderPath": ".autodoc\\docs\\json\\src\\cli\\utils", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\cli\\utils", "files": [ { "fileName": "APIRateLimit.ts", - "filePath": "src/cli/utils/APIRateLimit.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts", - "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. 
The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails.\n\nWhen `callApi` is called, it adds the `executeCall` function to the `queue`. The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. If there are available slots, it calls the `dequeueAndExecute` method.\n\nThe `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id) {\n // Simulate an API call\n return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000));\n}\n\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\n\n// Usage\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call.", - "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. 
It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method work?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed." + "filePath": "src\\cli\\utils\\APIRateLimit.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\APIRateLimit.ts", + "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once.\n\nThe class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit.\n\nWhen `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit. 
If both conditions are met, it dequeues the next API call and executes it.\n\nThe `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used in the larger project:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchSomeData(id) {\n // Call the API using the rate limiter\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once.", + "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How can the maximum number of concurrent calls be configured?**\n\n The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. 
If no value is provided, the default value is set to 50.", + "checksum": "8862552c9cfd8b6db454d45e565081ef" }, { "fileName": "FileUtil.ts", - "filePath": "src/cli/utils/FileUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts", - "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders.\n\n1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example:\n\n ```javascript\n getFileName(\"example.txt\"); // returns \"example.md\"\n getFileName(\"example\"); // returns \"example.md\"\n ```\n\n2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. For example:\n\n ```javascript\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n githubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", false); // returns \"https://github.com/user/repo/blob/master/example.md\"\n ```\n\n3. 
`githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example:\n\n ```javascript\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", true); // returns \"https://github.com/user/repo/folder\"\n githubFolderUrl(\"https://github.com/user/repo\", \"/input\", \"/input/folder\", false); // returns \"https://github.com/user/repo/tree/master/folder\"\n ```\n\nThese utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure.", - "questions": "1. **What does the `getFileName` function do?**\n\n The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists.\n\n2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository.\n\n3. 
**Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root." + "filePath": "src\\cli\\utils\\FileUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\FileUtil.ts", + "summary": "This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files.\n\n1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension.\n\n Example usage:\n\n ```\n getFileName('example.txt'); // returns 'example.md'\n getFileName('example', '_', '.html'); // returns 'example.html'\n ```\n\n2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. It returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true);\n // returns 'https://github.com/user/repo/example.md'\n ```\n\n3. 
`githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL.\n\n Example usage:\n\n ```\n githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true);\n // returns 'https://github.com/user/repo/folder'\n ```\n\nThese utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project.", + "questions": "1. **What is the purpose of the `getFileName` function?**\n\n The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended.\n\n2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?**\n\n Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders.\n\n3. 
**What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?**\n\n The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). Depending on the value of `linkHosted`, the functions will return different URL structures.", + "checksum": "d1f26fc674b4a9b4a2053642771871c8" }, { "fileName": "LLMUtil.ts", - "filePath": "src/cli/utils/LLMUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts", - "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0.\n\n```javascript\n{\n name: LLMModels.GPT3,\n inputCostPer1KTokens: 0.002,\n outputCostPer1KTokens: 0.002,\n maxLength: 3050,\n llm: new OpenAIChat({ ... }),\n inputTokens: 0,\n outputTokens: 0,\n succeeded: 0,\n failed: 0,\n total: 0,\n}\n```\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost for all models in the input array. 
It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.", - "questions": "1. **Question**: What is the purpose of the `models` object and what are the different models available?\n **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model.\n\n2. **Question**: How does the `printModelDetails` function work and what information does it display?\n **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table.\n\n3. **Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost?\n **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model." + "filePath": "src\\cli\\utils\\LLMUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\LLMUtil.ts", + "summary": "This code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. 
It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file.\n\nThe `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed.\n\nThe `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models.\n\nThe `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number.\n\nThese functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nprintModelDetails(Object.values(models));\n```\n\nAnd the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models:\n\n```javascript\nimport { models, totalIndexCostEstimate } from './path/to/this/file';\n\n// Process files with models...\n// Update models' properties...\n\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```", + "questions": "1. 
**Question:** What is the purpose of the `models` object and how are the different GPT models being used?\n **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration.\n\n2. **Question:** How does the `printModelDetails` function work and what information does it display?\n **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost.\n\n3. **Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost?\n **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens.", + "checksum": "f4464cf197f4af827ac0eac950d568fc" }, { - "fileName": "WaitUtil.ts", - "filePath": "src/cli/utils/WaitUtil.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts", - "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. 
The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed.\n\nExample usage:\n\n```javascript\n// Wait for 2 seconds and then log \"Hello, world!\"\nwait(2000, \"Hello, world!\").then(console.log);\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. If the function does not return `true` after 200 checks, the promise is rejected.\n\nThe function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. If the function has been called 200 times without returning `true`, the promise is rejected.\n\nExample usage:\n\n```javascript\n// Check if a certain element is visible on the page\nconst isElementVisible = () => document.querySelector(\"#my-element\").offsetParent !== null;\n\n// Wait for the element to become visible, then log \"Element is visible!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\"));\n```\n\nIn summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner.", - "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. 
It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code.\n\n3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?**\n\n Currently, there is no error handling or customization options for the `forTrue` function. The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. To add customization, additional parameters could be added to the function signature and used in the implementation." + "fileName": "traverseFileSystem.ts", + "filePath": "src\\cli\\utils\\traverseFileSystem.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\traverseFileSystem.ts", + "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. 
These properties include:\n\n- `inputPath`: The root path to start the traversal from.\n- `projectName`: The name of the project being processed.\n- `processFile`: An optional callback function to process a file.\n- `processFolder`: An optional callback function to process a folder.\n- `ignore`: An array of patterns to ignore during traversal.\n- `filePrompt`, `folderPrompt`: Optional prompts for user interaction.\n- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing.\n\nThe function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items.\n\nFor each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided.\n\nThe traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => {\n // Process file logic here\n },\n processFolder: (params) => {\n // Process folder logic here\n },\n ignore: ['node_modules/**', '.git/**'],\n});\n```", + "questions": "1. 
**What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found.", + "checksum": "b9e957c10ee6c009864c90aa2fa93763" }, { - "fileName": "traverseFileSystem.ts", - "filePath": "src/cli/utils/traverseFileSystem.ts", - "url": "https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts", - "summary": "The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used in the larger project for processing and generating documentation for a given project.\n\nThe function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties:\n\n- `inputPath`: The root folder path to start traversing.\n- `projectName`: The name of the project being documented.\n- `processFile`: An optional callback function to process files.\n- `processFolder`: An optional callback function to process folders.\n- `ignore`: An array of patterns to ignore files and folders.\n- `filePrompt`: An optional prompt for processing files.\n- `folderPrompt`: An optional prompt for processing folders.\n- `contentType`: The type of content being processed.\n- `targetAudience`: The target audience for the documentation.\n- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version.\n\nThe function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns.\n\nThe main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. 
If an entry is a file and is a text file, it calls the `processFile` callback if provided.\n\nHere's an example of how this function might be used in the larger project:\n\n```javascript\nimport { traverseFileSystem } from './autodoc';\n\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\n\ntraverseFileSystem(params);\n```\n\nThis example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions.", - "questions": "1. **What is the purpose of the `traverseFileSystem` function?**\n\n The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns.\n\n2. **How does the `shouldIgnore` function work?**\n\n The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns.\n\n3. **What is the role of the `dfs` function inside `traverseFileSystem`?**\n\n The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory." 
+ "fileName": "WaitUtil.ts", + "filePath": "src\\cli\\utils\\WaitUtil.ts", + "url": "https://github.com/context-labs/autodoc/src\\cli\\utils\\WaitUtil.ts", + "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax.\n\n### wait\n\nThe `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\ndelayedEcho(); // Output: Start -> (1 second delay) -> End\n```\n\n### forTrue\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). If `fn` does not return `true` within this time, the `Promise` is rejected.\n\nThis function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code.\n\nExample usage:\n\n```javascript\nlet condition = false;\n\nsetTimeout(() => {\n condition = true;\n}, 3000);\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n\nwaitForCondition(); // Output: Waiting for condition... 
-> (3 second delay) -> Condition met!\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution.", + "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved.\n\n2. **How does the `forTrue` function work?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected.\n\n3. **What is the use case for the `forTrue` function?**\n\n The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing.", + "checksum": "bf4acebb6c2736274af75a8c8441c9d2" } ], "folders": [], - "summary": "The code in the `.autodoc/docs/json/src/cli/utils` folder provides utility functions and classes that help manage various aspects of the autodoc project, such as rate-limiting API calls, handling file and folder paths, managing language models, and traversing file systems.\n\n`APIRateLimit.ts` contains the `APIRateLimit` class, which is designed to manage and limit the number of concurrent API calls made by the application. This is useful when the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. 
For example:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function getData(id) {\n return apiRateLimiter.callApi(() => fetchData(id));\n}\ngetData(1).then(console.log); // Fetches data for ID 1, rate-limited\n```\n\n`FileUtil.ts` provides utility functions for handling file and folder paths, such as generating file names and GitHub URLs for files and folders. These functions can be used to manage and navigate the documentation structure. For example:\n\n```javascript\ngetFileName(\"example.txt\"); // returns \"example.md\"\ngithubFileUrl(\"https://github.com/user/repo\", \"/input\", \"/input/example.md\", true); // returns \"https://github.com/user/repo/example.md\"\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project. It provides functions like `printModelDetails` and `totalIndexCostEstimate` to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models.\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations in the larger project. They can be used in various parts of the project to handle timing and conditional logic in an asynchronous manner. For example:\n\n```javascript\nwait(2000, \"Hello, world!\").then(console.log); // Waits for 2 seconds and then logs \"Hello, world!\"\nforTrue(isElementVisible).then(() => console.log(\"Element is visible!\")); // Waits for an element to become visible, then logs \"Element is visible!\"\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. 
It is designed to be used for processing and generating documentation for a given project. For example:\n\n```javascript\nconst params = {\n inputPath: './myProject',\n projectName: 'My Project',\n ignore: ['node_modules/**', '.git/**'],\n processFile: async (fileInfo) => {\n // Process the file, e.g., generate documentation\n },\n processFolder: async (folderInfo) => {\n // Process the folder, e.g., create a folder in the output directory\n },\n};\ntraverseFileSystem(params);\n```\n\nIn summary, the code in this folder provides various utility functions and classes that help manage different aspects of the autodoc project, making it easier to handle tasks such as rate-limiting, file and folder management, language model management, asynchronous operations, and file system traversal.", - "questions": "" + "summary": "The `.autodoc\\docs\\json\\src\\cli\\utils` folder contains utility functions and classes that assist in managing API rate limits, handling file and folder paths, managing language models, traversing file systems, and controlling asynchronous operations. These utilities can be used throughout the autodoc project to ensure consistent behavior and improve code organization.\n\n`APIRateLimit.ts` provides the `APIRateLimit` class, which manages and limits the number of concurrent API calls made by the application. This is useful when working with rate-limited APIs or preventing server overload. Example usage:\n\n```javascript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\nasync function fetchSomeData(id) {\n const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`));\n return result;\n}\n```\n\n`FileUtil.ts` offers utility functions for generating file names and GitHub URLs for documentation files. These functions ensure consistent naming and URL generation across the project. 
Example usage:\n\n```javascript\ngetFileName('example.txt'); // returns 'example.md'\ngithubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); // returns 'https://github.com/user/repo/example.md'\n```\n\n`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project utilizing OpenAI's GPT models. Functions like `printModelDetails` and `totalIndexCostEstimate` can be used to manage and analyze the usage and costs of different LLMs. Example usage:\n\n```javascript\nimport { models, printModelDetails } from './path/to/this/file';\nprintModelDetails(Object.values(models));\nconst totalCost = totalIndexCostEstimate(Object.values(models));\nconsole.log(`Total cost: ${totalCost}`);\n```\n\n`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processing files and folders based on provided parameters. This is useful for generating documentation or performing tasks that require processing files and folders in a directory structure. Example usage:\n\n```javascript\nawait traverseFileSystem({\n inputPath: './src',\n projectName: 'myProject',\n processFile: (params) => { /* Process file logic */ },\n processFolder: (params) => { /* Process folder logic */ },\n ignore: ['node_modules/**', '.git/**'],\n});\n```\n\n`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used to control the flow of asynchronous code execution. 
Example usage:\n\n```javascript\nasync function delayedEcho() {\n console.log(\"Start\");\n await wait(1000, \"Hello\");\n console.log(\"End\");\n}\n\nasync function waitForCondition() {\n console.log(\"Waiting for condition...\");\n await forTrue(() => condition);\n console.log(\"Condition met!\");\n}\n```\n\nIn summary, the utilities in this folder enhance the autodoc project by providing consistent behavior, improving code organization, and managing various aspects of the project, such as API rate limits, file and folder paths, language models, file system traversal, and asynchronous operations.", + "questions": "", + "checksum": "a4b7088863601cd326edbec7726eefe7" } ], - "summary": "The `spinner.ts` file in the `.autodoc/docs/json/src/cli` folder provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages.\n\nThe `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style.\n\nThe `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\n```\n\nThe `stopSpinner` function stops the spinner if it is currently spinning:\n\n```javascript\nstopSpinner();\n```\n\nThe `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. 
These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.):\n\n```javascript\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nIn the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes.", - "questions": "" + "summary": "The code in the `spinner.ts` file, located in the `.autodoc\\docs\\json\\src\\cli` folder, is responsible for managing a spinner, a visual element that indicates a background process is running. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces.\n\nThe module exports several functions to interact with the spinner:\n\n1. `updateSpinnerText(message: string)`: Updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n2. `stopSpinner()`: Stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n3. `spinnerError(message?: string)`: Stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n4. `spinnerSuccess(message?: string)`: Stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n5. 
`spinnerInfo(message: string)`: Displays an informational message without affecting the spinner's state.\n\n Example usage:\n ```javascript\n spinnerInfo('Connecting to server...');\n ```\n\nIn the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages.", + "questions": "", + "checksum": "e9d728bc3244f1081af08994f5fb1cd0" }, { "folderName": "langchain", - "folderPath": ".autodoc/docs/json/src/langchain", - "url": "https://github.com/context-labs/autodoc/.autodoc/docs/json/src/langchain", + "folderPath": ".autodoc\\docs\\json\\src\\langchain", + "url": "https://github.com/context-labs/autodoc/.autodoc\\docs\\json\\src\\langchain", "files": [ { "fileName": "hnswlib.ts", - "filePath": "src/langchain/hnswlib.ts", - "url": "https://github.com/context-labs/autodoc/src/langchain/hnswlib.ts", - "summary": "The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method takes an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. 
The `addVectors` method is responsible for initializing the index, resizing it if necessary, and adding the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, this class can be used to efficiently store and search for similar documents based on their embeddings, which can be useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.", - "questions": "1. **Question:** What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer:** The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. 
**Question:** How does the `addDocuments` method work and what is its purpose?\n **Answer:** The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and the corresponding documents to the HNSW index and the `docstore` respectively.\n\n3. **Question:** How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer:** The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input. It checks if the query vector has the same length as the number of dimensions and if `k` is not greater than the number of elements in the index. It then performs a k-nearest neighbors search on the HNSW index using the query vector and returns an array of `[Document, number]` tuples, where each tuple contains a document from the `docstore` and its corresponding distance score to the query vector." + "filePath": "src\\langchain\\hnswlib.ts", + "url": "https://github.com/context-labs/autodoc/src\\langchain\\hnswlib.ts", + "summary": "The `HNSWLib` class in this code is a specialized vector store that uses the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. It is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. The main purpose of this class is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe constructor of the `HNSWLib` class takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. 
The `Embeddings` object is used to convert documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow for persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn the larger project, the `HNSWLib` class can be used to efficiently store and search for documents based on their content similarity, which can be useful for tasks such as document clustering, recommendation systems, or information retrieval.", + "questions": "1. **Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. 
It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them using the provided `Embeddings` instance. It then adds the resulting vectors and documents to the HNSW index and the `InMemoryDocstore`, respectively.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` most similar vectors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding similarity score to the query vector.", + "checksum": "4725f6bfddda88355b55a980a1eae582" } ], "folders": [], - "summary": "The `hnswlib.ts` file in the `.autodoc/docs/json/src/langchain` folder contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems.\n\nThe `HNSWLib` class extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index. It takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. 
The `Embeddings` object is responsible for converting text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method initializes the index, resizes it if necessary, and adds the vectors and their corresponding metadata to the `InMemoryDocstore`.\n\nThe `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search.\n\nThe `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively.\n\nHere's an example of how this code might be used:\n\n```javascript\nconst embeddings = new Embeddings(/* ... 
*/);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn the larger project, the `HNSWLib` class can be integrated with other components to build efficient and scalable systems for document similarity search, clustering, and recommendations based on text embeddings.", - "questions": "" + "summary": "The `hnswlib.ts` file in the `.autodoc\\docs\\json\\src\\langchain` folder contains the `HNSWLib` class, which is a specialized vector store utilizing the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. This class is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. Its primary purpose is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content.\n\nThe `HNSWLib` class constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is responsible for converting documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents.\n\nThe `addDocuments` method accepts an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods enable persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. 
The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nIn the larger project, the `HNSWLib` class can be employed to efficiently store and search for documents based on their content similarity, which can be beneficial for tasks such as document clustering, recommendation systems, or information retrieval.\n\nHere's an example of how to use the `HNSWLib` class:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... */;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nThis code snippet demonstrates how to create an `HNSWLib` instance, add documents to the index, and perform a similarity search. The results can then be used for various purposes, such as finding related documents or generating recommendations based on content similarity.", + "questions": "", + "checksum": "ccbe47bddb9d048f35d29fb2d8c04d7f" } ], - "summary": "The `.autodoc/docs/json/src` folder contains the core components of the Autodoc project, which aims to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The main files in this folder are `const.ts`, `index.ts`, and `types.ts`.\n\n`const.ts` manages the user configuration file for the Autodoc project. It defines the location and name of the user configuration file, ensuring that it is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences.\n\n`index.ts` is a CLI (Command Line Interface) tool for the Autodoc project, which simplifies the process of generating documentation for a codebase. 
It provides an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`. For example:\n\n```bash\nautodoc init\nautodoc estimate\nautodoc index\nautodoc user\nautodoc q\n```\n\n`types.ts` defines the types and interfaces for the Autodoc project, providing the foundation for processing code repositories and generating documentation using OpenAI's language models. It includes types such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more.\n\nThe `cli` subfolder contains the `spinner.ts` file, which provides a utility for managing a command-line spinner using the `ora` library. This utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes. For example:\n\n```javascript\nupdateSpinnerText('Loading data...');\nstopSpinner();\nspinnerError('An error occurred');\nspinnerSuccess('Operation completed successfully');\nspinnerInfo('Please wait...');\n```\n\nThe `langchain` subfolder contains the `hnswlib.ts` file, which implements a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems. For example:\n\n```javascript\nconst embeddings = new Embeddings(/* ... 
*/);\nconst hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = await embeddings.embedText(\"example query\");\nconst similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn summary, the code in this folder provides the core components and utilities for the Autodoc project, enabling the automatic generation of documentation for code repositories using OpenAI's language models. The CLI tool simplifies the process, while the types and interfaces lay the foundation for processing and generating documentation. The additional utilities, such as the spinner and HNSWLib, enhance the user experience and provide efficient search capabilities.", - "questions": "" + "summary": "The `.autodoc\\docs\\json\\src` folder contains the core components of the autodoc project, which is designed to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The folder consists of three main files: `const.ts`, `index.ts`, and `types.ts`, as well as two subfolders: `cli` and `langchain`.\n\n`const.ts` defines the name and file path of the user configuration file for the autodoc project. This file stores user-specific settings in JSON format. Other parts of the project can easily access and use these constants to read or write user-specific settings. For example:\n\n```javascript\nimport { userConfigFilePath } from './path/to/this/file';\n\n// Read user configuration from the file\nconst userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8'));\n\n// Apply user settings\napplyUserSettings(userConfig);\n```\n\n`index.ts` serves as the main entry point for the Autodoc CLI tool, providing a set of commands for developers to generate and manage documentation for their codebase. The available commands include `init`, `estimate`, `index`, `user`, and `q`. 
The CLI tool uses the `commander` library for command handling and `inquirer` for interactive prompts.\n\n`types.ts` defines the types and interfaces for the autodoc project, such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more. These types are used to configure and run the autodoc tool, allowing users to generate documentation for their code repositories using OpenAI's LLMs.\n\nThe `cli` subfolder contains the `spinner.ts` file, which manages a spinner for visual feedback during background processes. It exports functions like `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` for easy interaction with the spinner.\n\nThe `langchain` subfolder contains the `hnswlib.ts` file, which provides the `HNSWLib` class for efficient similarity search using the Hierarchical Navigable Small World (HNSW) algorithm. This class is used to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content. Example usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst args = { space: 'cosine' };\nconst hnswLib = new HNSWLib(embeddings, args);\n\n// Add documents to the index\nawait hnswLib.addDocuments(documents);\n\n// Perform a similarity search\nconst queryVector = /* ... 
*/;\nconst k = 10;\nconst results = await hnswLib.similaritySearchVectorWithScore(queryVector, k);\n```\n\nIn summary, the code in this folder is responsible for the core functionality of the autodoc project, including user configuration management, CLI tool commands, type definitions, spinner management, and efficient similarity search using the HNSW algorithm.", + "questions": "", + "checksum": "de4c7ea3f98620e42875dbf7fb0df9a9" } \ No newline at end of file diff --git a/.autodoc/docs/json/src/types.json b/.autodoc/docs/json/src/types.json index cadfbb6..73151b8 100644 --- a/.autodoc/docs/json/src/types.json +++ b/.autodoc/docs/json/src/types.json @@ -1,7 +1,8 @@ { "fileName": "types.ts", - "filePath": "src/types.ts", - "url": "https://github.com/context-labs/autodoc/src/types.ts", - "summary": "This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders within the repository.\n\nThe code starts by importing `OpenAIChat` from the `langchain/llms` package. 
It then defines several types and interfaces that are used throughout the project:\n\n- `AutodocUserConfig`: Represents the user configuration for the autodoc project, including the LLM models to be used.\n- `AutodocRepoConfig`: Represents the configuration for a specific repository, including its name, URL, root directory, output directory, LLM models, and other settings.\n- `FileSummary` and `FolderSummary`: Represent the summaries and questions generated for files and folders, respectively.\n- `ProcessFileParams`, `ProcessFolderParams`, and `TraverseFileSystemParams`: Define the parameters for processing files, folders, and traversing the file system, respectively.\n- `ProcessFile` and `ProcessFolder`: Define the function types for processing files and folders, respectively.\n- `LLMModels`: Enumerates the available LLM models, such as GPT-3.5-turbo, GPT-4, and GPT-4-32k.\n- `LLMModelDetails`: Represents the details of an LLM model, including its name, cost per 1K tokens, maximum length, and other statistics.\n\nFor example, when using this code in the larger project, you might define a `ProcessFile` function that takes a `ProcessFileParams` object as input and generates a summary and questions for the file using the specified LLM model. Similarly, you could define a `ProcessFolder` function that processes all files and subfolders within a folder, generating summaries and questions for each.\n\nThe `TraverseFileSystemParams` type allows you to configure how the file system is traversed, including specifying which files and folders to ignore, and what prompts to use for generating summaries and questions.\n\nOverall, this code provides the foundation for the `autodoc` project by defining the types and interfaces needed to process code repositories and generate documentation using OpenAI's language models.", - "questions": "1. 
**Question:** What is the purpose of the `LLMModels` enum and how is it used in the code?\n **Answer:** The `LLMModels` enum defines the available language models for the autodoc project. It is used in the `AutodocUserConfig` and `AutodocRepoConfig` types to specify which language models should be used for processing files and folders.\n\n2. **Question:** What are the `ProcessFile` and `ProcessFolder` types and how are they used in the code?\n **Answer:** `ProcessFile` and `ProcessFolder` are types for functions that process a file or a folder, respectively. They are used as optional parameters in the `TraverseFileSystemParams` type, allowing developers to provide custom processing functions when traversing the file system.\n\n3. **Question:** What is the purpose of the `TraverseFileSystemParams` type and how is it used in the code?\n **Answer:** The `TraverseFileSystemParams` type defines the parameters required for traversing the file system. It is used to pass configuration options, such as input path, project name, custom processing functions, and other settings, to a function that will traverse the file system and process files and folders accordingly." + "filePath": "src\\types.ts", + "url": "https://github.com/context-labs/autodoc/src\\types.ts", + "summary": "This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders in the repository.\n\nThe `AutodocUserConfig` and `AutodocRepoConfig` types define the configuration options for the user and repository, respectively. These include settings such as the LLM models to use, repository URL, output directory, and content type.\n\n`FileSummary` and `FolderSummary` types represent the generated summaries for files and folders, including their paths, URLs, and checksums. 
The `ProcessFileParams` and `ProcessFolderParams` types define the parameters required for processing files and folders, such as the file or folder name, path, and content type.\n\n`ProcessFile` and `ProcessFolder` are function types that take the respective parameters and return a promise. These functions are responsible for processing the files and folders, generating summaries, and updating the documentation.\n\n`TraverseFileSystemParams` type defines the parameters for traversing the file system, including the input path, project name, and optional `processFile` and `processFolder` functions. It also includes settings for ignoring certain files or folders and content type preferences.\n\nThe `LLMModels` enum lists the available language models, such as GPT-3.5 Turbo, GPT-4, and GPT-4 32k. The `LLMModelDetails` type provides information about each model, including the cost per 1K tokens, maximum length, and success/failure statistics.\n\nIn the larger project, these types and interfaces would be used to configure and run the `autodoc` tool, allowing users to automatically generate documentation for their code repositories using OpenAI's language models. For example, a user could provide an `AutodocRepoConfig` object to configure the tool, and then use the `TraverseFileSystem` function to process the repository and generate the documentation.", + "questions": "1. **What is the purpose of the `AutodocUserConfig` and `AutodocRepoConfig` types?**\n\n The `AutodocUserConfig` type is used to define the user configuration for the autodoc project, which includes an array of LLMModels. The `AutodocRepoConfig` type is used to define the repository configuration for the autodoc project, which includes various properties such as name, repository URL, root, output, LLMModels, and more.\n\n2. **What are the different LLMModels available in the `LLMModels` enum?**\n\n The `LLMModels` enum lists the available language models for the autodoc project. 
Currently, there are three models: GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k).\n\n3. **What is the purpose of the `ProcessFile` and `ProcessFolder` types?**\n\n The `ProcessFile` type is a function type that takes a `ProcessFileParams` object as input and returns a Promise. It is used to process a single file in the autodoc project. The `ProcessFolder` type is a function type that takes a `ProcessFolderParams` object as input and returns a Promise. It is used to process a folder in the autodoc project.", + "checksum": "796822d4da09cce719cb86b540d2fb66" } \ No newline at end of file diff --git a/.autodoc/docs/json/tsconfig.json b/.autodoc/docs/json/tsconfig.json index b9a229a..7266ad8 100644 --- a/.autodoc/docs/json/tsconfig.json +++ b/.autodoc/docs/json/tsconfig.json @@ -2,6 +2,7 @@ "fileName": "tsconfig.json", "filePath": "tsconfig.json", "url": "https://github.com/context-labs/autodoc/tsconfig.json", - "summary": "This code is a configuration file for the TypeScript compiler in a project. The purpose of this configuration is to define various options and settings that the TypeScript compiler should use when transpiling TypeScript code into JavaScript. This is important for ensuring that the compiled output is consistent and compatible with the intended runtime environment.\n\nHere's a brief explanation of the key options set in this configuration:\n\n- `\"rootDir\": \"src\"`: Specifies the root directory containing the TypeScript source files. This tells the compiler where to look for the input files.\n- `\"outDir\": \"dist\"`: Specifies the output directory for the compiled JavaScript files. This is where the transpiled code will be saved.\n- `\"strict\": true`: Enables strict type checking, which enforces stronger type safety and helps catch potential issues during development.\n- `\"target\": \"es2020\"`: Sets the target ECMAScript version for the compiled output. 
In this case, the output will be compatible with ECMAScript 2020 (ES11) features.\n- `\"module\": \"ES2020\"`: Specifies the module system to use in the compiled output. This setting is aligned with the target ECMAScript version.\n- `\"sourceMap\": true`: Generates source map files alongside the compiled output. This helps with debugging by mapping the compiled code back to the original TypeScript source.\n- `\"esModuleInterop\": true` and `\"allowSyntheticDefaultImports\": true`: These options enable better compatibility with different module systems and allow for more flexible import statements.\n- `\"moduleResolution\": \"node\"`: Sets the module resolution strategy to Node.js-style, which is the most common approach for resolving module imports in JavaScript projects.\n- `\"declaration\": true`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled output. These files provide type information for the compiled code, which can be useful for other TypeScript projects that depend on this one.\n- `\"skipLibCheck\": true`: Skips type checking of declaration files, which can speed up the compilation process.\n\nIn the larger project, this configuration file ensures that the TypeScript compiler produces consistent and compatible JavaScript output, making it easier to integrate the compiled code with other parts of the project or with external dependencies.", - "questions": "1. **What is the purpose of the `rootDir` and `outDir` options in the configuration?**\n\n The `rootDir` option specifies the root folder of the source files, while the `outDir` option specifies the output directory for the compiled files.\n\n2. **What does the `strict` option do in the configuration?**\n\n The `strict` option enables a set of strict type-checking options in the TypeScript compiler, ensuring a higher level of type safety in the code.\n\n3. 
**What is the significance of the `target` and `module` options in the configuration?**\n\n The `target` option sets the ECMAScript target version for the compiled JavaScript output, while the `module` option specifies the module system to be used in the generated code. In this case, both are set to \"es2020\", indicating that the output will be ECMAScript 2020 compliant." + "summary": "The code provided is a configuration file for the TypeScript compiler in a project. It specifies various options that control how the TypeScript compiler should process the source code and generate the output JavaScript files. This configuration file is typically named `tsconfig.json` and is placed at the root of a TypeScript project.\n\nThe `compilerOptions` object contains several key-value pairs that define the behavior of the TypeScript compiler:\n\n- `rootDir`: Specifies the root directory of the source files. In this case, it is set to \"src\", meaning that the source files are located in the \"src\" folder.\n- `outDir`: Specifies the output directory for the compiled JavaScript files. In this case, it is set to \"dist\", meaning that the compiled files will be placed in the \"dist\" folder.\n- `strict`: Enables strict type checking, which helps catch potential issues in the code.\n- `target`: Specifies the ECMAScript target version for the output JavaScript files. In this case, it is set to \"es2020\", meaning that the output files will be compatible with ECMAScript 2020 features.\n- `module`: Specifies the module system to be used. In this case, it is set to \"ES2020\", meaning that the output files will use the ECMAScript 2020 module system.\n- `sourceMap`: Generates source map files, which help in debugging the compiled code by mapping it back to the original TypeScript source files.\n- `esModuleInterop`: Enables compatibility with ECMAScript modules for importing CommonJS modules.\n- `moduleResolution`: Specifies the module resolution strategy. 
In this case, it is set to \"node\", meaning that the Node.js module resolution algorithm will be used.\n- `allowSyntheticDefaultImports`: Allows default imports from modules with no default export.\n- `declaration`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled JavaScript files, which can be useful for other projects that depend on this one.\n- `skipLibCheck`: Skips type checking of declaration files, which can speed up the compilation process.\n\nOverall, this configuration file helps ensure that the TypeScript compiler processes the source code according to the specified options, resulting in compiled JavaScript files that are compatible with the desired ECMAScript version and module system, while also providing useful features like source maps and strict type checking.", + "questions": "1. **What is the purpose of the `rootDir` and `outDir` options in the configuration?**\n\n The `rootDir` option specifies the root directory of the input files, while the `outDir` option specifies the output directory for the compiled files.\n\n2. **What does the `strict` option do in the configuration?**\n\n The `strict` option enables a wide range of type checking behavior that results in stronger guarantees of program correctness.\n\n3. **What is the significance of the `target` and `module` options in the configuration?**\n\n The `target` option specifies the ECMAScript target version for the output code, and the `module` option specifies the module system used in the output code. 
In this case, both are set to \"es2020\", which means the output code will be compatible with ECMAScript 2020 features and module system.", + "checksum": "e52c7d90cf341455e41e46229333e66d" } \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/estimate/index.md b/.autodoc/docs/markdown/src/cli/commands/estimate/index.md index fc04cc4..f213482 100644 --- a/.autodoc/docs/markdown/src/cli/commands/estimate/index.md +++ b/.autodoc/docs/markdown/src/cli/commands/estimate/index.md @@ -1,29 +1,25 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/estimate/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\estimate\index.ts) -The `estimate` function in this code file is responsible for providing an estimated cost of indexing a given repository using the AutodocRepoConfig configuration. This function is particularly useful for users who want to get an idea of the cost involved in processing their repository before actually running the process. +The `estimate` function in this code is responsible for providing an estimated cost of processing a given repository using the Autodoc project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository. -The function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository name, URL, root directory, output directory, and other settings related to the processing of the repository. +The function starts by constructing the path to the JSON output directory, which will be used to store the intermediate results of the processing. It then updates the spinner text to indicate that the cost estimation is in progress. 
-The main steps involved in the function are: +Next, the `processRepository` function is called with the provided configuration options and a `true` flag to indicate that this is a dry run. This means that the repository will not actually be processed, but the function will return the details of what would happen if it were processed. This is used to calculate the estimated cost of processing the repository. -1. Set the output path for the JSON files generated during the process. -2. Update the spinner text to display "Estimating cost...". -3. Perform a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed. -4. Stop the spinner once the dry run is complete. -5. Print the details of the models obtained from the dry run using the `printModelDetails` utility function. -6. Calculate the total estimated cost using the `totalIndexCostEstimate` utility function. -7. Display the estimated cost in a user-friendly format using the `chalk` library. +Once the dry run is complete, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. The total estimated cost is then calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input. + +Finally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in a red color. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges. 
Here's an example of how the `estimate` function might be used in the larger project: ```javascript -import { estimate } from './autodoc/estimate'; +import { estimate } from './path/to/this/file'; const config = { name: 'my-repo', repositoryUrl: 'https://github.com/user/my-repo.git', root: './', - output: './output/', + output: './output', llms: ['en'], ignore: ['.git', 'node_modules'], filePrompt: true, @@ -37,16 +33,16 @@ const config = { estimate(config); ``` -This example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. +This example would estimate the cost of processing the "my-repo" repository with the specified configuration options. ## Questions: - 1. **What is the purpose of the `estimate` function and what parameters does it accept?** + 1. **What is the purpose of the `estimate` function?** - The `estimate` function is used to estimate the cost of processing a repository for indexing. It accepts an `AutodocRepoConfig` object as a parameter, which contains various configuration options such as repository URL, output path, and other settings. + The `estimate` function is used to perform a dry run of the `processRepository` command to get an estimated price for indexing the given repository. It then prints the model details and the total estimated cost. -2. **How does the `estimate` function calculate the cost estimate?** +2. **What are the parameters passed to the `processRepository` function?** - The `estimate` function performs a dry run of the `processRepository` command to get the estimated price for indexing the repository. It then uses the `totalIndexCostEstimate` function to calculate the total cost based on the returned run details. 
+ The `processRepository` function is called with an object containing the following properties: `name`, `repositoryUrl`, `root`, `output`, `llms`, `ignore`, `filePrompt`, `folderPrompt`, `chatPrompt`, `contentType`, `targetAudience`, and `linkHosted`. Additionally, a second argument `true` is passed to indicate that it's a dry run. -3. **What is the purpose of the `printModelDetails` function and how is it used in the `estimate` function?** +3. **How is the total estimated cost calculated and displayed?** - The `printModelDetails` function is used to display the details of the models used in the estimation process. In the `estimate` function, it is called with the values of the `runDetails` object to print the model details before displaying the total cost estimate. \ No newline at end of file + The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes an array of values from the `runDetails` object. The cost is then displayed using `console.log` with `chalk.redBright` for formatting, showing the cost with two decimal places and a note that the actual cost may vary. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/estimate/summary.md b/.autodoc/docs/markdown/src/cli/commands/estimate/summary.md index 37f1ecb..f64823d 100644 --- a/.autodoc/docs/markdown/src/cli/commands/estimate/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/estimate/summary.md @@ -1,27 +1,23 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/estimate) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands\estimate) -The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it allows users to estimate the cost of indexing a given repository before actually processing it. 
This function takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. +The `estimate` function in `index.ts` is a crucial part of the Autodoc project, as it provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input, containing various configuration options such as repository name, URL, root directory, output directory, and other settings related to the processing of the repository. -The main steps involved in the `estimate` function are: +The function begins by constructing the path to the JSON output directory, which stores intermediate results of the processing. It then updates the spinner text to indicate that cost estimation is in progress. The `processRepository` function is called with the provided configuration options and a `true` flag, signifying a dry run. This dry run returns the details of what would happen if the repository were processed, which is used to calculate the estimated cost. -1. Setting the output path for the JSON files generated during the process. -2. Updating the spinner text to display "Estimating cost...". -3. Performing a dry run of the `processRepository` function with the given configuration options. The dry run does not actually process the repository but instead returns the details of the models that would be processed. -4. Stopping the spinner once the dry run is complete. -5. Printing the details of the models obtained from the dry run using the `printModelDetails` utility function. -6. Calculating the total estimated cost using the `totalIndexCostEstimate` utility function. -7. Displaying the estimated cost in a user-friendly format using the `chalk` library. +Upon completion of the dry run, the spinner is updated to show success, and the results are printed using the `printModelDetails` function. 
The total estimated cost is calculated using the `totalIndexCostEstimate` function, which takes the values of the `runDetails` object as input. + +Finally, the estimated cost is displayed in the console using the `chalk.redBright` function to format the text in red. The message also includes a disclaimer that the actual cost may vary and recommends setting a limit in the user's OpenAI account to prevent unexpected charges. Here's an example of how the `estimate` function might be used in the larger project: ```javascript -import { estimate } from './autodoc/estimate'; +import { estimate } from './path/to/this/file'; const config = { name: 'my-repo', repositoryUrl: 'https://github.com/user/my-repo.git', root: './', - output: './output/', + output: './output', llms: ['en'], ignore: ['.git', 'node_modules'], filePrompt: true, @@ -35,6 +31,4 @@ const config = { estimate(config); ``` -This example demonstrates how a user can call the `estimate` function with a specific configuration to get an estimated cost for processing their repository. The function is designed to work seamlessly with other parts of the Autodoc project, such as the `processRepository` function, which is responsible for the actual processing of the repository. - -By providing an estimated cost upfront, the `estimate` function helps users make informed decisions about whether to proceed with the indexing process or not. This can be particularly useful for users with large repositories or those who are working within a budget. Overall, the `estimate` function is an essential tool for users looking to leverage the power of Autodoc while managing their costs effectively. +This example would estimate the cost of processing the "my-repo" repository with the specified configuration options. 
diff --git a/.autodoc/docs/markdown/src/cli/commands/index/convertJsonToMarkdown.md b/.autodoc/docs/markdown/src/cli/commands/index/convertJsonToMarkdown.md index 82f31c5..cfba5ac 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/convertJsonToMarkdown.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/convertJsonToMarkdown.md @@ -1,61 +1,37 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/convertJsonToMarkdown.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\index\convertJsonToMarkdown.ts) -The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This is done in two main steps: counting the number of files in the project and creating Markdown files for each code file in the project. +The `convertJsonToMarkdown` function in this code is responsible for converting JSON files containing documentation information into Markdown files. This function is part of the larger Autodoc project, which aims to automate the process of generating documentation for code repositories. -First, the function uses the `traverseFileSystem` utility to count the number of files in the project. It takes an `AutodocRepoConfig` object as input, which contains information about the project, such as its name, root directory, output directory, and other configuration options. The `traverseFileSystem` utility is called with a `processFile` function that increments the `files` counter for each file encountered. +The function takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, input and output directories, and other settings related to the documentation generation process. 
-```javascript -await traverseFileSystem({ - inputPath: inputRoot, - projectName, - processFile: () => { - files++; - return Promise.resolve(); - }, - ignore: [], - filePrompt, - folderPrompt, - contentType, - targetAudience, - linkHosted, -}); -``` +The code first counts the number of files in the project by traversing the file system using the `traverseFileSystem` utility function. This is done to provide a progress update to the user via the `updateSpinnerText` function. -Next, the function defines another `processFile` function that reads the content of each JSON file, converts it to a Markdown format, and writes the output to a new Markdown file in the specified output directory. It first checks if the content exists, and if not, it returns early. It then creates the output directory if it doesn't exist, and parses the JSON content into either a `FolderSummary` or a `FileSummary` object, depending on the file name. +Next, the `processFile` function is defined, which is responsible for reading the content of each JSON file, parsing it, and converting it into a Markdown format. The function checks if the file has a summary, and if so, it generates the Markdown content with a link to the code on GitHub, the summary, and any questions if present. The output Markdown file is then saved in the specified output directory. -The function then constructs the Markdown content by including a link to the code on GitHub, the summary, and any questions if they exist. Finally, it writes the Markdown content to the output file with the `.md` extension. +Finally, the `traverseFileSystem` function is called again, this time with the `processFile` function as an argument. This allows the code to process each JSON file in the project and convert it into a Markdown file. Once the process is complete, a success message is displayed to the user using the `spinnerSuccess` function. 
-```javascript -const outputPath = getFileName(markdownFilePath, '.', '.md'); -await fs.writeFile(outputPath, markdown, 'utf-8'); -``` - -The `convertJsonToMarkdown` function is then called again with the new `processFile` function to create the Markdown files for each code file in the project. +Example usage: ```javascript -await traverseFileSystem({ - inputPath: inputRoot, - projectName, - processFile, - ignore: [], - filePrompt, - folderPrompt, - contentType, - targetAudience, - linkHosted, +convertJsonToMarkdown({ + name: "myProject", + root: "./input", + output: "./output", + filePrompt: true, + folderPrompt: true, + contentType: "code", + targetAudience: "developers", + linkHosted: "https://github.com/user/myProject", }); ``` -In summary, this code is responsible for converting JSON files containing documentation information into Markdown files, which can be used in the larger Autodoc project to generate documentation for code repositories. +This will convert all JSON files in the `./input` directory into Markdown files and save them in the `./output` directory. ## Questions: - 1. **What is the purpose of the `convertJsonToMarkdown` function?** - - The `convertJsonToMarkdown` function is responsible for converting JSON files containing summaries and questions about code files in a project into Markdown files. It traverses the file system, reads the JSON files, and creates corresponding Markdown files with the provided information. - -2. **How does the `traverseFileSystem` function work and what are its parameters?** - - The `traverseFileSystem` function is a utility function that recursively traverses the file system starting from a given input path. It takes an object as a parameter with properties such as `inputPath`, `projectName`, `processFile`, `ignore`, `filePrompt`, `folderPrompt`, `contentType`, `targetAudience`, and `linkHosted`. 
The function processes each file using the provided `processFile` callback and can be configured to ignore certain files or folders. + 1. **Question:** What is the purpose of the `convertJsonToMarkdown` function and what are the expected inputs? + **Answer:** The `convertJsonToMarkdown` function is used to convert JSON files to Markdown files for each code file in the project. It takes an `AutodocRepoConfig` object as input, which contains various properties like projectName, root, output, filePrompt, folderPrompt, contentType, targetAudience, and linkHosted. -3. **What is the purpose of the `processFile` function inside `convertJsonToMarkdown`?** +2. **Question:** How does the `traverseFileSystem` function work and what is its role in this code? + **Answer:** The `traverseFileSystem` function is a utility function that recursively traverses the file system, starting from the inputPath, and processes each file using the provided `processFile` function. In this code, it is used twice: first to count the number of files in the project, and then to create Markdown files for each code file in the project. - The `processFile` function is a callback function that is passed to the `traverseFileSystem` function. It is responsible for reading the content of a JSON file, parsing it, and creating a corresponding Markdown file with the summary and questions. It also handles creating the output directory if it doesn't exist and writing the Markdown content to the output file. \ No newline at end of file +3. **Question:** How are the output directories and Markdown files created, and what is the structure of the generated Markdown content? + **Answer:** The output directories are created using the `fs.mkdir` function with the `recursive: true` option. The Markdown files are created using the `fs.writeFile` function. The structure of the generated Markdown content includes a link to view the code on GitHub, the summary, and optionally, a list of questions if they exist. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/index/createVectorStore.md b/.autodoc/docs/markdown/src/cli/commands/index/createVectorStore.md index 31495cc..2f8217c 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/createVectorStore.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/createVectorStore.md @@ -1,14 +1,14 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/createVectorStore.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\index\createVectorStore.ts) -The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. +The code in this file is responsible for processing a directory of text files, splitting the text into chunks, and creating a vector store using the HNSWLib library and OpenAIEmbeddings. This vector store can be used for efficient similarity search and retrieval of documents in the larger project. -The `processFile` function takes a file path as input and returns a Promise that resolves to a Document object. It reads the file contents and creates a Document object with the file contents as `pageContent` and the file path as metadata. +The `processFile` function reads a file's content and creates a `Document` object with the content and metadata (source file path). It returns a Promise that resolves to the created Document. -The `processDirectory` function takes a directory path as input and returns a Promise that resolves to an array of Document objects. It reads the files in the directory and calls `processFile` for each file. If a file is a directory, it calls `processDirectory` recursively. The function accumulates all the Document objects in an array and returns it. +The `processDirectory` function is a recursive function that processes a directory and its subdirectories. 
It reads the files in the directory, and for each file, it checks if it's a directory or a regular file. If it's a directory, the function calls itself with the new directory path. If it's a file, it calls the `processFile` function to create a Document object. The function returns an array of Document objects. -The `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as input. It has a `load` method that calls the `processDirectory` function with the file path and returns the resulting array of Document objects. +The `RepoLoader` class extends the `BaseDocumentLoader` class and has a constructor that takes a file path as an argument. It has a `load` method that calls the `processDirectory` function with the given file path and returns the array of Document objects. -The `createVectorStore` function is an async function that takes an AutodocRepoConfig object as input, which contains the root directory and output file path. It creates a RepoLoader instance with the root directory, loads the raw documents, and splits them into chunks using the `RecursiveCharacterTextSplitter` class. It then creates a vector store using the HNSWLib library and OpenAIEmbeddings, and saves the vector store to the output file path. +The `createVectorStore` function is an async function that takes an `AutodocRepoConfig` object as an argument, which contains the root directory and output file path. It creates a `RepoLoader` instance with the root directory and loads the documents using the `load` method. It then creates a `RecursiveCharacterTextSplitter` instance with a specified chunk size and chunk overlap and splits the documents into chunks. Finally, it creates a vector store using the HNSWLib library and OpenAIEmbeddings with the processed documents and saves the vector store to the output file path. 
Example usage: @@ -22,14 +22,12 @@ createVectorStore(config).then(() => { console.log('Vector store created successfully'); }); ``` - -This code snippet would process all the text files in the `./data/documents` directory, split the text into chunks, create a vector store using the HNSWLib library and OpenAIEmbeddings, and save the vector store to the `./data/vector_store` file. ## Questions: - 1. **Question:** What is the purpose of the `processFile` function and how does it handle errors? - **Answer:** The `processFile` function reads the content of a file and creates a `Document` object with the file contents and metadata. If there is an error while reading the file, it rejects the promise with the error. + 1. **Question:** What is the purpose of the `processFile` function and what does it return? + **Answer:** The `processFile` function is an asynchronous function that reads the content of a file given its file path, creates a `Document` object with the file contents and metadata (source file path), and returns a Promise that resolves to the created `Document` object. -2. **Question:** How does the `processDirectory` function handle nested directories and files? - **Answer:** The `processDirectory` function iterates through the files in a directory. If it encounters a subdirectory, it calls itself recursively to process the subdirectory. If it encounters a file, it processes the file using the `processFile` function and adds the resulting `Document` object to the `docs` array. +2. **Question:** How does the `processDirectory` function work and what does it return? + **Answer:** The `processDirectory` function is an asynchronous function that takes a directory path as input, reads all the files and subdirectories within it, and processes them recursively. It returns a Promise that resolves to an array of `Document` objects created from the files in the directory and its subdirectories. -3. 
**Question:** What is the purpose of the `createVectorStore` function and how does it use the `RepoLoader` class? - **Answer:** The `createVectorStore` function is responsible for creating a vector store from a given repository. It uses the `RepoLoader` class to load all the documents from the repository, splits the text into chunks using the `RecursiveCharacterTextSplitter`, and then creates a vector store using the `HNSWLib.fromDocuments` method with the `OpenAIEmbeddings`. Finally, it saves the vector store to the specified output path. \ No newline at end of file +3. **Question:** What is the purpose of the `createVectorStore` function and how does it work? + **Answer:** The `createVectorStore` function is an asynchronous function that takes an `AutodocRepoConfig` object as input, which contains the root directory path and output file path. The function loads all the documents from the root directory using the `RepoLoader`, splits the text into chunks using the `RecursiveCharacterTextSplitter`, creates a vector store from the documents using the `HNSWLib` and `OpenAIEmbeddings`, and saves the vector store to the specified output file. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/index/index.md b/.autodoc/docs/markdown/src/cli/commands/index/index.md index 1950469..67191ab 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/index.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/index.md @@ -1,50 +1,45 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\index\index.ts) -The code in this file is responsible for processing a given repository and generating documentation in JSON and Markdown formats, as well as creating vector files for the documentation. 
It exports a single function `index` that takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. +The code in this file is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It exports a single function `index` that takes an `AutodocRepoConfig` object as its argument, which contains various configuration options for processing the repository. -The `index` function performs the following steps: +The `index` function performs three main tasks: -1. Define the paths for JSON, Markdown, and data output directories within the `output` folder. +1. **Process the repository**: It traverses the repository, calls the LLMS (Language Learning Management System) for each file, and creates JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The JSON files are stored in the `output/docs/json/` directory. -2. Process the repository by traversing its files, calling the LLMS (Language Learning Management System) for each file, and creating JSON files with the results. This is done using the `processRepository` function, which takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step. + ```javascript + updateSpinnerText('Processing repository...'); + await processRepository({ /* configuration options */ }); + spinnerSuccess(); + ``` -3. Convert the generated JSON files into Markdown format using the `convertJsonToMarkdown` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion. +2. **Create Markdown files**: It converts the generated JSON files into Markdown files using the `convertJsonToMarkdown` function. 
This function also takes the same configuration options as the `index` function. The Markdown files are stored in the `output/docs/markdown/` directory. -4. Create vector files for the generated Markdown documentation using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The spinner text is updated to show the progress of this step, and a success message is displayed upon completion. + ```javascript + updateSpinnerText('Creating markdown files...'); + await convertJsonToMarkdown({ /* configuration options */ }); + spinnerSuccess(); + ``` -Here's an example of how this code might be used in the larger project: +3. **Create vector files**: It creates vector files from the generated Markdown files using the `createVectorStore` function. This function also takes the same configuration options as the `index` function. The vector files are stored in the `output/docs/data/` directory. -```javascript -import autodoc from './autodoc'; + ```javascript + updateSpinnerText('Create vector files...'); + await createVectorStore({ /* configuration options */ }); + spinnerSuccess(); + ``` -const config = { - name: 'MyProject', - repositoryUrl: 'https://github.com/user/myproject', - root: './src', - output: './output', - llms: 'https://llms.example.com', - ignore: ['.git', 'node_modules'], - filePrompt: true, - folderPrompt: true, - chatPrompt: true, - contentType: 'text', - targetAudience: 'developers', - linkHosted: 'https://myproject-docs.example.com', -}; +Throughout the execution of these tasks, the code uses `updateSpinnerText` and `spinnerSuccess` functions to provide visual feedback on the progress of the tasks. -autodoc.index(config); -``` - -This example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text. 
+In the larger project, this code would be used to automatically generate documentation for a given repository based on the provided configuration options. The generated documentation can then be used for various purposes, such as displaying it on a website or analyzing the content for specific insights. ## Questions: - 1. **What is the purpose of the `index` function in this code?** + 1. **What does the `index` function do in this code?** - The `index` function is the main entry point for the autodoc project. It processes a given repository, converts the JSON files to markdown, and creates vector files based on the provided configuration options. + The `index` function is the main entry point for the autodoc project. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository and creating JSON files, converting JSON files to markdown files, and creating vector files. -2. **What are the different steps involved in processing the repository?** +2. **What is the purpose of the `processRepository`, `convertJsonToMarkdown`, and `createVectorStore` functions?** - The processing of the repository involves three main steps: (1) traversing the repository and calling LLMS for each file to create JSON files with the results, (2) converting the JSON files to markdown files, and (3) creating vector files from the markdown files. + The `processRepository` function traverses the repository, calls LLMS for each file, and creates JSON files with the results. The `convertJsonToMarkdown` function creates markdown files from the generated JSON files. The `createVectorStore` function creates vector files from the markdown files. -3. **What is the role of the `AutodocRepoConfig` type?** +3. **What are the different types of prompts (`filePrompt`, `folderPrompt`, `chatPrompt`) used for in this code?** - The `AutodocRepoConfig` type is used to define the shape of the configuration object that is passed to the `index` function. 
It specifies the properties and their types that are required for the function to process the repository, convert JSON to markdown, and create vector files. \ No newline at end of file + These prompts are likely used to interact with the user during the processing of the repository. The `filePrompt` might be used to ask the user for input regarding specific files, the `folderPrompt` for input regarding folders, and the `chatPrompt` for general input or feedback during the processing. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/index/processRepository.md b/.autodoc/docs/markdown/src/cli/commands/index/processRepository.md index 20ef43d..bb943ec 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/processRepository.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/processRepository.md @@ -1,44 +1,20 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/processRepository.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\index\processRepository.ts) -The `processRepository` function in this code is responsible for processing a given code repository and generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the repository URL, input and output paths, language models to use, and other settings. +The `processRepository` function in this code is responsible for generating summaries and questions for code files and folders in a given repository. It takes an `AutodocRepoConfig` object as input, which contains information about the project, repository URL, input and output paths, language models, and other configurations. An optional `dryRun` parameter can be provided to skip actual API calls and file writing. -The function starts by initializing an `APIRateLimit` instance to limit the number of API calls made to the language models. 
It then defines several helper functions, such as `callLLM` for making API calls, `isModel` for checking if a given model is valid, `processFile` for processing individual files, and `processFolder` for processing folders. +The function starts by initializing the encoding and rate limit for API calls. It then defines two main helper functions: `processFile` and `processFolder`. The `processFile` function is responsible for processing individual code files. It reads the file content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it creates prompts for summaries and questions, selects the appropriate language model based on the input length, and calls the language model API to generate the summaries and questions. The results are then saved to a JSON file in the output directory. -The `processFile` function reads the content of a file, generates prompts for summaries and questions using the `createCodeFileSummary` and `createCodeQuestions` functions, and selects the best language model to use based on the token length of the prompts. It then calls the language model API to generate the summaries and questions, and saves the results as JSON files in the output directory. +The `processFolder` function is responsible for processing folders. It reads the folder content, calculates a checksum, and checks if reindexing is needed. If reindexing is required, it reads the summaries and questions of all files and subfolders in the folder, calls the language model API to generate a summary for the folder, and saves the result to a `summary.json` file in the folder. -The `processFolder` function reads the contents of a folder, filters out ignored files, and processes each file and subfolder within the folder. It then generates a summary prompt using the `folderSummaryPrompt` function and calls the language model API to generate a summary for the folder. 
The folder summary, along with the summaries and questions of its files and subfolders, is saved as a JSON file in the output directory. +The main function then counts the number of files and folders in the project and processes them using the `traverseFileSystem` utility function. It processes all files first, followed by all folders. Finally, it returns the language model usage statistics. -The main part of the `processRepository` function first counts the number of files and folders in the input directory using the `filesAndFolders` function. It then processes each file and folder using the `traverseFileSystem` function, which calls the `processFile` and `processFolder` functions for each file and folder encountered. Finally, the function returns the language models used during processing. - -Example usage of the `processRepository` function: - -```javascript -const autodocConfig = { - name: 'myProject', - repositoryUrl: 'https://github.com/user/myProject', - root: 'src', - output: 'output', - llms: [LLMModels.GPT3, LLMModels.GPT4], - ignore: ['.git', 'node_modules'], - filePrompt: 'Explain this code file', - folderPrompt: 'Summarize this folder', - contentType: 'code', - targetAudience: 'developers', - linkHosted: true, -}; - -processRepository(autodocConfig).then((models) => { - console.log('Processing complete'); -}); -``` - -This code would process the `src` directory of the `myProject` repository, generating summaries and questions for each file and folder, and saving the results in the `output` directory. +The `calculateChecksum` function calculates the checksum of a list of file contents, while the `reindexCheck` function checks if reindexing is needed by comparing the new and old checksums of a file or folder. ## Questions: - 1. **Question:** What is the purpose of the `processRepository` function and what are its input parameters? 
- **Answer:** The `processRepository` function is responsible for processing a code repository by generating summaries and questions for each file and folder in the project. It takes an `AutodocRepoConfig` object as input, which contains various configuration options such as the project name, repository URL, input and output paths, language models, and other settings. Additionally, it accepts an optional `dryRun` parameter, which, if set to true, will not save the generated summaries and questions to disk. + 1. **Question:** What is the purpose of the `processRepository` function and what are its inputs and outputs? + **Answer:** The `processRepository` function processes a given code repository, generating summaries and questions for each file and folder within the repository. It takes an `AutodocRepoConfig` object and an optional `dryRun` boolean as inputs. The function returns a `Promise` that resolves to an object containing the models used during processing. -2. **Question:** How does the code determine the best language model to use for generating summaries and questions? - **Answer:** The code checks the maximum token length of each available language model (GPT3, GPT4, and GPT432k) and compares it with the token length of the prompts (summary and questions). It selects the first model that can handle the maximum token length and is included in the `llms` array provided in the configuration. +2. **Question:** How does the `calculateChecksum` function work and what is its purpose? + **Answer:** The `calculateChecksum` function takes an array of file contents as input and calculates a checksum for each file using the MD5 hashing algorithm. It then concatenates all the checksums and calculates a final checksum using MD5 again. The purpose of this function is to generate a unique identifier for the contents of the files, which can be used to determine if the files have changed and need to be reprocessed. -3. 
**Question:** How does the code handle traversing the file system and processing files and folders? - **Answer:** The code uses the `traverseFileSystem` utility function to traverse the file system. It takes an object with various configuration options, including the input path, project name, and callbacks for processing files and folders. The `processFile` and `processFolder` functions are passed as callbacks to handle the processing of files and folders, respectively. \ No newline at end of file +3. **Question:** How does the `reindexCheck` function work and when is it used? + **Answer:** The `reindexCheck` function checks if a summary.json file exists in the given file or folder path and compares the stored checksum with the new checksum to determine if the file or folder needs to be reindexed. It is used in the `processFile` and `processFolder` functions to decide whether to regenerate summaries and questions for a file or folder based on changes in their contents. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/index/prompts.md b/.autodoc/docs/markdown/src/cli/commands/index/prompts.md index c365548..4ff2751 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/prompts.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/prompts.md @@ -1,32 +1,38 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/index/prompts.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\index\prompts.ts) -The code in this file provides three functions that generate prompts for documentation experts to create summaries and answer questions about code files and folders in a project. These functions are likely used in the larger autodoc project to automate the process of generating documentation for code files and folders. +This code defines three utility functions that generate prompts for documentation experts working on a project. 
These functions are used to create documentation for code files and folders within a project. The generated prompts are in markdown format and include specific instructions for the documentation expert. -1. `createCodeFileSummary`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the code file. The prompt includes the file path, project name, content type, and a custom file prompt. For example: +1. `createCodeFileSummary`: This function generates a prompt for creating a summary of a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `filePrompt`. The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert. +Example usage: ```javascript -createCodeFileSummary('src/example.js', 'autodoc', 'console.log("Hello, World!");', 'JavaScript', 'Write a detailed technical explanation of what this code does.'); +const prompt = createCodeFileSummary('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'Write a detailed technical explanation of this code.'); ``` -2. `createCodeQuestions`: This function takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. It returns a formatted string prompt for a documentation expert to generate three questions and answers that a target audience might have about the code file. The prompt includes the file path, project name, content type, and target audience. For example: +2. `createCodeQuestions`: This function generates a prompt for creating a list of questions and answers about a code file. It takes five parameters: `filePath`, `projectName`, `fileContents`, `contentType`, and `targetAudience`. 
The function returns a markdown formatted string that includes the file's content and a custom prompt for the documentation expert to provide questions and answers. +Example usage: ```javascript -createCodeQuestions('src/example.js', 'autodoc', 'console.log("Hello, World!");', 'JavaScript', 'beginner'); +const prompt = createCodeQuestions('path/to/file.js', 'MyProject', 'const x = 10;', 'JavaScript', 'beginner'); ``` -3. `folderSummaryPrompt`: This function takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. It returns a formatted string prompt for a documentation expert to write a summary of the folder and its contents. The prompt includes the folder path, project name, content type, a list of files and their summaries, a list of subfolders and their summaries, and a custom folder prompt. For example: +3. `folderSummaryPrompt`: This function generates a prompt for creating a summary of a folder containing code files and subfolders. It takes six parameters: `folderPath`, `projectName`, `files`, `folders`, `contentType`, and `folderPrompt`. The `files` parameter is an array of `FileSummary` objects, and the `folders` parameter is an array of `FolderSummary` objects. The function returns a markdown formatted string that includes a list of files and folders with their summaries and a custom prompt for the documentation expert. 
+Example usage: ```javascript -folderSummaryPrompt('src/', 'autodoc', [{fileName: 'example.js', summary: 'A simple example file'}], [{folderName: 'utils', summary: 'Utility functions'}], 'JavaScript', 'Write a detailed technical explanation of the folder structure and contents.'); +const prompt = folderSummaryPrompt('path/to/folder', 'MyProject', fileSummaries, folderSummaries, 'JavaScript', 'Write a detailed technical explanation of this folder structure.'); ``` -These functions can be used in the autodoc project to generate prompts for documentation experts, helping to streamline the process of creating documentation for code files and folders. +These functions can be used in the larger project to generate documentation tasks for experts, ensuring consistent formatting and instructions across different parts of the project. ## Questions: - 1. **Question:** What is the purpose of the `createCodeFileSummary` function? - **Answer:** The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt. + 1. **What is the purpose of the `createCodeFileSummary` function?** -2. **Question:** How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function? - **Answer:** The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt. + The `createCodeFileSummary` function generates a string template for a code file summary prompt, which includes the file path, project name, file contents, content type, and a file prompt. -3. **Question:** What is the purpose of the `folderSummaryPrompt` function and what parameters does it take? 
- **Answer:** The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, files, folders, content type, and a folder prompt. It takes parameters such as folderPath, projectName, files, folders, contentType, and folderPrompt. \ No newline at end of file +2. **How does the `createCodeQuestions` function differ from the `createCodeFileSummary` function?** + + The `createCodeQuestions` function generates a string template for a code documentation prompt that asks for 3 questions and their answers, while the `createCodeFileSummary` function generates a string template for a code file summary prompt. + +3. **What is the role of the `folderSummaryPrompt` function?** + + The `folderSummaryPrompt` function generates a string template for a folder summary prompt, which includes the folder path, project name, lists of files and folders with their summaries, content type, and a folder prompt. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/index/summary.md b/.autodoc/docs/markdown/src/cli/commands/index/summary.md index 1efb44a..27f3ef4 100644 --- a/.autodoc/docs/markdown/src/cli/commands/index/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/index/summary.md @@ -1,36 +1,34 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/index) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands\index) -The code in this folder is responsible for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown. 
+The code in this folder is responsible for processing a given repository and generating documentation in JSON, Markdown, and vector formats. It consists of several functions and utilities that work together to automate the documentation generation process. -For example, the `processRepository` function processes a code repository and generates summaries and questions for each file and folder within the repository. It uses helper functions like `callLLM` to make API calls to language models and `processFile` and `processFolder` to process individual files and folders. The results are saved as JSON files in the output directory. +The main function, `index`, takes an `AutodocRepoConfig` object as input, which contains various configuration options for processing the repository. It performs three main tasks: -The `convertJsonToMarkdown` function converts JSON files containing documentation information into Markdown files. It counts the number of files in the project and creates Markdown files for each code file in the project using the `traverseFileSystem` utility. +1. **Process the repository**: It calls the `processRepository` function to traverse the repository, generate summaries and questions for code files and folders using the LLMs (Large Language Models), and create JSON files with the results. These JSON files are stored in the `output/docs/json/` directory. -The `createVectorStore` function processes a directory of text files, splits the text into chunks, and creates a vector store using the HNSWLib library and OpenAIEmbeddings. It processes the files in the directory and calls `processFile` for each file, creating a vector store and saving it to the output file path. +2. **Create Markdown files**: It uses the `convertJsonToMarkdown` function to convert the generated JSON files into Markdown files. These Markdown files are stored in the `output/docs/markdown/` directory. 
-Here's an example of how this code might be used in the larger project: +3. **Create vector files**: It calls the `createVectorStore` function to create vector files from the generated Markdown files. These vector files are stored in the `output/docs/data/` directory. + +Throughout the execution of these tasks, the code provides visual feedback on the progress of the tasks using `updateSpinnerText` and `spinnerSuccess` functions. + +Here's an example of how this code might be used: ```javascript -import autodoc from './autodoc'; - -const config = { - name: 'MyProject', - repositoryUrl: 'https://github.com/user/myproject', - root: './src', - output: './output', - llms: 'https://llms.example.com', - ignore: ['.git', 'node_modules'], +index({ + name: "myProject", + root: "./input", + output: "./output", filePrompt: true, folderPrompt: true, - chatPrompt: true, - contentType: 'text', - targetAudience: 'developers', - linkHosted: 'https://myproject-docs.example.com', -}; - -autodoc.index(config); + contentType: "code", + targetAudience: "developers", + linkHosted: "https://github.com/user/myProject", +}); ``` -This example would process the `MyProject` repository, generate JSON and Markdown documentation, and create vector files for the documentation, all while providing progress updates through spinner text. +This will process the repository located at `./input`, generate documentation in JSON, Markdown, and vector formats, and save the results in the `./output` directory. + +The `prompts.ts` file contains utility functions that generate prompts for documentation experts. These functions create markdown formatted strings with specific instructions for the documentation expert, ensuring consistent formatting and instructions across different parts of the project. -In summary, the code in this folder plays a crucial role in the Autodoc project by processing code repositories, generating documentation in various formats, and creating vector files for the documentation. 
This helps developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users. +In summary, the code in this folder automates the process of generating documentation for a given repository based on the provided configuration options. The generated documentation can be used for various purposes, such as displaying it on a website or analyzing the content for specific insights. diff --git a/.autodoc/docs/markdown/src/cli/commands/init/index.md b/.autodoc/docs/markdown/src/cli/commands/init/index.md index d7abbef..f3384c0 100644 --- a/.autodoc/docs/markdown/src/cli/commands/init/index.md +++ b/.autodoc/docs/markdown/src/cli/commands/init/index.md @@ -1,32 +1,40 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/init/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\init\index.ts) -This code is responsible for initializing and configuring the `autodoc` project. It provides a function `init` that creates a configuration file `autodoc.config.json` with user inputs and default values. The configuration file is essential for the project to function correctly and adapt to different user requirements. +This code is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument. -The `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation. 
+The `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided. -The `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. +The `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. -If there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function. +Next, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). -Finally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started. +After the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. The new configuration is then written to the `autodoc.config.json` file in the project root. 
-Here's an example of how the `init` function is used: +Finally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project. + +Example usage: ```javascript -import { init } from './autodoc'; +import { init } from './path/to/this/file'; -(async () => { - await init(); -})(); -``` +// Initialize the configuration with default values +await init(); -This code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. +// Initialize the configuration with custom values +await init({ + name: 'My Custom Repository', + repositoryUrl: 'https://github.com/user/repo', +}); +``` ## Questions: - 1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return? - **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new `AutodocRepoConfig` object with default values for each property, using the provided `config` values if available. + 1. **What is the purpose of the `makeConfigTemplate` function?** + + The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc project. It takes an optional `config` parameter of type `AutodocRepoConfig` and returns a new configuration object with default values for various properties. + +2. **How does the `init` function work and when is it called?** + + The `init` function is an asynchronous function that initializes the Autodoc configuration by creating an `autodoc.config.json` file in the specified location. It takes an optional `config` parameter of type `AutodocRepoConfig` and prompts the user for input to set the configuration values. It is called when the user wants to set up the Autodoc configuration for their project. -2. 
**Question:** How does the `init` function work and what does it do with the user's input? - **Answer:** The `init` function is an asynchronous function that initializes the Autodoc configuration by prompting the user for input using the `inquirer` package. It takes an optional `config` parameter of type `AutodocRepoConfig` and uses it as the default values for the prompts. After collecting the user's input, it creates a new configuration object using the `makeConfigTemplate` function and writes it to a file named `autodoc.config.json`. +3. **What is the purpose of the `inquirer.prompt` calls in the `init` function?** -3. **Question:** What are the different LLM models available in the `llms` prompt and how are they used in the configuration? - **Answer:** The `llms` prompt provides three choices for the user to select the LLM models they have access to: GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The selected LLM models are stored in the `llms` property of the `AutodocRepoConfig` object, which can be used later in the project to determine which models to use for generating documentation. \ No newline at end of file + The `inquirer.prompt` calls are used to interactively prompt the user for input to set the configuration values for the Autodoc project. The user is asked for the repository name, repository URL, and the LLMs they have access to. The input is then used to create a new configuration object and write it to the `autodoc.config.json` file. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/init/summary.md b/.autodoc/docs/markdown/src/cli/commands/init/summary.md index 85bc3fd..9c96c0c 100644 --- a/.autodoc/docs/markdown/src/cli/commands/init/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/init/summary.md @@ -1,23 +1,30 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/init) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands\init) -The `index.ts` file in the `init` folder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. This configuration file is crucial for the project to function correctly and adapt to different user requirements. +The `index.ts` file in the `.autodoc\docs\json\src\cli\commands\init` folder is responsible for initializing the configuration of the Autodoc project. It provides a template for the configuration and prompts the user to input necessary information to set up the project. The main functionality is exposed through the `init` function, which is an asynchronous function that takes an optional `AutodocRepoConfig` object as an argument. -The `makeConfigTemplate` function generates a default configuration object with pre-defined values. It takes an optional `config` parameter to override the default values. The returned object contains settings such as repository name, URL, output directory, LLM models, and various prompts for generating documentation. +The `makeConfigTemplate` function creates a default configuration object with pre-defined values for various properties. It takes an optional `config` parameter and returns a new `AutodocRepoConfig` object with the provided values or default values if not provided. 
-The `init` function is an asynchronous function that takes an optional `config` parameter. It first checks if a configuration file already exists in the project directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. +The `init` function first checks if an `autodoc.config.json` file already exists in the project root. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. -If there is no existing configuration file or the user chooses to overwrite, the function prompts the user for the repository name, URL, and LLM models they have access to. These values are then used to create a new configuration object using the `makeConfigTemplate` function. +Next, the user is prompted to enter the name of their repository, the GitHub URL of their repository, and the LLMs they have access to. The LLMs are language models used for generating documentation. The user can choose between GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). -Finally, the new configuration object is written to the `autodoc.config.json` file in the project directory. A success message is displayed, instructing the user to run `doc index` to get started. +After the user provides the necessary information, a new configuration object is created using the `makeConfigTemplate` function with the user's input. The new configuration is then written to the `autodoc.config.json` file in the project root. -Here's an example of how the `init` function is used: +Finally, a success message is displayed, instructing the user to run `doc index` to get started with the Autodoc project. 
+ +Example usage: ```javascript -import { init } from './autodoc'; +import { init } from './path/to/this/file'; + +// Initialize the configuration with default values +await init(); -(async () => { - await init(); -})(); +// Initialize the configuration with custom values +await init({ + name: 'My Custom Repository', + repositoryUrl: 'https://github.com/user/repo', +}); ``` -This code imports the `init` function and calls it, initializing the `autodoc` project with the user's inputs and default values. The `init` function is a crucial part of the project, as it sets up the necessary configuration for the project to work correctly. It interacts with other parts of the project by providing the required settings and values, ensuring that the project can adapt to different user requirements and preferences. +This code is essential for setting up the Autodoc project, as it creates the necessary configuration file and gathers user input to customize the project. It works in conjunction with other parts of the project, such as the CLI and the documentation generation process, which rely on the configuration file to function correctly. diff --git a/.autodoc/docs/markdown/src/cli/commands/query/createChatChain.md b/.autodoc/docs/markdown/src/cli/commands/query/createChatChain.md index b41b9b6..adea20d 100644 --- a/.autodoc/docs/markdown/src/cli/commands/query/createChatChain.md +++ b/.autodoc/docs/markdown/src/cli/commands/query/createChatChain.md @@ -1,44 +1,32 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/query/createChatChain.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\query\createChatChain.ts) -This code defines a function `makeChain` that creates a chatbot for answering questions about a software project. The chatbot is built using the `ChatVectorDBQAChain` class, which combines two separate language models: a question generator and a document chain. 
+This code defines a function `makeChain` that creates a chatbot for answering questions about a software project called `projectName`. The chatbot is trained on the content of the project, which is located at `repositoryUrl`. The content type of the project is specified by the `contentType` parameter. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter. -The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The `CONDENSE_PROMPT` template is used to format the input for the language model. +The `makeChain` function takes several parameters: -The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. The `makeQAPrompt` function generates this template, which instructs the language model to provide a conversational answer with hyperlinks to the project's GitHub repository. The answer should be tailored to the target audience and include code examples when appropriate. +- `projectName`: The name of the software project. +- `repositoryUrl`: The URL of the project's repository. +- `contentType`: The type of content the chatbot is trained on. +- `chatPrompt`: Additional instructions for answering questions about the content type. +- `targetAudience`: The intended audience for the chatbot's answers. +- `vectorstore`: An instance of HNSWLib for efficient nearest neighbor search. +- `llms`: An array of LLMModels, which are language models used for generating answers. +- `onTokenStream`: An optional callback function that is called when a new token is generated by the language model. 
-The `makeChain` function takes the following parameters: +The `makeChain` function first creates a question generator using the `LLMChain` class. This generator is responsible for rephrasing follow-up questions to be standalone questions. It uses the `CONDENSE_PROMPT` template, which is defined at the beginning of the code. -- `projectName`: The name of the software project. -- `repositoryUrl`: The URL of the project's GitHub repository. -- `contentType`: The type of content the chatbot is trained on (e.g., code, documentation). -- `chatPrompt`: Additional instructions for answering questions about the content. -- `targetAudience`: The intended audience for the chatbot's answers (e.g., developers, users). -- `vectorstore`: An instance of the `HNSWLib` class for storing and searching vectors. -- `llms`: An array of language models (e.g., GPT-3, GPT-4). -- `onTokenStream`: An optional callback function to handle streaming tokens. - -Example usage: - -```javascript -const chatbot = makeChain( - "autodoc", - "https://github.com/autodoc/autodoc", - "code", - "", - "developer", - vectorstore, - [gpt3, gpt4], - (token) => console.log(token) -); -``` - -This creates a chatbot that can answer questions about the "autodoc" project, using the provided language models and vector store. +Next, the function creates a `QA_PROMPT` template using the `makeQAPrompt` function. This template is used to generate answers to the questions in a conversational manner, with hyperlinks back to GitHub and code examples where appropriate. + +Finally, the function creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. The chatbot uses the `vectorstore` for efficient nearest neighbor search and the `llms` language models for generating answers. 
If the `onTokenStream` callback is provided, it will be called when a new token is generated by the language model. ## Questions: 1. **Question:** What is the purpose of the `makeChain` function and what are its input parameters? - **Answer:** The `makeChain` function is used to create a new `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` callback function. -2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in the code? - **Answer:** `CONDENSE_PROMPT` is a template for generating a standalone question from a given chat history and follow-up input. `QA_PROMPT` is a template for generating a conversational answer with hyperlinks back to GitHub, based on the given context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively. + **Answer:** The `makeChain` function is used to create a `ChatVectorDBQAChain` instance, which is responsible for generating questions and answers based on the given input parameters. The input parameters include `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and an optional `onTokenStream` function. + +2. **Question:** What are the roles of `CONDENSE_PROMPT` and `QA_PROMPT` in this code? + + **Answer:** `CONDENSE_PROMPT` is a template for generating standalone questions from a given chat history and follow-up question. `QA_PROMPT` is a template for generating conversational answers with hyperlinks to GitHub, based on the provided context and question. Both templates are used in the `LLMChain` and `loadQAChain` instances, respectively. + +3. **Question:** How does the `onTokenStream` function work and when is it used? -3. 
**Question:** How does the `onTokenStream` callback function work and when is it used? - **Answer:** The `onTokenStream` callback function is an optional parameter in the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process, allowing developers to handle or process the tokens in real-time. \ No newline at end of file + **Answer:** The `onTokenStream` function is an optional callback that can be provided to the `makeChain` function. It is used to handle the streaming of tokens generated by the OpenAIChat instance. If provided, it will be called with each new token generated during the chat process. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/query/index.md b/.autodoc/docs/markdown/src/cli/commands/query/index.md index 338396c..2288bca 100644 --- a/.autodoc/docs/markdown/src/cli/commands/query/index.md +++ b/.autodoc/docs/markdown/src/cli/commands/query/index.md @@ -1,33 +1,44 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/query/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\query\index.ts) -This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation. +This code defines a chatbot interface for the Autodoc project, which allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a combination of the `inquirer` library for user input, `marked` and `marked-terminal` for rendering Markdown output, and the `langchain` library for handling natural language processing tasks. 
-The code starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. It then defines a `chatHistory` array to store the conversation history between the user and the chatbot. +The `query` function is the main entry point for the chatbot. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. The function initializes a vector store using the `HNSWLib` and `OpenAIEmbeddings` classes, and creates a chat chain using the `makeChain` function. -The `displayWelcomeMessage` function is used to display a welcome message to the user when they start the chatbot. The `clearScreenAndMoveCursorToTop` function clears the terminal screen and moves the cursor to the top. +The chatbot interface is displayed using the `displayWelcomeMessage` function, which prints a welcome message to the console. The `getQuestion` function is used to prompt the user for a question using the `inquirer` library. The chatbot then enters a loop, where it processes the user's question, generates a response using the chat chain, and displays the response as Markdown in the terminal. -The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input. +If an error occurs during the processing of a question, the chatbot will display an error message and continue to prompt the user for a new question. The loop continues until the user types 'exit', at which point the chatbot terminates. -The `getQuestion` function uses the `inquirer` library to prompt the user for a question. 
The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library. +Here's an example of how the `query` function might be used: -If an error occurs during the process, the chatbot displays an error message and prompts the user for another question. +```javascript +import { query } from './autodoc'; -Example usage: +const repoConfig = { + name: 'MyProject', + repositoryUrl: 'https://github.com/user/myproject', + output: 'path/to/output', + contentType: 'code', + chatPrompt: 'Ask me anything about MyProject', + targetAudience: 'developers', +}; + +const userConfig = { + llms: 'path/to/llms', +}; -```javascript query(repoConfig, userConfig); ``` -This chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers. +This example would initialize the chatbot with the specified repository and user configurations, and start the chatbot interface for the user to ask questions about the "MyProject" codebase. ## Questions: - 1. **What is the purpose of the `query` function and what are its input parameters?** + 1. **What is the purpose of the `query` function in this code?** - The `query` function is used to interact with the chatbot, taking user input and providing responses based on the given codebase. It takes two input parameters: an `AutodocRepoConfig` object containing information about the repository, and an `AutodocUserConfig` object containing user-specific configuration. + The `query` function is responsible for handling user interactions with the chatbot. 
It takes in an AutodocRepoConfig object and an AutodocUserConfig object, sets up the necessary data structures, and then enters a loop where it prompts the user for questions, processes them, and displays the results. -2. **How does the `vectorStore` work and what is its role in the code?** +2. **How does the code handle rendering Markdown text in the terminal?** - The `vectorStore` is an instance of HNSWLib loaded with data from the specified output directory and using OpenAIEmbeddings. It is used to store and retrieve vector representations of the codebase, which are then used by the `makeChain` function to generate responses to user questions. + The code uses the `marked` library along with a custom `TerminalRenderer` to render Markdown text in the terminal. The `marked` library is configured with the custom renderer using `marked.setOptions({ renderer: new TerminalRenderer() });`. -3. **How does the chat history work and what is its purpose?** +3. **What is the purpose of the `chatHistory` variable and how is it used?** - The `chatHistory` is an array of string pairs, where each pair represents a user question and the corresponding chatbot response. It is used to store the conversation history between the user and the chatbot, allowing the chatbot to provide context-aware responses based on previous interactions. \ No newline at end of file + The `chatHistory` variable is an array that stores the history of questions and answers in the chat session. It is used to keep track of the conversation between the user and the chatbot. When a new question is asked, the chat history is passed to the `chain.call()` function, and the new question and its corresponding answer are added to the `chatHistory` array. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/query/summary.md b/.autodoc/docs/markdown/src/cli/commands/query/summary.md index 709ec84..d184d42 100644 --- a/.autodoc/docs/markdown/src/cli/commands/query/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/query/summary.md @@ -1,32 +1,34 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/query) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands\query) -The `query` folder in the Autodoc project contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation. +The `query` folder in the Autodoc project contains code for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot is trained on the content of the project and provides answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. -In `createChatChain.ts`, the `makeChain` function is defined, which creates a chatbot using the `ChatVectorDBQAChain` class. This class combines two separate language models: a question generator and a document chain. The question generator is an instance of the `LLMChain` class, which uses the OpenAIChat API to generate standalone questions based on a given conversation history. The document chain is created using the `loadQAChain` function, which takes an instance of the OpenAIChat API and a prompt template as input. +The main entry point for the chatbot is the `query` function in `index.ts`. It takes two arguments: an `AutodocRepoConfig` object containing information about the code repository, and an `AutodocUserConfig` object containing user-specific settings. 
The function initializes a vector store and creates a chat chain using the `makeChain` function from `createChatChain.ts`. -Example usage of `makeChain`: +Here's an example of how the `query` function might be used: ```javascript -const chatbot = makeChain( - "autodoc", - "https://github.com/autodoc/autodoc", - "code", - "", - "developer", - vectorstore, - [gpt3, gpt4], - (token) => console.log(token) -); -``` - -In `index.ts`, the main chatbot interface is defined. It starts by importing necessary libraries and setting up the `marked` library with a custom terminal renderer for displaying Markdown content. The main function, `query`, takes two arguments: `AutodocRepoConfig` and `AutodocUserConfig`. It initializes the `vectorStore` by loading pre-trained embeddings and creates a `chain` object using the `makeChain` function. This chain object is responsible for generating responses based on the user's input. +import { query } from './autodoc'; -The main loop of the chatbot starts by getting the user's question and continues until the user types 'exit'. Inside the loop, the code updates the spinner text to 'Thinking...' and calls the `chain` object with the user's question and chat history. The response is then displayed in Markdown format using the `marked` library. +const repoConfig = { + name: 'MyProject', + repositoryUrl: 'https://github.com/user/myproject', + output: 'path/to/output', + contentType: 'code', + chatPrompt: 'Ask me anything about MyProject', + targetAudience: 'developers', +}; -Example usage of the chatbot interface: +const userConfig = { + llms: 'path/to/llms', +}; -```javascript query(repoConfig, userConfig); ``` -This chatbot interface can be used in the larger Autodoc project to help users navigate and understand the codebase more efficiently by providing a conversational interface for asking questions and receiving answers. 
+This example initializes the chatbot with the specified repository and user configurations and starts the chatbot interface for the user to ask questions about the "MyProject" codebase. + +The `createChatChain.ts` file defines the `makeChain` function, which creates a chatbot for answering questions about a software project. The chatbot is designed to provide conversational answers with hyperlinks back to GitHub, including code examples and links to the examples where appropriate. The target audience for the chatbot is specified by the `targetAudience` parameter. + +The `makeChain` function takes several parameters, such as `projectName`, `repositoryUrl`, `contentType`, `chatPrompt`, `targetAudience`, `vectorstore`, `llms`, and `onTokenStream`. It first creates a question generator using the `LLMChain` class, then creates a `QA_PROMPT` template using the `makeQAPrompt` function, and finally creates and returns a new instance of the `ChatVectorDBQAChain` class, which combines the question generator and the document chain to create a chatbot that can answer questions about the software project. + +In summary, the code in the `query` folder is responsible for creating a chatbot that can answer questions about a specific software project in a conversational manner. The chatbot uses a combination of natural language processing techniques and efficient nearest neighbor search to generate accurate and relevant answers for the user. 
diff --git a/.autodoc/docs/markdown/src/cli/commands/summary.md b/.autodoc/docs/markdown/src/cli/commands/summary.md index 429b49f..4aab10a 100644 --- a/.autodoc/docs/markdown/src/cli/commands/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/summary.md @@ -1,53 +1,101 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands) -The code in the `src/cli/commands` folder is responsible for handling various command-line tasks in the Autodoc project. It contains several subfolders, each dedicated to a specific command or functionality, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. +The code in the `.autodoc\docs\json\src\cli\commands` folder is responsible for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. The folder contains several subfolders, each with a specific purpose. -For instance, the `estimate` subfolder contains a function that allows users to estimate the cost of indexing a given repository before actually processing it. This function takes an `AutodocRepoConfig` object as input and performs a dry run of the `processRepository` function. It then calculates the total estimated cost and displays it to the user. This helps users make informed decisions about whether to proceed with the indexing process or not. +### estimate + +The `estimate` function provides an estimated cost of processing a given repository. It takes an `AutodocRepoConfig` object as input and performs a dry run of the repository processing to calculate the estimated cost. 
Example usage: ```javascript -import { estimate } from './autodoc/estimate'; +import { estimate } from './path/to/this/file'; const config = { - // ...configuration options... + name: 'my-repo', + repositoryUrl: 'https://github.com/user/my-repo.git', + root: './', + output: './output', + llms: ['en'], + ignore: ['.git', 'node_modules'], + filePrompt: true, + folderPrompt: true, + chatPrompt: true, + contentType: 'code', + targetAudience: 'developers', + linkHosted: true, }; estimate(config); ``` -The `index` subfolder contains code for processing a given code repository, generating documentation in JSON and Markdown formats, and creating vector files for the documentation. It provides several functions and utilities to achieve these tasks, such as traversing the file system, calling language models, and converting JSON files to Markdown. - -```javascript -import autodoc from './autodoc'; +### index -const config = { - // ...configuration options... -}; +The code in this folder processes a given repository and generates documentation in JSON, Markdown, and vector formats. It takes an `AutodocRepoConfig` object as input and performs three main tasks: processing the repository, creating Markdown files, and creating vector files. Example usage: -autodoc.index(config); +```javascript +index({ + name: "myProject", + root: "./input", + output: "./output", + filePrompt: true, + folderPrompt: true, + contentType: "code", + targetAudience: "developers", + linkHosted: "https://github.com/user/myProject", +}); ``` -The `init` subfolder is responsible for initializing and configuring the `autodoc` project. It provides an essential function called `init` that creates a configuration file named `autodoc.config.json` with user inputs and default values. +### init + +The `init` function initializes the configuration of the Autodoc project. It prompts the user to input necessary information to set up the project and creates the `autodoc.config.json` file in the project root. 
Example usage: ```javascript -import { init } from './autodoc'; +import { init } from './path/to/this/file'; -(async () => { - await init(); -})(); +// Initialize the configuration with default values +await init(); + +// Initialize the configuration with custom values +await init({ + name: 'My Custom Repository', + repositoryUrl: 'https://github.com/user/repo', +}); ``` -The `query` subfolder contains code for creating a chatbot interface that allows users to ask questions related to a specific codebase and receive answers in a conversational manner. The chatbot uses a language model to generate responses based on the user's input and the codebase documentation. +### query + +The `query` folder contains code for creating a chatbot that can answer questions about a specific software project. The main entry point is the `query` function, which takes an `AutodocRepoConfig` object and an `AutodocUserConfig` object as input. Example usage: ```javascript +import { query } from './autodoc'; + +const repoConfig = { + name: 'MyProject', + repositoryUrl: 'https://github.com/user/myproject', + output: 'path/to/output', + contentType: 'code', + chatPrompt: 'Ask me anything about MyProject', + targetAudience: 'developers', +}; + +const userConfig = { + llms: 'path/to/llms', +}; + query(repoConfig, userConfig); ``` -The `user` subfolder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs). +### user + +The `user` folder manages the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs). 
Example usage: + +```javascript +import { user } from './path/to/this/file'; + +// Create a new user configuration with default settings +await user(); -```typescript -async function user(): Promise { - // ... -} +// Update the user configuration with a custom config object +await user({ llms: [LLMModels.GPT3, LLMModels.GPT4] }); ``` -In summary, the code in the `src/cli/commands` folder plays a crucial role in the Autodoc project by providing various command-line functionalities, such as estimating costs, processing repositories, initializing the project, querying the chatbot, and managing user configurations. These functionalities help developers to easily generate and maintain documentation for their projects, making it more accessible and understandable for other developers and users. +In summary, the code in this folder is essential for various tasks related to the Autodoc project, such as initializing the configuration, processing repositories, generating documentation, and creating a chatbot for answering questions about a specific software project. diff --git a/.autodoc/docs/markdown/src/cli/commands/user/index.md b/.autodoc/docs/markdown/src/cli/commands/user/index.md index 6fcef0b..682866a 100644 --- a/.autodoc/docs/markdown/src/cli/commands/user/index.md +++ b/.autodoc/docs/markdown/src/cli/commands/user/index.md @@ -1,26 +1,37 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/commands/user/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\commands\user\index.ts) -This code is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K. +This code is responsible for managing the user configuration for the Autodoc project. 
It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K. -The `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user. +The `makeConfigTemplate` function is used to create a default configuration object with the provided `config` parameter or with GPT-3 as the default LLM. This function is used to generate a new configuration object when needed. -The `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. +The main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. -If the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options: +If the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code. -1. GPT-3.5 Turbo -2. GPT-3.5 Turbo, GPT-4 8K (Early Access) -3. 
GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access) +Next, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function. -After the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format. +Finally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command. -Finally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command. +Example usage: + +```javascript +import { user } from './path/to/this/file'; + +// Create a new user configuration with default settings +await user(); + +// Update the user configuration with a custom config object +await user({ llms: [LLMModels.GPT3, LLMModels.GPT4] }); +``` ## Questions: - 1. **Question:** What is the purpose of the `makeConfigTemplate` function and what does it return? - **Answer:** The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. It takes an optional `config` parameter of type `AutodocUserConfig` and returns a new configuration object with the `llms` property set to the provided value or a default value of `[LLMModels.GPT3]`. + 1. **What is the purpose of the `makeConfigTemplate` function?** + + The `makeConfigTemplate` function is used to create a default configuration object for the Autodoc user. 
It takes an optional `config` parameter and returns an object with a `llms` property, which is an array of LLM models. + +2. **How does the `user` function handle existing user configuration files?** + + The `user` function checks if a user configuration file already exists using `fsSync.existsSync`. If it does, the user is prompted with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits with a status code of 0. -2. **Question:** How does the `user` function handle existing user configuration files? - **Answer:** The `user` function checks if a user configuration file already exists at the `userConfigFilePath`. If it does, the function prompts the user with a confirmation message to overwrite the existing configuration. If the user chooses not to overwrite, the process exits; otherwise, the function proceeds to create a new configuration. +3. **What are the available choices for LLM models in the `user` function?** -3. **Question:** What are the available choices for the LLMs in the `user` function, and how are they used to create the new configuration? - **Answer:** The available choices for LLMs are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the corresponding LLM models will be set as the value of the `llms` property in the new configuration object. \ No newline at end of file + The available choices for LLM models are GPT-3.5 Turbo, GPT-3.5 Turbo and GPT-4 8K (Early Access), and GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user can select one of these options, and the selected value is stored in the `llms` property of the new configuration object. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/commands/user/summary.md b/.autodoc/docs/markdown/src/cli/commands/user/summary.md index 349d5a6..8e74a3c 100644 --- a/.autodoc/docs/markdown/src/cli/commands/user/summary.md +++ b/.autodoc/docs/markdown/src/cli/commands/user/summary.md @@ -1,40 +1,29 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/commands/user) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\commands\user) -The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It provides a way to create, update, and save the user configuration file, which stores information about the user's access to different Language Learning Models (LLMs) such as GPT-3.5 Turbo, GPT-4 8K, and GPT-4 32K. +The `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project. It allows users to create, update, and save their configuration file, which stores information about their access to different Language Learning Models (LLMs) such as GPT-3, GPT-4, and GPT-4 32K. -The `makeConfigTemplate` function is used to create a default configuration object with the specified LLMs or default to GPT-3.5 Turbo if none are provided. This function is used to generate the initial configuration object for the user. +The `makeConfigTemplate` function creates a default configuration object with either the provided `config` parameter or GPT-3 as the default LLM. This function is useful for generating a new configuration object when needed. -```typescript -function makeConfigTemplate(llms: string[]): ConfigTemplate { - // ... -} -``` +The main function, `user`, is an asynchronous function that takes an optional `config` parameter. It first checks if a user configuration file already exists at the `userConfigFilePath`. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. -The `user` function is an asynchronous function that handles the user configuration process. It first checks if a user configuration file already exists. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. +If the user configuration file does not exist, the code attempts to create the necessary directories for the file. If there's an error during this process, it logs the error and exits with a non-zero status code. -```typescript -async function user(): Promise { - // ... -} -``` +Next, the user is prompted to select which LLMs they have access to. The available options are GPT-3.5 Turbo, GPT-3.5 Turbo with GPT-4 8K (Early Access), and GPT-3.5 Turbo with GPT-4 8K and GPT-4 32K (Early Access). The user's selection is then used to create a new configuration object using the `makeConfigTemplate` function. -If the user decides to continue or if no configuration file exists, the function proceeds to create the necessary directories for the configuration file. It then prompts the user to select the LLMs they have access to using the `inquirer` library. The user can choose from three options: +Finally, the new configuration object is written to the user configuration file in JSON format. A success message is displayed to the user, indicating that the configuration has been saved and they can start querying using the `doc q` command. -1. GPT-3.5 Turbo -2. GPT-3.5 Turbo, GPT-4 8K (Early Access) -3. GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access) +This code is essential for the Autodoc project as it allows users to manage their access to different LLMs and store their preferences in a configuration file. 
This configuration file can then be used by other parts of the project to determine which LLMs the user has access to and tailor the querying process accordingly. -After the user makes their selection, the new configuration object is created using the `makeConfigTemplate` function with the selected LLMs. The configuration object is then saved to the user configuration file in JSON format. +Example usage: -```typescript -const configTemplate = makeConfigTemplate(selectedLLMs); -await fs.promises.writeFile(configPath, JSON.stringify(configTemplate, null, 2)); -``` +```javascript +import { user } from './path/to/this/file'; -Finally, the user is informed that the configuration has been saved and they can start querying by running the `doc q` command. +// Create a new user configuration with default settings +await user(); -This code is essential for setting up the user's environment and preferences for the Autodoc project. It ensures that the user has the correct configuration file in place, which is necessary for the proper functioning of the project. The user configuration file is used by other parts of the project to determine which LLMs the user has access to and can query. - -For example, when a user runs the `doc q` command, the project will read the user configuration file to determine which LLMs are available for querying. This ensures that the user only queries the LLMs they have access to, preventing any unauthorized access or usage. +// Update the user configuration with a custom config object +await user({ llms: [LLMModels.GPT3, LLMModels.GPT4] }); +``` -In summary, the `index.ts` file in the `user` folder is responsible for managing the user configuration for the Autodoc project, ensuring that the user has the correct configuration file in place, and allowing the user to select the LLMs they have access to. This is essential for the proper functioning of the project and for maintaining the user's preferences and access to different LLMs. 
+In summary, the `index.ts` file in the `user` folder is a crucial part of the Autodoc project, allowing users to manage their LLM access and preferences. This configuration is then used by other parts of the project to provide a tailored experience based on the user's access to different LLMs. diff --git a/.autodoc/docs/markdown/src/cli/spinner.md b/.autodoc/docs/markdown/src/cli/spinner.md index 1d7643d..c8e8b15 100644 --- a/.autodoc/docs/markdown/src/cli/spinner.md +++ b/.autodoc/docs/markdown/src/cli/spinner.md @@ -1,39 +1,56 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/spinner.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\spinner.ts) -This code provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages. +This code is responsible for managing a spinner, which is a visual element that indicates a process is running in the background. The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces. -The `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style. +The code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time. -The `updateSpinnerText` function is used to update the spinner's text. 
If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example: +There are several functions exported by this module to interact with the spinner: -```javascript -updateSpinnerText('Loading data...'); -``` +1. `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message. -The `stopSpinner` function stops the spinner if it is currently spinning: + Example usage: + ```javascript + updateSpinnerText('Loading data...'); + ``` -```javascript -stopSpinner(); -``` +2. `stopSpinner()`: This function stops the spinner if it is currently spinning. -The `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.): + Example usage: + ```javascript + stopSpinner(); + ``` -```javascript -spinnerError('An error occurred'); -spinnerSuccess('Operation completed successfully'); -spinnerInfo('Please wait...'); -``` +3. `spinnerError(message?: string)`: This function stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning. -In the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes. + Example usage: + ```javascript + spinnerError('Failed to load data'); + ``` + +4. `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning. 
+ + Example usage: + ```javascript + spinnerSuccess('Data loaded successfully'); + ``` + +5. `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state. + + Example usage: + ```javascript + spinnerInfo('Connecting to server...'); + ``` + +In the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages. ## Questions: 1. **What is the purpose of the `ora` package in this code?** - The `ora` package is used to create a spinner in the terminal, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style. + The `ora` package is used to create a spinner in the command line interface, providing a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style. -2. **What are the different states of the spinner and how are they updated?** +2. **How does the `updateSpinnerText` function work?** - The spinner can have different states such as spinning, stopped, failed, succeeded, and displaying information. The functions `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` are used to update the spinner's state and text accordingly. + The `updateSpinnerText` function takes a message as an input and updates the spinner's text with the given message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. -3. **How does the `updateSpinnerText` function work and when should it be used?** +3. 
**What are the differences between `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions?** - The `updateSpinnerText` function updates the spinner's text with the provided message. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the new message. This function should be used when you want to change the spinner's text while it is spinning or start it with a new message. \ No newline at end of file + These functions are used to update the spinner's state and message based on the outcome of a process. `spinnerError` is called when there is an error, and it stops the spinner with a failure message. `spinnerSuccess` is called when the process is successful, and it stops the spinner with a success message. `spinnerInfo` is used to display an informational message without stopping the spinner. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/summary.md b/.autodoc/docs/markdown/src/cli/summary.md index d36d7bf..29a248f 100644 --- a/.autodoc/docs/markdown/src/cli/summary.md +++ b/.autodoc/docs/markdown/src/cli/summary.md @@ -1,27 +1,42 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli) -The `spinner.ts` file in the `.autodoc/docs/json/src/cli` folder provides a utility for managing a command-line spinner using the `ora` library. The spinner is a visual indicator that displays a series of characters in a loop, giving the user feedback that a process is running in the background. The code exports several functions to control the spinner's behavior, such as updating the text, stopping the spinner, and displaying success, error, or informational messages. +The code in the `spinner.ts` file, located in the `.autodoc\docs\json\src\cli` folder, is responsible for managing a spinner, a visual element that indicates a background process is running. 
The spinner is created using the `ora` library, which provides a simple and customizable way to create spinners for command-line interfaces. -The `spinner` object is created as a singleton to ensure that there is only one instance of the spinner at any given time. This prevents multiple spinners from being displayed simultaneously, which could cause confusion for the user. The spinner is configured to use the 'dots' style. +The module exports several functions to interact with the spinner: -The `updateSpinnerText` function is used to update the spinner's text. If the spinner is already spinning, it updates the text directly; otherwise, it starts the spinner with the given message. For example: +1. `updateSpinnerText(message: string)`: Updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message. -```javascript -updateSpinnerText('Loading data...'); -``` + Example usage: + ```javascript + updateSpinnerText('Loading data...'); + ``` -The `stopSpinner` function stops the spinner if it is currently spinning: +2. `stopSpinner()`: Stops the spinner if it is currently spinning. -```javascript -stopSpinner(); -``` + Example usage: + ```javascript + stopSpinner(); + ``` -The `spinnerError`, `spinnerSuccess`, and `spinnerInfo` functions are used to display error, success, and informational messages, respectively. These functions first check if the spinner is spinning and then call the appropriate `ora` method to display the message with the corresponding status symbol (e.g., a red cross for errors, a green checkmark for success, etc.): +3. `spinnerError(message?: string)`: Stops the spinner and marks it as failed with an optional error message. It only takes effect if the spinner is currently spinning. 
-```javascript -spinnerError('An error occurred'); -spinnerSuccess('Operation completed successfully'); -spinnerInfo('Please wait...'); -``` + Example usage: + ```javascript + spinnerError('Failed to load data'); + ``` -In the larger project, this utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes. +4. `spinnerSuccess(message?: string)`: Stops the spinner and marks it as successful with an optional success message. It only takes effect if the spinner is currently spinning. + + Example usage: + ```javascript + spinnerSuccess('Data loaded successfully'); + ``` + +5. `spinnerInfo(message: string)`: Displays an informational message without affecting the spinner's state. + + Example usage: + ```javascript + spinnerInfo('Connecting to server...'); + ``` + +In the larger project, this module can be used to provide visual feedback to users when a background process is running, such as loading data, connecting to a server, or performing a complex calculation. By using the exported functions, developers can easily update the spinner's text, stop it, or change its state to indicate success, failure, or display informational messages. diff --git a/.autodoc/docs/markdown/src/cli/utils/APIRateLimit.md b/.autodoc/docs/markdown/src/cli/utils/APIRateLimit.md index 79d41f4..628688c 100644 --- a/.autodoc/docs/markdown/src/cli/utils/APIRateLimit.md +++ b/.autodoc/docs/markdown/src/cli/utils/APIRateLimit.md @@ -1,34 +1,28 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/APIRateLimit.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\utils\APIRateLimit.ts) -The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. 
This is useful in situations where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. +The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in situations where the API being called has a rate limit or when the application needs to prevent overwhelming the server with too many requests at once. -The class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently. +The class constructor takes an optional parameter `maxConcurrentCalls`, which defaults to 50, to set the maximum number of concurrent API calls allowed. It maintains a queue of API calls and keeps track of the number of calls in progress. -The main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a promise and wraps it in a rate-limited execution. The method returns a promise that resolves with the result of the API call or rejects with an error if the call fails. +The main method of this class is `callApi(apiFunction: () => Promise): Promise`. It takes a function `apiFunction` that returns a promise and wraps it in a new promise. The purpose of this wrapping is to control the execution of the API calls and ensure that they do not exceed the specified rate limit. -When `callApi` is called, it adds the `executeCall` function to the `queue`. The `executeCall` function is responsible for executing the API call, resolving or rejecting the promise, and managing the `inProgress` counter. After adding the `executeCall` function to the queue, the code checks if there are available slots for concurrent calls by comparing `inProgress` with `maxConcurrentCalls`. 
If there are available slots, it calls the `dequeueAndExecute` method. +When `callApi` is called, the provided `apiFunction` is added to the queue and the `dequeueAndExecute` method is triggered if there are available slots for concurrent calls. The `dequeueAndExecute` method checks if there are any API calls in the queue and if the number of in-progress calls is below the maximum limit. If both conditions are met, it dequeues the next API call and executes it. -The `dequeueAndExecute` method is responsible for executing the queued API calls while ensuring that the number of concurrent calls does not exceed the `maxConcurrentCalls` limit. It dequeues the next API call from the queue and executes it if there are available slots for concurrent calls. +The `executeCall` function inside `callApi` is responsible for actually calling the API function, resolving or rejecting the promise based on the result, and updating the number of in-progress calls. Once an API call is completed, the `dequeueAndExecute` method is called again to process any remaining calls in the queue. Here's an example of how this class can be used in the larger project: ```javascript const apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls -async function fetchData(id) { - // Simulate an API call - return new Promise((resolve) => setTimeout(() => resolve(`Data for ${id}`), 1000)); +async function fetchSomeData(id) { + // Call the API using the rate limiter + const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`)); + return result; } - -async function getData(id) { - return apiRateLimiter.callApi(() => fetchData(id)); -} - -// Usage -getData(1).then(console.log); // Fetches data for ID 1, rate-limited ``` -In this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetchData` function, which simulates an API call. 
+In this example, the `APIRateLimit` class is used to limit the number of concurrent calls to the `fetch` function, ensuring that no more than 10 calls are made at once. ## Questions: 1. **What is the purpose of the `APIRateLimit` class?** @@ -36,8 +30,8 @@ In this example, the `APIRateLimit` class is used to limit the number of concurr 2. **How does the `callApi` method work and what is its return type?** - The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`. + The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and executes it when there are available slots for concurrent calls. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`. -3. **How does the `dequeueAndExecute` method work?** +3. **How can the maximum number of concurrent calls be configured?** - The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if there are available slots for concurrent calls. If both conditions are met, it dequeues the next call from the queue and executes it. This method is called whenever a new API call is added to the queue or when an in-progress call is completed. \ No newline at end of file + The maximum number of concurrent calls can be configured by passing a value to the `maxConcurrentCalls` parameter in the constructor of the `APIRateLimit` class. If no value is provided, the default value is set to 50. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/utils/FileUtil.md b/.autodoc/docs/markdown/src/cli/utils/FileUtil.md index 2f213f0..c2b665c 100644 --- a/.autodoc/docs/markdown/src/cli/utils/FileUtil.md +++ b/.autodoc/docs/markdown/src/cli/utils/FileUtil.md @@ -1,38 +1,44 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/FileUtil.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\utils\FileUtil.ts) -This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for files and folders. +This code provides utility functions for handling file and folder paths in the autodoc project. The main purpose of these functions is to generate file names and GitHub URLs for documentation files. -1. `getFileName(input: string, delimiter = '.', extension = '.md'): string`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new file name with the given extension. If the delimiter is not found in the input string, the function appends the extension to the input string. If the delimiter is found, the function replaces the part after the last delimiter with the extension. For example: +1. `getFileName(input, delimiter, extension)`: This function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns a new string with the given extension. If the delimiter is found in the input string, the function removes the part of the string after the last occurrence of the delimiter and appends the extension. If the delimiter is not found, the function simply appends the extension to the input string. This function can be used to generate file names for documentation files with the desired extension. 
- ```javascript - getFileName("example.txt"); // returns "example.md" - getFileName("example"); // returns "example.md" + Example usage: + + ``` + getFileName('example.txt'); // returns 'example.md' + getFileName('example', '_', '.html'); // returns 'example.html' ``` -2. `githubFileUrl(githubRoot: string, inputRoot: string, filePath: string, linkHosted: boolean): string`: This function generates a GitHub URL for a file. It takes the GitHub root URL, the input root path, the file path, and a boolean flag `linkHosted`. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the file. If `linkHosted` is false, the function returns a URL pointing to the file in the GitHub repository. For example: +2. `githubFileUrl(githubRoot, inputRoot, filePath, linkHosted)`: This function generates a GitHub URL for a file. It takes the GitHub repository root URL, the input root folder path, the file path, and a boolean flag indicating whether the URL should be for the hosted version of the file or the source code. It returns a string with the generated URL. + + Example usage: - ```javascript - githubFileUrl("https://github.com/user/repo", "/input", "/input/example.md", true); // returns "https://github.com/user/repo/example.md" - githubFileUrl("https://github.com/user/repo", "/input", "/input/example.md", false); // returns "https://github.com/user/repo/blob/master/example.md" ``` + githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); + // returns 'https://github.com/user/repo/example.md' + ``` + +3. `githubFolderUrl(githubRoot, inputRoot, folderPath, linkHosted)`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. It takes the same arguments as `githubFileUrl` and returns a string with the generated URL. -3. 
`githubFolderUrl(githubRoot: string, inputRoot: string, folderPath: string, linkHosted: boolean): string`: This function is similar to `githubFileUrl`, but it generates a GitHub URL for a folder instead of a file. If `linkHosted` is true, the function returns a URL pointing to the hosted version of the folder. If `linkHosted` is false, the function returns a URL pointing to the folder in the GitHub repository. For example: + Example usage: - ```javascript - githubFolderUrl("https://github.com/user/repo", "/input", "/input/folder", true); // returns "https://github.com/user/repo/folder" - githubFolderUrl("https://github.com/user/repo", "/input", "/input/folder", false); // returns "https://github.com/user/repo/tree/master/folder" + ``` + githubFolderUrl('https://github.com/user/repo', '/input', '/input/folder', true); + // returns 'https://github.com/user/repo/folder' ``` -These utility functions can be used in the autodoc project to generate file names and URLs for documentation files and folders, making it easier to manage and navigate the documentation structure. +These utility functions can be used throughout the autodoc project to generate file names and GitHub URLs for documentation files and folders, ensuring consistent naming and URL generation across the project. ## Questions: - 1. **What does the `getFileName` function do?** + 1. **What is the purpose of the `getFileName` function?** - The `getFileName` function takes an input string, an optional delimiter (default is '.'), and an optional extension (default is '.md'). It returns the input string with the specified extension, replacing the part after the last occurrence of the delimiter if it exists. + The `getFileName` function takes an input string, an optional delimiter, and an optional extension, and returns a new string with the given extension. If the delimiter is not found in the input string, the extension is simply appended to the input string. 
If the delimiter is found, the input string is sliced up to the last delimiter index and the extension is appended. -2. **What is the purpose of the `githubFileUrl` and `githubFolderUrl` functions?** +2. **What are the differences between the `githubFileUrl` and `githubFolderUrl` functions?** - Both `githubFileUrl` and `githubFolderUrl` functions are used to generate URLs for files and folders, respectively, in a GitHub repository. They take a `githubRoot`, `inputRoot`, a `filePath` or `folderPath`, and a `linkHosted` boolean flag. If `linkHosted` is true, the generated URL will point to the hosted version of the file or folder; otherwise, it will point to the file or folder in the GitHub repository. + Both functions take the same parameters: `githubRoot`, `inputRoot`, a path (either `filePath` or `folderPath`), and a `linkHosted` boolean. The main difference is in the returned URL: `githubFileUrl` returns a URL pointing to a file in the GitHub repository, while `githubFolderUrl` returns a URL pointing to a folder in the GitHub repository. The URL structure differs slightly, with `/blob/master/` for files and `/tree/master/` for folders. -3. **Why is the `inputRoot.length - 1` used in the `substring` method for both `githubFileUrl` and `githubFolderUrl` functions?** +3. **What is the purpose of the `linkHosted` parameter in the `githubFileUrl` and `githubFolderUrl` functions?** - The `inputRoot.length - 1` is used to remove the `inputRoot` part from the `filePath` or `folderPath` when generating the final URL. This ensures that the generated URL only contains the relevant path relative to the GitHub repository root. \ No newline at end of file + The `linkHosted` parameter is a boolean that determines whether the returned URL should point to the hosted version of the file or folder on GitHub Pages (if `true`) or to the file or folder within the GitHub repository itself (if `false`). 
Depending on the value of `linkHosted`, the functions will return different URL structures. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/utils/LLMUtil.md b/.autodoc/docs/markdown/src/cli/utils/LLMUtil.md index b307c9d..aa0ca5c 100644 --- a/.autodoc/docs/markdown/src/cli/utils/LLMUtil.md +++ b/.autodoc/docs/markdown/src/cli/utils/LLMUtil.md @@ -1,35 +1,41 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/LLMUtil.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\utils\LLMUtil.ts) -This code defines and manages different language models (LLMs) and their associated costs for a project. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file. +This code defines and manages different language models (LLMs) and their associated costs for a project that utilizes OpenAI's GPT models. It imports the `OpenAIChat` class from the `langchain/llms` module and the `LLMModelDetails` and `LLMModels` types from the `../../types.js` file. -The `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has a set of properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of `OpenAIChat` with specific configurations. The `inputTokens`, `outputTokens`, `succeeded`, `failed`, and `total` properties are initialized to 0. +The `models` object contains three LLMs: GPT3, GPT4, and GPT432k. Each model has its own properties, such as `name`, `inputCostPer1KTokens`, `outputCostPer1KTokens`, `maxLength`, and an instance of the `OpenAIChat` class with the respective model name and API key. Additionally, each model has counters for input tokens, output tokens, succeeded, failed, and total files processed. + +The `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. 
It calculates the total cost for each model based on the input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models. + +The `totalIndexCostEstimate` function calculates the total cost of indexing all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number. + +These functions can be used in the larger project to manage and analyze the usage and costs of different LLMs. For example, the `printModelDetails` function can be called to display a summary of the models' usage and costs: ```javascript -{ - name: LLMModels.GPT3, - inputCostPer1KTokens: 0.002, - outputCostPer1KTokens: 0.002, - maxLength: 3050, - llm: new OpenAIChat({ ... }), - inputTokens: 0, - outputTokens: 0, - succeeded: 0, - failed: 0, - total: 0, -} +import { models, printModelDetails } from './path/to/this/file'; + +// Process files with models... +// Update models' properties... + +printModelDetails(Object.values(models)); ``` -The `printModelDetails` function takes an array of `LLMModelDetails` and prints a summary table to the console. It calculates the total cost for each model based on the number of input and output tokens and their respective costs per 1,000 tokens. It also calculates the total file count, succeeded, failed, tokens, and cost across all models. +And the `totalIndexCostEstimate` function can be used to estimate the total cost of indexing all models: + +```javascript +import { models, totalIndexCostEstimate } from './path/to/this/file'; -The `totalIndexCostEstimate` function calculates the total cost for all models in the input array. It uses the same cost calculation as in `printModelDetails` but returns the total cost as a number. +// Process files with models... +// Update models' properties... 
-These functions can be used in the larger project to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models. +const totalCost = totalIndexCostEstimate(Object.values(models)); +console.log(`Total cost: ${totalCost}`); +``` ## Questions: - 1. **Question**: What is the purpose of the `models` object and what are the different models available? - **Answer**: The `models` object is a record that maps the available LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, input and output costs, maxLength, and an instance of OpenAIChat with the corresponding model. + 1. **Question:** What is the purpose of the `models` object and how are the different GPT models being used? + **Answer:** The `models` object is a record that maps different GPT models (GPT3, GPT4, and GPT432k) to their respective details, such as cost per tokens, maximum length, and an instance of `OpenAIChat` with the corresponding model configuration. -2. **Question**: How does the `printModelDetails` function work and what information does it display? - **Answer**: The `printModelDetails` function takes an array of LLMModelDetails and generates an output object containing the model name, file count, succeeded, failed, tokens, and cost. It then calculates the totals for each property and displays the information in a console table. +2. **Question:** How does the `printModelDetails` function work and what information does it display? + **Answer:** The `printModelDetails` function takes an array of `LLMModelDetails` as input, processes the information for each model, and then prints a summary table to the console. The table includes the model name, file count, succeeded and failed counts, total tokens, and cost. -3. 
**Question**: What is the purpose of the `totalIndexCostEstimate` function and how does it calculate the total cost? - **Answer**: The `totalIndexCostEstimate` function calculates the total cost of indexing the given models by iterating through the models array and summing up the input and output costs per 1K tokens for each model. \ No newline at end of file +3. **Question:** What is the purpose of the `totalIndexCostEstimate` function and how is it calculating the total cost? + **Answer:** The `totalIndexCostEstimate` function calculates the total cost of processing the given models by iterating through the input `models` array and summing up the costs based on the input and output tokens and their respective costs per 1K tokens. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/utils/WaitUtil.md b/.autodoc/docs/markdown/src/cli/utils/WaitUtil.md index aa64ac5..001b655 100644 --- a/.autodoc/docs/markdown/src/cli/utils/WaitUtil.md +++ b/.autodoc/docs/markdown/src/cli/utils/WaitUtil.md @@ -1,44 +1,57 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/WaitUtil.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\utils\WaitUtil.ts) -The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, which is a JavaScript object that represents the eventual completion (or failure) of an asynchronous operation and its resulting value. +The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger project. Both functions return a `Promise`, making them suitable for use with `async/await` syntax. 
-### wait function +### wait -The `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the promise, and `value`, which is an optional value to be returned when the promise resolves. The function creates a new `Promise` and uses `setTimeout` to resolve it with the given `value` after the specified `timeoutMs` has passed. +The `wait` function takes two arguments: `timeoutMs`, a number representing the desired waiting time in milliseconds, and an optional `value` that defaults to `null`. It returns a `Promise` that resolves with the provided `value` after the specified `timeoutMs` has elapsed. This function can be used to introduce a delay in the execution of asynchronous code. Example usage: ```javascript -// Wait for 2 seconds and then log "Hello, world!" -wait(2000, "Hello, world!").then(console.log); +async function delayedEcho() { + console.log("Start"); + await wait(1000, "Hello"); + console.log("End"); +} + +delayedEcho(); // Output: Start -> (1 second delay) -> End ``` -### forTrue function +### forTrue -The `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check if the given function `fn` returns `true`. If it does, the promise resolves with `true`. If the function does not return `true` after 200 checks, the promise is rejected. +The `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. It returns a `Promise` that resolves with `true` when the provided function `fn` returns `true`. The function `fn` is checked every 50 milliseconds, up to a maximum of 200 times (i.e., 10 seconds). If `fn` does not return `true` within this time, the `Promise` is rejected. -The function uses `setInterval` to repeatedly call the given function `fn` every 50 milliseconds. If `fn` returns `true`, the interval is cleared, and the promise is resolved. 
If the function has been called 200 times without returning `true`, the promise is rejected. +This function can be used to wait for a specific condition to be met before continuing the execution of asynchronous code. Example usage: ```javascript -// Check if a certain element is visible on the page -const isElementVisible = () => document.querySelector("#my-element").offsetParent !== null; +let condition = false; + +setTimeout(() => { + condition = true; +}, 3000); + +async function waitForCondition() { + console.log("Waiting for condition..."); + await forTrue(() => condition); + console.log("Condition met!"); +} -// Wait for the element to become visible, then log "Element is visible!" -forTrue(isElementVisible).then(() => console.log("Element is visible!")); +waitForCondition(); // Output: Waiting for condition... -> (3 second delay) -> Condition met! ``` -In summary, these utility functions help manage asynchronous operations by providing a way to wait for a certain amount of time or for a specific condition to be met. They can be used in various parts of the larger project to handle timing and conditional logic in an asynchronous manner. +In summary, this file provides two utility functions that help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used in the larger project to control the flow of asynchronous code execution. ## Questions: 1. **What is the purpose of the `wait` function?** - The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds. It can be used to introduce a delay in the execution of asynchronous code. + The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise is resolved. -2. **How does the `forTrue` function work and what is its use case?** +2. 
**How does the `forTrue` function work?** - The `forTrue` function takes a function `fn` as an argument, which returns a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds until it returns `true` or the maximum number of checks (200) is reached. This function can be used to wait for a specific condition to be met before proceeding with the execution of asynchronous code. + The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It checks the result of `fn` every 50 milliseconds and resolves the promise when `fn` returns `true`. If `fn` does not return `true` after 200 attempts, the promise is rejected. -3. **Is there any error handling or customization for the `forTrue` function, such as customizing the interval or maximum number of checks?** +3. **What is the use case for the `forTrue` function?** - Currently, there is no error handling or customization options for the `forTrue` function. The interval is hardcoded to 50 milliseconds, and the maximum number of checks is hardcoded to 200. To add customization, additional parameters could be added to the function signature and used in the implementation. \ No newline at end of file + The `forTrue` function can be used to wait for a certain condition to be met before proceeding with the execution of the code. This can be useful in situations where you need to wait for an asynchronous operation to complete or a specific state to be reached before continuing. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/src/cli/utils/summary.md b/.autodoc/docs/markdown/src/cli/utils/summary.md index 3815d86..8f3926f 100644 --- a/.autodoc/docs/markdown/src/cli/utils/summary.md +++ b/.autodoc/docs/markdown/src/cli/utils/summary.md @@ -1,48 +1,59 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/cli/utils) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\cli\utils) -The code in the `.autodoc/docs/json/src/cli/utils` folder provides utility functions and classes that help manage various aspects of the autodoc project, such as rate-limiting API calls, handling file and folder paths, managing language models, and traversing file systems. +The `.autodoc\docs\json\src\cli\utils` folder contains utility functions and classes that assist in managing API rate limits, handling file and folder paths, managing language models, traversing file systems, and controlling asynchronous operations. These utilities can be used throughout the autodoc project to ensure consistent behavior and improve code organization. -`APIRateLimit.ts` contains the `APIRateLimit` class, which is designed to manage and limit the number of concurrent API calls made by the application. This is useful when the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. For example: +`APIRateLimit.ts` provides the `APIRateLimit` class, which manages and limits the number of concurrent API calls made by the application. This is useful when working with rate-limited APIs or preventing server overload. 
Example usage: ```javascript const apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls -async function getData(id) { - return apiRateLimiter.callApi(() => fetchData(id)); +async function fetchSomeData(id) { + const result = await apiRateLimiter.callApi(() => fetch(`https://api.example.com/data/${id}`)); + return result; } -getData(1).then(console.log); // Fetches data for ID 1, rate-limited ``` -`FileUtil.ts` provides utility functions for handling file and folder paths, such as generating file names and GitHub URLs for files and folders. These functions can be used to manage and navigate the documentation structure. For example: +`FileUtil.ts` offers utility functions for generating file names and GitHub URLs for documentation files. These functions ensure consistent naming and URL generation across the project. Example usage: ```javascript -getFileName("example.txt"); // returns "example.md" -githubFileUrl("https://github.com/user/repo", "/input", "/input/example.md", true); // returns "https://github.com/user/repo/example.md" +getFileName('example.txt'); // returns 'example.md' +githubFileUrl('https://github.com/user/repo', '/input', '/input/example.md', true); // returns 'https://github.com/user/repo/example.md' ``` -`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project. It provides functions like `printModelDetails` and `totalIndexCostEstimate` to manage and analyze the usage and costs of different language models. For example, the `printModelDetails` function can provide a summary of the project's LLM usage, while the `totalIndexCostEstimate` function can help estimate the overall cost of using these models. - -`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations in the larger project. They can be used in various parts of the project to handle timing and conditional logic in an asynchronous manner. 
For example: +`LLMUtil.ts` defines and manages different language models (LLMs) and their associated costs for a project utilizing OpenAI's GPT models. Functions like `printModelDetails` and `totalIndexCostEstimate` can be used to manage and analyze the usage and costs of different LLMs. Example usage: ```javascript -wait(2000, "Hello, world!").then(console.log); // Waits for 2 seconds and then logs "Hello, world!" -forTrue(isElementVisible).then(() => console.log("Element is visible!")); // Waits for an element to become visible, then logs "Element is visible!" +import { models, printModelDetails } from './path/to/this/file'; +printModelDetails(Object.values(models)); +const totalCost = totalIndexCostEstimate(Object.values(models)); +console.log(`Total cost: ${totalCost}`); ``` -`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. It is designed to be used for processing and generating documentation for a given project. For example: +`traverseFileSystem.ts` contains the `traverseFileSystem` function, which recursively traverses a given file system, processing files and folders based on provided parameters. This is useful for generating documentation or performing tasks that require processing files and folders in a directory structure. 
Example usage: ```javascript -const params = { - inputPath: './myProject', - projectName: 'My Project', +await traverseFileSystem({ + inputPath: './src', + projectName: 'myProject', + processFile: (params) => { /* Process file logic */ }, + processFolder: (params) => { /* Process folder logic */ }, ignore: ['node_modules/**', '.git/**'], - processFile: async (fileInfo) => { - // Process the file, e.g., generate documentation - }, - processFolder: async (folderInfo) => { - // Process the folder, e.g., create a folder in the output directory - }, -}; -traverseFileSystem(params); +}); +``` + +`WaitUtil.ts` provides two utility functions, `wait` and `forTrue`, which help manage asynchronous operations by introducing delays and waiting for specific conditions to be met. These functions can be used to control the flow of asynchronous code execution. Example usage: + +```javascript +async function delayedEcho() { + console.log("Start"); + await wait(1000, "Hello"); + console.log("End"); +} + +async function waitForCondition() { + console.log("Waiting for condition..."); + await forTrue(() => condition); + console.log("Condition met!"); +} ``` -In summary, the code in this folder provides various utility functions and classes that help manage different aspects of the autodoc project, making it easier to handle tasks such as rate-limiting, file and folder management, language model management, asynchronous operations, and file system traversal. +In summary, the utilities in this folder enhance the autodoc project by providing consistent behavior, improving code organization, and managing various aspects of the project, such as API rate limits, file and folder paths, language models, file system traversal, and asynchronous operations. 
diff --git a/.autodoc/docs/markdown/src/cli/utils/traverseFileSystem.md b/.autodoc/docs/markdown/src/cli/utils/traverseFileSystem.md index 573fcf0..e8c62aa 100644 --- a/.autodoc/docs/markdown/src/cli/utils/traverseFileSystem.md +++ b/.autodoc/docs/markdown/src/cli/utils/traverseFileSystem.md @@ -1,54 +1,49 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/cli/utils/traverseFileSystem.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\cli\utils\traverseFileSystem.ts) -The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processes folders and files, and filters out ignored files based on provided patterns. It is designed to be used in the larger project for processing and generating documentation for a given project. +The `traverseFileSystem` function in this code is an asynchronous function that recursively traverses a given file system, processing files and folders based on the provided parameters. It is designed to be used in the larger project for generating documentation or performing other tasks that require processing files and folders in a directory structure. -The function takes an object of type `TraverseFileSystemParams` as its input, which contains the following properties: +The function takes an object of type `TraverseFileSystemParams` as its input, which contains various properties to control the traversal and processing behavior. These properties include: -- `inputPath`: The root folder path to start traversing. -- `projectName`: The name of the project being documented. -- `processFile`: An optional callback function to process files. -- `processFolder`: An optional callback function to process folders. -- `ignore`: An array of patterns to ignore files and folders. -- `filePrompt`: An optional prompt for processing files. -- `folderPrompt`: An optional prompt for processing folders. -- `contentType`: The type of content being processed. 
-- `targetAudience`: The target audience for the documentation. -- `linkHosted`: A flag indicating if the documentation should be linked to a hosted version. +- `inputPath`: The root path to start the traversal from. +- `projectName`: The name of the project being processed. +- `processFile`: An optional callback function to process a file. +- `processFolder`: An optional callback function to process a folder. +- `ignore`: An array of patterns to ignore during traversal. +- `filePrompt`, `folderPrompt`: Optional prompts for user interaction. +- `contentType`, `targetAudience`, `linkHosted`: Additional metadata for processing. -The function first checks if the provided `inputPath` exists. If not, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns. +The function first checks if the provided `inputPath` exists using `fs.access`. If the path does not exist, it logs an error message and returns. It then defines a helper function `shouldIgnore` that checks if a given file or folder should be ignored based on the `ignore` patterns. -The main logic of the function is implemented in the `dfs` (depth-first search) function, which recursively traverses the file system. It reads the contents of the current folder, filters out ignored files and folders, and processes them accordingly. If an entry is a directory, it calls `dfs` recursively and then calls the `processFolder` callback if provided. If an entry is a file and is a text file, it calls the `processFile` callback if provided. +The main logic of the function is implemented in the `dfs` (depth-first search) function, which is called recursively to traverse the file system. It reads the contents of the current directory using `fs.readdir`, filters out ignored items, and processes the remaining items. 
+ +For each item, if it is a directory, the `dfs` function is called recursively, and the `processFolder` callback is invoked if provided. If it is a file and its content is text (checked using `isText`), the `processFile` callback is invoked if provided. + +The traversal is performed using `Promise.all` to process items concurrently, improving performance. If an error occurs during traversal, it is logged and rethrown. Here's an example of how this function might be used in the larger project: ```javascript -import { traverseFileSystem } from './autodoc'; - -const params = { - inputPath: './myProject', - projectName: 'My Project', - ignore: ['node_modules/**', '.git/**'], - processFile: async (fileInfo) => { - // Process the file, e.g., generate documentation +await traverseFileSystem({ + inputPath: './src', + projectName: 'myProject', + processFile: (params) => { + // Process file logic here }, - processFolder: async (folderInfo) => { - // Process the folder, e.g., create a folder in the output directory + processFolder: (params) => { + // Process folder logic here }, -}; - -traverseFileSystem(params); + ignore: ['node_modules/**', '.git/**'], +}); ``` - -This example would traverse the `myProject` folder, ignoring any files and folders within `node_modules` and `.git`, and process the remaining files and folders using the provided callback functions. ## Questions: 1. **What is the purpose of the `traverseFileSystem` function?** - The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes files and folders based on the provided parameters, and ignores files and folders that match the specified ignore patterns. + The `traverseFileSystem` function is an asynchronous function that traverses a given file system, processes folders and files based on the provided parameters, and ignores files and folders based on the given ignore patterns. 2. 
**How does the `shouldIgnore` function work?** - The `shouldIgnore` function takes a file or folder name as input and returns a boolean value indicating whether the file or folder should be ignored based on the provided ignore patterns. It uses the `minimatch` library to check if the file or folder name matches any of the ignore patterns. + The `shouldIgnore` function takes a file name as input and returns a boolean value indicating whether the file should be ignored or not. It checks if the file name matches any of the ignore patterns provided in the `ignore` parameter using the `minimatch` library. 3. **What is the role of the `dfs` function inside `traverseFileSystem`?** - The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory. \ No newline at end of file + The `dfs` function is an asynchronous function that performs a depth-first search on the file system starting from the given `currentPath`. It processes folders and files based on the provided parameters and recursively calls itself for each subdirectory found. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/const.md b/.autodoc/docs/markdown/src/const.md index fceb891..31c2de3 100644 --- a/.autodoc/docs/markdown/src/const.md +++ b/.autodoc/docs/markdown/src/const.md @@ -1,27 +1,35 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/const.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\const.ts) -The code in this file is responsible for managing the user configuration file for the Autodoc project. It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively. +The code in this file is responsible for managing the user configuration file for the autodoc project. 
It imports two Node.js built-in modules, `path` and `os`, which are used to handle file paths and operating system-related utility functions, respectively. -The `userConfigFileName` constant is defined as `'autodoc.user.json'`. This constant represents the name of the user configuration file that will be used by the Autodoc project. +The `userConfigFileName` constant is defined as `'autodoc.user.json'`, which represents the name of the user configuration file. This file is expected to store user-specific settings for the autodoc project in JSON format. -The `userConfigFilePath` constant is created using the `path.resolve()` function, which resolves a sequence of paths into an absolute path. It takes three arguments: +The `userConfigFilePath` constant is created using the `path.resolve()` function, which combines the provided arguments into an absolute file path. The `os.homedir()` function is used to get the current user's home directory, and `./.config/autodoc/` is appended to it as the folder where the user configuration file should be stored. Finally, the `userConfigFileName` constant is appended to the path, resulting in the complete file path for the user configuration file. -1. `os.homedir()`: This function returns the current user's home directory. It ensures that the user configuration file is stored in the user's home directory, making it user-specific. -2. `'./.config/autodoc/'`: This string specifies the subdirectory within the user's home directory where the configuration file will be stored. The `.config` directory is a common location for storing configuration files on Unix-based systems, and the `autodoc` subdirectory is used to keep the Autodoc configuration files organized. -3. `userConfigFileName`: This constant is used as the file name for the user configuration file. 
+By exporting both `userConfigFileName` and `userConfigFilePath`, other parts of the autodoc project can easily access and use these constants to read or write user-specific settings. For example, when the autodoc application starts, it can read the user configuration file from the specified path, and apply the settings accordingly. -The `userConfigFilePath` constant will store the absolute path to the user configuration file, which can be used by other parts of the Autodoc project to read or write user-specific settings. +Here's a code example of how these constants might be used in another part of the autodoc project: -In summary, this code is responsible for defining the location and name of the user configuration file for the Autodoc project. It ensures that the configuration file is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences. +```javascript +import { userConfigFilePath } from './path/to/this/file'; + +// Read user configuration from the file +const userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8')); + +// Apply user settings +applyUserSettings(userConfig); +``` + +In summary, this code is responsible for defining the name and file path of the user configuration file for the autodoc project, allowing other parts of the project to easily access and manage user-specific settings. ## Questions: 1. **What is the purpose of the `userConfigFileName` and `userConfigFilePath` constants?** The `userConfigFileName` constant defines the name of the user configuration file for the autodoc project, while the `userConfigFilePath` constant defines the absolute path to this file, which is located in the user's home directory under the `.config/autodoc/` folder. -2. **Why are the `node:path` and `node:os` modules imported?** +2. 
**Why are the `node:path` and `node:os` modules being imported?** - The `node:path` module is imported to provide utilities for working with file and directory paths, such as the `path.resolve()` function used to construct the `userConfigFilePath`. The `node:os` module is imported to provide operating system-related utility methods, such as `os.homedir()` which returns the current user's home directory. + The `node:path` module is imported to provide utilities for working with file and directory paths, such as resolving the absolute path to the user configuration file. The `node:os` module is imported to provide operating system-related utility methods, such as getting the user's home directory. 3. **Is this code compatible with different operating systems?** - Yes, this code is compatible with different operating systems. The `os.homedir()` function from the `node:os` module returns the correct home directory path for the current user, regardless of the operating system. Additionally, the `path.resolve()` function from the `node:path` module handles path separators and other OS-specific details, ensuring the correct file path is generated. \ No newline at end of file + Yes, this code is compatible with different operating systems. The `os.homedir()` method returns the home directory of the current user, which is platform-specific, and the `path.resolve()` method takes care of handling the correct path separators for the current operating system. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/index.md b/.autodoc/docs/markdown/src/index.md index e178bd7..217e90f 100644 --- a/.autodoc/docs/markdown/src/index.md +++ b/.autodoc/docs/markdown/src/index.md @@ -1,6 +1,8 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/index.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\index.ts) -The code is a CLI (Command Line Interface) tool for the Autodoc project, which helps in generating documentation for a codebase. 
It uses the `commander` package to define and manage commands, and `inquirer` for interactive prompts. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`. +This code is the main entry point for the Autodoc CLI tool, which provides a set of commands to help developers automatically generate documentation for their codebase. The tool uses the `commander` library to define and handle commands, and `inquirer` for interactive prompts. + +The available commands are: 1. `init`: Initializes the repository by creating an `autodoc.config.json` file in the current directory. If the file already exists, it uses the existing configuration. ```bash @@ -12,30 +14,33 @@ The code is a CLI (Command Line Interface) tool for the Autodoc project, which h autodoc estimate ``` -3. `index`: Traverses the codebase, writes documentation using LLM (Language Model), and creates a locally stored index. It prompts the user to confirm before starting the indexing process. +3. `index`: Traverses the codebase, writes documentation using LLM, and creates a locally stored index. Before starting the indexing process, it prompts the user for confirmation. It requires the `autodoc.config.json` file to be present. ```bash autodoc index ``` -4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration; otherwise, it creates a new one. +4. `user`: Sets the Autodoc user configuration. If a user configuration file exists, it uses the existing configuration. ```bash autodoc user ``` -5. `q`: Queries an Autodoc index. It requires both `autodoc.config.json` and user configuration files to be present. +5. `q`: Queries an Autodoc index. It requires both the `autodoc.config.json` and user configuration files to be present. ```bash autodoc q ``` -The code also handles unhandled promise rejections by logging the error stack, showing an error spinner, stopping the spinner, and exiting with an error code. 
+The code also listens for unhandled promise rejections and handles them gracefully by showing an error spinner, stopping the spinner, and exiting with an error code. -Overall, this CLI tool simplifies the process of generating documentation for a codebase by providing an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities. +In the larger project, this CLI tool serves as the primary interface for users to interact with Autodoc, allowing them to easily generate and manage documentation for their codebase. ## Questions: - 1. **Question:** What is the purpose of the `autodoc.config.json` file and how is it used in the code? - **Answer:** The `autodoc.config.json` file is used to store the configuration for the Autodoc repository. It is read and parsed in various commands like `init`, `estimate`, `index`, and `q` to provide the necessary configuration for each command's execution. + 1. **What is the purpose of the Autodoc CLI Tool?** + + The Autodoc CLI Tool is designed to help developers automatically generate documentation for their codebase by traversing the code, writing docs via LLM, and creating a locally stored index. + +2. **How does the `estimate` command work and what does it return?** + + The `estimate` command reads the `autodoc.config.json` file and estimates the cost of running the `index` command on the repository. It provides an estimation of the resources required to generate the documentation. -2. **Question:** How does the `estimate` command work and what does it do? - **Answer:** The `estimate` command reads the `autodoc.config.json` file, parses it into a configuration object, and then calls the `estimate` function with the configuration. The purpose of this command is to estimate the cost of running the `index` command on the repository. +3. **What is the role of the `user` command and how does it interact with the user configuration file?** -3. 
**Question:** What is the purpose of the `user` command and how does it handle user configuration? - **Answer:** The `user` command is used to set the Autodoc user configuration. It reads the user configuration file specified by `userConfigFilePath`, parses it into a configuration object, and then calls the `user` function with the configuration. If the configuration file is not found, it calls the `user` function without any configuration, allowing the user to set up their configuration. \ No newline at end of file + The `user` command is responsible for setting the Autodoc user configuration. It reads the user configuration file (if it exists) and allows the user to update or create a new configuration. This configuration is then used in other commands, such as the `query` command, to interact with the Autodoc index. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/langchain/hnswlib.md b/.autodoc/docs/markdown/src/langchain/hnswlib.md index 696d07c..5b00af0 100644 --- a/.autodoc/docs/markdown/src/langchain/hnswlib.md +++ b/.autodoc/docs/markdown/src/langchain/hnswlib.md @@ -1,32 +1,36 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/langchain/hnswlib.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\langchain\hnswlib.ts) -The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index. +The `HNSWLib` class in this code is a specialized vector store that uses the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. It is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. 
The main purpose of this class is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content. -The constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata. +The constructor of the `HNSWLib` class takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents. -The `addDocuments` method takes an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method is responsible for initializing the index, resizing it if necessary, and adding the vectors and their corresponding metadata to the `InMemoryDocstore`. +The `addDocuments` method takes an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores. -The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search. 
+The `save` and `load` methods allow for persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively. -The `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively. - -Example usage: +Here's an example of how to use the `HNSWLib` class: ```javascript const embeddings = new Embeddings(/* ... */); -const hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings); +const args = { space: 'cosine' }; +const hnswLib = new HNSWLib(embeddings, args); + +// Add documents to the index +await hnswLib.addDocuments(documents); -const queryVector = await embeddings.embedText("example query"); -const similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5); +// Perform a similarity search +const queryVector = /* ... */; +const k = 10; +const results = await hnswLib.similaritySearchVectorWithScore(queryVector, k); ``` -In the larger project, this class can be used to efficiently store and search for similar documents based on their embeddings, which can be useful for tasks such as document clustering, nearest neighbor search, and recommendation systems. +In the larger project, the `HNSWLib` class can be used to efficiently store and search for documents based on their content similarity, which can be useful for tasks such as document clustering, recommendation systems, or information retrieval. ## Questions: - 1. **Question:** What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class? 
- **Answer:** The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk. + 1. **Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class? + **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk. -2. **Question:** How does the `addDocuments` method work and what is its purpose? - **Answer:** The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and the corresponding documents to the HNSW index and the `docstore` respectively. +2. **Question**: How does the `addDocuments` method work and what is its purpose? + **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them using the provided `Embeddings` instance. It then adds the resulting vectors and documents to the HNSW index and the `InMemoryDocstore`, respectively. -3. **Question:** How does the `similaritySearchVectorWithScore` method work and what does it return? - **Answer:** The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input. It checks if the query vector has the same length as the number of dimensions and if `k` is not greater than the number of elements in the index. 
It then performs a k-nearest neighbors search on the HNSW index using the query vector and returns an array of `[Document, number]` tuples, where each tuple contains a document from the `docstore` and its corresponding distance score to the query vector. \ No newline at end of file +3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return? + **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` most similar vectors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding similarity score to the query vector. \ No newline at end of file diff --git a/.autodoc/docs/markdown/src/langchain/summary.md b/.autodoc/docs/markdown/src/langchain/summary.md index bb0b9a8..e3292fb 100644 --- a/.autodoc/docs/markdown/src/langchain/summary.md +++ b/.autodoc/docs/markdown/src/langchain/summary.md @@ -1,23 +1,29 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src/langchain) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src\langchain) -The `hnswlib.ts` file in the `.autodoc/docs/json/src/langchain` folder contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems. +The `hnswlib.ts` file in the `.autodoc\docs\json\src\langchain` folder contains the `HNSWLib` class, which is a specialized vector store utilizing the Hierarchical Navigable Small World (HNSW) algorithm for efficient similarity search. This class is built on top of the `hnswlib-node` library and extends the `SaveableVectorStore` class. 
Its primary purpose is to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content. -The `HNSWLib` class extends the `SaveableVectorStore` class and provides methods for adding documents, searching for similar documents, and saving/loading the index. It takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. The `Embeddings` object is responsible for converting text documents into numerical vectors, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing document metadata. +The `HNSWLib` class constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is responsible for converting documents into their corresponding vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW index and an optional `InMemoryDocstore` object for storing the documents. -The `addDocuments` method accepts an array of `Document` objects, converts their text content into numerical vectors using the `Embeddings` object, and adds the vectors to the HNSW index. The `addVectors` method initializes the index, resizes it if necessary, and adds the vectors and their corresponding metadata to the `InMemoryDocstore`. +The `addDocuments` method accepts an array of `Document` objects, converts them into embeddings using the `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores. -The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents in the index along with their similarity scores. 
It checks if the query vector has the correct dimensions and if `k` is within the valid range before performing the search. +The `save` and `load` methods enable persisting the HNSW index, document store, and configuration options to disk and loading them back into memory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively. -The `save` and `load` methods allow the HNSW index and its associated metadata to be saved to and loaded from a specified directory. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of text strings or `Document` objects, respectively. +In the larger project, the `HNSWLib` class can be employed to efficiently store and search for documents based on their content similarity, which can be beneficial for tasks such as document clustering, recommendation systems, or information retrieval. -Here's an example of how this code might be used: +Here's an example of how to use the `HNSWLib` class: ```javascript const embeddings = new Embeddings(/* ... */); -const hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings); +const args = { space: 'cosine' }; +const hnswLib = new HNSWLib(embeddings, args); -const queryVector = await embeddings.embedText("example query"); -const similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5); +// Add documents to the index +await hnswLib.addDocuments(documents); + +// Perform a similarity search +const queryVector = /* ... */; +const k = 10; +const results = await hnswLib.similaritySearchVectorWithScore(queryVector, k); ``` -In the larger project, the `HNSWLib` class can be integrated with other components to build efficient and scalable systems for document similarity search, clustering, and recommendations based on text embeddings. 
+This code snippet demonstrates how to create an `HNSWLib` instance, add documents to the index, and perform a similarity search. The results can then be used for various purposes, such as finding related documents or generating recommendations based on content similarity. diff --git a/.autodoc/docs/markdown/src/summary.md b/.autodoc/docs/markdown/src/summary.md index 30a7739..6d65cdc 100644 --- a/.autodoc/docs/markdown/src/summary.md +++ b/.autodoc/docs/markdown/src/summary.md @@ -1,39 +1,39 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc/docs/json/src) +[View code on GitHub](https://github.com/context-labs/autodoc/.autodoc\docs\json\src) -The `.autodoc/docs/json/src` folder contains the core components of the Autodoc project, which aims to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The main files in this folder are `const.ts`, `index.ts`, and `types.ts`. +The `.autodoc\docs\json\src` folder contains the core components of the autodoc project, which is designed to automatically generate documentation for a given code repository using OpenAI's language models (LLMs). The folder consists of three main files: `const.ts`, `index.ts`, and `types.ts`, as well as two subfolders: `cli` and `langchain`. -`const.ts` manages the user configuration file for the Autodoc project. It defines the location and name of the user configuration file, ensuring that it is stored in a user-specific directory and follows a standard naming convention. This allows the Autodoc project to easily manage user-specific settings and preferences. +`const.ts` defines the name and file path of the user configuration file for the autodoc project. This file stores user-specific settings in JSON format. Other parts of the project can easily access and use these constants to read or write user-specific settings. 
For example: -`index.ts` is a CLI (Command Line Interface) tool for the Autodoc project, which simplifies the process of generating documentation for a codebase. It provides an easy-to-use interface for managing configurations and running the Autodoc project's core functionalities. The main commands supported are `init`, `estimate`, `index`, `user`, and `q`. For example: +```javascript +import { userConfigFilePath } from './path/to/this/file'; + +// Read user configuration from the file +const userConfig = JSON.parse(fs.readFileSync(userConfigFilePath, 'utf-8')); -```bash -autodoc init -autodoc estimate -autodoc index -autodoc user -autodoc q +// Apply user settings +applyUserSettings(userConfig); ``` -`types.ts` defines the types and interfaces for the Autodoc project, providing the foundation for processing code repositories and generating documentation using OpenAI's language models. It includes types such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more. +`index.ts` serves as the main entry point for the Autodoc CLI tool, providing a set of commands for developers to generate and manage documentation for their codebase. The available commands include `init`, `estimate`, `index`, `user`, and `q`. The CLI tool uses the `commander` library for command handling and `inquirer` for interactive prompts. -The `cli` subfolder contains the `spinner.ts` file, which provides a utility for managing a command-line spinner using the `ora` library. This utility can be used to provide a consistent and user-friendly interface for displaying progress and status messages during long-running tasks or processes. For example: +`types.ts` defines the types and interfaces for the autodoc project, such as `AutodocUserConfig`, `AutodocRepoConfig`, `FileSummary`, `FolderSummary`, and more. These types are used to configure and run the autodoc tool, allowing users to generate documentation for their code repositories using OpenAI's LLMs. 
-```javascript -updateSpinnerText('Loading data...'); -stopSpinner(); -spinnerError('An error occurred'); -spinnerSuccess('Operation completed successfully'); -spinnerInfo('Please wait...'); -``` +The `cli` subfolder contains the `spinner.ts` file, which manages a spinner for visual feedback during background processes. It exports functions like `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo` for easy interaction with the spinner. -The `langchain` subfolder contains the `hnswlib.ts` file, which implements a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class is designed to efficiently store and search for similar documents based on their embeddings, making it useful for tasks such as document clustering, nearest neighbor search, and recommendation systems. For example: +The `langchain` subfolder contains the `hnswlib.ts` file, which provides the `HNSWLib` class for efficient similarity search using the Hierarchical Navigable Small World (HNSW) algorithm. This class is used to store and search for documents based on their embeddings, which are high-dimensional vectors representing the documents' content. Example usage: ```javascript const embeddings = new Embeddings(/* ... */); -const hnswLib = await HNSWLib.fromTexts(texts, metadatas, embeddings); +const args = { space: 'cosine' }; +const hnswLib = new HNSWLib(embeddings, args); + +// Add documents to the index +await hnswLib.addDocuments(documents); -const queryVector = await embeddings.embedText("example query"); -const similarDocuments = await hnswLib.similaritySearchVectorWithScore(queryVector, 5); +// Perform a similarity search +const queryVector = /* ... 
*/; +const k = 10; +const results = await hnswLib.similaritySearchVectorWithScore(queryVector, k); ``` -In summary, the code in this folder provides the core components and utilities for the Autodoc project, enabling the automatic generation of documentation for code repositories using OpenAI's language models. The CLI tool simplifies the process, while the types and interfaces lay the foundation for processing and generating documentation. The additional utilities, such as the spinner and HNSWLib, enhance the user experience and provide efficient search capabilities. +In summary, the code in this folder is responsible for the core functionality of the autodoc project, including user configuration management, CLI tool commands, type definitions, spinner management, and efficient similarity search using the HNSW algorithm. diff --git a/.autodoc/docs/markdown/src/types.md b/.autodoc/docs/markdown/src/types.md index 40258f5..7850747 100644 --- a/.autodoc/docs/markdown/src/types.md +++ b/.autodoc/docs/markdown/src/types.md @@ -1,28 +1,27 @@ -[View code on GitHub](https://github.com/context-labs/autodoc/src/types.ts) +[View code on GitHub](https://github.com/context-labs/autodoc/src\types.ts) -This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders within the repository. +This code defines the types and interfaces for the `autodoc` project, which aims to automatically generate documentation for a given code repository. The project uses OpenAI's language models (LLMs) to process and generate summaries, questions, and other relevant information for files and folders in the repository. -The code starts by importing `OpenAIChat` from the `langchain/llms` package. 
It then defines several types and interfaces that are used throughout the project: +The `AutodocUserConfig` and `AutodocRepoConfig` types define the configuration options for the user and repository, respectively. These include settings such as the LLM models to use, repository URL, output directory, and content type. -- `AutodocUserConfig`: Represents the user configuration for the autodoc project, including the LLM models to be used. -- `AutodocRepoConfig`: Represents the configuration for a specific repository, including its name, URL, root directory, output directory, LLM models, and other settings. -- `FileSummary` and `FolderSummary`: Represent the summaries and questions generated for files and folders, respectively. -- `ProcessFileParams`, `ProcessFolderParams`, and `TraverseFileSystemParams`: Define the parameters for processing files, folders, and traversing the file system, respectively. -- `ProcessFile` and `ProcessFolder`: Define the function types for processing files and folders, respectively. -- `LLMModels`: Enumerates the available LLM models, such as GPT-3.5-turbo, GPT-4, and GPT-4-32k. -- `LLMModelDetails`: Represents the details of an LLM model, including its name, cost per 1K tokens, maximum length, and other statistics. +`FileSummary` and `FolderSummary` types represent the generated summaries for files and folders, including their paths, URLs, and checksums. The `ProcessFileParams` and `ProcessFolderParams` types define the parameters required for processing files and folders, such as the file or folder name, path, and content type. -For example, when using this code in the larger project, you might define a `ProcessFile` function that takes a `ProcessFileParams` object as input and generates a summary and questions for the file using the specified LLM model. Similarly, you could define a `ProcessFolder` function that processes all files and subfolders within a folder, generating summaries and questions for each. 
+`ProcessFile` and `ProcessFolder` are function types that take the respective parameters and return a promise. These functions are responsible for processing the files and folders, generating summaries, and updating the documentation. -The `TraverseFileSystemParams` type allows you to configure how the file system is traversed, including specifying which files and folders to ignore, and what prompts to use for generating summaries and questions. +`TraverseFileSystemParams` type defines the parameters for traversing the file system, including the input path, project name, and optional `processFile` and `processFolder` functions. It also includes settings for ignoring certain files or folders and content type preferences. -Overall, this code provides the foundation for the `autodoc` project by defining the types and interfaces needed to process code repositories and generate documentation using OpenAI's language models. +The `LLMModels` enum lists the available language models, such as GPT-3.5 Turbo, GPT-4, and GPT-4 32k. The `LLMModelDetails` type provides information about each model, including the cost per 1K tokens, maximum length, and success/failure statistics. + +In the larger project, these types and interfaces would be used to configure and run the `autodoc` tool, allowing users to automatically generate documentation for their code repositories using OpenAI's language models. For example, a user could provide an `AutodocRepoConfig` object to configure the tool, and then use the `TraverseFileSystem` function to process the repository and generate the documentation. ## Questions: - 1. **Question:** What is the purpose of the `LLMModels` enum and how is it used in the code? - **Answer:** The `LLMModels` enum defines the available language models for the autodoc project. It is used in the `AutodocUserConfig` and `AutodocRepoConfig` types to specify which language models should be used for processing files and folders. + 1. 
**What is the purpose of the `AutodocUserConfig` and `AutodocRepoConfig` types?** + + The `AutodocUserConfig` type is used to define the user configuration for the autodoc project, which includes an array of LLMModels. The `AutodocRepoConfig` type is used to define the repository configuration for the autodoc project, which includes various properties such as name, repository URL, root, output, LLMModels, and more. + +2. **What are the different LLMModels available in the `LLMModels` enum?** + + The `LLMModels` enum lists the available language models for the autodoc project. Currently, there are three models: GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k). -2. **Question:** What are the `ProcessFile` and `ProcessFolder` types and how are they used in the code? - **Answer:** `ProcessFile` and `ProcessFolder` are types for functions that process a file or a folder, respectively. They are used as optional parameters in the `TraverseFileSystemParams` type, allowing developers to provide custom processing functions when traversing the file system. +3. **What is the purpose of the `ProcessFile` and `ProcessFolder` types?** -3. **Question:** What is the purpose of the `TraverseFileSystemParams` type and how is it used in the code? - **Answer:** The `TraverseFileSystemParams` type defines the parameters required for traversing the file system. It is used to pass configuration options, such as input path, project name, custom processing functions, and other settings, to a function that will traverse the file system and process files and folders accordingly. \ No newline at end of file + The `ProcessFile` type is a function type that takes a `ProcessFileParams` object as input and returns a Promise. It is used to process a single file in the autodoc project. The `ProcessFolder` type is a function type that takes a `ProcessFolderParams` object as input and returns a Promise. It is used to process a folder in the autodoc project. 
\ No newline at end of file diff --git a/.autodoc/docs/markdown/tsconfig.md b/.autodoc/docs/markdown/tsconfig.md index 68f5e10..64aee1e 100644 --- a/.autodoc/docs/markdown/tsconfig.md +++ b/.autodoc/docs/markdown/tsconfig.md @@ -1,30 +1,31 @@ [View code on GitHub](https://github.com/context-labs/autodoc/tsconfig.json) -This code is a configuration file for the TypeScript compiler in a project. The purpose of this configuration is to define various options and settings that the TypeScript compiler should use when transpiling TypeScript code into JavaScript. This is important for ensuring that the compiled output is consistent and compatible with the intended runtime environment. - -Here's a brief explanation of the key options set in this configuration: - -- `"rootDir": "src"`: Specifies the root directory containing the TypeScript source files. This tells the compiler where to look for the input files. -- `"outDir": "dist"`: Specifies the output directory for the compiled JavaScript files. This is where the transpiled code will be saved. -- `"strict": true`: Enables strict type checking, which enforces stronger type safety and helps catch potential issues during development. -- `"target": "es2020"`: Sets the target ECMAScript version for the compiled output. In this case, the output will be compatible with ECMAScript 2020 (ES11) features. -- `"module": "ES2020"`: Specifies the module system to use in the compiled output. This setting is aligned with the target ECMAScript version. -- `"sourceMap": true`: Generates source map files alongside the compiled output. This helps with debugging by mapping the compiled code back to the original TypeScript source. -- `"esModuleInterop": true` and `"allowSyntheticDefaultImports": true`: These options enable better compatibility with different module systems and allow for more flexible import statements. 
-- `"moduleResolution": "node"`: Sets the module resolution strategy to Node.js-style, which is the most common approach for resolving module imports in JavaScript projects. -- `"declaration": true`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled output. These files provide type information for the compiled code, which can be useful for other TypeScript projects that depend on this one. -- `"skipLibCheck": true`: Skips type checking of declaration files, which can speed up the compilation process. - -In the larger project, this configuration file ensures that the TypeScript compiler produces consistent and compatible JavaScript output, making it easier to integrate the compiled code with other parts of the project or with external dependencies. +The code provided is a configuration file for the TypeScript compiler in a project. It specifies various options that control how the TypeScript compiler should process the source code and generate the output JavaScript files. This configuration file is typically named `tsconfig.json` and is placed at the root of a TypeScript project. + +The `compilerOptions` object contains several key-value pairs that define the behavior of the TypeScript compiler: + +- `rootDir`: Specifies the root directory of the source files. In this case, it is set to "src", meaning that the source files are located in the "src" folder. +- `outDir`: Specifies the output directory for the compiled JavaScript files. In this case, it is set to "dist", meaning that the compiled files will be placed in the "dist" folder. +- `strict`: Enables strict type checking, which helps catch potential issues in the code. +- `target`: Specifies the ECMAScript target version for the output JavaScript files. In this case, it is set to "es2020", meaning that the output files will be compatible with ECMAScript 2020 features. +- `module`: Specifies the module system to be used. 
In this case, it is set to "ES2020", meaning that the output files will use the ECMAScript 2020 module system. +- `sourceMap`: Generates source map files, which help in debugging the compiled code by mapping it back to the original TypeScript source files. +- `esModuleInterop`: Enables compatibility with ECMAScript modules for importing CommonJS modules. +- `moduleResolution`: Specifies the module resolution strategy. In this case, it is set to "node", meaning that the Node.js module resolution algorithm will be used. +- `allowSyntheticDefaultImports`: Allows default imports from modules with no default export. +- `declaration`: Generates TypeScript declaration files (`.d.ts`) alongside the compiled JavaScript files, which can be useful for other projects that depend on this one. +- `skipLibCheck`: Skips type checking of declaration files, which can speed up the compilation process. + +Overall, this configuration file helps ensure that the TypeScript compiler processes the source code according to the specified options, resulting in compiled JavaScript files that are compatible with the desired ECMAScript version and module system, while also providing useful features like source maps and strict type checking. ## Questions: 1. **What is the purpose of the `rootDir` and `outDir` options in the configuration?** - The `rootDir` option specifies the root folder of the source files, while the `outDir` option specifies the output directory for the compiled files. + The `rootDir` option specifies the root directory of the input files, while the `outDir` option specifies the output directory for the compiled files. 2. **What does the `strict` option do in the configuration?** - The `strict` option enables a set of strict type-checking options in the TypeScript compiler, ensuring a higher level of type safety in the code. + The `strict` option enables a wide range of type checking behavior that results in stronger guarantees of program correctness. 3. 
**What is the significance of the `target` and `module` options in the configuration?** - The `target` option sets the ECMAScript target version for the compiled JavaScript output, while the `module` option specifies the module system to be used in the generated code. In this case, both are set to "es2020", indicating that the output will be ECMAScript 2020 compliant. \ No newline at end of file + The `target` option specifies the ECMAScript target version for the output code, and the `module` option specifies the module system used in the output code. In this case, both are set to "es2020", which means the output code will be compatible with ECMAScript 2020 features and module system. \ No newline at end of file diff --git a/src/cli/commands/index/convertJsonToMarkdown.ts b/src/cli/commands/index/convertJsonToMarkdown.ts index e871399..98eed82 100644 --- a/src/cli/commands/index/convertJsonToMarkdown.ts +++ b/src/cli/commands/index/convertJsonToMarkdown.ts @@ -87,7 +87,7 @@ export const convertJsonToMarkdown = async ({ await fs.writeFile(outputPath, markdown, 'utf-8'); }; - updateSpinnerText(`Creating ${files} mardown files...`); + updateSpinnerText(`Creating ${files} markdown files...`); await traverseFileSystem({ inputPath: inputRoot, projectName, @@ -99,5 +99,5 @@ export const convertJsonToMarkdown = async ({ targetAudience, linkHosted, }); - spinnerSuccess(`Created ${files} mardown files...`); + spinnerSuccess(`Created ${files} markdown files...`); }; diff --git a/src/cli/commands/index/processRepository.ts b/src/cli/commands/index/processRepository.ts index a93f9c9..cbf1e1c 100644 --- a/src/cli/commands/index/processRepository.ts +++ b/src/cli/commands/index/processRepository.ts @@ -72,11 +72,12 @@ export const processRepository = async ( const content = await fs.readFile(filePath, 'utf-8'); //calculate the hash of the file - const newChecksum = await calculateChecksum(filePath, [content]); + const newChecksum = await calculateChecksum([content]); - //if an 
existing summary.json file exists, it will check the checksums and decide if a reindex is needed + //if an existing .json file exists, it will check the checksums and decide if a reindex is needed const reindex = await reindexCheck( - path.join(outputRoot, filePath), + path.join(outputRoot, filePath.substring(0, filePath.lastIndexOf('\\'))), + fileName.replace(/\.[^/.]+$/, '.json'), newChecksum, ); if (!reindex) { @@ -212,10 +213,10 @@ export const processRepository = async ( ); //get the checksum of all the files in the folder - const newChecksum = await calculateChecksum(folderPath, contents); + const newChecksum = await calculateChecksum(contents); //if an existing summary.json file exists, it will check the checksums and decide if a reindex is needed - const reindex = await reindexCheck(folderPath, newChecksum); + const reindex = await reindexCheck(folderPath, 'summary.json', newChecksum); if (!reindex) { return; } @@ -394,15 +395,10 @@ export const processRepository = async ( }; //reads all the files, and returns a checksum -async function calculateChecksum( - folderPath: string, - contents: string[], -): Promise { +async function calculateChecksum(contents: string[]): Promise { const checksums: string[] = []; - for (const fileName of contents) { - const filePath = `${folderPath}/${fileName}`; - const fileData = await fs.readFile(filePath, 'utf-8'); - const checksum = Md5.hashStr(fileData); + for (const content of contents) { + const checksum = Md5.hashStr(content); checksums.push(checksum); } const concatenatedChecksum = checksums.join(''); @@ -412,26 +408,27 @@ async function calculateChecksum( //checks if a summary.json file exists, and if it does, compares the checksums to see if it needs to be re-indexed or not. 
async function reindexCheck( - fileOrFolderPath: string, + contentPath: string, + name: string, newChecksum: string, ): Promise { + const jsonPath = path.join(contentPath, name); + console.log(jsonPath); + let summaryExists = false; try { - await fs.access(path.join(fileOrFolderPath, 'summary.json')); + await fs.access(jsonPath); summaryExists = true; } catch (error) {} if (summaryExists) { - const fileContents = await fs.readFile( - path.join(fileOrFolderPath, 'summary.json'), - 'utf8', - ); + const fileContents = await fs.readFile(jsonPath, 'utf8'); const fileContentsJSON = JSON.parse(fileContents); const oldChecksum = fileContentsJSON.checksum; if (oldChecksum === newChecksum) { - console.log(`Skipping ${fileOrFolderPath} because it has not changed`); + console.log(`Skipping ${jsonPath} because it has not changed`); return false; } else { return true; From f020b190d280629c0112a16392eeb5b1fcfdaf94 Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:48:20 -0400 Subject: [PATCH 4/5] works! 
--- src/cli/commands/index/processRepository.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/commands/index/processRepository.ts b/src/cli/commands/index/processRepository.ts index cbf1e1c..64379bf 100644 --- a/src/cli/commands/index/processRepository.ts +++ b/src/cli/commands/index/processRepository.ts @@ -413,7 +413,6 @@ async function reindexCheck( newChecksum: string, ): Promise { const jsonPath = path.join(contentPath, name); - console.log(jsonPath); let summaryExists = false; try { @@ -431,6 +430,7 @@ async function reindexCheck( console.log(`Skipping ${jsonPath} because it has not changed`); return false; } else { + console.log(`Reindexing ${jsonPath} because it has changed`); return true; } } From 19644e7c21436d7a81e2755aad20782f1d77a8c7 Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 30 Mar 2023 19:11:07 -0400 Subject: [PATCH 5/5] update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8e44aff..8212860 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ You should see a screen like this: Markdownify -This screen estimates the cost of indexing your repository. You can also access this screen via the `doc estimate` command. +This screen estimates the cost of indexing your repository. You can also access this screen via the `doc estimate` command. If you've already indexed once, then `doc index` will only reindex files that have been changed on the second go. For every file in your project, Autodoc calculates the number of tokens in the file based on the file content. The more lines of code, the larger the number of tokens. Using this number, it determine which model it will use on per file basis, always choosing the cheapest model whose context length supports the number of tokens in the file. 
If you're interested in helping make model selection configurable in Autodoc, check out [this issue](https://github.com/context-labs/autodoc/issues/9).