diff --git a/community/log_analysis_multi_agent_rag/README.md b/community/log_analysis_multi_agent_rag/README.md
index b59132d77..84ef85c14 100644
--- a/community/log_analysis_multi_agent_rag/README.md
+++ b/community/log_analysis_multi_agent_rag/README.md
@@ -17,7 +17,7 @@ This repository provides a sample code to demonstrate how you can use the log an
 
 - **Generate your API key** by following the steps in the link below:
   [Click here to view the steps for generating an API Key](https://docs.nvidia.com/nim/large-language-models/latest/getting-started.html#generate-an-api-key)
-- **Store your API key** : You can securely store your API key by creating a `.env` file in the root directory of your project
+- **Configure your API key** : Following the [examples in the document above](https://docs.nvidia.com/nim/large-language-models/latest/getting-started.html#export-the-api-key), place your API key in your shell initialization files. Alternatively, you can use an environment variable manager such as [direnv](https://direnv.net/).
 - **example.py** : The sample script showcases how to integrate log analysis into your workflow. It demonstrates how to pass your log data through the system, generate insights, and manage the output.
 
 # Components
diff --git a/community/log_analysis_multi_agent_rag/utils.py b/community/log_analysis_multi_agent_rag/utils.py
index 6a787e55e..618fb16dd 100644
--- a/community/log_analysis_multi_agent_rag/utils.py
+++ b/community/log_analysis_multi_agent_rag/utils.py
@@ -65,8 +65,16 @@ def format_docs(self, docs):
 
 # Usage
 
-# Access the API key from environment variables
-api_key = os.getenv('API_KEY')
+# Access the API key from environment variables, preferring the well-known NVIDIA variables
+# and finally falling back to the .env-based API_KEY used within this repo.
+api_key = (
+    os.getenv('NVIDIA_API_KEY') or
+    os.getenv('NGC_API_KEY') or
+    os.getenv('API_KEY')
+)
+if not api_key:
+    raise RuntimeError("No NVIDIA API key found. Set one of NVIDIA_API_KEY, NGC_API_KEY, or the .env-based API_KEY in your environment.")
+
 model = "nvidia/llama-3.3-nemotron-super-49b-v1.5"
 prompts_file = "prompt.json"
 automation = Nodeoutputs(api_key, model, prompts_file)
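
For reference, a minimal sketch of the key-resolution order the updated `utils.py` implements; the exported variable and its value below are hypothetical placeholders for illustration, not part of this change:

```python
import os

# Simulate an `export NGC_API_KEY=...` done in the shell, per the README change above.
# The value here is a placeholder used only to illustrate the fallback order.
os.environ.setdefault('NGC_API_KEY', 'nvapi-example-placeholder')

# Same resolution order as the updated utils.py:
# NVIDIA_API_KEY first, then NGC_API_KEY, then the .env-based API_KEY.
api_key = (
    os.getenv('NVIDIA_API_KEY') or
    os.getenv('NGC_API_KEY') or
    os.getenv('API_KEY')
)
if not api_key:
    raise RuntimeError("No NVIDIA API key found.")

print(f"Resolved API key: {api_key!r}")
```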