@@ -97,6 +97,7 @@ def create_chat_completion_message_event(
9797 if not transaction :
9898 return
9999
100+ message_ids = []
100101 for index , message in enumerate (input_message_list ):
101102 if response_id :
102103 id_ = "%s-%d" % (response_id , index ) # Response ID was set, append message index to it.
@@ -128,6 +129,7 @@ def create_chat_completion_message_event(
128129 id_ = "%s-%d" % (response_id , index ) # Response ID was set, append message index to it.
129130 else :
130131 id_ = str (uuid .uuid4 ()) # No response IDs, use random UUID
132+ message_ids .append (id_ )
131133
132134 chat_completion_message_dict = {
133135 "id" : id_ ,
@@ -144,9 +146,10 @@ def create_chat_completion_message_event(
144146 "response.model" : request_model ,
145147 "vendor" : "bedrock" ,
146148 "ingest_source" : "Python" ,
147- "is_response" : True
149+ "is_response" : True ,
148150 }
149151 transaction .record_custom_event ("LlmChatCompletionMessage" , chat_completion_message_dict )
152+ return (conversation_id , request_id , message_ids )
150153
151154
152155def extract_bedrock_titan_text_model (request_body , response_body = None ):
@@ -246,7 +249,7 @@ def extract_bedrock_claude_model(request_body, response_body=None):
246249 chat_completion_summary_dict = {
247250 "request.max_tokens" : request_body .get ("max_tokens_to_sample" , "" ),
248251 "request.temperature" : request_body .get ("temperature" , "" ),
249- "response.number_of_messages" : len (input_message_list )
252+ "response.number_of_messages" : len (input_message_list ),
250253 }
251254
252255 if response_body :
@@ -264,6 +267,40 @@ def extract_bedrock_claude_model(request_body, response_body=None):
264267 return input_message_list , output_message_list , chat_completion_summary_dict
265268
266269
def extract_bedrock_llama_model(request_body, response_body=None):
    """Extract LLM message lists and summary attributes from a Bedrock Meta Llama request/response.

    Parameters:
        request_body: JSON string of the InvokeModel request body (expects a "prompt" key).
        response_body: Optional JSON string of the response body (expects "generation",
            "prompt_token_count", "generation_token_count", "stop_reason").

    Returns:
        Tuple of (input_message_list, output_message_list, chat_completion_summary_dict).
        output_message_list is empty when no response body was supplied.
    """
    request_body = json.loads(request_body)
    if response_body:
        response_body = json.loads(response_body)

    input_message_list = [{"role": "user", "content": request_body.get("prompt", "")}]

    chat_completion_summary_dict = {
        "request.max_tokens": request_body.get("max_gen_len", ""),
        "request.temperature": request_body.get("temperature", ""),
        "response.number_of_messages": len(input_message_list),
    }

    if response_body:
        output_message_list = [{"role": "assistant", "content": response_body.get("generation", "")}]
        prompt_tokens = response_body.get("prompt_token_count", None)
        completion_tokens = response_body.get("generation_token_count", None)
        # Use explicit None checks: a token count of 0 is a valid value and must not
        # cause total_tokens to be dropped (truthiness would treat 0 as missing).
        if prompt_tokens is not None and completion_tokens is not None:
            total_tokens = prompt_tokens + completion_tokens
        else:
            total_tokens = None

        chat_completion_summary_dict.update(
            {
                "response.usage.completion_tokens": completion_tokens,
                "response.usage.prompt_tokens": prompt_tokens,
                "response.usage.total_tokens": total_tokens,
                "response.choices.finish_reason": response_body.get("stop_reason", ""),
                "response.number_of_messages": len(input_message_list) + len(output_message_list),
            }
        )
    else:
        output_message_list = []

    return input_message_list, output_message_list, chat_completion_summary_dict
302+
303+
267304def extract_bedrock_cohere_model (request_body , response_body = None ):
268305 request_body = json .loads (request_body )
269306 if response_body :
@@ -274,7 +311,7 @@ def extract_bedrock_cohere_model(request_body, response_body=None):
274311 chat_completion_summary_dict = {
275312 "request.max_tokens" : request_body .get ("max_tokens" , "" ),
276313 "request.temperature" : request_body .get ("temperature" , "" ),
277- "response.number_of_messages" : len (input_message_list )
314+ "response.number_of_messages" : len (input_message_list ),
278315 }
279316
280317 if response_body :
@@ -300,6 +337,7 @@ def extract_bedrock_cohere_model(request_body, response_body=None):
300337 ("ai21.j2" , extract_bedrock_ai21_j2_model ),
301338 ("cohere" , extract_bedrock_cohere_model ),
302339 ("anthropic.claude" , extract_bedrock_claude_model ),
340+ ("meta.llama2" , extract_bedrock_llama_model ),
303341]
304342
305343
@@ -313,6 +351,7 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs):
313351 return wrapped (* args , ** kwargs )
314352
315353 transaction .add_ml_model_info ("Bedrock" , BOTOCORE_VERSION )
354+ transaction ._add_agent_attribute ("llm" , True )
316355
317356 # Read and replace request file stream bodies
318357 request_body = kwargs ["body" ]
@@ -368,7 +407,7 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs):
368407 notice_error_attributes = {
369408 "http.statusCode" : error_attributes ["http.statusCode" ],
370409 "error.message" : error_attributes ["error.message" ],
371- "error.code" : error_attributes ["error.code" ]
410+ "error.code" : error_attributes ["error.code" ],
372411 }
373412
374413 if is_embedding :
@@ -511,7 +550,7 @@ def handle_chat_completion_event(
511550 span_id ,
512551):
513552 custom_attrs_dict = transaction ._custom_params
514- conversation_id = custom_attrs_dict .get ("conversation_id" , "" )
553+ conversation_id = custom_attrs_dict .get ("llm. conversation_id" , "" )
515554
516555 chat_completion_id = str (uuid .uuid4 ())
517556
@@ -542,7 +581,7 @@ def handle_chat_completion_event(
542581
543582 transaction .record_custom_event ("LlmChatCompletionSummary" , chat_completion_summary_dict )
544583
545- create_chat_completion_message_event (
584+ message_ids = create_chat_completion_message_event (
546585 transaction = transaction ,
547586 app_name = settings .app_name ,
548587 input_message_list = input_message_list ,
@@ -556,6 +595,10 @@ def handle_chat_completion_event(
556595 response_id = response_id ,
557596 )
558597
598+ if not hasattr (transaction , "_nr_message_ids" ):
599+ transaction ._nr_message_ids = {}
600+ transaction ._nr_message_ids ["bedrock_key" ] = message_ids
601+
559602
560603CUSTOM_TRACE_POINTS = {
561604 ("sns" , "publish" ): message_trace ("SNS" , "Produce" , "Topic" , extract (("TopicArn" , "TargetArn" ), "PhoneNumber" )),
@@ -592,6 +635,12 @@ def _nr_clientcreator__create_api_method_(wrapped, instance, args, kwargs):
592635 return tracer (wrapped )
593636
594637
638+ def _nr_clientcreator__create_methods (wrapped , instance , args , kwargs ):
639+ class_attributes = wrapped (* args , ** kwargs )
640+ class_attributes ["_nr_wrapped" ] = True
641+ return class_attributes
642+
643+
595644def _bind_make_request_params (operation_model , request_dict , * args , ** kwargs ):
596645 return operation_model , request_dict
597646
@@ -622,3 +671,4 @@ def instrument_botocore_endpoint(module):
622671
def instrument_botocore_client(module):
    """Hook botocore's ClientCreator so generated API methods are instrumented."""
    targets = (
        ("ClientCreator._create_api_method", _nr_clientcreator__create_api_method_),
        ("ClientCreator._create_methods", _nr_clientcreator__create_methods),
    )
    for dotted_name, wrapper in targets:
        wrap_function_wrapper(module, dotted_name, wrapper)
0 commit comments