From 497429487eb2026fcf6791f0572f145a4a954067 Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 21:14:26 -0500 Subject: [PATCH 1/6] Add Callback --- .../callbacks/bedrock_callback.py | 206 ++++++++++++++++++ libs/aws/langchain_aws/callbacks/manager.py | 45 ++++ libs/aws/langchain_aws/chat_models/bedrock.py | 8 +- .../chat_models/bedrock_converse.py | 28 ++- libs/aws/langchain_aws/llms/bedrock.py | 5 + 5 files changed, 286 insertions(+), 6 deletions(-) create mode 100644 libs/aws/langchain_aws/callbacks/bedrock_callback.py create mode 100644 libs/aws/langchain_aws/callbacks/manager.py diff --git a/libs/aws/langchain_aws/callbacks/bedrock_callback.py b/libs/aws/langchain_aws/callbacks/bedrock_callback.py new file mode 100644 index 00000000..329059b8 --- /dev/null +++ b/libs/aws/langchain_aws/callbacks/bedrock_callback.py @@ -0,0 +1,206 @@ +import threading +from typing import Any, Dict, List, Union + +from langchain_core.callbacks import BaseCallbackHandler +from langchain_core.outputs import LLMResult + +# https://aws.amazon.com/bedrock/pricing/ +MODEL_COST_PER_1K_INPUT_TOKENS = { + "anthropic.claude-instant-v1": 0.0008, + "anthropic.claude-v2": 0.008, + "anthropic.claude-v2:1": 0.008, + "anthropic.claude-3-sonnet-20240229-v1:0": 0.003, + "anthropic.claude-3-5-sonnet-20240620-v1:0": 0.003, + "anthropic.claude-3-5-sonnet-20241022-v2:0": 0.003, + "anthropic.claude-3-haiku-20240307-v1:0": 0.00025, + "anthropic.claude-3-5-haiku-20241022-v1:0": 0.0008, + "anthropic.claude-3-opus-20240229-v1:0": 0.015, + "meta.llama3-70b-instruct-v1:0": 0.00265, + "meta.llama3-8b-instruct-v1:0": 0.0003, + "meta.llama2-13b-chat-v1": 0.00075, + "meta.llama2-70b-chat-v1": 0.00195, + "meta.llama3-3-70b-instruct-v1:0": 0.00072, + "amazon.nova-pro-v1:0": 0.0008, + "amazon.nova-lite-v1:0": 0.00006, + "amazon.nova-micro-v1:0": 0.000035, +} + +MODEL_COST_PER_1K_OUTPUT_TOKENS = { + "anthropic.claude-instant-v1": 0.0024, + "anthropic.claude-v2": 0.024, + "anthropic.claude-v2:1": 0.024, + "anthropic.claude-3-sonnet-20240229-v1:0": 0.015, + "anthropic.claude-3-5-sonnet-20240620-v1:0": 0.015, + "anthropic.claude-3-5-sonnet-20241022-v2:0": 0.015, + "anthropic.claude-3-haiku-20240307-v1:0": 0.00125, + "anthropic.claude-3-5-haiku-20241022-v1:0": 0.004, + "anthropic.claude-3-opus-20240229-v1:0": 0.075, + "meta.llama3-70b-instruct-v1:0": 0.0035, + "meta.llama3-8b-instruct-v1:0": 0.0006, + "meta.llama2-13b-chat-v1": 0.00100, + "meta.llama2-70b-chat-v1": 0.00256, + "meta.llama3-3-70b-instruct-v1:0": 0.00072, + "amazon.nova-pro-v1:0": 0.0032, + "amazon.nova-lite-v1:0": 0.00024, + "amazon.nova-micro-v1:0": 0.00014, +} + +MODEL_COST_PER_1K_INPUT_CACHE_WRITE_TOKENS = { + "anthropic.claude-instant-v1": 0.0, + "anthropic.claude-v2": 0.0, + "anthropic.claude-v2:1": 0.0, + "anthropic.claude-3-sonnet-20240229-v1:0": 0.0, + "anthropic.claude-3-5-sonnet-20240620-v1:0": 0.00375, + "anthropic.claude-3-5-sonnet-20241022-v2:0": 0.00375, + "anthropic.claude-3-haiku-20240307-v1:0": 0.0, + "anthropic.claude-3-5-haiku-20241022-v1:0": 0.001, + "anthropic.claude-3-opus-20240229-v1:0": 0.0, + "meta.llama3-70b-instruct-v1:0": 0.0, + "meta.llama3-8b-instruct-v1:0": 0.0, + "meta.llama2-13b-chat-v1": 0.0, + "meta.llama2-70b-chat-v1": 0.0, + "meta.llama3-3-70b-instruct-v1:0": 0.0, + "amazon.nova-pro-v1:0": 0.0, + "amazon.nova-lite-v1:0": 0.0, + "amazon.nova-micro-v1:0": 0.0, +} + +MODEL_COST_PER_1K_INPUT_CACHE_READ_TOKENS = { + "anthropic.claude-instant-v1": 0.0, + "anthropic.claude-v2": 0.0, + "anthropic.claude-v2:1": 0.0, + 
"anthropic.claude-3-sonnet-20240229-v1:0": 0.0, + "anthropic.claude-3-5-sonnet-20240620-v1:0": 0.0003, + "anthropic.claude-3-5-sonnet-20241022-v2:0": 0.0003, + "anthropic.claude-3-haiku-20240307-v1:0": 0.0, + "anthropic.claude-3-5-haiku-20241022-v1:0": 0.00008, + "anthropic.claude-3-opus-20240229-v1:0": 0.0, + "meta.llama3-70b-instruct-v1:0": 0.0, + "meta.llama3-8b-instruct-v1:0": 0.0, + "meta.llama2-13b-chat-v1": 0.0, + "meta.llama2-70b-chat-v1": 0.0, + "meta.llama3-3-70b-instruct-v1:0": 0.0, + "amazon.nova-pro-v1:0": 0.0002, + "amazon.nova-lite-v1:0": 0.000015, + "amazon.nova-micro-v1:0": 0.00000875, +} + + +def _get_token_cost( + prompt_tokens: int, + prompt_tokens_cache_write: int, + prompt_tokens_cache_read: int, + completion_tokens: int, + model_id: Union[str, None], +) -> float: + if model_id not in MODEL_COST_PER_1K_INPUT_TOKENS: + # The model ID can be a cross-region (system-defined) inference profile ID, + # which has a prefix indicating the region (e.g., 'us', 'eu') but + # shares the same token costs as the "base model". + # By extracting the "base model ID", by taking the last two segments + # of the model ID, we can map cross-region inference profile IDs to + # their corresponding cost entries. + base_model_id = model_id.split(".")[-2] + "." + model_id.split(".")[-1] + else: + base_model_id = model_id + """Get the cost of tokens for the model.""" + if base_model_id not in MODEL_COST_PER_1K_INPUT_TOKENS: + raise ValueError( + f"Unknown model: {model_id}. Please provide a valid model name." + "Known models are: " + ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys()) + ) + return ( + ((prompt_tokens - prompt_tokens_cache_read) / 1000) + * MODEL_COST_PER_1K_INPUT_TOKENS[base_model_id] + + (prompt_tokens_cache_write / 1000) + * MODEL_COST_PER_1K_INPUT_CACHE_WRITE_TOKENS[base_model_id] + + (prompt_tokens_cache_read / 1000) + * MODEL_COST_PER_1K_INPUT_CACHE_READ_TOKENS[base_model_id] + + (completion_tokens / 1000) * MODEL_COST_PER_1K_OUTPUT_TOKENS[base_model_id] + ) + + +class BedrockTokenUsageCallbackHandler(BaseCallbackHandler): + """Callback Handler that tracks bedrock anthropic info.""" + + total_tokens: int = 0 + prompt_tokens: int = 0 + prompt_tokens_cache_write: int = 0 + prompt_tokens_cache_read: int = 0 + completion_tokens: int = 0 + successful_requests: int = 0 + total_cost: float = 0.0 + + def __init__(self) -> None: + super().__init__() + self._lock = threading.Lock() + + def __repr__(self) -> str: + return ( + f"Tokens Used: {self.total_tokens}\n" + f"\tPrompt Tokens: {self.prompt_tokens}\n" + f"\t\tCache Write: {self.prompt_tokens_cache_write}\n" + f"\t\tCache Read: {self.prompt_tokens_cache_read}\n" + f"\tCompletion Tokens: {self.completion_tokens}\n" + f"Successful Requests: {self.successful_requests}\n" + f"Total Cost (USD): ${self.total_cost}" + ) + + @property + def always_verbose(self) -> bool: + """Whether to call verbose callbacks even if verbose is False.""" + return True + + def on_llm_start( + self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any + ) -> None: + """Print out the prompts.""" + pass + + def on_llm_new_token(self, token: str, **kwargs: Any) -> None: + """Print out the token.""" + pass + + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Collect token usage.""" + if response.llm_output is None: + return None + + if "usage" not in response.llm_output: + with self._lock: + self.successful_requests += 1 + return None + + # compute tokens and cost for this request + token_usage = response.llm_output["usage"] + 
prompt_tokens = token_usage.get("prompt_tokens", 0)
+        prompt_tokens_cache_write = token_usage.get("prompt_tokens_cache_write", 0)
+        prompt_tokens_cache_read = token_usage.get("prompt_tokens_cache_read", 0)
+        completion_tokens = token_usage.get("completion_tokens", 0)
+        total_tokens = token_usage.get("total_tokens", 0)
+        model_id = response.llm_output.get("model_id", None)
+        total_cost = _get_token_cost(
+            prompt_tokens=prompt_tokens,
+            prompt_tokens_cache_write=prompt_tokens_cache_write,
+            prompt_tokens_cache_read=prompt_tokens_cache_read,
+            completion_tokens=completion_tokens,
+            model_id=model_id,
+        )
+
+        # update shared state behind lock
+        with self._lock:
+            self.total_cost += total_cost
+            self.total_tokens += total_tokens
+            self.prompt_tokens += prompt_tokens
+            self.prompt_tokens_cache_write += prompt_tokens_cache_write
+            self.prompt_tokens_cache_read += prompt_tokens_cache_read
+            self.completion_tokens += completion_tokens
+            self.successful_requests += 1
+
+    def __copy__(self) -> "BedrockTokenUsageCallbackHandler":
+        """Return a copy of the callback handler."""
+        return self
+
+    def __deepcopy__(self, memo: Any) -> "BedrockTokenUsageCallbackHandler":
+        """Return a deep copy of the callback handler."""
+        return self
diff --git a/libs/aws/langchain_aws/callbacks/manager.py b/libs/aws/langchain_aws/callbacks/manager.py
new file mode 100644
index 00000000..962f6d82
--- /dev/null
+++ b/libs/aws/langchain_aws/callbacks/manager.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import logging
+from contextlib import contextmanager
+from contextvars import ContextVar
+from typing import (
+    Generator,
+    Optional,
+)
+
+from langchain_core.tracers.context import register_configure_hook
+
+from langchain_aws.callbacks.bedrock_callback import (
+    BedrockTokenUsageCallbackHandler,
+)
+
+logger = logging.getLogger(__name__)
+
+
+bedrock_callback_var: (ContextVar)[
+    Optional[BedrockTokenUsageCallbackHandler]
+] = ContextVar("bedrock_anthropic_callback", default=None)
+
+register_configure_hook(bedrock_callback_var, True)
+
+
+@contextmanager
+def get_bedrock_callback() -> (
+    Generator[BedrockTokenUsageCallbackHandler, None, None]
+):
+    """Get the Bedrock callback handler in a context manager,
+    which conveniently exposes token and cost information.
+
+    Returns:
+        BedrockTokenUsageCallbackHandler:
+            The Bedrock callback handler.
+
+    Example:
+        >>> with get_bedrock_callback() as cb:
+        ... 
# Use the Bedrock callback handler + """ + cb = BedrockTokenUsageCallbackHandler() + bedrock_callback_var.set(cb) + yield cb + bedrock_callback_var.set(None) \ No newline at end of file diff --git a/libs/aws/langchain_aws/chat_models/bedrock.py b/libs/aws/langchain_aws/chat_models/bedrock.py index 11cb01fb..af8b6e28 100644 --- a/libs/aws/langchain_aws/chat_models/bedrock.py +++ b/libs/aws/langchain_aws/chat_models/bedrock.py @@ -30,7 +30,7 @@ HumanMessage, SystemMessage, ) -from langchain_core.messages.ai import UsageMetadata +from langchain_core.messages.ai import InputTokenDetails, UsageMetadata from langchain_core.messages.tool import ToolCall, ToolMessage from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough @@ -578,11 +578,17 @@ def _generate( # usage metadata if usage := llm_output.get("usage"): input_tokens = usage.get("prompt_tokens", 0) + cache_creation = usage.get("prompt_tokens_cache_write", 0) + cache_read = usage.get("prompt_tokens_cache_read", 0) output_tokens = usage.get("completion_tokens", 0) usage_metadata = UsageMetadata( input_tokens=input_tokens, output_tokens=output_tokens, total_tokens=usage.get("total_tokens", input_tokens + output_tokens), + input_token_details=InputTokenDetails( + cache_creation=cache_creation, + cache_read=cache_read, + ), ) else: usage_metadata = None diff --git a/libs/aws/langchain_aws/chat_models/bedrock_converse.py b/libs/aws/langchain_aws/chat_models/bedrock_converse.py index 4748d8b6..8ef7eec6 100644 --- a/libs/aws/langchain_aws/chat_models/bedrock_converse.py +++ b/libs/aws/langchain_aws/chat_models/bedrock_converse.py @@ -34,7 +34,7 @@ ToolMessage, merge_message_runs, ) -from langchain_core.messages.ai import AIMessageChunk, UsageMetadata +from langchain_core.messages.ai import AIMessageChunk, InputTokenDetails, UsageMetadata from langchain_core.messages.tool import tool_call as create_tool_call from langchain_core.messages.tool import tool_call_chunk from langchain_core.output_parsers import JsonOutputKeyToolsParser, PydanticToolsParser @@ -383,9 +383,9 @@ class Joke(BaseModel): additionalModelResponseFieldPaths. """ - supports_tool_choice_values: Optional[ - Sequence[Literal["auto", "any", "tool"]] - ] = None + supports_tool_choice_values: Optional[Sequence[Literal["auto", "any", "tool"]]] = ( + None + ) """Which types of tool_choice values the model supports. Inferred if not specified. 
Inferred as ('auto', 'any', 'tool') if a 'claude-3' @@ -502,7 +502,25 @@ def _generate( messages=bedrock_messages, system=system, **params ) response_message = _parse_response(response) - return ChatResult(generations=[ChatGeneration(message=response_message)]) + + usage_metadata: UsageMetadata = ( + response_message.usage_metadata or UsageMetadata() + ) + input_token_details = usage_metadata.input_token_details or InputTokenDetails() + llm_output = { + "usage": { + "prompt_tokens": usage_metadata.input_tokens, + "prompt_tokens_cache_write": input_token_details.cache_creation, + "prompt_tokens_cache_read": input_token_details.cache_read, + "completion_tokens": usage_metadata.output_tokens, + "total_tokens": usage_metadata.total_tokens, + }, + "model_id": self.model_id, + } + return ChatResult( + generations=[ChatGeneration(message=response_message)], + llm_output=llm_output, + ) def _stream( self, diff --git a/libs/aws/langchain_aws/llms/bedrock.py b/libs/aws/langchain_aws/llms/bedrock.py index befb4c51..ff6cd32f 100644 --- a/libs/aws/langchain_aws/llms/bedrock.py +++ b/libs/aws/langchain_aws/llms/bedrock.py @@ -349,6 +349,9 @@ def prepare_output(cls, provider: str, response: Any) -> dict: headers = response.get("ResponseMetadata", {}).get("HTTPHeaders", {}) prompt_tokens = int(headers.get("x-amzn-bedrock-input-token-count", 0)) + # TODO what are the headers for cache write and read, if any? + prompt_tokens_cache_write = int(headers.get("x-amzn-bedrock-XXX", 0)) + prompt_tokens_cache_read = int(headers.get("x-amzn-bedrock-XXX", 0)) completion_tokens = int(headers.get("x-amzn-bedrock-output-token-count", 0)) return { "text": text, @@ -356,6 +359,8 @@ def prepare_output(cls, provider: str, response: Any) -> dict: "body": response_body, "usage": { "prompt_tokens": prompt_tokens, + "prompt_tokens_cache_write": prompt_tokens_cache_write, + "prompt_tokens_cache_read": prompt_tokens_cache_read, "completion_tokens": completion_tokens, "total_tokens": prompt_tokens + completion_tokens, }, From 0fb6b78d463d2b7aca94667f22828e27506284b5 Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 21:27:20 -0500 Subject: [PATCH 2/6] use dict functions for usage data --- .../chat_models/bedrock_converse.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/libs/aws/langchain_aws/chat_models/bedrock_converse.py b/libs/aws/langchain_aws/chat_models/bedrock_converse.py index 8ef7eec6..422ee529 100644 --- a/libs/aws/langchain_aws/chat_models/bedrock_converse.py +++ b/libs/aws/langchain_aws/chat_models/bedrock_converse.py @@ -506,14 +506,18 @@ def _generate( usage_metadata: UsageMetadata = ( response_message.usage_metadata or UsageMetadata() ) - input_token_details = usage_metadata.input_token_details or InputTokenDetails() + input_token_details: InputTokenDetails = ( + usage_metadata.get("input_token_details") or InputTokenDetails() + ) llm_output = { "usage": { - "prompt_tokens": usage_metadata.input_tokens, - "prompt_tokens_cache_write": input_token_details.cache_creation, - "prompt_tokens_cache_read": input_token_details.cache_read, - "completion_tokens": usage_metadata.output_tokens, - "total_tokens": usage_metadata.total_tokens, + "prompt_tokens": usage_metadata.get("input_tokens", 0), + "prompt_tokens_cache_write": input_token_details.get( + "cache_creation", 0 + ), + "prompt_tokens_cache_read": input_token_details.get("cache_read", 0), + "completion_tokens": usage_metadata.get("output_tokens", 0), + "total_tokens": usage_metadata.get("total_tokens", 0), }, 
"model_id": self.model_id, } From d46c4d41070be87dbc1bc86ae1aaf8ea016ead8d Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 21:32:23 -0500 Subject: [PATCH 3/6] round the total cost to 7 digits --- libs/aws/langchain_aws/callbacks/bedrock_callback.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/aws/langchain_aws/callbacks/bedrock_callback.py b/libs/aws/langchain_aws/callbacks/bedrock_callback.py index 329059b8..518e6774 100644 --- a/libs/aws/langchain_aws/callbacks/bedrock_callback.py +++ b/libs/aws/langchain_aws/callbacks/bedrock_callback.py @@ -109,14 +109,15 @@ def _get_token_cost( f"Unknown model: {model_id}. Please provide a valid model name." "Known models are: " + ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys()) ) - return ( + return round( ((prompt_tokens - prompt_tokens_cache_read) / 1000) * MODEL_COST_PER_1K_INPUT_TOKENS[base_model_id] + (prompt_tokens_cache_write / 1000) * MODEL_COST_PER_1K_INPUT_CACHE_WRITE_TOKENS[base_model_id] + (prompt_tokens_cache_read / 1000) * MODEL_COST_PER_1K_INPUT_CACHE_READ_TOKENS[base_model_id] - + (completion_tokens / 1000) * MODEL_COST_PER_1K_OUTPUT_TOKENS[base_model_id] + + (completion_tokens / 1000) * MODEL_COST_PER_1K_OUTPUT_TOKENS[base_model_id], + 7, ) From 34578f80b4422295a01153f1bd4dcd755cfd9ea2 Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 22:03:16 -0500 Subject: [PATCH 4/6] add _extract_usage_metadata --- .../chat_models/bedrock_converse.py | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/libs/aws/langchain_aws/chat_models/bedrock_converse.py b/libs/aws/langchain_aws/chat_models/bedrock_converse.py index 422ee529..f08ea176 100644 --- a/libs/aws/langchain_aws/chat_models/bedrock_converse.py +++ b/libs/aws/langchain_aws/chat_models/bedrock_converse.py @@ -764,13 +764,25 @@ def _extract_response_metadata(response: Dict[str, Any]) -> Dict[str, Any]: return response_metadata +def _extract_usage_metadata(response: Dict[str, Any]) -> UsageMetadata: + usage: Dict[str, int] = response.pop("usage") # type: ignore[misc] + return UsageMetadata( + input_tokens=usage.get("inputTokens", 0), + output_tokens=usage.get("outputTokens", 0), + total_tokens=usage.get("totalTokens", 0), + input_token_details=InputTokenDetails( + cache_creation=usage.get("cacheWriteInputTokensCount", 0), + cache_read=usage.get("cacheReadInputTokensCount", 0), + ), + ) + + def _parse_response(response: Dict[str, Any]) -> AIMessage: lc_content = _bedrock_to_lc(response.pop("output")["message"]["content"]) tool_calls = _extract_tool_calls(lc_content) - usage = UsageMetadata(_camel_to_snake_keys(response.pop("usage"))) # type: ignore[misc] return AIMessage( content=_str_if_single_text_block(lc_content), # type: ignore[arg-type] - usage_metadata=usage, + usage_metadata=_extract_usage_metadata(response), response_metadata=_extract_response_metadata(response), tool_calls=tool_calls, ) @@ -825,9 +837,11 @@ def _parse_stream_event(event: Dict[str, Any]) -> Optional[BaseMessageChunk]: # TODO: snake case response metadata? 
return AIMessageChunk(content=[], response_metadata=event["messageStop"]) elif "metadata" in event: - usage = UsageMetadata(_camel_to_snake_keys(event["metadata"].pop("usage"))) # type: ignore[misc] + usage_metadata = _extract_usage_metadata(event["metadata"]) return AIMessageChunk( - content=[], response_metadata=event["metadata"], usage_metadata=usage + content=[], + response_metadata=event["metadata"], + usage_metadata=usage_metadata, ) elif "Exception" in list(event.keys())[0]: name, info = list(event.items())[0] From 535395263c1bb4debf7b3da62a6901f7abd3d1ef Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 22:22:58 -0500 Subject: [PATCH 5/6] Add additional meta llama models --- .../callbacks/bedrock_callback.py | 37 +++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/libs/aws/langchain_aws/callbacks/bedrock_callback.py b/libs/aws/langchain_aws/callbacks/bedrock_callback.py index 518e6774..2c5e2fb1 100644 --- a/libs/aws/langchain_aws/callbacks/bedrock_callback.py +++ b/libs/aws/langchain_aws/callbacks/bedrock_callback.py @@ -1,3 +1,4 @@ +import logging import threading from typing import Any, Dict, List, Union @@ -19,6 +20,12 @@ "meta.llama3-8b-instruct-v1:0": 0.0003, "meta.llama2-13b-chat-v1": 0.00075, "meta.llama2-70b-chat-v1": 0.00195, + "meta.llama3-1-70b-instruct-v1:0": 0.00072, + "meta.llama3-1-8b-instruct-v1:0": 0.00022, + "meta.llama3-2-11b-instruct-v1:0": 0.00016, + "meta.llama3-2-1b-instruct-v1:0": 0.0001, + "meta.llama3-2-3b-instruct-v1:0": 0.00015, + "meta.llama3-2-90b-instruct-v1:0": 0.00072, "meta.llama3-3-70b-instruct-v1:0": 0.00072, "amazon.nova-pro-v1:0": 0.0008, "amazon.nova-lite-v1:0": 0.00006, @@ -39,6 +46,12 @@ "meta.llama3-8b-instruct-v1:0": 0.0006, "meta.llama2-13b-chat-v1": 0.00100, "meta.llama2-70b-chat-v1": 0.00256, + "meta.llama3-1-70b-instruct-v1:0": 0.00072, + "meta.llama3-1-8b-instruct-v1:0": 0.00022, + "meta.llama3-2-11b-instruct-v1:0": 0.00016, + "meta.llama3-2-1b-instruct-v1:0": 0.0001, + "meta.llama3-2-3b-instruct-v1:0": 0.00015, + "meta.llama3-2-90b-instruct-v1:0": 0.00072, "meta.llama3-3-70b-instruct-v1:0": 0.00072, "amazon.nova-pro-v1:0": 0.0032, "amazon.nova-lite-v1:0": 0.00024, @@ -59,6 +72,12 @@ "meta.llama3-8b-instruct-v1:0": 0.0, "meta.llama2-13b-chat-v1": 0.0, "meta.llama2-70b-chat-v1": 0.0, + "meta.llama3-1-70b-instruct-v1:0": 0.0, + "meta.llama3-1-8b-instruct-v1:0": 0.0, + "meta.llama3-2-11b-instruct-v1:0": 0.0, + "meta.llama3-2-1b-instruct-v1:0": 0.0, + "meta.llama3-2-3b-instruct-v1:0": 0.0, + "meta.llama3-2-90b-instruct-v1:0": 0.0, "meta.llama3-3-70b-instruct-v1:0": 0.0, "amazon.nova-pro-v1:0": 0.0, "amazon.nova-lite-v1:0": 0.0, @@ -79,12 +98,20 @@ "meta.llama3-8b-instruct-v1:0": 0.0, "meta.llama2-13b-chat-v1": 0.0, "meta.llama2-70b-chat-v1": 0.0, + "meta.llama3-1-70b-instruct-v1:0": 0.0, + "meta.llama3-1-8b-instruct-v1:0": 0.0, + "meta.llama3-2-11b-instruct-v1:0": 0.0, + "meta.llama3-2-1b-instruct-v1:0": 0.0, + "meta.llama3-2-3b-instruct-v1:0": 0.0, + "meta.llama3-2-90b-instruct-v1:0": 0.0, "meta.llama3-3-70b-instruct-v1:0": 0.0, "amazon.nova-pro-v1:0": 0.0002, "amazon.nova-lite-v1:0": 0.000015, "amazon.nova-micro-v1:0": 0.00000875, } +logger = logging.getLogger(__name__) + def _get_token_cost( prompt_tokens: int, @@ -105,10 +132,14 @@ def _get_token_cost( base_model_id = model_id """Get the cost of tokens for the model.""" if base_model_id not in MODEL_COST_PER_1K_INPUT_TOKENS: - raise ValueError( - f"Unknown model: {model_id}. Please provide a valid model name." 
- "Known models are: " + ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys()) + logger.error( + "Failed to calculate token cost. " + "Unknown model: %s. Please provide a valid model name. " + "Known models are: %s", + model_id, + ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys()), ) + return 0.0 return round( ((prompt_tokens - prompt_tokens_cache_read) / 1000) * MODEL_COST_PER_1K_INPUT_TOKENS[base_model_id] From 6c59e236984987b3dd47344ea8c7acb6146629ea Mon Sep 17 00:00:00 2001 From: Wolfgang Colsman Date: Sun, 12 Jan 2025 22:28:12 -0500 Subject: [PATCH 6/6] raise ValueError --- .../langchain_aws/callbacks/bedrock_callback.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/libs/aws/langchain_aws/callbacks/bedrock_callback.py b/libs/aws/langchain_aws/callbacks/bedrock_callback.py index 2c5e2fb1..cf5e7dd2 100644 --- a/libs/aws/langchain_aws/callbacks/bedrock_callback.py +++ b/libs/aws/langchain_aws/callbacks/bedrock_callback.py @@ -1,4 +1,3 @@ -import logging import threading from typing import Any, Dict, List, Union @@ -110,8 +109,6 @@ "amazon.nova-micro-v1:0": 0.00000875, } -logger = logging.getLogger(__name__) - def _get_token_cost( prompt_tokens: int, @@ -132,14 +129,11 @@ def _get_token_cost( base_model_id = model_id """Get the cost of tokens for the model.""" if base_model_id not in MODEL_COST_PER_1K_INPUT_TOKENS: - logger.error( - "Failed to calculate token cost. " - "Unknown model: %s. Please provide a valid model name. " - "Known models are: %s", - model_id, - ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys()), + raise ValueError( + f"Failed to calculate token cost. Unknown model: {model_id}. " + "Please provide a valid model name. " + f"Known models are: {",".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys())}" ) - return 0.0 return round( ((prompt_tokens - prompt_tokens_cache_read) / 1000) * MODEL_COST_PER_1K_INPUT_TOKENS[base_model_id]