Add message to_dict
dirkbrnd committed Feb 11, 2025
1 parent 7814d82 commit 5ae4eb0
Showing 7 changed files with 27 additions and 11 deletions.
2 changes: 1 addition & 1 deletion libs/agno/agno/models/fireworks/fireworks.py
@@ -40,7 +40,7 @@ def invoke_stream(self, messages: List[Message]) -> Iterator[ChatCompletionChunk
"""
yield from self.get_client().chat.completions.create(
model=self.id,
- messages=[m.to_dict() for m in messages], # type: ignore
+ messages=[m.serialize_for_models() for m in messages], # type: ignore
stream=True,
**self.request_kwargs,
) # type: ignore
4 changes: 2 additions & 2 deletions libs/agno/agno/models/google/gemini.py
@@ -556,7 +556,7 @@ def create_assistant_message(self, response: GenerateContentResponse, metrics: M

if message_data.response_parts is not None:
for part in message_data.response_parts:
- part_dict = type(part).to_dict(part)
+ part_dict = type(part).serialize_for_models(part)

# Extract text if present
if "text" in part_dict:
@@ -760,7 +760,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:

if message_data.response_parts is not None:
for part in message_data.response_parts:
- part_dict = type(part).to_dict(part)
+ part_dict = type(part).serialize_for_models(part)

# -*- Yield text if present
if "text" in part_dict:
2 changes: 1 addition & 1 deletion libs/agno/agno/models/groq/groq.py
@@ -257,7 +257,7 @@ def format_message(self, message: Message) -> Dict[str, Any]:
# if message.audio is not None:
# message = self.add_audio_to_message(message=message, audio=message.audio)

- return message.to_dict()
+ return message.serialize_for_models()

def invoke(self, messages: List[Message]) -> ChatCompletion:
"""
8 changes: 4 additions & 4 deletions libs/agno/agno/models/huggingface/huggingface.py
@@ -245,7 +245,7 @@ def invoke(self, messages: List[Message]) -> Union[ChatCompletionOutput]:
"""
return self.get_client().chat.completions.create(
model=self.id,
- messages=[m.to_dict() for m in messages],
+ messages=[m.serialize_for_models() for m in messages],
**self.request_kwargs,
)

@@ -261,7 +261,7 @@ async def ainvoke(self, messages: List[Message]) -> Union[ChatCompletionOutput]:
"""
return await self.get_async_client().chat.completions.create(
model=self.id,
- messages=[m.to_dict() for m in messages],
+ messages=[m.serialize_for_models() for m in messages],
**self.request_kwargs,
)

@@ -277,7 +277,7 @@ def invoke_stream(self, messages: List[Message]) -> Iterator[ChatCompletionStrea
"""
yield from self.get_client().chat.completions.create(
model=self.id,
- messages=[m.to_dict() for m in messages], # type: ignore
+ messages=[m.serialize_for_models() for m in messages], # type: ignore
stream=True,
stream_options={"include_usage": True},
**self.request_kwargs,
@@ -295,7 +295,7 @@ async def ainvoke_stream(self, messages: List[Message]) -> Any:
"""
async_stream = await self.get_async_client().chat.completions.create(
model=self.id,
- messages=[m.to_dict() for m in messages],
+ messages=[m.serialize_for_models() for m in messages],
stream=True,
stream_options={"include_usage": True},
**self.request_kwargs,
16 changes: 16 additions & 0 deletions libs/agno/agno/models/message.py
@@ -78,6 +78,22 @@ def get_content_string(self) -> str:
return ""

def to_dict(self) -> Dict[str, Any]:
"""Returns the message as a dictionary."""
message_dict = self.model_dump(exclude_none=True)

# Convert media objects to dictionaries
if self.images:
message_dict["images"] = [img.to_dict() for img in self.images]
if self.audio:
message_dict["audio"] = [aud.to_dict() for aud self.audio]
if self.videos:
message_dict["videos"] = [vid.to_dict() for vid in self.videos]
if self.audio_output:
message_dict["audio_output"] = self.audio_output.to_dict()

return message_dict

def serialize_for_models(self) -> Dict[str, Any]:
_dict = self.model_dump(
exclude_none=True,
include={"role", "content", "audio", "name", "tool_call_id", "tool_calls"},
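For context, a minimal sketch of how the two serialization paths on Message now differ (assuming Message is a Pydantic model with the fields shown above, and that attached media objects expose their own to_dict()):

```python
from agno.models.message import Message

msg = Message(role="user", content="Hello")

# New in this commit: a full dump intended for storage/logging. Any attached
# media (images, audio, videos, audio_output) would be converted to plain
# dicts via their own to_dict() before being returned.
full = msg.to_dict()

# Pre-existing: the narrower payload sent to model providers, restricted to
# the keys chat-completion APIs accept: role, content, audio, name,
# tool_call_id, tool_calls (None values are excluded).
payload = msg.serialize_for_models()

print(full)     # e.g. {'role': 'user', 'content': 'Hello', ...}
print(payload)  # e.g. {'role': 'user', 'content': 'Hello'}
```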
2 changes: 1 addition & 1 deletion libs/agno/agno/models/openai/chat.py
@@ -267,7 +267,7 @@ def format_message(self, message: Message) -> Dict[str, Any]:
if message.tool_calls is not None and len(message.tool_calls) == 0:
message.tool_calls = None

- return message.to_dict()
+ return message.serialize_for_models()

def invoke(self, messages: List[Message]) -> Union[ChatCompletion, ParsedChatCompletion]:
"""
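The call-site changes share one motive: chat-completion endpoints expect a fixed set of message keys, so the narrower serialize_for_models() payload is what goes over the wire, while the new to_dict() can carry media objects for internal use. A rough, hypothetical sketch of that flow using the openai SDK directly (not agno's actual client code; model id is illustrative):

```python
from agno.models.message import Message
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
messages = [
    Message(role="system", content="You are a helpful assistant."),
    Message(role="user", content="Say hello."),
]

response = client.chat.completions.create(
    model="gpt-4o-mini",
    # serialize_for_models() keeps only role/content/audio/name/
    # tool_call_id/tool_calls, i.e. the shape the API expects.
    messages=[m.serialize_for_models() for m in messages],
)
print(response.choices[0].message.content)
```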
4 changes: 2 additions & 2 deletions libs/agno/agno/models/vertexai/gemini.py
@@ -303,7 +303,7 @@ def create_assistant_message(self, response: GenerationResponse, metrics: Metric
# -*- Parse response
if message_data.response_parts is not None:
for part in message_data.response_parts:
- part_dict = type(part).to_dict(part)
+ part_dict = type(part).serialize_for_models(part)

# Extract text if present
if "text" in part_dict:
@@ -521,7 +521,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:

if message_data.response_parts is not None:
for part in message_data.response_parts:
- part_dict = type(part).to_dict(part)
+ part_dict = type(part).serialize_for_models(part)

# -*- Yield text if present
if "text" in part_dict:
