Fix/stateless models response stream (#2037)
## Description

- **Summary of changes**: Describe the key changes in this PR and their
purpose.
- **Related issues**: Mention if this PR fixes or is connected to any
issues.
- **Motivation and context**: Explain the reason for the changes and the
problem they solve.
- **Environment or dependencies**: Specify any changes in dependencies
or environment configurations required for this update.
- **Impact on metrics**: (If applicable) Describe changes in any metrics
or performance benchmarks.

Fixes # (issue)

---

## Type of change

Please check the options that are relevant:

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to not work as expected)
- [ ] Model update (Addition or modification of models)
- [ ] Other (please describe):

---

## Checklist

- [ ] Adherence to standards: Code complies with Agno’s style guidelines
and best practices.
- [ ] Formatting and validation: You have run `./scripts/format.sh` and
`./scripts/validate.sh` to ensure code is formatted and linted.
- [ ] Self-review completed: A thorough review has been performed by the
contributor(s).
- [ ] Documentation: Docstrings and comments have been added or updated
for any complex logic.
- [ ] Examples and guides: Relevant cookbook examples have been included
or updated (if applicable).
- [ ] Tested in a clean environment: Changes have been tested in a clean
environment to confirm expected behavior.
- [ ] Tests (optional): Tests have been added or updated to cover any
new or changed functionality.

---

## Additional Notes

Include any deployment notes, performance implications, security
considerations, or other relevant information (e.g., screenshots or logs
if applicable).

---------

Co-authored-by: Dirk Brand <[email protected]>
ashpreetbedi and dirkbrnd authored Feb 11, 2025
1 parent 88025bb commit 3602749
Showing 9 changed files with 27 additions and 10 deletions.
2 changes: 1 addition & 1 deletion cookbook/agent_concepts/async/basic.py
@@ -10,4 +10,4 @@
     markdown=True,
 )
 # -*- Print a response to the cli
-asyncio.run(agent.aprint_response("Share a breakfast recipe.", stream=True))
+asyncio.run(agent.aprint_response("Share a breakfast recipe."))
13 changes: 13 additions & 0 deletions cookbook/agent_concepts/async/basic_stream.py
@@ -0,0 +1,13 @@
+import asyncio
+
+from agno.agent import Agent
+from agno.models.openai import OpenAIChat
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    description="You help people with their health and fitness goals.",
+    instructions=["Recipes should be under 5 ingredients"],
+    markdown=True,
+)
+# -*- Print a response to the cli
+asyncio.run(agent.aprint_response("Share a breakfast recipe.", stream=True))
4 changes: 1 addition & 3 deletions cookbook/agent_concepts/async/data_analyst.py
@@ -25,6 +25,4 @@
     - movies: contains information about movies from IMDB.
     """),
 )
-asyncio.run(
-    agent.aprint_response("What is the average rating of movies?", stream=False)
-)
+asyncio.run(agent.aprint_response("What is the average rating of movies?"))
2 changes: 2 additions & 0 deletions libs/agno/agno/models/aws/claude.py
@@ -1,3 +1,4 @@
+
 from dataclasses import dataclass
 from os import getenv
 from typing import Any, Dict, Optional
@@ -114,3 +115,4 @@ def request_kwargs(self) -> Dict[str, Any]:
 if self.request_params:
     _request_params.update(self.request_params)
 return _request_params
+
2 changes: 1 addition & 1 deletion libs/agno/agno/models/base.py
@@ -1086,4 +1086,4 @@ def __deepcopy__(self, memo):
 
 # Clear the new model to remove any references to the old model
 new_model.clear()
-return new_model
+return new_model
1 change: 1 addition & 0 deletions libs/agno/agno/models/cohere/chat.py
@@ -297,6 +297,7 @@ def _process_stream_response(
     assistant_message.tool_calls.append(tool_use)
     tool_use = {}
 
+
 elif (
     response.type == "message-end"
     and response.delta is not None
2 changes: 2 additions & 0 deletions libs/agno/agno/models/groq/groq.py
@@ -283,6 +283,7 @@ async def ainvoke_stream(self, messages: List[Message]) -> Any:
 Returns:
     Any: An asynchronous iterator of chat completion chunks.
 """
+
 try:
     stream = await self.get_async_client().chat.completions.create(
         model=self.id,
@@ -337,6 +338,7 @@ def parse_tool_calls(tool_calls_data: List[ChoiceDeltaToolCall]) -> List[Dict[st
 tool_call_entry["type"] = _tool_call_type
 return tool_calls
 
+
 def parse_provider_response(self, response: ChatCompletion) -> ModelResponse:
     """
     Parse the Groq response into a ModelResponse.
6 changes: 3 additions & 3 deletions libs/agno/agno/models/openai/chat.py
@@ -452,7 +452,7 @@ def parse_provider_response(self, response: Union[ChatCompletion, ParsedChatComp
 Parse the OpenAI response into a ModelResponse.
 Args:
-    response: Raw response from OpenAI
+    response: Response from invoke() method
 Returns:
     ModelResponse: Parsed response data
@@ -490,7 +490,7 @@ def parse_provider_response(self, response: Union[ChatCompletion, ParsedChatComp
 except Exception as e:
     logger.warning(f"Error processing tool calls: {e}")
 
-# -*- Add audio transcript to content if available
+# Add audio transcript to content if available
 response_audio: Optional[ChatCompletionAudio] = response_message.audio
 if response_audio and response_audio.transcript and not model_response.content:
     model_response.content = response_audio.transcript
@@ -517,7 +517,7 @@ def parse_provider_response(self, response: Union[ChatCompletion, ParsedChatComp
 
 def parse_provider_response_delta(self, response_delta: ChatCompletionChunk) -> ModelResponse:
     """
-    Parse the OpenAI streaming response into ModelProviderResponse objects.
+    Parse the OpenAI streaming response into a ModelResponse.
     Args:
         response_delta: Raw response chunk from OpenAI
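(For context: the docstring fixes above touch the two OpenAI parsing hooks, `parse_provider_response` for complete responses and `parse_provider_response_delta` for streamed chunks. A heavily simplified, hypothetical sketch of what the delta path maps, illustrative only and not the library's implementation; it assumes `ModelResponse` is default-constructible with a `content` field, consistent with the `response.py` diff below:)

```python
from agno.models.response import ModelResponse
from openai.types.chat import ChatCompletionChunk


def sketch_parse_delta(chunk: ChatCompletionChunk) -> ModelResponse:
    # Hypothetical helper, for illustration only: copy the streamed text
    # delta (if any) out of the chunk and into a ModelResponse.
    model_response = ModelResponse()
    if chunk.choices and chunk.choices[0].delta.content is not None:
        model_response.content = chunk.choices[0].delta.content
    return model_response
```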
5 changes: 3 additions & 2 deletions libs/agno/agno/models/response.py
@@ -7,7 +7,7 @@
 
 
 class ModelResponseEvent(str, Enum):
-    """Events that can be sent by the Model.response() method"""
+    """Events that can be sent by the model provider"""
 
     tool_call_started = "ToolCallStarted"
     tool_call_completed = "ToolCallCompleted"
@@ -16,7 +16,7 @@ class ModelResponseEvent(str, Enum):
 
 @dataclass
 class ModelResponse:
-    """Response returned by Model.response()"""
+    """Response from the model provider"""
 
     role: Optional[str] = None
 
@@ -35,6 +35,7 @@ class ModelResponse:
     extra: Dict[str, Any] = field(default_factory=dict)
 
 
+
 class FileType(str, Enum):
     MP4 = "mp4"
     GIF = "gif"
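(For reference, a minimal usage sketch of the objects whose docstrings change above, assuming only the fields and enum values visible in this diff; the `content` value and the `extra` key are illustrative:)

```python
from agno.models.response import ModelResponse, ModelResponseEvent

response = ModelResponse(role="assistant")
response.content = "Porridge with berries."  # text produced by the model provider
response.extra["provider"] = "openai"        # illustrative key; `extra` is a free-form dict

# The event members carry plain string values that can mark tool-call progress
# in a model's response stream.
assert ModelResponseEvent.tool_call_started.value == "ToolCallStarted"
assert ModelResponseEvent.tool_call_completed.value == "ToolCallCompleted"
```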
