fix(llm_server_hybrid.py): local qwen streaming (#399)
* fix(llm_server_hybrid.py): qwen streaming output
tpoisonooo authored Oct 29, 2024
1 parent 07eee9d commit 67e6947
Showing 2 changed files with 13 additions and 21 deletions.
README.md: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ The Web version's API for Android also supports other devices. See [Python sampl
<td>

- [InternLM2/InternLM2.5](https://github.com/InternLM/InternLM)
- [Qwen/Qwen2](https://github.com/QwenLM/Qwen2)
- [Qwen1.5~2.5](https://github.com/QwenLM/Qwen2)
- [puyu](https://internlm.openxlab.org.cn/)
- [StepFun](https://platform.stepfun.com)
- [KIMI](https://kimi.moonshot.cn)
huixiangdou/service/llm_server_hybrid.py: 12 additions & 20 deletions
@@ -17,9 +17,10 @@
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse

from transformers import TextIteratorStreamer
import uvicorn
from typing import List, Tuple
from threading import Thread

def os_run(cmd: str):
    ret = os.popen(cmd)
@@ -57,6 +58,7 @@ def __init__(self, model_path: str):
        self.model_path = model_path
        self.tokenizer = AutoTokenizer.from_pretrained(model_path,
                                                       trust_remote_code=True)
        self.streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)

        model_path_lower = model_path.lower()

@@ -69,14 +71,6 @@ def __init__(self, model_path: str):
        elif 'qwen1.5' in model_path_lower:
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path, device_map='auto', trust_remote_code=True).eval()
        elif 'qwen' in model_path_lower:
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                device_map='auto',
                trust_remote_code=True,
                use_cache_quantization=True,
                use_cache_kernel=True,
                use_flash_attn=False).eval()
        elif 'internlm2_5' in model_path_lower:
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
@@ -112,17 +106,15 @@ async def chat_stream(self, prompt: str, history=[]):
                messages, tokenize=False, add_generation_prompt=True)
            model_inputs = self.tokenizer([text],
                                          return_tensors='pt').to('cuda')
            generated_ids = self.model.generate(model_inputs.input_ids,
                                                max_new_tokens=512,
                                                top_k=1)
            generated_ids = [
                output_ids[len(input_ids):] for input_ids, output_ids in zip(
                    model_inputs.input_ids, generated_ids)
            ]

            output_text = self.tokenizer.batch_decode(
                generated_ids, skip_special_tokens=True)[0]
            yield output_text

            generation_kwargs = dict(model_inputs, streamer=self.streamer, max_new_tokens=512)
            thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
            thread.start()

            for new_text in self.streamer:
                yield new_text

            thread.join()

        elif type(self.model).__name__ == 'InternLM2ForCausalLM':

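For reference, here is a minimal, self-contained sketch of the streaming pattern this commit switches to: `model.generate()` runs in a background `Thread` while `TextIteratorStreamer` yields decoded text chunks as they are produced. The checkpoint path and prompt below are placeholders for illustration, not part of the commit.

```python
# Sketch of the TextIteratorStreamer pattern used in chat_stream().
# Model path and prompt are placeholders.
from threading import Thread

from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          TextIteratorStreamer)

model_path = 'Qwen/Qwen2.5-7B-Instruct'  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path, device_map='auto', trust_remote_code=True).eval()
# skip_prompt drops the echoed input; skip_special_tokens drops e.g. <|im_end|>
streamer = TextIteratorStreamer(tokenizer,
                                skip_prompt=True,
                                skip_special_tokens=True)

messages = [{'role': 'user', 'content': 'Briefly introduce HuixiangDou.'}]
text = tokenizer.apply_chat_template(messages,
                                     tokenize=False,
                                     add_generation_prompt=True)
model_inputs = tokenizer([text], return_tensors='pt').to('cuda')

# generate() blocks, so run it in a worker thread and consume the streamer
# from the caller; each iteration yields a newly decoded text fragment.
generation_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=512)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

for new_text in streamer:
    print(new_text, end='', flush=True)

thread.join()
```

In the patched server the streamer is created once in `__init__` and reused by `chat_stream`; here it is created next to the generate call only to keep the sketch self-contained.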