Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
tpoisonooo committed Jan 4, 2024
1 parent 6d9610c commit 6f75840
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 13 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ service/__pycache__/
frontend/__pycache__/
pk/
badcase.txt
config.bak
21 changes: 8 additions & 13 deletions config.ini
Original file line number Diff line number Diff line change
@@ -1,36 +1,31 @@




[feature_store]
reject_throttle = 767.0
# text2vec model path, support local relative path and huggingface model format
model_path = "../models/text2vec-large-chinese"
model_path = "shibing624/text2vec-base-chinese"
work_dir = "workdir"

[web_search]
# check https://serper.dev/api-key to get a free API key
x_api_key = "aa3da0cd69c5a2df7c0b664dc8a4c118de532405"
x_api_key = "${YOUR-API-KEY}"
domain_partial_order = ["openai.com", "pytorch.org", "readthedocs.io", "nvidia.com", "stackoverflow.com", "juejin.cn", "zhuanlan.zhihu.com", "www.cnblogs.com"]
save_dir = "logs/web_search_result"

[llm]
# enable local/remote LLM or not
enable_local = 1
enable_remote = 0
# hybrid llm service address
# client_url = "http://10.140.24.142:39999/inference"
client_url = "http://10.140.24.142:39999/inference"
client_url = "http://127.0.0.1:8888/inference"

[llm.server]
# local LLM configuration
# support "internlm2-7B", "internlm2-20B" and "internlm2-70B"
local_llm_path = "/internlm/ampere_7b_v1_7_0"
local_llm_max_text_length = 16000

# remote LLM service configuration
# support any python3 openai interface, such as "gpt", "kimi" and so on
remote_type = "kimi"
remote_api_key = "Y2tpMG41dDB0YzExbjRqYW5nN2c6bXNrLTFzVlB2NGJRaDExeWdnNTlZY3dYMm5mcVRpWng="
remote_api_key = "${YOUR-API-KEY}"
# max text length for remote LLM. for example, use 128000 for kimi, 192000 for gpt
remote_llm_max_text_length = 128000
# openai model type. use "moonshot-v1-128k" for kimi, "gpt-4" for gpt
Expand All @@ -48,7 +43,7 @@ has_weekday = 1

[sg_search]
binary_src_path = "/usr/local/bin/src"
src_access_token = "sgp_636f79ad2075640f_3ef2a135579615403e29b88d4402f1e6183ad347"
src_access_token = "${YOUR-SRC-ACCESS-TOKEN}"

# add your repo here, we just take opencompass and lmdeploy as example
[sg_search.opencompass]
Expand All @@ -61,7 +56,7 @@ introduction = "lmdeploy 是一个用于压缩、部署和服务 LLM(Large Lan

[frontend]
# chat group type, support "lark" and "none"
# check https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add bot
# check https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add lark bot
type = "none"
# chat group webhook url, send reply to group
webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/7a5d3d98-fdfd-40f8-b8de-851cb7e81e5c"
webhook_url = "${YOUR-LARK-WEBHOOK-URL}"

0 comments on commit 6f75840

Please sign in to comment.