From d380db250c666805dd443d53987868b5ae4b8c7d Mon Sep 17 00:00:00 2001 From: Elvin-Ma Date: Sun, 2 Jul 2023 15:15:20 +0800 Subject: [PATCH] add chatGLM --- .../flask_deploy/{READEME.md => README.md} | 0 14-model_learning/chatGLM/README.md | 26 +++++++++++++++++++ 2 files changed, 26 insertions(+) rename 13-model_deploy_guide/flask_deploy/{READEME.md => README.md} (100%) create mode 100644 14-model_learning/chatGLM/README.md diff --git a/13-model_deploy_guide/flask_deploy/READEME.md b/13-model_deploy_guide/flask_deploy/README.md similarity index 100% rename from 13-model_deploy_guide/flask_deploy/READEME.md rename to 13-model_deploy_guide/flask_deploy/README.md diff --git a/14-model_learning/chatGLM/README.md b/14-model_learning/chatGLM/README.md new file mode 100644 index 0000000..8cb3264 --- /dev/null +++ b/14-model_learning/chatGLM/README.md @@ -0,0 +1,26 @@ +# 聊天机器人 +```shell +pip install protobuf transformers==4.27.1 cpm_kernels +``` +## coding +```python +from transformers import AutoTokenizer, AutoModel + +tokenizer = AutoTokenizer.from_pretrained("./chatglm-6b-int4", trust_remote_code=True) +model = AutoModel.from_pretrained("./chatglm-6b-int4", trust_remote_code=True).half().cuda() +response, history = model.chat(tokenizer, "你好", history=[]) +print(response) + +# 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。 + +response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) +print(response) +``` + +## pretrained model +- [链接](https://pan.baidu.com/s/1VPRGReHfnnqe_ULKjquoaQ?pwd=oaae) +- 提取码:oaae + +# 参考资料 +[参考资料1](https://huggingface.co/THUDM/chatglm-6b-int4) +[参考资料2](https://github.com/wenda-LLM/wenda/tree/main) \ No newline at end of file