```bash
# Copy the public key into the Hugging Face SSH key settings
(base) root@I1dc83206c700201ce5:~/.ssh# ls
authorized_keys  id_ed25519  id_ed25519.pub
(base) root@I1dc83206c700201ce5:~/.ssh# vim id_ed25519.pub
```
```bash
# Runtime error:
# OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it
# in the cached files and it looks like BAAI/EVA-CLIP-8B is not the path to a directory
# containing a file named config.json.
(echosight) root@I1dc83206c700201ce5:/hy-tmp/EchoProject/hf-llm/Mistral-7B-Instruct-v0.2# cd /usr/local/miniconda3/envs/echosight/lib/python3.10/site-packages/huggingface_hub/
(echosight) root@I1dc83206c700201ce5:/usr/local/miniconda3/envs/echosight/lib/python3.10/site-packages/huggingface_hub# ls
(echosight) root@I1dc83206c700201ce5:/usr/local/miniconda3/envs/echosight/lib/python3.10/site-packages/huggingface_hub# vim constants.py
```

In constants.py, change the Hugging Face endpoint parameters to the mirror site:

```python
HUGGINGFACE_CO_URL_HOME = "https://hf-mirror.com/"
_HF_DEFAULT_ENDPOINT = "https://hf-mirror.com/"
```
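Editing constants.py works, but the change is lost whenever huggingface_hub is reinstalled or upgraded. A less invasive alternative is the minimal sketch below, assuming the installed huggingface_hub reads the HF_ENDPOINT environment variable at import time; the repo and file names are simply the ones from the error above.

```python
import os

# Redirect huggingface_hub to the mirror. This must run before the library is
# imported, because constants.py reads HF_ENDPOINT only once, at import time.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

from huggingface_hub import hf_hub_download

# The config file that previously failed now resolves through the mirror.
print(hf_hub_download(repo_id="BAAI/EVA-CLIP-8B", filename="config.json"))
```

Setting `export HF_ENDPOINT=https://hf-mirror.com` in the shell before launching the script has the same effect, without touching the installed package.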
Pulling models from the repository
First create an access token for the model on the Hugging Face website; after that, there are two ways to download the model to the server or to a local machine.
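Once the token exists, it can be registered from Python so that later downloads are authenticated; this is only an illustrative sketch, and `hf_xxxxxxxx` is a placeholder for the real token created on the website.

```python
from huggingface_hub import login

# Save the access token locally so subsequent downloads of gated or
# private repositories authenticate automatically.
login(token="hf_xxxxxxxx")  # placeholder: paste the token created on the website
```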
Method 1: download directly with huggingface_hub
```python
from huggingface_hub import snapshot_download

# snapshot_download(repo_id="decapoda-research/llama-7b-hf")
snapshot_download(repo_id="THUDM/chatglm3-6b")
```
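If the model is gated or should be placed in a specific directory, `snapshot_download` also accepts a token and a target path; the directory and token below are placeholders rather than values from the original setup.

```python
from huggingface_hub import snapshot_download

# Download the full repository snapshot into a chosen directory,
# passing the access token explicitly.
snapshot_download(
    repo_id="THUDM/chatglm3-6b",
    local_dir="/hy-tmp/models/chatglm3-6b",  # placeholder target directory
    token="hf_xxxxxxxx",                     # placeholder access token
)
```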
```bash
# Prompt commands available while using the model
/set            | set session variables
/show           | show model information
/load <model>   | load a session or model
/save <model>   | save your current session
/clear          | clear session context
/bye            | exit
/?, /help       | help for a command
```