# ffmpeg is needed for audio/video decoding
sudo apt install ffmpeg

# Create the install directory and temporarily give the current admin user ownership
sudo mkdir -p {{homepath}}/{{user}}
sudo chown -R $(id -nu):$(id -ng) {{homepath}}/{{user}}
cd {{homepath}}/{{user}}
# Python virtual environment for the model
python3 -m venv py3
source py3/bin/activate

# Transformers pinned to the commit this recipe was written against, plus helpers
pip install git+https://github.com/huggingface/transformers@3a1ead0aabed473eafe527915eea8c197d424356
pip install accelerate
pip install "qwen-omni-utils[decord]"
pip install -U flash-attn --no-build-isolation
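# Optional sanity check (my addition, not part of the original steps): torch comes in
# as a dependency of accelerate/flash-attn, so confirm the key imports work and that a
# CUDA device is visible before downloading the model weights.
python - <<'EOF'
import torch, transformers
print("transformers:", transformers.__version__)
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
EOF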
# Auto-activate the venv in the (soon to be created) user's interactive shells.
# The quoted 'EOF' keeps $HOME and $PATH literal so they expand in that user's shell,
# not at write time in the admin's shell.
cat >> .bashrc <<'EOF'
export PATH=$HOME/py3/bin:$PATH
source $HOME/py3/bin/activate
EOF
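# Quick check (optional): the last two lines of .bashrc should show the literal
# $HOME paths rather than an already-expanded admin home directory.
tail -n 2 .bashrc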
# Script that downloads and loads Qwen2.5-Omni-7B in bfloat16 with FlashAttention-2
cat > loadmodel.py <<'EOF'
import torch
from transformers import Qwen2_5OmniModel

model = Qwen2_5OmniModel.from_pretrained(
    "Qwen/Qwen2.5-Omni-7B",
    device_map="auto",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
EOF
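# Optional extra (my addition, not in the original recipe): a text-only smoke test
# adapted from the Qwen2.5-Omni model card. Class, argument, and return-value names are
# assumptions based on that card and may differ at the pinned transformers commit, so
# treat this as a sketch. Run it the same way loadmodel.py is run below.
cat > testchat.py <<'EOF'
import torch
from transformers import Qwen2_5OmniModel, Qwen2_5OmniProcessor

MODEL_ID = "Qwen/Qwen2.5-Omni-7B"

model = Qwen2_5OmniModel.from_pretrained(
    MODEL_ID,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
processor = Qwen2_5OmniProcessor.from_pretrained(MODEL_ID)

# Per the model card, this system prompt is expected when audio output is enabled.
conversation = [
    {
        "role": "system",
        "content": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, "
        "capable of perceiving auditory and visual inputs, as well as generating text and speech.",
    },
    {"role": "user", "content": [{"type": "text", "text": "Introduce yourself in one sentence."}]},
]

text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
inputs = processor(text=text, return_tensors="pt", padding=True).to(model.device)

# Per the model card, generate() returns text token ids plus a speech waveform tensor.
text_ids, audio = model.generate(**inputs)

print(processor.batch_decode(text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
print("audio samples:", tuple(audio.shape))
EOF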
# Create the service user (assumes the {{user}} group already exists; create it with
# groupadd first if it does not) and hand the directory over to it
sudo useradd -m -g {{user}} -s /usr/bin/bash -d {{homepath}}/{{user}} {{user}}
echo "{{user}}:{{password}}" | sudo chpasswd
sudo chown -R {{user}}:{{user}} {{homepath}}/{{user}}

# Run the loader as that user; activate the venv explicitly because a non-interactive
# `su -c` shell does not read .bashrc
sudo su - {{user}} -c "source py3/bin/activate && python loadmodel.py"
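# Optional: in a second terminal, watch GPU memory while the weights load
# (assumes an NVIDIA GPU with the driver and nvidia-smi installed).
watch -n 1 nvidia-smi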