Skip to content

spursy/algo-rust-bert-demo

Repository files navigation

ALGO-RUST-BERT-DEMO

Docker operation

# Build the image and open an interactive shell inside the container.
docker build -t algo-rust-bert-demo:1 .
docker run -it algo-rust-bert-demo:1 /bin/sh

# Point tch-rs/rust-bert at the libtorch bundled with the Python torch package.
# DYLD_LIBRARY_PATH is for macOS, LD_LIBRARY_PATH for Linux;
# LIBTORCH_CXX11_ABI=0 matches the pre-cxx11 ABI of the pip wheels.
export LIBTORCH=$(python -c 'import torch; from pathlib import Path; print(Path(torch.__file__).parent)')
export DYLD_LIBRARY_PATH=${LIBTORCH}/lib
export LD_LIBRARY_PATH=${LIBTORCH}/lib:$LD_LIBRARY_PATH
export LIBTORCH_CXX11_ABI=0

Docker for AMD64 (x86_64) architecture

docker build -t algo-rust-bert-demo:1 -f  ./Dockerfile.amd .

Docker for ARM64 architecture

docker build -t algo-rust-bert-demo:1 -f  ./Dockerfile.arm .

Mac M2

# Create the micromamba python env for Apple Silicon.
# NOTE: `//` is not a shell comment — the original lines would error if
# pasted into a terminal, so `#` is used throughout.
micromamba env create -f environment.yml --platform osx-arm64

# Install torch (v2.1.0 — the original comment said v0.13.1, which did not
# match the pinned versions below).
pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0

# Point tch-rs/rust-bert at the libtorch shipped inside the torch wheel.
export LIBTORCH=$(python3 -c 'import torch; from pathlib import Path; print(Path(torch.__file__).parent)')
export DYLD_LIBRARY_PATH=${LIBTORCH}/lib
export LD_LIBRARY_PATH=${LIBTORCH}/lib:$LD_LIBRARY_PATH
export LIBTORCH_CXX11_ABI=0

# Verify the Python build's platform/architecture.
python -c "import sysconfig;print(sysconfig.get_platform())"
# Verify the machine architecture (expect arm64 on M-series Macs).
uname -m

# Verify the torch package imports and report its version.
python -c "import torch;print(torch.__version__);"

m2-cargo-build-issue

torch-rs

Local rust-bert model cache on macOS

Model cache location

ls $HOME/Library/Caches/.rustbert

Pushing large files to GitHub with Git LFS

 git lfs install
 
 git lfs track "./resources/all-MiniLM-L12-v2/rust_model.ot"

 // rollback large fs
 git filter-branch -f --index-filter 'git rm --cached --ignore-unmatch ./resources/all-MiniLM-L12-v2/rust_model.ot'
 
 git add .gitattributes
  
 git commit -m "extend lfs conf"
 
 git push origin master
# Install micromamba with Homebrew.
# NOTE: comments use `#` — the original `//` is not valid shell comment syntax.
brew install micromamba

# Create a new conda environment from the project's environment.yml.
micromamba env create -f environment.yml --platform osx-arm64

# Activate the new environment.
micromamba activate tch-rs-demo
import torch

# Diagnostic script: report torch/CUDA versions, GPU availability, and run a
# small tensor smoke test. Print labels are kept as-is (Chinese) so output
# matches the original script.

# Installed torch version.
print('--- torch版本 ---')
print(torch.__version__)

# CUDA version the wheel was built against (None on CPU-only builds, e.g. macOS).
print('--- cuda版本 ---')
print(torch.version.cuda)

# Whether a CUDA GPU is usable right now.
print('--- GPU是否可用 ---')
print(torch.cuda.is_available())

# Number of visible GPUs.
print('--- GPU数量 ---')
print(torch.cuda.device_count())

# GPU names; device indices start at 0.
print('--- GPU名称 ---')
for device_index in range(torch.cuda.device_count()):
    print(torch.cuda.get_device_name(device_index))

# Basic CPU tensor smoke test.
print('--- PyTorch代码测试 ---')
print(torch.rand(3, 3))

# Guard the GPU test: the original called .cuda() unconditionally, which
# raises on CPU-only machines (including the Apple Silicon hosts this README
# targets). Only attempt it when CUDA is actually available.
if torch.cuda.is_available():
    print('--- PyTorch代码测试(在GPU上测试PyTorch代码) ---')
    print(torch.rand(3, 3).cuda())

ONNX Runtime

# Download and unpack the prebuilt ONNX Runtime release for macOS arm64.
wget -O onnxruntime-arm.tgz https://github.com/microsoft/onnxruntime/releases/download/v1.18.1/onnxruntime-osx-arm64-1.18.1.tgz
tar -zxvf onnxruntime-arm.tgz
# Point the ort crate at the extracted dylib. Replace /path/to with the
# directory you extracted into (the original used "........" as a placeholder,
# which is not a valid path).
export ORT_DYLIB_PATH=/path/to/onnxruntime-osx-arm64-1.18.1/lib/libonnxruntime.dylib

Reference

mac m silicon config

amd os

onnx runtime reference

About

No description, website, or topics provided.

Resources

Stars

Watchers

Forks

Releases

No releases published

Packages

No packages published