#!/encs/bin/tcsh

# SLURM batch job: install (once) and launch an Ollama server on a GPU node,
# then print SSH port-forwarding instructions for connecting from a laptop.
# NOTE: directives must be exactly "#SBATCH" — a space after '#' makes
# sbatch silently ignore them.
#SBATCH --job-name=ollama-server
#SBATCH --mem=50G
#SBATCH --gpus=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mail-type=ALL
#SBATCH --output=ollama-%J.out
#SBATCH --time=03:00:00 ## Adjust based on your needs

# Per-user scratch directory holding the Ollama binary and model store.
set ODIR = /speed-scratch/$USER/ollama
mkdir -p $ODIR && cd $ODIR

# Download the Ollama tarball and extract it once (skipped when the
# binary is already present and executable).
if ( ! -x $ODIR/bin/ollama ) then
    echo "Downloading Ollama..."
    curl -LO https://ollama.com/download/ollama-linux-amd64.tgz
    tar -xzf ollama-linux-amd64.tgz
endif

# Add ollama to PATH and point it at the scratch-space models directory.
setenv PATH $ODIR/bin:$PATH
setenv OLLAMA_MODELS $ODIR/models
mkdir -p $OLLAMA_MODELS

# Pull a specific model (cached in $OLLAMA_MODELS across jobs).
ollama pull llama3.2

# Ask the OS for a free TCP port by binding to port 0, then release it.
set PORT = `python -c 'import socket,sys; s=socket.socket(); s.bind(("",0)); print(s.getsockname()[1]); s.close()'`
setenv OLLAMA_HOST 127.0.0.1:$PORT
# '>!' forces overwrite even when the tcsh 'noclobber' option is set.
echo "http://localhost:$PORT" >! ${ODIR}/.ollama_host

# Print connection instructions to the job's output file.
set NODE = `hostname -s`
set USER = `whoami`
echo ""
echo "===================================================="
echo "Ollama server will start on $NODE"
echo "===================================================="
echo "To connect from your laptop, open a new terminal and run:"
echo ""
echo "ssh -L ${PORT}:localhost:${PORT} ${USER}@speed.encs.concordia.ca -t ssh $NODE"
echo ""
echo "Once connected, set your environment variables:"
# '\$PATH' is escaped so the instruction tells the user to reference
# THEIR shell's PATH, instead of pasting this compute node's expanded PATH.
echo "setenv PATH ${ODIR}/bin:\$PATH"
echo "setenv OLLAMA_HOST http://localhost:${PORT}"
echo "setenv OLLAMA_MODELS ${ODIR}/models"
echo "===================================================="
echo ""

# Start the server; blocks until the job's time limit or scancel.
srun ollama serve