Ollama
1 cd ~
2 curl -fsSL https://ollama.com/install.sh | sh
3 # >>> The Ollama API is now available at 127.0.0.1:11434.
4 curl localhost:11434
5 # Ollama is running
6 ollama run llama3.2:1b
7 ollama show llama3.2:1b
8 ollama list
9 ollama ps
10 ollama serve # start server
11 ollama stop llama3.2:1b
12 ollama rm llama3.2:1b
Set up proxy (edit the Ollama systemd service)
1 sudo nano /etc/systemd/system/ollama.service
1 [Unit]
2 Description=Ollama Service
3 After=network-online.target
4
5 [Service]
6 ExecStart=/usr/local/bin/ollama serve
7 User=ollama
8 Group=ollama
9 Restart=always
10 RestartSec=3
11 Environment="PATH=/home/vagrant/.dotnet/tools:/home/vagrant/dotnetcore9:/usr/local/bin:/usr/bin:/bin:/home/vagrant/jdk-17.0.7+7/bin/:/home/vagrant/gradle-8.1/bin/"
12 Environment="HTTPS_PROXY=http://192.168.0.123:3128/"
13 Environment="HTTP_PROXY=http://192.168.0.123:3128/"
14
15 [Install]
16 WantedBy=default.target
Then apply the changes: sudo systemctl daemon-reload && sudo systemctl restart ollama
Chatbots — LLM/AI assistants accessible via browser
Gemini prompt - https://gemini.google.com/
Copilot prompt - https://copilot.microsoft.com/
Grok prompt - https://grok.com/
ChatGPT prompt - https://chatgpt.com/
