
Commit fd4246f

Initial commit: Ollama Coder with Maux integration

8 files changed: +334 additions, 0 deletions

.gitignore

+16
# Python
__pycache__/
*.py[cod]
*$py.class
venv/
.env

# Streamlit
.streamlit/

# IDE
.vscode/
.idea/

# System
.DS_Store

README.md

+72
![Maux Banner](static/banner.png)

# 🤖 Ollama Coder

A streamlined Streamlit interface for coding with local AI models through Ollama. Powered by [Maux](https://ai.maux.space).

> 💻 Your personal AI coding assistant running entirely on your machine
>
> 💡 **Want cloud-based AI coding?** Try our hosted version at [ai.maux.space](https://ai.maux.space)

## Description

Ollama Coder is an intuitive, open-source application that provides a modern chat interface for coding assistance using your local Ollama models. It features real-time streaming responses, automatic model detection, and session-based chat history - all running locally on your machine.

## Prerequisites

- Python 3.8+
- Ollama installed and running locally
- At least one model installed in Ollama (e.g., codellama, llama2)

## Installation

1. Clone this repository:
```bash
git clone https://github.com/xmannii/ollama-coder.git
cd ollama-coder
```
2. Install the required packages:
```bash
pip install -r requirements.txt
```
3. Make sure Ollama is running and you have models installed:
```bash
ollama run qwen2.5-coder
```

## Running the App

1. Start the Streamlit app:
```bash
streamlit run app.py
```
2. Open your browser and go to `http://localhost:8501`

## Features

- 🔍 Automatically detects your local Ollama models
- 💬 Modern chat interface with streaming responses
- 📝 Maintains chat history during the session
- 🎨 Clean and intuitive UI
- 🔄 Easy model switching
- 🗑️ Clear chat history option

## Usage

1. Select a model from the dropdown in the sidebar
2. Type your coding question in the chat input
3. The AI will respond with code examples and explanations
4. You can continue the conversation with follow-up questions
5. Use the "Clear Chat History" button to start a new conversation

## Note

Make sure Ollama is running locally at `http://localhost:11434` before starting the app.
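
If you want to verify this before launching, here is a minimal sketch of a reachability check. It uses the same `/api/tags` endpoint the app itself queries (see `utils/models.py`), and `requests` is already listed in `requirements.txt`:

```python
# Illustrative check that Ollama is reachable and has models installed.
import requests

try:
    resp = requests.get("http://localhost:11434/api/tags", timeout=5)
    resp.raise_for_status()
    names = [m["name"] for m in resp.json().get("models", [])]
    print("Ollama is up. Installed models:", names or "none - run `ollama pull <model>` first")
except requests.RequestException as exc:
    print("Ollama does not appear to be running:", exc)
```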

## About Maux

This is an open-source project powered by [Maux](https://ai.maux.space). For a hosted solution with additional features and no setup required, visit [ai.maux.space](https://ai.maux.space).

## License

MIT License - feel free to use this code for your own projects!

app.py

+112
import streamlit as st
import openai
from typing import List, Dict

from utils.models import get_local_models
from utils.prompts import LANGUAGE_PROMPTS
from components.sidebar import render_sidebar

# Configure OpenAI client for Ollama
client = openai.OpenAI(
    base_url='http://localhost:11434/v1',
    api_key='ollama'  # required but unused
)

def init_session_state():
    """Initialize session state variables"""
    if 'messages' not in st.session_state:
        st.session_state.messages = []
    if 'settings' not in st.session_state:
        st.session_state.settings = None

def display_chat_history():
    """Display chat messages"""
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

def get_system_prompt(settings: dict) -> str:
    """Generate system prompt based on settings"""
    language_prompt = LANGUAGE_PROMPTS.get(settings['language'], "")
    return f"""You are a helpful coding assistant. {language_prompt}
Provide clear, well-commented code examples.
Format code blocks with proper markdown syntax using triple backticks."""

def main():
    st.set_page_config(
        page_title="Maux Local AI Code Assistant",
        page_icon="🤖",
        layout="wide"
    )

    # Add Maux branding
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("🤖 Maux Local AI Code Assistant")
    with col2:
        st.markdown("""
            <div style='text-align: right; padding-top: 20px;'>
                <a href='https://ai.maux.space' target='_blank' style='color: #FF4B4B;'>
                    Try our hosted version ↗
                </a>
            </div>
        """, unsafe_allow_html=True)

    init_session_state()

    # Render sidebar and get settings
    models = get_local_models()
    settings = render_sidebar(models)
    st.session_state.settings = settings

    # Display chat history
    display_chat_history()

    # Chat input
    if prompt := st.chat_input("Ask your coding question..."):
        if not settings['model']:
            st.error("Please select a model first!")
            st.stop()

        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        # Get AI response
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            try:
                # Get context window messages
                context_messages = st.session_state.messages[-settings['context_window']:]

                stream = client.chat.completions.create(
                    model=settings['model'],
                    messages=[
                        {"role": "system", "content": get_system_prompt(settings)},
                        *[{"role": m["role"], "content": m["content"]} for m in context_messages]
                    ],
                    stream=True,
                    temperature=settings['temperature'],
                    max_tokens=settings['max_tokens'],
                    top_p=settings['top_p']
                )

                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        full_response += chunk.choices[0].delta.content
                        message_placeholder.markdown(full_response + "▌")

                message_placeholder.markdown(full_response)

            except Exception as e:
                st.error(f"Error: {str(e)}")
                st.stop()

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})

if __name__ == "__main__":
    main()
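
For reference, the streaming pattern used in `main()` can be exercised outside Streamlit. This is a minimal sketch assuming Ollama is serving its OpenAI-compatible API at `localhost:11434` and that a model such as `qwen2.5-coder` is installed (swap in any name returned by `get_local_models()`):

```python
# Standalone sketch of the streaming call made in app.py above.
import openai

client = openai.OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")  # key required but unused

stream = client.chat.completions.create(
    model="qwen2.5-coder",  # assumption: replace with a model you actually have installed
    messages=[
        {"role": "system", "content": "You are a helpful coding assistant."},
        {"role": "user", "content": "Write a Python function that reverses a string."},
    ],
    stream=True,
    temperature=0.7,
    max_tokens=2000,
)

# Print tokens as they arrive, mirroring the placeholder updates in the app.
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
```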

components/sidebar.py

+80
import streamlit as st
from utils.prompts import LANGUAGE_PROMPTS

def render_sidebar(models: list):
    with st.sidebar:
        st.header("Settings")

        # Model Selection
        if not models:
            st.error("⚠️ No local models found. Please make sure Ollama is running and you have models installed.")
            st.stop()

        selected_model = st.selectbox(
            "Select Model",
            models,
            index=0 if models else None,
            key="model_selector"
        )

        # Optional Language Selection
        selected_language = st.selectbox(
            "Programming Language (Optional)",
            list(LANGUAGE_PROMPTS.keys()),
            index=0,  # Default to Any/General
            key="language_selector"
        )

        # Advanced Settings
        with st.expander("Advanced Settings"):
            temperature = st.slider(
                "Temperature",
                min_value=0.0,
                max_value=2.0,
                value=0.7,
                step=0.1,
                help="Higher values make output more creative, lower values more focused"
            )

            max_tokens = st.number_input(
                "Max Tokens",
                min_value=100,
                max_value=4000,
                value=2000,
                help="Maximum length of the response"
            )

            top_p = st.slider(
                "Top P",
                min_value=0.0,
                max_value=1.0,
                value=0.9,
                step=0.1,
                help="Nucleus sampling threshold"
            )

            context_window = st.number_input(
                "Context Window",
                min_value=1,
                max_value=10,
                value=4,
                help="Number of previous messages to include as context"
            )

        st.markdown("---")

        # Quick Actions
        st.markdown("### Quick Actions")
        if st.button("Clear Chat History"):
            st.session_state.messages = []
            st.rerun()

    return {
        "model": selected_model,
        "language": selected_language,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "context_window": context_window
    }

requirements.txt

+4
streamlit
openai
requests
protobuf

static/banner.png

832 KB

utils/models.py

+13
import requests
from typing import List

def get_local_models() -> List[str]:
    """Fetch available models from Ollama"""
    try:
        response = requests.get('http://localhost:11434/api/tags', timeout=5)
        if response.status_code == 200:
            models = response.json()
            return [model['name'] for model in models['models']]
        return []
    except (requests.RequestException, KeyError, ValueError):
        # Ollama not reachable, or the response did not have the expected shape
        return []
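
As a quick sanity check, a small guard like the sketch below could be appended to the bottom of `utils/models.py` and the file run directly; the expected response shape (a top-level `models` list of objects with a `name` field) is inferred from the parsing above:

```python
# Illustrative sketch: run `python utils/models.py` from the project root to list local models.
if __name__ == "__main__":
    available = get_local_models()
    if available:
        print("Local Ollama models:", ", ".join(available))
    else:
        print("No models found - is Ollama running on http://localhost:11434?")
```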

utils/prompts.py

+37
LANGUAGE_PROMPTS = {
    "Any/General": """You are a helpful coding assistant. Provide clear, well-commented code examples
    and explain your solutions.""",

    "Python": """You are a Python expert. Please provide clear, PEP 8 compliant code with type hints.
    Include docstrings and helpful comments. Focus on modern Python practices.""",

    "JavaScript": """You are a JavaScript/TypeScript expert. Focus on modern ES6+ syntax,
    provide clean code following best practices. Consider performance and security.""",

    "Rust": """You are a Rust expert. Provide memory-safe, concurrent code following Rust idioms.
    Include error handling and proper lifetime management.""",

    "Go": """You are a Go expert. Write idiomatic Go code following the official style guide.
    Focus on simplicity, readability, and proper error handling.""",

    "SQL": """You are a SQL expert. Provide optimized queries with proper indexing suggestions.
    Consider performance and explain query execution plans when relevant.""",

    "Java": """You are a Java expert. Write clean, object-oriented code following Java conventions.
    Focus on proper design patterns, exception handling, and thread safety.""",

    "C++": """You are a C++ expert. Write efficient, modern C++ code (C++17/20). Focus on RAII,
    smart pointers, and proper memory management. Consider performance optimization.""",

    "C#": """You are a C# expert. Write idiomatic C# code using modern features. Focus on LINQ,
    async/await patterns, and proper use of .NET Core libraries.""",

    "PHP": """You are a PHP expert. Write secure, modern PHP code following PSR standards.
    Focus on proper framework usage (Laravel/Symfony) and dependency management.""",

    "Ruby": """You are a Ruby expert. Write elegant, idiomatic Ruby code. Focus on Ruby on Rails
    best practices, meta-programming when appropriate, and test-driven development.""",

    "Swift": """You are a Swift expert. Write safe, performant iOS/macOS applications. Focus on
    Swift's type system, protocol-oriented programming, and memory management."""
}
