@@ -41,6 +41,7 @@ def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
4141 mock_ollama .Client .return_value .chat .return_value = MagicMock (
4242 message = MagicMock (content = "ollama chat response" ),
4343 )
44+ mock_ollama .Message .return_value = {"role" : "user" , "content" : "test" }
4445 model = "gpt"
4546 model_params = {"temperature" : 0.3 }
4647 with pytest .warns (DeprecationWarning ) as record :
@@ -59,11 +60,12 @@ def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
5960 res = llm .invoke (question )
6061 assert isinstance (res , LLMResponse )
6162 assert res .content == "ollama chat response"
62- messages = [
63- {"role" : "user" , "content" : question },
64- ]
6563 llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
66- model = model , messages = messages , options = {"temperature" : 0.3 }
64+ model = model ,
65+ messages = [
66+ {"role" : "user" , "content" : "test" }
67+ ],
68+ options = {"temperature" : 0.3 }
6769 )
6870
6971
@@ -90,6 +92,7 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
9092 mock_ollama .Client .return_value .chat .return_value = MagicMock (
9193 message = MagicMock (content = "ollama chat response" ),
9294 )
95+ mock_ollama .Message .return_value = {"role" : "user" , "content" : "test" }
9396 model = "gpt"
9497 options = {"temperature" : 0.3 }
9598 model_params = {"options" : options , "format" : "json" }
@@ -102,7 +105,7 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
102105 assert isinstance (res , LLMResponse )
103106 assert res .content == "ollama chat response"
104107 messages = [
105- {"role" : "user" , "content" : question },
108+ {"role" : "user" , "content" : "test" },
106109 ]
107110 llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
108111 model = model ,
@@ -112,102 +115,6 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
112115 )
113116
114117
115- @patch ("builtins.__import__" )
116- def test_ollama_invoke_with_system_instruction_happy_path (mock_import : Mock ) -> None :
117- mock_ollama = get_mock_ollama ()
118- mock_import .return_value = mock_ollama
119- mock_ollama .Client .return_value .chat .return_value = MagicMock (
120- message = MagicMock (content = "ollama chat response" ),
121- )
122- model = "gpt"
123- options = {"temperature" : 0.3 }
124- model_params = {"options" : options , "format" : "json" }
125- llm = OllamaLLM (
126- model ,
127- model_params = model_params ,
128- )
129- system_instruction = "You are a helpful assistant."
130- question = "What about next season?"
131-
132- response = llm .invoke (question , system_instruction = system_instruction )
133- assert response .content == "ollama chat response"
134- messages = [{"role" : "system" , "content" : system_instruction }]
135- messages .append ({"role" : "user" , "content" : question })
136- llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
137- model = model ,
138- messages = messages ,
139- options = options ,
140- format = "json" ,
141- )
142-
143-
144- @patch ("builtins.__import__" )
145- def test_ollama_invoke_with_message_history_happy_path (mock_import : Mock ) -> None :
146- mock_ollama = get_mock_ollama ()
147- mock_import .return_value = mock_ollama
148- mock_ollama .Client .return_value .chat .return_value = MagicMock (
149- message = MagicMock (content = "ollama chat response" ),
150- )
151- model = "gpt"
152- options = {"temperature" : 0.3 }
153- model_params = {"options" : options }
154- llm = OllamaLLM (
155- model ,
156- model_params = model_params ,
157- )
158- message_history = [
159- {"role" : "user" , "content" : "When does the sun come up in the summer?" },
160- {"role" : "assistant" , "content" : "Usually around 6am." },
161- ]
162- question = "What about next season?"
163-
164- response = llm .invoke (question , message_history ) # type: ignore
165- assert response .content == "ollama chat response"
166- messages = [m for m in message_history ]
167- messages .append ({"role" : "user" , "content" : question })
168- llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
169- model = model , messages = messages , options = options
170- )
171-
172-
173- @patch ("builtins.__import__" )
174- def test_ollama_invoke_with_message_history_and_system_instruction (
175- mock_import : Mock ,
176- ) -> None :
177- mock_ollama = get_mock_ollama ()
178- mock_import .return_value = mock_ollama
179- mock_ollama .Client .return_value .chat .return_value = MagicMock (
180- message = MagicMock (content = "ollama chat response" ),
181- )
182- model = "gpt"
183- options = {"temperature" : 0.3 }
184- model_params = {"options" : options }
185- system_instruction = "You are a helpful assistant."
186- llm = OllamaLLM (
187- model ,
188- model_params = model_params ,
189- )
190- message_history = [
191- {"role" : "user" , "content" : "When does the sun come up in the summer?" },
192- {"role" : "assistant" , "content" : "Usually around 6am." },
193- ]
194- question = "What about next season?"
195-
196- response = llm .invoke (
197- question ,
198- message_history , # type: ignore
199- system_instruction = system_instruction ,
200- )
201- assert response .content == "ollama chat response"
202- messages = [{"role" : "system" , "content" : system_instruction }]
203- messages .extend (message_history )
204- messages .append ({"role" : "user" , "content" : question })
205- llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
206- model = model , messages = messages , options = options
207- )
208- assert llm .client .chat .call_count == 1 # type: ignore
209-
210-
211118@patch ("builtins.__import__" )
212119def test_ollama_invoke_with_message_history_validation_error (mock_import : Mock ) -> None :
213120 mock_ollama = get_mock_ollama ()
@@ -228,9 +135,8 @@ def test_ollama_invoke_with_message_history_validation_error(mock_import: Mock)
228135 ]
229136 question = "What about next season?"
230137
231- with pytest .raises (LLMGenerationError ) as exc_info :
138+ with pytest .raises (LLMGenerationError , match = "Input validation failed" ) :
232139 llm .invoke (question , message_history ) # type: ignore
233- assert "Input should be 'user', 'assistant' or 'system" in str (exc_info .value )
234140
235141
236142@pytest .mark .asyncio