34 changes: 12 additions & 22 deletions libs/oci/README.md
@@ -59,14 +59,16 @@ embeddings.embed_query("What is the meaning of life?")

### 1. Use a Chat Model

You may instantiate the OCI Data Science model with the generic `ChatOCIModelDeployment` or framework specific class like `ChatOCIModelDeploymentVLLM`.
The `ChatOCIModelDeployment` class is designed for deployment with OpenAI compatible APIs.

```python
from langchain_oci.chat_models import ChatOCIModelDeployment, ChatOCIModelDeploymentVLLM
from langchain_oci import ChatOCIModelDeployment

# Create an instance of OCI Model Deployment Endpoint

# Replace the endpoint uri with your own
endpoint = "https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict"
# For streaming, use the /predictWithResponseStream endpoint.
endpoint = "https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predictWithResponseStream"

messages = [
(
@@ -78,31 +80,23 @@ messages = [

chat = ChatOCIModelDeployment(
endpoint=endpoint,
streaming=True,
max_retries=1,
model_kwargs={
"temperature": 0.2,
"max_tokens": 512,
}, # other model params...
default_headers={
"route": "/v1/chat/completions",
# other request headers ...
},
model="odsc-llm",
max_tokens=512,
)
chat.invoke(messages)
chat.stream(messages)

chat_vllm = ChatOCIModelDeploymentVLLM(endpoint=endpoint)
chat_vllm.invoke(messages)
```
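
For illustration, a minimal sketch of consuming the stream from the example above (reusing the `endpoint` and `messages` variables; assumes the `/predictWithResponseStream` endpoint, the standard LangChain chat-model `.stream()` interface, and OCI auth already configured in your environment):

```python
from langchain_oci import ChatOCIModelDeployment

chat = ChatOCIModelDeployment(
    endpoint=endpoint,  # must be the /predictWithResponseStream endpoint
    model="odsc-llm",
)

# .stream() yields message chunks; print each piece of text as it arrives.
for chunk in chat.stream(messages):
    print(chunk.content, end="", flush=True)
```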

### 2. Use a Completion Model
You may instantiate the OCI Data Science model with `OCIModelDeploymentLLM` or `OCIModelDeploymentVLLM`.
The `OCIModelDeploymentLLM` class is designed for completion endpoints.

```python
from langchain_oci.llms import OCIModelDeploymentLLM, OCIModelDeploymentVLLM
from langchain_oci import OCIModelDeploymentLLM

# Create an instance of OCI Model Deployment Endpoint

# Replace the endpoint uri and model name with your own
# For streaming, use the /predictWithResponseStream endpoint.
endpoint = "https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict"

llm = OCIModelDeploymentLLM(
@@ -111,10 +105,6 @@ llm = OCIModelDeploymentLLM(
)
llm.invoke("Who is the first president of United States?")

vllm = OCIModelDeploymentVLLM(
endpoint=endpoint,
)
vllm.invoke("Who is the first president of United States?")
```
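
Likewise, a minimal streaming sketch for the completion model (assumes the deployment exposes a `/predictWithResponseStream` endpoint and that the standard LangChain `.stream()` interface applies; replace the endpoint with your own):

```python
from langchain_oci import OCIModelDeploymentLLM

stream_endpoint = (
    "https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predictWithResponseStream"
)

llm = OCIModelDeploymentLLM(
    endpoint=stream_endpoint,
    model="odsc-llm",
)

# .stream() yields plain text chunks for completion models.
for chunk in llm.stream("Who is the first president of the United States?"):
    print(chunk, end="", flush=True)
```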

### 3. Use an Embedding Model
(unchanged lines below not shown)
18 changes: 2 additions & 16 deletions libs/oci/langchain_oci/__init__.py
@@ -1,35 +1,21 @@
# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

from langchain_oci.chat_models.oci_data_science import (
ChatOCIModelDeployment,
ChatOCIModelDeploymentTGI,
ChatOCIModelDeploymentVLLM,
)
from langchain_oci.chat_models import ChatOCIModelDeployment
from langchain_oci.chat_models.oci_generative_ai import ChatOCIGenAI
from langchain_oci.embeddings.oci_data_science_model_deployment_endpoint import (
OCIModelDeploymentEndpointEmbeddings,
)
from langchain_oci.embeddings.oci_generative_ai import OCIGenAIEmbeddings
from langchain_oci.llms.oci_data_science_model_deployment_endpoint import (
BaseOCIModelDeployment,
OCIModelDeploymentLLM,
OCIModelDeploymentTGI,
OCIModelDeploymentVLLM,
)
from langchain_oci.llms import BaseOCIModelDeployment, OCIModelDeploymentLLM
from langchain_oci.llms.oci_generative_ai import OCIGenAI, OCIGenAIBase

__all__ = [
"ChatOCIGenAI",
"ChatOCIModelDeployment",
"ChatOCIModelDeploymentTGI",
"ChatOCIModelDeploymentVLLM",
"OCIGenAIEmbeddings",
"OCIModelDeploymentEndpointEmbeddings",
"OCIGenAIBase",
"OCIGenAI",
"BaseOCIModelDeployment",
"OCIModelDeploymentLLM",
"OCIModelDeploymentTGI",
"OCIModelDeploymentVLLM",
]
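
After this consolidation, the remaining public classes are still importable from the package root; a quick sketch of the supported top-level imports, mirroring the `__all__` list above (assumes the package and its optional dependencies are installed):

```python
# Every name left in __all__ resolves directly from langchain_oci.
from langchain_oci import (
    BaseOCIModelDeployment,
    ChatOCIGenAI,
    ChatOCIModelDeployment,
    OCIGenAI,
    OCIGenAIBase,
    OCIGenAIEmbeddings,
    OCIModelDeploymentEndpointEmbeddings,
    OCIModelDeploymentLLM,
)
```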
31 changes: 24 additions & 7 deletions libs/oci/langchain_oci/chat_models/__init__.py
@@ -1,16 +1,33 @@
# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

from langchain_oci.chat_models.oci_data_science import (
ChatOCIModelDeployment,
ChatOCIModelDeploymentTGI,
ChatOCIModelDeploymentVLLM,
)
from langchain_oci.chat_models.oci_generative_ai import ChatOCIGenAI

try:
from langchain_oci.chat_models.oci_data_science import ChatOCIModelDeployment

except ModuleNotFoundError as ex:
# Default message
message = ex.msg
# For langchain_openai, show the message with pip install command.
if ex.name == "langchain_openai":
message = (
"No module named langchain_openai. "
[Review comment, Member] Maybe we could give a more user-friendly message here, explaining that we rely on langchain_openai and delegate inferencing to that library?
"Please install it with `pip install langchain_openai`"
)

# Create a placeholder class here so that
# users can import the class without error.
# Users will see the error message when they try to initialize an instance.
class ChatOCIModelDeployment:
"""Placeholder class for ChatOCIModelDeployment
when langchain-openai is not installed."""

def __init__(self, *args, **kwargs):
raise ModuleNotFoundError(message)


__all__ = [
"ChatOCIGenAI",
"ChatOCIModelDeployment",
"ChatOCIModelDeploymentTGI",
"ChatOCIModelDeploymentVLLM",
]
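
For illustration, a hypothetical session showing the resulting behavior when `langchain_openai` is not installed: the import itself succeeds, and the helpful error only surfaces when the class is instantiated.

```python
# Import succeeds even without langchain_openai installed,
# because the placeholder class is exported in its place.
from langchain_oci.chat_models import ChatOCIModelDeployment

try:
    chat = ChatOCIModelDeployment(endpoint="https://example.invalid/predict")
except ModuleNotFoundError as err:
    # The placeholder raises here, pointing at `pip install langchain_openai`.
    print(err)
```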