Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions core/changelog.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
- feat(providers): added HuggingFace provider using the Inference Providers API, with support for chat (including streaming), responses (including streaming), TTS, and speech transcription
- fix(mcp): ensure properties field is always set for tools - [@CryptoFewka](https://github.com/CryptoFewka)
- fix(perplexity): correct search_domain_filter json tag - [@hnoguchigr](https://github.com/hnoguchigr)
- fix: ensure properties field is always set for mcp tools - [@CryptoFewka](https://github.com/CryptoFewka)
- fix: correct search_domain_filter json tag in perplexity provider - [@hnoguchigr](https://github.com/hnoguchigr)
- feat: added HuggingFace provider
- fix: bedrock empty ARN issue causing request to fail
- fix: anthropic single content block in response converted to string instead for chat completions
- fix: added auth support in HTTP proxies
- feat: added custom CA certificate support in proxies
5 changes: 5 additions & 0 deletions core/providers/anthropic/chat.go
Original file line number Diff line number Diff line change
Expand Up @@ -365,6 +365,11 @@ func (response *AnthropicMessageResponse) ToBifrostChatResponse() *schemas.Bifro
}
}

if len(contentBlocks) == 1 && contentBlocks[0].Type == schemas.ChatContentBlockTypeText {
contentStr = contentBlocks[0].Text
contentBlocks = nil
}

// Create a single choice with the collected content
// Create message content
messageContent := schemas.ChatMessageContent{
Expand Down
2 changes: 1 addition & 1 deletion core/providers/bedrock/bedrock.go
Original file line number Diff line number Diff line change
Expand Up @@ -2736,7 +2736,7 @@ func (provider *BedrockProvider) getModelPath(basePath string, model string, key
// Default: use model/deployment directly
path := fmt.Sprintf("%s/%s", deployment, basePath)
// If ARN is present, Bedrock expects the ARN-scoped identifier
if key.BedrockKeyConfig != nil && key.BedrockKeyConfig.ARN != nil {
if key.BedrockKeyConfig != nil && key.BedrockKeyConfig.ARN != nil && *key.BedrockKeyConfig.ARN != "" {
encodedModelIdentifier := url.PathEscape(fmt.Sprintf("%s/%s", *key.BedrockKeyConfig.ARN, deployment))
path = fmt.Sprintf("%s/%s", encodedModelIdentifier, basePath)
}
Expand Down
5 changes: 5 additions & 0 deletions core/providers/bedrock/chat.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,11 @@ func (response *BedrockConverseResponse) ToBifrostChatResponse(ctx context.Conte
}
}

if len(contentBlocks) == 1 && contentBlocks[0].Type == schemas.ChatContentBlockTypeText {
contentStr = contentBlocks[0].Text
contentBlocks = nil
}

// Create the message content
messageContent := schemas.ChatMessageContent{
ContentStr: contentStr,
Expand Down
13 changes: 9 additions & 4 deletions core/providers/gemini/chat.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ func (response *GenerateContentResponse) ToBifrostChatResponse() *schemas.Bifros
var toolCalls []schemas.ChatAssistantMessageToolCall
var contentBlocks []schemas.ChatContentBlock
var reasoningDetails []schemas.ChatReasoningDetails
var contentStr *string

// Process candidates to extract text content
if len(response.Candidates) > 0 {
Expand Down Expand Up @@ -145,10 +146,14 @@ func (response *GenerateContentResponse) ToBifrostChatResponse() *schemas.Bifros
Role: schemas.ChatMessageRoleAssistant,
}

if len(contentBlocks) > 0 {
message.Content = &schemas.ChatMessageContent{
ContentBlocks: contentBlocks,
}
if len(contentBlocks) == 1 && contentBlocks[0].Type == schemas.ChatContentBlockTypeText {
contentStr = contentBlocks[0].Text
contentBlocks = nil
}

message.Content = &schemas.ChatMessageContent{
ContentStr: contentStr,
ContentBlocks: contentBlocks,
}

if len(toolCalls) > 0 || len(reasoningDetails) > 0 {
Expand Down
2 changes: 1 addition & 1 deletion core/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.2.39
1.2.40
20 changes: 10 additions & 10 deletions docs/docs.json
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,16 @@
]
},
{
"group": "Provider Integrations",
"group": "Providers",
"icon": "solar-system",
"pages": [
"providers/supported-providers",
"providers/huggingface",
"providers/custom-providers"
]
},
{
"group": "SDK Integrations",
"icon": "plug",
"pages": [
"integrations/what-is-an-integration",
Expand Down Expand Up @@ -155,15 +164,6 @@
},
"features/telemetry",
"features/semantic-caching",
{
"group": "Providers",
"icon": "toolbox",
"pages": [
"features/providers/supported-providers",
"features/providers/custom-providers",
"features/providers/huggingface"
]
},
{
"group": "Plugins",
"icon": "puzzle-piece",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
title: "Custom Providers"
description: "Create custom provider configurations with specific request type restrictions, custom naming, and controlled access patterns."
icon: "gears"
icon: "gear"
---

## What Are Custom Providers?
Expand Down
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
---
title: "Hugging Face Provider Implementation Details"
title: "Hugging Face"
description: "Detailed guide on Hugging Face provider implementation specifics, including model aliases and unique request handling."
icon: "face-smiling-hands"
---

# HuggingFace Implementation

The Hugging Face provider in Bifrost (`core/providers/huggingface`) implements a complex integration that supports multiple inference providers (like `hf-inference`, `fal-ai`, `cerebras`, `sambanova`, etc.) through a unified interface.

## Overview
Expand All @@ -15,6 +14,35 @@ The Hugging Face provider implements custom logic for:
- **Heterogeneous request formats**: Supports JSON, raw binary, and base64-encoded payloads
- **Provider-specific constraints**: Handles varying payload limits and format restrictions

## Supported Inference Providers

The Hugging Face provider supports routing to 20+ inference backends. Below is the current list of supported providers and their capabilities (as of December 2025):

| Provider | Chat | Embedding | Speech (TTS) | Transcription (ASR) |
|----------|------|-----------|--------------|---------------------|
| `hf-inference` | ✅ | ✅ | ❌ | ✅ |
| `cerebras` | ✅ | ❌ | ❌ | ❌ |
| `cohere` | ✅ | ❌ | ❌ | ❌ |
| `fal-ai` | ❌ | ❌ | ✅ | ✅ |
| `featherless-ai` | ✅ | ❌ | ❌ | ❌ |
| `fireworks` | ✅ | ❌ | ❌ | ❌ |
| `groq` | ✅ | ❌ | ❌ | ❌ |
| `hyperbolic` | ✅ | ❌ | ❌ | ❌ |
| `nebius` | ✅ | ✅ | ❌ | ❌ |
| `novita` | ✅ | ❌ | ❌ | ❌ |
| `nscale` | ✅ | ❌ | ❌ | ❌ |
| `ovhcloud-ai-endpoints` | ✅ | ❌ | ❌ | ❌ |
| `public-ai` | ✅ | ❌ | ❌ | ❌ |
| `replicate` | ❌ | ❌ | ✅ | ✅ |
| `sambanova` | ✅ | ✅ | ❌ | ❌ |
| `scaleway` | ✅ | ✅ | ❌ | ❌ |
| `together` | ✅ | ❌ | ❌ | ❌ |
| `z-ai` | ✅ | ❌ | ❌ | ❌ |

<Note>Provider capabilities may change over time. For the most up-to-date information, refer to the [Hugging Face Inference Providers documentation](https://huggingface.co/docs/inference-providers/en/index#partners). Checkmarks (✅) indicate capabilities supported by the inference provider itself.</Note>

<Info>All chat-supported models automatically support the Responses API (`/v1/responses`) as well, via Bifrost's internal conversion logic.</Info>

## Model Aliases & Identification

Unlike standard providers where model IDs are direct strings (e.g., `gpt-4`), Hugging Face models in Bifrost are identified by a composite key to route requests to the correct inference backend.
Expand Down Expand Up @@ -169,33 +197,6 @@ This multi-mode approach allows the provider to support diverse API contracts wi

This flexibility allows the provider to support diverse API contracts within a single implementation structure.

Supported Inference Providers

The Hugging Face provider supports routing to 20+ inference backends. Below is the current list of supported providers and their capabilities (as of December 2025):

| Provider | Chat | Embedding | Speech (TTS) | Transcription (ASR) | Notes |
|----------|------|-----------|--------------|---------------------|-------|
| `hf-inference` | ✅ | ✅ | ❌ | ✅ | Standard HF inference |
| `cerebras` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `cohere` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `fal-ai` | ❌ | ❌ | ✅ | ✅ | MP3 only for audio, base64 Data URI |
| `featherless-ai` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `fireworks` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `groq` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `hyperbolic` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `nebius` | ✅ | ✅ | ❌ | ❌ | Chat and embedding |
| `novita` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `nscale` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `ovhcloud-ai-endpoints` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `public-ai` | ✅ | ❌ | ❌ | ❌ | Chat-only |
| `replicate` | ❌ | ❌ | ✅ | ✅ | Currently we do not support Speech and Transcription on it|
| `sambanova` | ✅ | ✅ | ❌ | ❌ | Chat and embedding |
| `scaleway` | ✅ | ✅ | ❌ | ❌ | Chat and embedding |
| `together` | ✅ | ❌ | ❌ | ❌ | Chat only |
| `z-ai` | ✅ | ❌ | ❌ | ❌ | Chat only |

**Note**: Provider capabilities may change over time. For the most up-to-date information, refer to the [Hugging Face Inference Providers documentation](https://huggingface.co/docs/inference-providers/en/index#partners). Also checkmarks (✅) indicate capabilities supported by the inference provider itself. The notes column clarifies Bifrost's current implementation status where there are gaps.

## Model Discovery & Caching

The provider implements sophisticated model discovery using the Hugging Face Hub API:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,14 +1,61 @@
---
title: "Supported Providers"
description: "Bifrost supports multiple AI providers with consistent OpenAI-compatible response formats, enabling seamless provider switching without code changes."
icon: "layer-group"
icon: "bars-progress"
---

## Overview

Bifrost supports a wide range of AI providers, all accessible through a consistent OpenAI-compatible interface. This standardization allows you to switch between providers without modifying your application code, as all responses follow the same structure regardless of the underlying provider.

Bifrost can also act as a provider-compatible gateway (for example, Anthropic, Google Gemini/GenAI, LiteLLM, and others), exposing provider-specific endpoints so you can use existing provider SDKs or integrations with no code changes — see [What is an integration?](../../integrations/what-is-an-integration) for details.
Bifrost can also act as a provider-compatible gateway (for example, <u>[Anthropic](../../integrations/anthropic-sdk/overview)</u>, <u>[Google Gemini](../../integrations/genai-sdk/overview)</u>, <u>Cohere</u>, <u>[Bedrock](../../integrations/bedrock-sdk/overview)</u>, and others), exposing provider-specific endpoints so you can use existing provider SDKs or integrations with no code changes — see [What is an integration?](../../integrations/what-is-an-integration) for details.


## Provider Support Matrix

The following table summarizes which operations are supported by each provider via Bifrost’s unified interface.

| Provider | Models | Text | Text (stream) | Chat | Chat (stream) | Responses | Responses (stream) | Embeddings | TTS | TTS (stream) | STT | STT (stream) | Files | Batch |
|----------|--------|------|----------------|------|---------------|-----------|--------------------|------------|-----|-------------|-----|--------------|-------|-------|
| Anthropic (`anthropic/<model>`) | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
| Azure (`azure/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| Bedrock (`bedrock/<model>`) | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
| Cerebras (`cerebras/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Cohere (`cohere/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Elevenlabs (`elevenlabs/<model>`) | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| Gemini (`gemini/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Groq (`groq/<model>`) | ✅ | 🟡 | 🟡 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Hugging Face (`huggingface/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Mistral (`mistral/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ |
| Nebius (`nebius/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❌ | ❌ |
| Ollama (`ollama/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| OpenAI (`openai/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| OpenRouter (`openrouter/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Parasail (`parasail/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Perplexity (`perplexity/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| SGL (`sgl/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Vertex AI (`vertex/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |

- 🟡 Not supported by the downstream provider, but internally implemented by Bifrost as a fallback.
- ❌ Not supported by the downstream provider, hence not supported by Bifrost.
- ✅ Fully supported by the downstream provider, or internally implemented by Bifrost.


<Note>
Some operations (🟡) are not supported by the downstream provider, but Bifrost can optionally emulate them internally.
For example, text completions are not supported by Groq, but Bifrost can emulate them using the Chat Completions API. This feature is disabled by default; enable it by setting the `enable_litellm_fallbacks` flag to `true` in the client configuration.
We do not recommend relying on such fallbacks, since text completions and chat completions are fundamentally different; however, the option is available to help users migrating from LiteLLM (which does support these fallbacks).
</Note>


Notes:
- "Models" refers to the list models operation (`/v1/models`).
- "Text" refers to the classic text completion interface (`/v1/completions`).
- "Responses" refers to the OpenAI-style Responses API (`/v1/responses`). Non-OpenAI providers map this to their native chat API under the hood.
- TTS corresponds to `/v1/audio/speech` and STT to `/v1/audio/transcriptions`.
- "Files" refers to the Files API operations (`/v1/files`) for uploading, listing, retrieving, and deleting files.
- "Batch" refers to the Batch API operations (`/v1/batches`) for creating, listing, retrieving, canceling, and getting results of batch jobs.


## Response Format

Expand Down Expand Up @@ -81,51 +128,6 @@ response, err := client.ChatCompletionRequest(ctx, &schemas.BifrostChatRequest{

</Tabs>

## Provider Support Matrix

The following table summarizes which operations are supported by each provider via Bifrost’s unified interface.

| Provider | Models | Text | Text (stream) | Chat | Chat (stream) | Responses | Responses (stream) | Embeddings | TTS | TTS (stream) | STT | STT (stream) | Files | Batch |
|----------|--------|------|----------------|------|---------------|-----------|--------------------|------------|-----|-------------|-----|--------------|-------|-------|
| Anthropic (`anthropic/<model>`) | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
| Azure (`azure/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| Bedrock (`bedrock/<model>`) | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
| Cerebras (`cerebras/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Cohere (`cohere/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Elevenlabs (`elevenlabs/<model>`) | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| Gemini (`gemini/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Groq (`groq/<model>`) | ✅ | 🟡 | 🟡 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Hugging Face (`huggingface/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Mistral (`mistral/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ |
| Nebius (`nebius/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❌ | ❌ |
| Ollama (`ollama/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| OpenAI (`openai/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| OpenRouter (`openrouter/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Parasail (`parasail/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Perplexity (`perplexity/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| SGL (`sgl/<model>`) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Vertex AI (`vertex/<model>`) | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |

- 🟡 Not supported by the downstream provider, but internally implemented by Bifrost as a fallback.
- ❌ Not supported by the downstream provider, hence not supported by Bifrost.
- ✅ Fully supported by the downstream provider, or internally implemented by Bifrost.


<Note>
Some operations are not supported by the downstream provider, and their internal implementation in Bifrost is optional. 🟡
Like Text completions are not supported by Groq, but Bifrost can emulate them internally using the Chat Completions API. This feature is disabled by default, but it can be enabled by setting the `enable_litellm_fallbacks` flag to `true` in the client configuration.
We do not promote using such fallbacks, since text completions and chat completions are fundamentally different. However, this option is available to help users migrating from LiteLLM (which does support these fallbacks).
</Note>


Notes:
- "Models" refers to the list models operation (`/v1/models`).
- "Text" refers to the classic text completion interface (`/v1/completions`).
- "Responses" refers to the OpenAI-style Responses API (`/v1/responses`). Non-OpenAI providers map this to their native chat API under the hood.
- TTS corresponds to `/v1/audio/speech` and STT to `/v1/audio/transcriptions`.
- "Files" refers to the Files API operations (`/v1/files`) for uploading, listing, retrieving, and deleting files.
- "Batch" refers to the Batch API operations (`/v1/batches`) for creating, listing, retrieving, canceling, and getting results of batch jobs.

## Custom Providers

In addition to the built-in providers, Bifrost supports custom provider configurations. Custom providers allow you to create multiple instances of the same base provider with different configurations, request type restrictions, and access patterns. This is useful for environment-specific configurations, role-based access control, and feature testing.
Expand Down
1 change: 1 addition & 0 deletions framework/changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- chore: bump core to 1.2.40
2 changes: 1 addition & 1 deletion framework/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.1.49
1.1.50
1 change: 1 addition & 0 deletions plugins/governance/changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- chore: bump core to 1.2.40 and framework to 1.1.50
2 changes: 1 addition & 1 deletion plugins/governance/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.3.50
1.3.51
1 change: 1 addition & 0 deletions plugins/jsonparser/changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- chore: bump core to 1.2.40 and framework to 1.1.50
2 changes: 1 addition & 1 deletion plugins/jsonparser/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.3.50
1.3.51
1 change: 1 addition & 0 deletions plugins/logging/changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- chore: bump core to 1.2.40 and framework to 1.1.50
2 changes: 1 addition & 1 deletion plugins/logging/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.3.50
1.3.51
Loading