diff --git a/vllm_gaudi/attention/backends/hpu_attn.py b/vllm_gaudi/attention/backends/hpu_attn.py
index 271aeef6..e384e760 100644
--- a/vllm_gaudi/attention/backends/hpu_attn.py
+++ b/vllm_gaudi/attention/backends/hpu_attn.py
@@ -381,8 +381,10 @@ def __init__(
         attn_type: str = AttentionType.DECODER,
         kv_sharing_target_layer_name: Optional[str] = None,
         use_irope: bool = False,
+        sinks: Optional[int] = None,
     ) -> None:
         super(AttentionImpl, self).__init__()
+        self._sinks = sinks
         if kv_sharing_target_layer_name is not None:
             raise NotImplementedError("KV sharing is not currently supported on HPU.")
         if use_irope:
diff --git a/vllm_gaudi/extension/ops.py b/vllm_gaudi/extension/ops.py
index 987f61eb..75161943 100644
--- a/vllm_gaudi/extension/ops.py
+++ b/vllm_gaudi/extension/ops.py
@@ -474,7 +474,7 @@ def forward(self, hidden_states, expert_routing_table, router_weights, permuted_
             w12=w1_list,
             w3=w2_list,
             permuted_weights=permuted_weights,
-            activation=activation,
+            activation="silu",
             experts_min=self.experts_min,
             experts_max=self.experts_max)
         for i in range(self.moe_n_slice):
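
Note on the first hunk: because `sinks` defaults to `None`, the new keyword is purely additive and existing call sites of the HPU backend keep working unchanged. A minimal sketch of how a caller could forward the keyword defensively (the helper `build_attention_impl` and the signature-probing approach are illustrative assumptions, not vllm_gaudi code):

```python
import inspect
from typing import Any, Optional


def build_attention_impl(impl_cls: type, *, sinks: Optional[int] = None, **kwargs: Any):
    # Hypothetical helper: pass `sinks` only when the backend's constructor
    # actually accepts it, so backends predating this diff are unaffected.
    if sinks is not None and "sinks" in inspect.signature(impl_cls.__init__).parameters:
        kwargs["sinks"] = sinks
    return impl_cls(**kwargs)
```

Note on the second hunk: replacing `activation=activation` with `activation="silu"` pins the HPU MoE op to SiLU regardless of the activation the caller passes into `forward`; any non-SiLU activation argument is silently ignored at this call site.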