feat: correct routed_experts from sglang (trick of routing_replay) #884
Merged

Changes from all commits (31 commits):
1ae0ecd  Refactor code structure for improved readability and maintainability (ZiyiTsang)
8ca7919  Merge remote-tracking branch 'origin/main' into r3replay (ZiyiTsang)
ee2fe0f  Merge remote-tracking branch 'origin/main' into r3replay (ZiyiTsang)
5cafc81  . (ZiyiTsang)
facf445  . (ZiyiTsang)
48e06e5  . (ZiyiTsang)
b7929c2  . (ZiyiTsang)
3e477bb  . (ZiyiTsang)
9634b57  . (ZiyiTsang)
ae44f2d  Update areal/api/io_struct.py (ZiyiTsang)
d54f7f1  Update areal/api/cli_args.py (ZiyiTsang)
7adde36  . (ZiyiTsang)
efa6572  . (ZiyiTsang)
c72de5d  . (ZiyiTsang)
8d3ff6c  . (ZiyiTsang)
a7cf9a0  Merge remote-tracking branch 'origin/main' into r3replay (ZiyiTsang)
6b051d3  . (ZiyiTsang)
9ed95bb  . (ZiyiTsang)
353825b  . (ZiyiTsang)
d5dbcd3  . (ZiyiTsang)
17ed509  . (ZiyiTsang)
fdda8c9  . (ZiyiTsang)
0e1532f  . (ZiyiTsang)
d26fc4c  . (ZiyiTsang)
506950c  . (ZiyiTsang)
342ba4f  . (ZiyiTsang)
cba381e  Merge branch 'main' into r3replay (ZiyiTsang)
570d95d  Merge branch 'r3replay' of https://github.com/ZiyiTsang/AReaL into r3… (ZiyiTsang)
fca71f1  . (ZiyiTsang)
35fc22f  Merge origin/main into r3replay (ZiyiTsang)
03ccdef  Merge branch 'main' into r3replay (rchardx)
The first file touched (the SGLang HTTP helpers, judging by build_generation_request, parse_generation_response, and launch_server) gains the imports the new decoding code needs:

```diff
@@ -6,6 +6,8 @@
 from concurrent.futures import Future
 from typing import Any

+import numpy as np
+import pybase64
 from torchdata.stateful_dataloader import StatefulDataLoader

 from areal.api.cli_args import InferenceEngineConfig, PerfTracerConfig, SGLangConfig
```
```diff
@@ -66,6 +68,9 @@ def build_generation_request(
         "stream": False,
     }

+    # Add return_routed_experts to payload if set
+    if req.metadata.get("return_routed_experts", False):
+        payload["return_routed_experts"] = True
     # Add LoRA if initialized
     if with_lora:
         lora_name = gconfig.lora_name
```
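For orientation, a minimal caller-side sketch of the opt-in pattern this hunk implements. Only the `"return_routed_experts"` metadata key and the resulting payload field come from the diff; the surrounding dictionaries are illustrative stand-ins for the real request objects.

```python
# Hypothetical caller-side sketch; only the "return_routed_experts" key
# and payload field are taken from the diff above.
metadata = {"return_routed_experts": True}  # stands in for req.metadata

payload = {"stream": False}
if metadata.get("return_routed_experts", False):
    payload["return_routed_experts"] = True

assert payload["return_routed_experts"] is True
```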
```diff
@@ -85,11 +90,24 @@ def parse_generation_response(
     finish_reason = meta_info["finish_reason"]
     stop_reason = finish_reason["type"]
     stop_message = finish_reason.get("message", "")

+    # Extract routed_experts information if available
+    routed_experts = meta_info.get("routed_experts", None)
+    if routed_experts is not None:
+        num_sgl_token = (
+            meta_info["prompt_tokens"] + meta_info["completion_tokens"] - 1
+        )
+        # Extract expert_id and reshape to (num_sgl_token, num_layers*expert_top_k)
+        routed_experts = np.frombuffer(
+            pybase64.b64decode(routed_experts.encode("utf-8")), dtype=np.int32
+        ).reshape(num_sgl_token, -1)
+
     if stop_reason == "abort" and stop_message.startswith("Abort before prefill"):
         return HttpGenerationResult(
             output_tokens=[],
             output_logprobs=[],
             stop_reason=stop_reason,
+            routed_experts=routed_experts,
         )

     output_tokens = [x[1] for x in meta_info["output_token_logprobs"]]
```
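To make the decoding step concrete, here is a self-contained round-trip sketch: it fabricates an expert-routing matrix, serializes it the way the server plausibly does (raw int32 bytes, base64-encoded into the JSON `meta_info`), and recovers it with the same `pybase64.b64decode` / `np.frombuffer` / `reshape` sequence used above. The shapes and the server-side encoding are assumptions for illustration.

```python
import numpy as np
import pybase64

# Illustrative shapes: 5 tokens, 4 MoE layers, top-2 experts per layer.
num_sgl_token, num_layers, top_k = 5, 4, 2
experts = np.arange(num_sgl_token * num_layers * top_k, dtype=np.int32).reshape(
    num_sgl_token, num_layers * top_k
)

# Assumed server side: flatten to raw int32 bytes, base64-encode into JSON.
encoded = pybase64.b64encode(experts.tobytes()).decode("utf-8")

# Client side, mirroring parse_generation_response above.
decoded = np.frombuffer(
    pybase64.b64decode(encoded.encode("utf-8")), dtype=np.int32
).reshape(num_sgl_token, -1)

assert np.array_equal(experts, decoded)
```

The `- 1` in `num_sgl_token` plausibly reflects that routing is recorded once per forward pass and the final sampled token is never fed back through the model; that reading is inferred from the arithmetic, not stated in the diff.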
```diff
@@ -99,6 +117,7 @@ def parse_generation_response(
         output_tokens=output_tokens,
         output_logprobs=output_logprobs,
         stop_reason=stop_reason,
+        routed_experts=routed_experts,
     )

 def build_disk_weight_update_requests(
```
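The new `routed_experts=` keyword implies that `HttpGenerationResult` grew a matching field (the commit log's "Update areal/api/io_struct.py" points the same way). A hedged sketch of the shape this diff implies; the real dataclass may carry more fields:

```python
from dataclasses import dataclass, field

import numpy as np

# Sketch only: field set inferred from the constructor calls in the diff;
# the actual HttpGenerationResult in AReaL may differ.
@dataclass
class HttpGenerationResultSketch:
    output_tokens: list[int] = field(default_factory=list)
    output_logprobs: list[float] = field(default_factory=list)
    stop_reason: str = ""
    # (num_sgl_token, num_layers * expert_top_k) int32 array, or None when
    # return_routed_experts was not requested.
    routed_experts: np.ndarray | None = None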
```diff
@@ -211,7 +230,6 @@ def get_onload_request(self, tags: list[str] | None = None) -> HttpRequest:
     def launch_server(self, server_args: dict[str, Any]) -> subprocess.Popen:
         """Launch SGLang server subprocess."""
         cmd = SGLangConfig.build_cmd_from_args(server_args)
-
         _env = os.environ.copy()
         triton_cache_path = _env.get("TRITON_CACHE_PATH", TRITON_CACHE_PATH)
         _env["TRITON_CACHE_PATH"] = os.path.join(triton_cache_path, str(uuid.uuid4()))
```
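The unchanged context in this hunk shows a launch pattern worth noting: each server subprocess gets a unique `TRITON_CACHE_PATH`, so concurrent launches do not contend on one Triton kernel cache. A standalone sketch of that pattern; the fallback path and the stand-in command are assumptions, not AReaL's actual values:

```python
import os
import subprocess
import uuid

# Assumed fallback; the real default comes from AReaL's TRITON_CACHE_PATH constant.
DEFAULT_TRITON_CACHE = "/tmp/triton"

env = os.environ.copy()
base = env.get("TRITON_CACHE_PATH", DEFAULT_TRITON_CACHE)
# One fresh cache directory per launch avoids cross-process cache corruption.
env["TRITON_CACHE_PATH"] = os.path.join(base, str(uuid.uuid4()))

# Stand-in command; the real call passes the SGLang server cmd built above.
proc = subprocess.Popen(["python", "--version"], env=env)
proc.wait()
```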
The second file touched (the trainer's engine setup, per __init__, _init_rollout, and _export_and_commit_stats) adds early config validation:

```diff
@@ -109,6 +109,9 @@ def __init__(
         # Parse allocation mode.
         self.allocation_mode = AllocationMode.from_str(config.allocation_mode)

+        # Validate config before proceeding with weight initialization
+        self._validate_cfg()
+
         self._amend_xccl_weight_update_envvar()

         # Create models: actor, critic, etc.
```
```diff
@@ -652,6 +655,8 @@ def _init_rollout(

         # Determine engine class and server args based on backend
         if self.allocation_mode.gen_backend == "sglang":
+            if self.config.rollout.return_routed_experts:
+                self.config.sglang.enable_return_routed_experts = True
             if lora_path is not None and self.config.actor.use_lora:
                 self.config.sglang.lora_paths = [
                     f"{self.config.gconfig.lora_name}-v0={lora_path}"
```
```diff
@@ -663,6 +668,10 @@ def _init_rollout(
                 base_gpu_id=0,
             )
         elif self.allocation_mode.gen_backend == "vllm":
+            if self.config.rollout.return_routed_experts:
+                raise ValueError(
+                    "return_routed_experts is not supported with vLLM backend. Please disable return_routed_experts or switch to SGLang backend."
+                )
             if lora_path is not None and self.config.actor.use_lora:
                 self.config.vllm.lora_modules = [
                     f"{self.config.gconfig.lora_name}-v0={lora_path}"
```
```diff
@@ -842,6 +851,17 @@ def _export_and_commit_stats(self, epoch: int, epoch_step: int, global_step: int
         dist.barrier(group=self.actor.cpu_group)
         current_platform.synchronize()

+    def _validate_cfg(self):
+        """Validate config for incompatible settings before weight initialization, to avoid wasted resources on spawning workers and loading models."""
+        if (
+            self.allocation_mode.gen_backend == "vllm"
+            and self.config.rollout.return_routed_experts
+        ):
+            raise ValueError(
+                "return_routed_experts is only supported with SGLang backend. "
+                "Please disable return_routed_experts or switch to SGLang backend."
+            )
+
     def _requires_proxy_workflow(self, workflow: WorkflowLike | None) -> bool:
         """Check if workflow requires proxy workers (i.e., not a RolloutWorkflow).
```
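A standalone sketch of the fail-fast idea behind `_validate_cfg`: reject incompatible backend/flag combinations before any workers spawn or model weights load. The config class below is a simplified stand-in for AReaL's real dataclasses, kept only to make the check runnable in isolation:

```python
from dataclasses import dataclass

# Simplified stand-in; AReaL's real config objects have many more fields.
@dataclass
class RolloutConfig:
    return_routed_experts: bool = False

def validate_cfg(gen_backend: str, rollout: RolloutConfig) -> None:
    # Raising here is cheap; failing after weights load wastes GPU time.
    if gen_backend == "vllm" and rollout.return_routed_experts:
        raise ValueError(
            "return_routed_experts is only supported with SGLang backend. "
            "Please disable return_routed_experts or switch to SGLang backend."
        )

validate_cfg("sglang", RolloutConfig(return_routed_experts=True))  # passes
```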