
Commit 3fddd2a

russellb authored and Said-Akbar committed
[Misc] Add SPDX-License-Identifier headers to python source files (vllm-project#12628)
- **Add SPDX license headers to python source files**
- **Check for SPDX headers using pre-commit**

commit 9d7ef44
Author: Russell Bryant <[email protected]>
Date: Fri Jan 31 14:18:24 2025 -0500

    Add SPDX license headers to python source files

    This commit adds SPDX license headers to python source files, as
    recommended to the project by the Linux Foundation. These headers
    provide a concise, human- and machine-readable way of communicating the
    license of each source file. They help avoid any ambiguity about the
    license of the code and can also be used by tools to help manage
    license compliance.

    The Linux Foundation runs license scans against the codebase to help
    ensure we are in compliance with the licenses of the code we use,
    including dependencies. Having these headers in place helps that tool
    do its job.

    More information can be found on the SPDX site:

    - https://spdx.dev/learn/handling-license-info/

    Signed-off-by: Russell Bryant <[email protected]>

commit 5a1cf1c
Author: Russell Bryant <[email protected]>
Date: Fri Jan 31 14:36:32 2025 -0500

    Check for SPDX headers using pre-commit

    Signed-off-by: Russell Bryant <[email protected]>

---------

Signed-off-by: Russell Bryant <[email protected]>
Signed-off-by: saeediy <[email protected]>
1 parent 54a36b9 commit 3fddd2a
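As a quick illustration of the "machine readable" point above, the identifier added at the top of each file can be recovered programmatically. The snippet below is a minimal sketch and not part of this commit; the helper name and the two-line scan window are assumptions, and only the header format itself comes from the diff below.

```python
# Minimal sketch (not from this commit): read the SPDX identifier off the top
# of a Python source file. Only the header format is taken from the diff.
import re
from pathlib import Path
from typing import Optional

SPDX_RE = re.compile(r"#\s*SPDX-License-Identifier:\s*(?P<expr>\S+)")


def spdx_license(path: Path) -> Optional[str]:
    """Return the SPDX license expression declared in the first two lines, if any."""
    for line in path.read_text(encoding="utf-8").splitlines()[:2]:
        match = SPDX_RE.match(line)
        if match:
            return match.group("expr")
    return None


if __name__ == "__main__":
    print(spdx_license(Path("collect_env.py")))  # -> "Apache-2.0" after this commit
```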


1,012 files changed: +1,884 −2 lines

Note: large commits have some content hidden by default, so only a subset of the 1,012 changed files is listed below.

.buildkite/check-wheel-size.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import os
 import sys
 import zipfile

.buildkite/generate_index.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import os
 

.buildkite/lm-eval-harness/test_lm_eval_correctness.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 LM eval harness on model to compare vs HF baseline computed offline.
 Configs are found in configs/$MODEL.yaml

.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import json
 import os
 from pathlib import Path

.buildkite/nightly-benchmarks/scripts/download-tokenizer.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 
 from transformers import AutoTokenizer

.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import json
 from pathlib import Path

.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from lmdeploy.serve.openai.api_client import APIClient
 
 api_client = APIClient("http://localhost:8000")

.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import datetime
 import json
 import os

.pre-commit-config.yaml (+5 −1)

@@ -97,10 +97,14 @@ repos:
       language: system
       verbose: true
       stages: [commit-msg]
+    - id: check-spdx-header
+      name: Check SPDX headers
+      entry: python tools/check_spdx_header.py
+      language: python
+      types: [python]
     - id: suggestion
       name: Suggestion
       entry: bash -c 'echo "To bypass pre-commit hooks, add --no-verify to git commit."'
       language: system
       verbose: true
       pass_filenames: false
-
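The new hook hands the staged Python files to `tools/check_spdx_header.py`. That script is not shown in this excerpt, so the following is only an assumed minimal sketch of such a checker, relying on the fact that pre-commit passes the matching filenames as command-line arguments and treats a non-zero exit status as a failure.

```python
# Hypothetical minimal checker in the spirit of tools/check_spdx_header.py
# (the real script is not part of this excerpt). pre-commit would invoke it as:
#   python tools/check_spdx_header.py file1.py file2.py ...
import sys

SPDX_HEADER = "# SPDX-License-Identifier: Apache-2.0"


def has_spdx_header(path: str) -> bool:
    """Return True if the SPDX header appears within the first two lines of the file."""
    with open(path, encoding="utf-8") as f:
        first_lines = [f.readline(), f.readline()]
    return any(line.strip() == SPDX_HEADER for line in first_lines)


def main() -> int:
    missing = [path for path in sys.argv[1:] if not has_spdx_header(path)]
    for path in missing:
        print(f"missing SPDX header: {path}")
    return 1 if missing else 0


if __name__ == "__main__":
    sys.exit(main())
```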

benchmarks/backend_request_func.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import json
 import os
 import sys

benchmarks/benchmark_guided.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """Benchmark guided decoding throughput."""
 import argparse
 import dataclasses

benchmarks/benchmark_latency.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """Benchmark the latency of processing a single batch of requests."""
 import argparse
 import dataclasses

benchmarks/benchmark_long_document_qa_throughput.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 Offline benchmark to test the long document QA throughput.
 

benchmarks/benchmark_prefix_caching.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 Benchmark the efficiency of prefix caching.
 

benchmarks/benchmark_prioritization.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """Benchmark offline prioritization."""
 import argparse
 import dataclasses

benchmarks/benchmark_serving.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 r"""Benchmark online serving throughput.
 
 On the server side, run one of the following commands:

benchmarks/benchmark_serving_guided.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 r"""Benchmark online serving throughput with guided decoding.
 
 On the server side, run one of the following commands:

benchmarks/benchmark_throughput.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """Benchmark offline inference throughput."""
 import argparse
 import dataclasses

benchmarks/cutlass_benchmarks/sparse_benchmarks.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import copy
 import itertools

benchmarks/cutlass_benchmarks/utils.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # Cutlass bench utils
 from typing import Iterable, Tuple
 

benchmarks/cutlass_benchmarks/w8a8_benchmarks.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import copy
 import itertools

benchmarks/cutlass_benchmarks/weight_shapes.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # Weight Shapes are in the format
 # ([K, N], TP_SPLIT_DIM)
 # Example:

benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import os
 
 import aiohttp

benchmarks/disagg_benchmarks/round_robin_proxy.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import asyncio
 import itertools
 

benchmarks/disagg_benchmarks/visualize_benchmark_results.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import json
 
 import matplotlib.pyplot as plt

benchmarks/fused_kernels/layernorm_rms_benchmarks.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import pickle as pkl
 import time
 from dataclasses import dataclass

benchmarks/kernels/benchmark_aqlm.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import os
 import sys
 from typing import Optional

benchmarks/kernels/benchmark_layernorm.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import time
 
 import torch

benchmarks/kernels/benchmark_lora.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import copy
 import json

benchmarks/kernels/benchmark_machete.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import copy
 import itertools

benchmarks/kernels/benchmark_marlin.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from typing import List
 
 import torch

benchmarks/kernels/benchmark_moe.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 import time
 from datetime import datetime

benchmarks/kernels/benchmark_paged_attention.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import random
 import time
 from typing import List, Optional

benchmarks/kernels/benchmark_quant.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import time
 
 import torch

benchmarks/kernels/benchmark_rmsnorm.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import itertools
 from typing import Optional, Tuple, Union
 

benchmarks/kernels/benchmark_rope.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from itertools import accumulate
 from typing import List, Optional
 

benchmarks/kernels/benchmark_shapes.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 WEIGHT_SHAPES = {
     "ideal": [[4 * 256 * 32, 256 * 32]],
     "mistralai/Mistral-7B-v0.1/TP1": [

benchmarks/kernels/graph_machete_bench.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import math
 import pickle
 import re

benchmarks/kernels/utils.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import dataclasses
 from typing import Any, Callable, Iterable, Optional
 

benchmarks/kernels/weight_shapes.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # Weight Shapes are in the format
 # ([K, N], TP_SPLIT_DIM)
 # Example:

benchmarks/overheads/benchmark_hashing.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import cProfile
 import pstats
 

cmake/hipify.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 #!/usr/bin/env python3
 
 #

collect_env.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # ruff: noqa
 # code borrowed from https://github.com/pytorch/pytorch/blob/main/torch/utils/collect_env.py
 

csrc/cutlass_extensions/vllm_cutlass_library_extension.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import enum
 from typing import Dict, Union
 

csrc/quantization/machete/generate.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import itertools
 import math
 import os

docs/source/conf.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # Configuration file for the Sphinx documentation builder.
 #
 # This file only contains a selection of the most common options. For a full

docs/source/generate_examples.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import itertools
 import re
 from dataclasses import dataclass, field

examples/offline_inference/aqlm_example.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 from vllm.utils import FlexibleArgumentParser
 

examples/offline_inference/arctic.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 
 # Sample prompts.

examples/offline_inference/audio_language.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 This example shows how to use vLLM for running offline inference
 with the correct prompt format on audio language models.

examples/offline_inference/basic.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 
 # Sample prompts.

examples/offline_inference/basic_with_model_default_sampling.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM
 
 # Sample prompts.

examples/offline_inference/chat.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 
 llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")

examples/offline_inference/chat_with_tools.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 # ruff: noqa
 import json
 import random

examples/offline_inference/classification.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM
 
 # Sample prompts.

examples/offline_inference/cli.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from dataclasses import asdict
 
 from vllm import LLM, SamplingParams

examples/offline_inference/cpu_offload.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 
 # Sample prompts.

examples/offline_inference/distributed.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 This example shows how to use Ray Data for running offline batch inference
 distributively on a multi-nodes cluster.

examples/offline_inference/embedding.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM
 
 # Sample prompts.

examples/offline_inference/encoder_decoder.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 '''
 Demonstrate prompting of text-to-text
 encoder/decoder models, specifically BART

examples/offline_inference/florence2_inference.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 '''
 Demonstrate prompting of text-to-text
 encoder/decoder models, specifically Florence-2

examples/offline_inference/gguf_inference.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from huggingface_hub import hf_hub_download
 
 from vllm import LLM, SamplingParams

examples/offline_inference/llm_engine_example.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import argparse
 from typing import List, Tuple
 

examples/offline_inference/lora_with_quantization_inference.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 This example shows how to use LoRA with different quantization techniques
 for offline inference.

examples/offline_inference/mlpspeculator.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 import gc
 import time
 from typing import List

examples/offline_inference/multilora_inference.py (+1)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
 """
 This example shows how to use the multi-LoRA functionality
 for offline inference.

examples/offline_inference/neuron.py (+2)

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: Apache-2.0
+
 from vllm import LLM, SamplingParams
 
 # Sample prompts.
