Skip to content

Commit 3897f2c

Browse files
Authored Feb 10, 2025
Enable pytest live log and show warning logs on GitHub Actions CI runs (huggingface#35912)
* fix

* remove

* fix

---------

Co-authored-by: ydshieh <[email protected]>
1 parent 48a309d commit 3897f2c

File tree

4 files changed

+13
-6
lines changed

4 files changed

+13
-6
lines changed
 

‎pyproject.toml

+2
Original file line numberDiff line numberDiff line change
@@ -52,3 +52,5 @@ markers = [
5252
"bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests",
5353
"generate: marks tests that use the GenerationTesterMixin"
5454
]
55+
log_cli = 1
56+
log_cli_level = "WARNING"

‎src/transformers/generation/configuration_utils.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -785,8 +785,7 @@ def validate(self, is_init=False):
785785
for arg_name in ("cache_implementation", "cache_config", "return_legacy_cache"):
786786
if getattr(self, arg_name) is not None:
787787
logger.warning_once(
788-
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name)),
789-
UserWarning,
788+
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name))
790789
)
791790

792791
# 6. check watermarking arguments

‎src/transformers/utils/logging.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,8 @@ def _configure_library_root_logger() -> None:
101101
formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s")
102102
_default_handler.setFormatter(formatter)
103103

104-
library_root_logger.propagate = False
104+
is_ci = os.getenv("CI") is not None and os.getenv("CI").upper() in {"1", "ON", "YES", "TRUE"}
105+
library_root_logger.propagate = True if is_ci else False
105106

106107

107108
def _reset_library_root_logger() -> None:

‎tests/generation/test_streamers.py

+8-3
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
import unittest
1717
from queue import Empty
1818
from threading import Thread
19+
from unittest.mock import patch
1920

2021
import pytest
2122

@@ -27,6 +28,7 @@
2728
is_torch_available,
2829
)
2930
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
31+
from transformers.utils.logging import _get_library_root_logger
3032

3133
from ..test_modeling_common import ids_tensor
3234

@@ -102,9 +104,12 @@ def test_text_streamer_decode_kwargs(self):
102104
model.config.eos_token_id = -1
103105

104106
input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
105-
with CaptureStdout() as cs:
106-
streamer = TextStreamer(tokenizer, skip_special_tokens=True)
107-
model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
107+
108+
root = _get_library_root_logger()
109+
with patch.object(root, "propagate", False):
110+
with CaptureStdout() as cs:
111+
streamer = TextStreamer(tokenizer, skip_special_tokens=True)
112+
model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
108113

109114
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
110115
# re-tokenized, must only contain one token

0 commit comments

Comments (0)