From 9eb659973c9b1feca5e428e9d0d392476b0e5a31 Mon Sep 17 00:00:00 2001 From: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> Date: Thu, 11 Dec 2025 10:57:45 -0800 Subject: [PATCH 01/10] feat: Make farm, queue, and storage profile editable in Submit Dialog - Replace read-only DeadlineFarmDisplay and DeadlineQueueDisplay with editable DeadlineFarmListComboBox and DeadlineQueueListComboBox - Add DeadlineStorageProfileNameListComboBox that shows only when queue has storage profiles available - Config updates immediately on selection change - Remove unused display classes and imports - Add unit tests for new combo box functionality Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .../dialogs/submit_job_to_deadline_dialog.py | 3 + .../ui/widgets/shared_job_settings_tab.py | 324 ++++++++---------- .../widgets/test_shared_job_settings_tab.py | 194 ++++++++++- 3 files changed, 336 insertions(+), 185 deletions(-) diff --git a/src/deadline/client/ui/dialogs/submit_job_to_deadline_dialog.py b/src/deadline/client/ui/dialogs/submit_job_to_deadline_dialog.py index 74e995979..ccd20ec92 100644 --- a/src/deadline/client/ui/dialogs/submit_job_to_deadline_dialog.py +++ b/src/deadline/client/ui/dialogs/submit_job_to_deadline_dialog.py @@ -210,6 +210,9 @@ def _build_ui( self.deadline_authentication_status.api_availability_changed.connect( self.refresh_deadline_settings ) + self.deadline_authentication_status.deadline_config_changed.connect( + self.refresh_deadline_settings + ) # Refresh the submit button enable state once queue parameter status changes self.shared_job_settings.valid_parameters.connect(self._set_submit_button_state) diff --git a/src/deadline/client/ui/widgets/shared_job_settings_tab.py b/src/deadline/client/ui/widgets/shared_job_settings_tab.py index f43365823..84652dc50 100644 --- a/src/deadline/client/ui/widgets/shared_job_settings_tab.py +++ b/src/deadline/client/ui/widgets/shared_job_settings_tab.py @@ -6,7 +6,6 @@ from __future__ import annotations -import sys import threading from typing import Any, Dict, Optional @@ -15,7 +14,6 @@ QComboBox, QFormLayout, QGroupBox, - QHBoxLayout, QLabel, QLineEdit, QRadioButton, @@ -24,8 +22,7 @@ QWidget, ) -from ... import api -from ...config import get_setting +from ...config import get_setting, set_setting, config_file from .._utils import CancelationFlag, tr from .openjd_parameters_widget import OpenJDParametersWidget from ...api import get_queue_parameter_definitions @@ -493,208 +490,167 @@ def _build_ui(self): """ Build the UI for the Deadline settings """ + # Import combo box classes from deadline_config_dialog + from ..dialogs.deadline_config_dialog import ( + DeadlineFarmListComboBox, + DeadlineQueueListComboBox, + DeadlineStorageProfileNameListComboBox, + ) + self.farm_box_label = QLabel(tr("Farm")) - self.farm_box = DeadlineFarmDisplay() + self.farm_box = DeadlineFarmListComboBox(parent=self) self.layout.addRow(self.farm_box_label, self.farm_box) self.queue_box_label = QLabel(tr("Queue")) - self.queue_box = DeadlineQueueDisplay() + self.queue_box = DeadlineQueueListComboBox(parent=self) self.layout.addRow(self.queue_box_label, self.queue_box) - def refresh_setting_controls(self, deadline_authorized): - """ - Refreshes the controls for UI items that depend on the AWS Deadline Cloud API - for their values. - - Args: - deadline_authorized (bool): Should be the result of a call to - api.check_deadline_available, for example from - an AWS Deadline Cloud Status Widget. 
- """ - self.farm_box.refresh(deadline_authorized) - self.queue_box.refresh(deadline_authorized) - - -class _DeadlineNamedResourceDisplay(QWidget): - """ - A Label for displaying an AWS Deadline Cloud resource, that starts displaying - it as the Id, but does an async call to AWS Deadline Cloud to convert it - to the name. - - Args: - resource_name (str): The resource name for the list, like "Farm", - "Queue", "Fleet". - setting_name (str): The setting name for the item. - """ + self.storage_profile_box_label = QLabel(tr("Default storage profile")) + self.storage_profile_box = DeadlineStorageProfileNameListComboBox(parent=self) + self.layout.addRow(self.storage_profile_box_label, self.storage_profile_box) - # Emitted when the background refresh thread catches an exception, - # provides (operation_name, BaseException) - background_exception = Signal(str, BaseException) + # Hide storage profile by default - only show when queue has storage profiles + self._set_storage_profile_visible(False) - # Emitted when an async refresh_item thread completes, - # provides (refresh_id, id, name, description) - _item_update = Signal(int, str, str, str) + # Connect signals for farm, queue, and storage profile changes + self.farm_box.box.currentIndexChanged.connect(self._on_farm_changed) + self.queue_box.box.currentIndexChanged.connect(self._on_queue_changed) + self.storage_profile_box.box.currentIndexChanged.connect(self._on_storage_profile_changed) - def __init__(self, *, resource_name, setting_name, parent: Optional[QWidget] = None): - super().__init__(parent=parent) - - self.__refresh_thread = None - self.__refresh_id = 0 - self.canceled = CancelationFlag() - self.destroyed.connect(self.canceled.set_canceled) - - self.resource_name = resource_name - self.setting_name = setting_name - self.item_id = get_setting(self.setting_name) - self.item_name = "" - self.item_description = "" - - self._build_ui() - - self.label.setText(self.item_display_name()) - - def _build_ui(self): - self.label = QLabel(parent=self) - layout = QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(self.label) - self._item_update.connect(self.handle_item_update) - self.background_exception.connect(self.handle_background_exception) - - def handle_background_exception(self, e): - self.label.setText(self.item_id) - self.label.setToolTip("") - - def item_display_name(self): - """Returns the text to display the item name as""" - return self.item_name or self.item_id or "" - - def refresh(self, deadline_authorized): - """ - Starts a background thread to refresh the item name. - - Args: - deadline_authorized (bool): Should be the result of a call to - api.check_deadline_available, for example from - an AWS Deadline Cloud Status Widget. 
- """ - resource_id = get_setting(self.setting_name) - if resource_id != self.item_id or not self.item_name: - self.item_id = resource_id - self.item_name = "" - self.item_description = "" - display_name = self.item_display_name() - # Only call the AWS Deadline Cloud API if we've confirmed access - if deadline_authorized: - display_name = " - " + display_name - - self.__refresh_id += 1 - self.__refresh_thread = threading.Thread( - target=self._refresh_thread_function, - name=f"AWS Deadline Cloud refresh {self.resource_name} item thread", - args=(self.__refresh_id,), - ) - self.__refresh_thread.start() - - self.label.setText(display_name) - self.label.setToolTip(self.item_description) - else: - self.label.setText(self.item_display_name()) + # Connect to storage profile combo box model to detect when list changes + self.storage_profile_box.box.model().rowsInserted.connect( + self._update_storage_profile_visibility + ) + self.storage_profile_box.box.model().rowsRemoved.connect( + self._update_storage_profile_visibility + ) + self.storage_profile_box.box.model().modelReset.connect( + self._update_storage_profile_visibility + ) - def handle_item_update(self, refresh_id, id, name, description): - # Apply the refresh if it's still for the latest call - if refresh_id == self.__refresh_id: - self.item_id = id - self.item_name = name - self.item_description = description - self.label.setText(self.item_display_name()) - self.label.setToolTip(self.item_description) - - def _refresh_thread_function(self, refresh_id: int): - """ - This function gets started in a background thread to refresh the list. - """ - try: - item = self.get_item() - if not self.canceled: - self._item_update.emit(refresh_id, *item) - except BaseException as e: - if not self.canceled: - self.background_exception.emit(f"Refresh {self.resource_name} item", e) + # Initialize with current config + config = config_file.read_config() + self.farm_box.set_config(config) + self.queue_box.set_config(config) + self.storage_profile_box.set_config(config) + + def _set_storage_profile_visible(self, visible: bool): + """Show or hide the storage profile selector""" + self.storage_profile_box_label.setVisible(visible) + self.storage_profile_box.setVisible(visible) + + def _update_storage_profile_visibility(self): + """Update storage profile visibility based on available profiles""" + # Check if there are actual storage profiles (not just placeholder items) + count = self.storage_profile_box.box.count() + # Hide if empty or only has "" or "" placeholder + has_real_profiles = count > 0 and self.storage_profile_box.box.itemData(0) not in ( + None, + "", + ) + # Also check if it's just a refreshing placeholder + if count == 1 and self.storage_profile_box.box.itemText(0) in ( + "", + "", + ): + has_real_profiles = False + self._set_storage_profile_visible(has_real_profiles) + def _on_farm_changed(self, index: int): + """Handle farm selection change in Submit Dialog""" + if index < 0: + return -class DeadlineFarmDisplay(_DeadlineNamedResourceDisplay): - def __init__(self, *, parent: Optional[QWidget] = None): - super().__init__(resource_name="Farm", setting_name="defaults.farm_id", parent=parent) - - def get_item(self): - farm_id = get_setting(self.setting_name) - if farm_id: - deadline = api.get_boto3_client("deadline") - response = deadline.get_farm(farmId=farm_id) - return (response["farmId"], response["displayName"], response["description"]) - else: - return ("", "", "") + # Get the selected farm ID from the combo box + farm_id = 
self.farm_box.box.itemData(index) + if farm_id is None: + return + # Update config immediately (unlike Settings Dialog which defers to apply()) + set_setting("defaults.farm_id", farm_id) -class DeadlineQueueDisplay(_DeadlineNamedResourceDisplay): - def __init__(self, *, parent: Optional[QWidget] = None): - super().__init__(resource_name="Queue", setting_name="defaults.queue_id", parent=parent) + # Refresh queue list for the new farm (same as Settings Dialog) + self.queue_box.refresh_list() - def get_item(self): - farm_id = get_setting("defaults.farm_id") - queue_id = get_setting(self.setting_name) - if farm_id and queue_id: - deadline = api.get_boto3_client("deadline") - response = deadline.get_queue(farmId=farm_id, queueId=queue_id) - return (response["queueId"], response["displayName"], response["description"]) - else: - return ("", "", "") + # Notify parent to refresh (triggers submit button state update and queue parameters) + self._notify_parent_refresh() + def _on_queue_changed(self, index: int): + """Handle queue selection change in Submit Dialog""" + if index < 0: + return -class DeadlineStorageProfileNameDisplay(_DeadlineNamedResourceDisplay): - WINDOWS_OS = "Windows" - MAC_OS = "Macos" - LINUX_OS = "Linux" + # Get the selected queue ID from the combo box + queue_id = self.queue_box.box.itemData(index) + if queue_id is None: + return - def __init__(self, *, parent: Optional[QWidget] = None): - super().__init__( - resource_name="Storage profile name", - setting_name="settings.storage_profile_id", - parent=parent, - ) + # Update config immediately (unlike Settings Dialog which defers to apply()) + set_setting("defaults.queue_id", queue_id) - def get_item(self): - farm_id = get_setting("defaults.farm_id") - queue_id = get_setting("defaults.queue_id") - storage_profile_id = get_setting(self.setting_name) + # Refresh storage profile list for the new queue + self.storage_profile_box.refresh_list() - if farm_id and queue_id and storage_profile_id: - deadline = api.get_boto3_client("deadline") - response = deadline.list_storage_profiles_for_queue(farmId=farm_id, queueId=queue_id) - farm_storage_profiles = response.get("storageProfiles", {}) + # Notify parent to refresh (triggers submit button state update and queue parameters) + self._notify_parent_refresh() - if farm_storage_profiles: - storage_profile = [ - (item["storageProfileId"], item["displayName"], item["osFamily"]) - for item in farm_storage_profiles - if storage_profile_id == item["storageProfileId"] - ] - return storage_profile[0] + def _on_storage_profile_changed(self, index: int): + """Handle storage profile selection change in Submit Dialog""" + if index < 0: + return - return ("", "", "") + # Get the selected storage profile ID from the combo box + storage_profile_id = self.storage_profile_box.box.itemData(index) + + # Update config immediately + set_setting("settings.storage_profile_id", storage_profile_id if storage_profile_id else "") + + def _notify_parent_refresh(self): + """Helper to notify parent widgets to refresh after config changes""" + # Find SharedJobSettingsWidget parent to refresh queue parameters + parent_widget = self.parent() + while parent_widget is not None: + if hasattr(parent_widget, "refresh_queue_parameters"): + parent_widget.refresh_queue_parameters() + if hasattr(parent_widget, "parent") and callable(parent_widget.parent): + parent_widget = parent_widget.parent() + else: + break + + # Find SubmitJobToDeadlineDialog to refresh submit button state + parent_widget = self.parent() + while parent_widget 
is not None: + if hasattr(parent_widget, "refresh_deadline_settings"): + parent_widget.refresh_deadline_settings() + break + if hasattr(parent_widget, "parent") and callable(parent_widget.parent): + parent_widget = parent_widget.parent() + else: + break - def _get_default_storage_profile_name(self) -> str: - """ - Get a string specifying what the OS is, following the format the Deadline storage profile API expects. + def refresh_setting_controls(self, deadline_authorized): """ - if sys.platform.startswith("linux"): - return self.LINUX_OS - - if sys.platform.startswith("darwin"): - return self.MAC_OS - - if sys.platform.startswith("win"): - return self.WINDOWS_OS + Refreshes the controls for UI items that depend on the AWS Deadline Cloud API + for their values. - return "" + Args: + deadline_authorized (bool): Should be the result of a call to + api.check_deadline_available, for example from + an AWS Deadline Cloud Status Widget. + """ + # Update config for combo boxes + config = config_file.read_config() + self.farm_box.set_config(config) + self.queue_box.set_config(config) + self.storage_profile_box.set_config(config) + + # Refresh selected items to reflect current config + self.farm_box.refresh_selected_id() + self.queue_box.refresh_selected_id() + self.storage_profile_box.refresh_selected_id() + + # Refresh lists if authorized + if deadline_authorized: + self.farm_box.refresh_list() + self.queue_box.refresh_list() + self.storage_profile_box.refresh_list() diff --git a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py index 477503dc9..81ead0e5b 100644 --- a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py +++ b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py @@ -1,9 +1,13 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
import pytest +from unittest.mock import patch, MagicMock try: - from deadline.client.ui.widgets.shared_job_settings_tab import SharedJobSettingsWidget + from deadline.client.ui.widgets.shared_job_settings_tab import ( + SharedJobSettingsWidget, + DeadlineCloudSettingsWidget, + ) from deadline.client.ui.dataclasses import JobBundleSettings except ImportError: # The tests in this file should be skipped if Qt UI related modules cannot be loaded @@ -80,3 +84,191 @@ def test_max_worker_count_should_be_integer_within_range( ): shared_job_settings_tab.shared_job_properties_box.max_worker_count_box.setValue(-1) assert shared_job_settings_tab.shared_job_properties_box.max_worker_count_box.value() == 1 + + +# Tests for DeadlineCloudSettingsWidget farm/queue combo boxes + + +@pytest.fixture(scope="function") +def deadline_cloud_settings_widget(qtbot) -> DeadlineCloudSettingsWidget: + """Fixture for DeadlineCloudSettingsWidget with mocked combo boxes.""" + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.config_file.read_config" + ) as mock_config: + mock_config.return_value = MagicMock() + widget = DeadlineCloudSettingsWidget() + qtbot.addWidget(widget) + return widget + + +def test_farm_selection_updates_config(deadline_cloud_settings_widget: DeadlineCloudSettingsWidget): + """Test that selecting a farm updates the config setting.""" + widget = deadline_cloud_settings_widget + test_farm_id = "farm-test123" + + # Add a test item to the farm combo box + widget.farm_box.box.addItem("Test Farm", test_farm_id) + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.set_setting" + ) as mock_set_setting: + # Select the farm (triggers _on_farm_changed) + widget.farm_box.box.setCurrentIndex(widget.farm_box.box.count() - 1) + + # Verify config was updated with the farm ID + mock_set_setting.assert_called_with("defaults.farm_id", test_farm_id) + + +def test_queue_selection_updates_config( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that selecting a queue updates the config setting.""" + widget = deadline_cloud_settings_widget + test_queue_id = "queue-test456" + + # Add a test item to the queue combo box + widget.queue_box.box.addItem("Test Queue", test_queue_id) + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.set_setting" + ) as mock_set_setting: + # Select the queue (triggers _on_queue_changed) + widget.queue_box.box.setCurrentIndex(widget.queue_box.box.count() - 1) + + # Verify config was updated with the queue ID + mock_set_setting.assert_called_with("defaults.queue_id", test_queue_id) + + +def test_farm_change_refreshes_queue_list( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that changing farm triggers queue list refresh.""" + widget = deadline_cloud_settings_widget + test_farm_id = "farm-test789" + + # Add a test item to the farm combo box + widget.farm_box.box.addItem("Test Farm", test_farm_id) + + with patch.object(widget.queue_box, "refresh_list") as mock_refresh: + with patch("deadline.client.ui.widgets.shared_job_settings_tab.set_setting"): + # Select the farm + widget.farm_box.box.setCurrentIndex(widget.farm_box.box.count() - 1) + + # Verify queue list was refreshed + mock_refresh.assert_called_once() + + +def test_refresh_setting_controls_updates_combo_boxes( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that refresh_setting_controls updates both combo boxes.""" + widget = deadline_cloud_settings_widget + + with patch.object(widget.farm_box, 
"set_config") as mock_farm_config, patch.object( + widget.queue_box, "set_config" + ) as mock_queue_config, patch.object( + widget.farm_box, "refresh_selected_id" + ) as mock_farm_refresh, patch.object( + widget.queue_box, "refresh_selected_id" + ) as mock_queue_refresh, patch.object( + widget.farm_box, "refresh_list" + ) as mock_farm_list, patch.object(widget.queue_box, "refresh_list") as mock_queue_list, patch( + "deadline.client.ui.widgets.shared_job_settings_tab.config_file.read_config" + ): + # Call refresh with authorized=True + widget.refresh_setting_controls(deadline_authorized=True) + + # Verify all methods were called + mock_farm_config.assert_called_once() + mock_queue_config.assert_called_once() + mock_farm_refresh.assert_called_once() + mock_queue_refresh.assert_called_once() + mock_farm_list.assert_called_once() + mock_queue_list.assert_called_once() + + +def test_refresh_setting_controls_skips_list_refresh_when_unauthorized( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that refresh_setting_controls skips list refresh when not authorized.""" + widget = deadline_cloud_settings_widget + + with patch.object(widget.farm_box, "set_config"), patch.object( + widget.queue_box, "set_config" + ), patch.object(widget.farm_box, "refresh_selected_id"), patch.object( + widget.queue_box, "refresh_selected_id" + ), patch.object(widget.farm_box, "refresh_list") as mock_farm_list, patch.object( + widget.queue_box, "refresh_list" + ) as mock_queue_list, patch( + "deadline.client.ui.widgets.shared_job_settings_tab.config_file.read_config" + ): + # Call refresh with authorized=False + widget.refresh_setting_controls(deadline_authorized=False) + + # Verify list refresh was NOT called + mock_farm_list.assert_not_called() + mock_queue_list.assert_not_called() + + +def test_storage_profile_hidden_by_default( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that storage profile selector is hidden by default.""" + widget = deadline_cloud_settings_widget + + # Storage profile should be hidden initially + assert not widget.storage_profile_box.isVisible() + assert not widget.storage_profile_box_label.isVisible() + + +def test_storage_profile_shown_when_profiles_available( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that storage profile selector is shown when profiles are available.""" + widget = deadline_cloud_settings_widget + + # Add a real storage profile item + widget.storage_profile_box.box.addItem("Test Profile", "sp-test123") + + # Storage profile should now be visible + assert widget.storage_profile_box.isVisible() + assert widget.storage_profile_box_label.isVisible() + + +def test_storage_profile_selection_updates_config( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that selecting a storage profile updates the config setting.""" + widget = deadline_cloud_settings_widget + test_profile_id = "sp-test123" + + # Add a test item to the storage profile combo box + widget.storage_profile_box.box.addItem("Test Profile", test_profile_id) + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.set_setting" + ) as mock_set_setting: + # Select the storage profile (triggers _on_storage_profile_changed) + widget.storage_profile_box.box.setCurrentIndex(widget.storage_profile_box.box.count() - 1) + + # Verify config was updated with the storage profile ID + mock_set_setting.assert_called_with("settings.storage_profile_id", test_profile_id) + + +def 
test_queue_change_refreshes_storage_profile_list( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that changing queue triggers storage profile list refresh.""" + widget = deadline_cloud_settings_widget + test_queue_id = "queue-test789" + + # Add a test item to the queue combo box + widget.queue_box.box.addItem("Test Queue", test_queue_id) + + with patch.object(widget.storage_profile_box, "refresh_list") as mock_refresh: + with patch("deadline.client.ui.widgets.shared_job_settings_tab.set_setting"): + # Select the queue + widget.queue_box.box.setCurrentIndex(widget.queue_box.box.count() - 1) + + # Verify storage profile list was refreshed + mock_refresh.assert_called_once() From fc07fee978ce6cccd61479c17ae9f7f7863a18d4 Mon Sep 17 00:00:00 2001 From: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> Date: Thu, 11 Dec 2025 14:46:35 -0800 Subject: [PATCH 02/10] fix: refresh queue parameters when farm changes Previously, queue parameters would only refresh when the queue ID changed. This fix ensures queue environments are also refreshed when the farm ID changes, since queue environments are specific to a farm+queue combination. Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .../ui/widgets/shared_job_settings_tab.py | 11 ++- .../widgets/test_shared_job_settings_tab.py | 91 +++++++++++++++++++ 2 files changed, 97 insertions(+), 5 deletions(-) diff --git a/src/deadline/client/ui/widgets/shared_job_settings_tab.py b/src/deadline/client/ui/widgets/shared_job_settings_tab.py index 84652dc50..b4a9a5e41 100644 --- a/src/deadline/client/ui/widgets/shared_job_settings_tab.py +++ b/src/deadline/client/ui/widgets/shared_job_settings_tab.py @@ -128,24 +128,25 @@ def refresh_ui(self, job_settings: Any, load_new_bundle: bool = False): def refresh_queue_parameters(self, load_new_bundle: bool = False): """ - If the default queue id or job bundle has changed, refresh the queue parameters. + If the default farm id, queue id, or job bundle has changed, refresh the queue parameters. """ farm_id = get_setting("defaults.farm_id") queue_id = get_setting("defaults.queue_id") if not farm_id or not queue_id: self.queue_parameters_box.rebuild_ui(async_loading_state="") return # If the user has not selected a farm or queue ID, don't try to load + farm_or_queue_changed = farm_id != self.farm_id or queue_id != self.queue_id if ( self.queue_parameters_box.async_loading_state - or queue_id != self.queue_id + or farm_or_queue_changed or load_new_bundle ): self.queue_parameters_box.rebuild_ui( async_loading_state="Reloading Queue Environments..." 
) - # Join the thread if the queue id or job bundle has changed and the thread is running + # Join the thread if the farm, queue id, or job bundle has changed and the thread is running if ( - (queue_id != self.queue_id or load_new_bundle) + (farm_or_queue_changed or load_new_bundle) and self.__refresh_queue_parameters_thread and self.__refresh_queue_parameters_thread.is_alive() ): @@ -490,7 +491,7 @@ def _build_ui(self): """ Build the UI for the Deadline settings """ - # Import combo box classes from deadline_config_dialog + # Import here to avoid circular import from ..dialogs.deadline_config_dialog import ( DeadlineFarmListComboBox, DeadlineQueueListComboBox, diff --git a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py index 81ead0e5b..911f31173 100644 --- a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py +++ b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py @@ -272,3 +272,94 @@ def test_queue_change_refreshes_storage_profile_list( # Verify storage profile list was refreshed mock_refresh.assert_called_once() + + +# Tests for SharedJobSettingsWidget.refresh_queue_parameters + + +def test_refresh_queue_parameters_triggers_on_farm_change( + shared_job_settings_tab: SharedJobSettingsWidget, +): + widget = shared_job_settings_tab + + # Set initial farm and queue IDs + widget.farm_id = "farm-initial" + widget.queue_id = "queue-initial" + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.get_setting" + ) as mock_get_setting: + # Simulate farm change (different farm_id, same queue_id) + mock_get_setting.side_effect = lambda key: { + "defaults.farm_id": "farm-new", + "defaults.queue_id": "queue-initial", + }.get(key) + + with patch.object(widget.queue_parameters_box, "rebuild_ui") as mock_rebuild, patch.object( + widget, "_start_load_queue_parameters_thread" + ) as mock_start_thread: + widget.refresh_queue_parameters() + + mock_rebuild.assert_called_once_with( + async_loading_state="Reloading Queue Environments..." + ) + mock_start_thread.assert_called_once() + + +def test_refresh_queue_parameters_triggers_on_queue_change( + shared_job_settings_tab: SharedJobSettingsWidget, +): + widget = shared_job_settings_tab + + # Set initial farm and queue IDs + widget.farm_id = "farm-initial" + widget.queue_id = "queue-initial" + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.get_setting" + ) as mock_get_setting: + # Simulate queue change (same farm_id, different queue_id) + mock_get_setting.side_effect = lambda key: { + "defaults.farm_id": "farm-initial", + "defaults.queue_id": "queue-new", + }.get(key) + + with patch.object(widget.queue_parameters_box, "rebuild_ui") as mock_rebuild, patch.object( + widget, "_start_load_queue_parameters_thread" + ) as mock_start_thread: + widget.refresh_queue_parameters() + + mock_rebuild.assert_called_once_with( + async_loading_state="Reloading Queue Environments..." 
+ ) + mock_start_thread.assert_called_once() + + +def test_refresh_queue_parameters_no_refresh_when_unchanged( + shared_job_settings_tab: SharedJobSettingsWidget, +): + widget = shared_job_settings_tab + + # Set initial farm and queue IDs + widget.farm_id = "farm-same" + widget.queue_id = "queue-same" + + # Clear the async loading state so it doesn't trigger refresh + widget.queue_parameters_box.async_loading_state = "" + + with patch( + "deadline.client.ui.widgets.shared_job_settings_tab.get_setting" + ) as mock_get_setting: + # Same farm and queue IDs + mock_get_setting.side_effect = lambda key: { + "defaults.farm_id": "farm-same", + "defaults.queue_id": "queue-same", + }.get(key) + + with patch.object(widget.queue_parameters_box, "rebuild_ui") as mock_rebuild, patch.object( + widget, "_start_load_queue_parameters_thread" + ) as mock_start_thread: + widget.refresh_queue_parameters() + + mock_rebuild.assert_not_called() + mock_start_thread.assert_not_called() From 35efc1d2a9a16ae9d370400c5a428c8ee53871c5 Mon Sep 17 00:00:00 2001 From: Charles Moore <122481442+moorec-aws@users.noreply.github.com> Date: Thu, 18 Dec 2025 12:36:12 -0600 Subject: [PATCH 03/10] ci: add release environment option to build_installers.yml (#952) Signed-off-by: Charles Moore <122481442+moorec-aws@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .github/workflows/build_installers.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_installers.yml b/.github/workflows/build_installers.yml index 39e0008ed..f80f2c594 100644 --- a/.github/workflows/build_installers.yml +++ b/.github/workflows/build_installers.yml @@ -26,6 +26,7 @@ on: type: choice options: - mainline + - release jobs: BuildInstaller: From 58c0fef9fb89c23d9b26878f6df47c18e4cf47a5 Mon Sep 17 00:00:00 2001 From: Josh Usiskin <56369778+jusiskin@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:23:20 -0600 Subject: [PATCH 04/10] docs: add release changelog guidelines (#951) Signed-off-by: Josh Usiskin <56369778+jusiskin@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- CHANGELOG_GUIDELINES.md | 306 ++++++++++++++++++++++++++++++++++++++++ DEVELOPMENT.md | 12 ++ 2 files changed, 318 insertions(+) create mode 100644 CHANGELOG_GUIDELINES.md diff --git a/CHANGELOG_GUIDELINES.md b/CHANGELOG_GUIDELINES.md new file mode 100644 index 000000000..ec75631f3 --- /dev/null +++ b/CHANGELOG_GUIDELINES.md @@ -0,0 +1,306 @@ +# Changelog Guidelines + +This document provides formal guidance on structuring and writing changelog entries for the AWS Deadline Cloud client repository. + +## Table of Contents + +* [Changelog Structure](#changelog-structure) +* [Writing Guidelines](#writing-guidelines) +* [Breaking Changes](#breaking-changes) +* [Deprecations](#deprecations) +* [Features](#features) +* [Bug Fixes](#bug-fixes) +* [Performance Improvements](#performance-improvements) +* [Experimental](#experimental) +* [Fixes to Unreleased Changes](#fixes-to-unreleased-changes) +* [Examples](#examples) +* [Review Checklist](#review-checklist) + +## Changelog Structure + +Each release in the changelog MUST follow this standardized section order: + +1. **BREAKING CHANGES** (if applicable) +2. **DEPRECATIONS** (if applicable) +3. **Features** +4. **Bug Fixes** +5. **Performance Improvements** (if applicable) +6. 
**Experimental** (if applicable) + +### Section Descriptions + +**BREAKING CHANGES**: Changes that break backward compatibility and require user action. + +**DEPRECATIONS**: Features or APIs that will be removed in a future release. Users should be warned but functionality still works. + +**Features**: New functionality or enhancements to existing features. + +**Bug Fixes**: Corrections to defects in existing functionality. + +**Performance Improvements**: Changes that improve performance without altering functionality. + +**Experimental**: Features behind feature flags or marked as subject to change. + +## Writing Guidelines + +### General Principles + +1. **User-focused language**: Write from the customer's perspective, not the implementation perspective +2. **Action-oriented**: Describe what changed and the impact, not how it was implemented +3. **Concise but complete**: Provide enough context to understand the change without being verbose +4. **Present tense**: Use present tense for describing the state after the change + +## Breaking Changes + +Breaking changes require special attention and MUST include: + +1. **Clear description of what broke** +2. **Migration path or workaround** +3. **Code examples when applicable** + +### Format + +```markdown +### BREAKING CHANGES + +* [Brief description of the change] (#PR) ([commit]) + * [Detailed explanation of what broke] + * [Migration path or how to adapt] + * [Code example if applicable] +``` + +### Example + +```markdown +### BREAKING CHANGES + +* `deadline.ui.show_job_bundle_submitter`: Input parameter renamed from `submitter_name` to `submitter_info` and now expects a `deadline.dataclasses.SubmitterInfo` object as input. (#940) ([`74a3b01`]) + * The function signature has changed to accept a structured object instead of a string + * **Migration**: Replace `show_job_bundle_submitter(submitter_name="MyName")` with `show_job_bundle_submitter(submitter_info=SubmitterInfo(submitter_name="MyName"))` +``` + +## Deprecations + +Deprecations MUST include: + +1. **What is being deprecated** +2. **What to use instead** +3. **When it will be removed** (if known) + +### Format + +```markdown +## DEPRECATIONS + +* [What is deprecated] has been deprecated. [What to use instead] should now be used. [When removal will occur if known] +``` + +### Example + +```markdown +## DEPRECATIONS + +* The CLI `bundle gui-submit --submitter-name` option has been deprecated. `--submitter-info` should now be used to provide the name. This option will be removed in version 1.0.0. +* `--timezone` is being deprecated in favor of `--timestamp-format` for the `job logs` command. `--timezone` will be removed in a future release. +``` + +## Fixes to Unreleased Changes + +Fixes that only impact unreleased changes (changes not yet in a published release) should generally **NOT** be included in the changelog as separate entries. + +### Guidelines + +Fixes to unreleased changes should generally be omitted from the changelog. Instead, describe features in their final working state without mentioning intermediate bugs or fixes. The changelog is for released functionality, not development history. + +### Example + +Consider two commits that were merged for the same feature: + +``` +feat: add job retry functionality +fix: job retry fails when retry count exceeds 5 +``` + +Let's assume that the `fix:` commit fixes a bug in the `feat:` commit, but the `feat:` commit had not been released before the `fix:` commit was merged. 
+ +When drafting the changelog for the release, these two changes would be merged into a single chaneglog entry: + +**GOOD:** + +```markdown +### Features +* Add job retry functionality with configurable retry limits +``` + +**BAD:** + +```markdown +### Features +* Add job retry functionality +### Bug Fixes +* Fix job retry failing when retry count exceeds 5 +``` + +## Features + +Features should describe **what the user can now do** and when it's useful. + +**Good examples:** +```markdown +* Add `deadline job wait` command to monitor job completion with configurable polling intervals +* Support automatic download of job attachments output +* Add detailed tooltips to grayed-out submit button +``` + +**Poor examples:** +```markdown +* Added new function to API +* Implemented job waiting +* Updated UI +``` + +## Bug Fixes + +Bug fixes should describe **the problem that was fixed** as the customer experienced it, not the technical implementation. + +**Good examples:** +```markdown +* Job submission error when submitting same jobs with the same title over 100 times in a single day. +* Re-queued jobs downloading the same output more than once with cli +* Process hangs on exit with high volume of telemetry +``` + +**Poor examples:** +```markdown +* Fixed a bug in the submission code +* Updated the download logic +* Changed the telemetry handler +``` + +## Performance Improvements + +Performance improvements: +1. **SHOULD quantify the improvement** when possible (e.g., "2x faster", "50% reduction") +2. **MUST specify when it applies** (e.g., "for large file uploads", "during job submission") + +**Good examples:** +```markdown +* Improve concurrency during bundle submission by threading local s3 cache db connections and enabling WAL mode by default (40% faster for bundles with 1000+ files) +* Speed up job bundle submissions by reducing redundant stat calls (2x faster for bundles with deep directory structures) +* Reduce memory usage during large file uploads +``` + +**Poor examples:** +```markdown +* Made uploads faster +* Improved performance +* Optimized code +``` + +## Experimental + +Use a dedicated **Experimental** section at the end of the changelog for features that: +- Are behind feature flags +- Have public APIs and functional behavior under development and subject to change without following normal breaking change policies + +Experimental features must be grouped under parent bullets by feature name, with specific changes as sub-bullets. When a feature group requires a feature flag, document the flag name in the parent bullet point. + + +### Format + +```markdown +### Experimental + +These changes are experimental and are subject to change. + +* [Feature name] (requires `FEATURE_FLAG_NAME=true`): + * [Specific change description] + * [Another change for same feature] +* [Another feature name]: + * [Change description] +``` + +### Example + +```markdown +### Experimental + +These changes are experimental and are subject to change. + +* MCP Server: + * Add get_session_logs to mcp server (#909) ([`c9f83a4`]) +* Incremental/Automatic Downloads (requires `ENABLE_INCREMENTAL_DOWNLOAD=true`): + * Add storage profile support for incremental download (#773) ([`d7fd976`]) + * Add internal functions to support path mapping (#764) ([`5a28a64`]) +``` + +### Graduating from Experimental + +When a feature graduates from experimental to stable: + +1. Move it to the appropriate section (Features, etc.) +2. Note that it's now stable +3. 
Document any API changes made during the experimental phase + +**Example:** +```markdown +### Features + +* Incremental job output downloads are now stable and enabled by default (previously experimental) + * API changes from experimental version: `download_incremental()` renamed to `download_outputs()` +``` + +## Examples + +### Complete Release Example + +```markdown +## 0.55.0 (2025-01-15) + +### BREAKING CHANGES + +* Remove deprecated `create_job_response` attribute from `ui.dialogs.SubmitJobToDeadlineDialog` (#791) ([`6587e4e`]) + * This attribute was deprecated in 0.51.1 and always returned `None` + * **Migration**: Use the `job_id` attribute instead, which is set when job submission succeeds + +## DEPRECATIONS + +* The `--legacy-format` option has been deprecated. Use `--format=legacy` instead. This option will be removed in version 1.0.0. + +### Features + +* Add `deadline job wait` command to monitor job completion with configurable polling and timeout +* Support automatic download of job attachments output for completed jobs +* Add detailed tooltips to grayed-out submit button explaining why submission is disabled + +### Bug Fixes + +* Job submission error when submitting same jobs with the same title over 100 times in a single day +* Process hangs on exit with high volume of telemetry +* HashDB does not retry when failing to open + +### Performance Improvements + +* Improve concurrency during bundle submission by threading local s3 cache db connections (40% faster for bundles with 1000+ files) (#896) ([`ba15300`]) +* Speed up job bundle submissions by reducing redundant stat calls (2x faster for bundles with deep directory structures) (#860) ([`6e6e3ff`]) + +### Experimental + +These changes are experimental and are subject to change. + +* Incremental Downloads: + * Add storage profile support for incremental download (requires `ENABLE_INCREMENTAL_DOWNLOAD=true`) (#773) ([`d7fd976`]) +``` + +## Review Checklist + +Before finalizing a changelog, verify: + +- [ ] Sections are in the correct order +- [ ] Breaking changes describe what broke and include migration paths +- [ ] Deprecations specify what to use instead +- [ ] Bug fixes describe the problem that was fixed from the user perspective +- [ ] Performance improvements specify when they apply and ideally quantify improvements +- [ ] Experimental features are grouped by feature name with feature flags documented +- [ ] All entries are user-focused, not implementation-focused +- [ ] Fixes to unreleased changes are omitted or merged with original features diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 988ec3019..33edff433 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -12,6 +12,7 @@ Table of Contents: * [Unit tests](#unit-tests) * [Integration tests](#integration-tests) * [Squish GUI Submitter tests](#squish-tests) +* [Changelog Guidelines](#changelog-guidelines) * [Things to Know](#things-to-know) * [Public contracts](#public-contracts) * [Library Dependencies](#dependencies) @@ -200,6 +201,17 @@ Squish GUI tests are located under the `test/squish` directory of this repositor A separate ReadMe for developing/running Squish GUI tests is located in the `test/squish` directory. Please refer to [test/squish/SQUISH_README.md](./test/squish/SQUISH_README.md) on full instructions to use the automated tests. Note that a Squish license is required in order to run the tests. 
Currently, you may either have your own Squish license or you may file a [pull request](https://help.github.com/articles/creating-a-pull-request/) to the Deadline Cloud team to run or add any tests against any changes to be committed. Please perform any necessary manual tests prior to submitting any changes, in addition to making sure at least a minimal render job test passes. +## Changelog Guidelines + +When a new version of `deadline` is being released, we must prepare an update to our change log (`CHANGELOG.md`). This is a semi-automated process. GitHub actions prepares a pull request with an automatically generated draft of the changelog entry. Maintainers are responsible for reviewing the draft, making any necessary changes, and reviewing the changes in the pull request. Please consult in [CHANGELOG_GUIDELINES.md](./CHANGELOG_GUIDELINES.md) for the changelog guidelines. These guidelines ensure consistency in how we communicate changes to users and provide standards for: + +* Structuring changelog sections and their ordering +* Writing user-focused descriptions for different types of changes +* Handling breaking changes with proper migration guidance +* Communicating deprecations effectively +* Managing fixes to unreleased changes +* Documenting changes to experimental features + ## Things to Know ### Public Contracts From 83c753a4783eac9b1e247643ba6615c7b4d9c8e0 Mon Sep 17 00:00:00 2001 From: Mark Wiebe <399551+mwiebe@users.noreply.github.com> Date: Sun, 28 Dec 2025 11:44:22 -0800 Subject: [PATCH 05/10] feat!: Update hash cache to support byte ranges for file chunking support (#953) BREAKING CHANGES: - Hash cache database schema updated from V3 to V4. Signed-off-by: Mark Wiebe <399551+mwiebe@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .../job_attachments/caches/__init__.py | 3 +- .../job_attachments/caches/hash_cache.py | 144 +++++++-- .../caches/test_caches.py | 292 ++++++++++++++++++ 3 files changed, 414 insertions(+), 25 deletions(-) diff --git a/src/deadline/job_attachments/caches/__init__.py b/src/deadline/job_attachments/caches/__init__.py index 7ee77a445..d2046340d 100644 --- a/src/deadline/job_attachments/caches/__init__.py +++ b/src/deadline/job_attachments/caches/__init__.py @@ -1,7 +1,7 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. from .cache_db import CacheDB, CONFIG_ROOT, COMPONENT_NAME -from .hash_cache import HashCache, HashCacheEntry +from .hash_cache import HashCache, HashCacheEntry, WHOLE_FILE_RANGE_END from .s3_check_cache import S3CheckCache, S3CheckCacheEntry __all__ = [ @@ -10,6 +10,7 @@ "COMPONENT_NAME", "HashCache", "HashCacheEntry", + "WHOLE_FILE_RANGE_END", "S3CheckCache", "S3CheckCacheEntry", ] diff --git a/src/deadline/job_attachments/caches/hash_cache.py b/src/deadline/job_attachments/caches/hash_cache.py index 590142a73..5ed43a1af 100644 --- a/src/deadline/job_attachments/caches/hash_cache.py +++ b/src/deadline/job_attachments/caches/hash_cache.py @@ -2,6 +2,13 @@ """ Module for accessing the local file hash cache. + +Supports two types of hash entries: +1. Whole-file hashes: range_start=0, range_end=-1 (WHOLE_FILE_RANGE_END) +2. Byte-range hashes: range_start >= 0, range_end > 0, defining the range [start, end) + +The range parameters allow caching hashes for arbitrary byte ranges of files, +which is useful for caching hashes for any chunking scheme without modifying the cache. 
""" import logging @@ -14,10 +21,17 @@ logger = logging.getLogger("Deadline") +# Sentinel value indicating a whole-file hash (no specific byte range) +WHOLE_FILE_RANGE_END = -1 + @dataclass class HashCacheEntry: - """Represents an entry in the local hash-cache database""" + """Represents an entry in the local hash-cache database. + + For whole-file hashes: range_start=0, range_end=-1 (WHOLE_FILE_RANGE_END) + For chunk hashes: range_start and range_end define the byte range [start, end) + """ # The file_path is stored as a BLOB in sqlite, encoded with utf-8 and the "surrogatepass" # error handler, as file names encountered in practice require this. @@ -25,6 +39,20 @@ class HashCacheEntry: hash_algorithm: HashAlgorithm file_hash: str last_modified_time: str + range_start: int = 0 + range_end: int = WHOLE_FILE_RANGE_END + + def __post_init__(self) -> None: + # Validate byte-range entries have range_end > range_start. + if self.range_end != WHOLE_FILE_RANGE_END and self.range_end <= self.range_start: + raise ValueError( + f"For byte-range entries, range_end ({self.range_end}) must be greater than " + f"range_start ({self.range_start})" + ) + + def is_whole_file(self) -> bool: + """Returns True if this entry represents a whole-file hash.""" + return self.range_start == 0 and self.range_end == WHOLE_FILE_RANGE_END def to_dict(self) -> Dict[str, Any]: return { @@ -32,6 +60,8 @@ def to_dict(self) -> Dict[str, Any]: "hash_algorithm": self.hash_algorithm.value, "file_hash": self.file_hash, "last_modified_time": self.last_modified_time, + "range_start": self.range_start, + "range_end": self.range_end, } @@ -44,14 +74,34 @@ class HashCache(CacheDB): This class also automatically locks when doing writes, so it can be called by multiple threads. + + Schema (hashesV4): + - file_path: blob (part of composite primary key) + - hash_algorithm: text (part of composite primary key) + - range_start: integer (part of composite primary key) + - range_end: integer (part of composite primary key) + - file_hash: text + - last_modified_time: timestamp + + For whole-file hashes, range_start=0 and range_end=-1. + For byte-range hashes, range_start and range_end define [start, end). """ CACHE_NAME = "hash_cache" - CACHE_DB_VERSION = 3 + CACHE_DB_VERSION = 4 def __init__(self, cache_dir: Optional[str] = None) -> None: table_name: str = f"hashesV{self.CACHE_DB_VERSION}" - create_query: str = f"CREATE TABLE hashesV{self.CACHE_DB_VERSION}(file_path blob primary key, hash_algorithm text secondary key, file_hash text, last_modified_time timestamp)" + create_query: str = ( + f"CREATE TABLE {table_name}(" + "file_path blob, " + "hash_algorithm text, " + "range_start integer, " + "range_end integer, " + "file_hash text, " + "last_modified_time timestamp, " + "PRIMARY KEY (file_path, hash_algorithm, range_start, range_end))" + ) super().__init__( cache_name=self.CACHE_NAME, table_name=table_name, @@ -60,55 +110,101 @@ def __init__(self, cache_dir: Optional[str] = None) -> None: ) def get_connection_entry( - self, file_path_key: str, hash_algorithm: HashAlgorithm, connection + self, + file_path_key: str, + hash_algorithm: HashAlgorithm, + connection: Any, + range_start: int = 0, + range_end: int = WHOLE_FILE_RANGE_END, ) -> Optional[HashCacheEntry]: """ - Returns an entry from the hash cache, if it exists. This is the "lockless" (Doesn't take - the main db_lock protecting db_connection) version of get_entry which expects a connection + Returns an entry from the hash cache, if it exists. 
+ + This is the "lockless" version of get_entry which expects a connection parameter for the connection which will be used to read from the DB - this can generally be the thread local connection returned by get_local_connection() + + Args: + file_path_key: The file path to look up + hash_algorithm: The hash algorithm used + connection: SQLite connection to use + range_start: Start byte offset (0 for whole-file) + range_end: End byte offset (-1/WHOLE_FILE_RANGE_END for whole-file) + + Returns: + HashCacheEntry if found, None otherwise """ if not self.enabled: return None + encoded_path = file_path_key.encode(encoding="utf-8", errors="surrogatepass") + entry_vals = connection.execute( - f"SELECT * FROM {self.table_name} WHERE file_path=? AND hash_algorithm=?", - [ - file_path_key.encode(encoding="utf-8", errors="surrogatepass"), - hash_algorithm.value, - ], + f"SELECT * FROM {self.table_name} " + "WHERE file_path=? AND hash_algorithm=? AND range_start=? AND range_end=?", + [encoded_path, hash_algorithm.value, range_start, range_end], ).fetchone() + if entry_vals: return HashCacheEntry( file_path=str(entry_vals[0], encoding="utf-8", errors="surrogatepass"), hash_algorithm=HashAlgorithm(entry_vals[1]), - file_hash=entry_vals[2], - last_modified_time=str(entry_vals[3]), + file_hash=entry_vals[4], + last_modified_time=str(entry_vals[5]), + range_start=entry_vals[2], + range_end=entry_vals[3], ) - else: - return None + + return None def get_entry( - self, file_path_key: str, hash_algorithm: HashAlgorithm + self, + file_path_key: str, + hash_algorithm: HashAlgorithm, + range_start: int = 0, + range_end: int = WHOLE_FILE_RANGE_END, ) -> Optional[HashCacheEntry]: """ Returns an entry from the hash cache, if it exists. + + Args: + file_path_key: The file path to look up + hash_algorithm: The hash algorithm used + range_start: Start byte offset (0 for whole-file) + range_end: End byte offset (-1/WHOLE_FILE_RANGE_END for whole-file) + + Returns: + HashCacheEntry if found, None otherwise """ if not self.enabled: return None with self.db_lock, self.db_connection: - return self.get_connection_entry(file_path_key, hash_algorithm, self.db_connection) + return self.get_connection_entry( + file_path_key, hash_algorithm, self.db_connection, range_start, range_end + ) def put_entry(self, entry: HashCacheEntry) -> None: - """Inserts or replaces an entry into the hash cache database after acquiring the lock.""" + """ + Inserts or replaces an entry into the hash cache database after acquiring the lock. + + The entry's range_start and range_end determine whether this is a whole-file + hash (range_start=0, range_end=-1) or a byte-range hash. 
+ """ if self.enabled: with self.db_lock, self.db_connection: - entry_dict = entry.to_dict() - entry_dict["file_path"] = entry_dict["file_path"].encode( - encoding="utf-8", errors="surrogatepass" - ) + encoded_path = entry.file_path.encode(encoding="utf-8", errors="surrogatepass") + self.db_connection.execute( - f"INSERT OR REPLACE INTO {self.table_name} VALUES(:file_path, :hash_algorithm, :file_hash, :last_modified_time)", - entry_dict, + f"INSERT OR REPLACE INTO {self.table_name} " + "VALUES(:file_path, :hash_algorithm, :range_start, :range_end, " + ":file_hash, :last_modified_time)", + { + "file_path": encoded_path, + "hash_algorithm": entry.hash_algorithm.value, + "range_start": entry.range_start, + "range_end": entry.range_end, + "file_hash": entry.file_hash, + "last_modified_time": entry.last_modified_time, + }, ) diff --git a/test/unit/deadline_job_attachments/caches/test_caches.py b/test/unit/deadline_job_attachments/caches/test_caches.py index 62c2f79a4..69ea69e19 100644 --- a/test/unit/deadline_job_attachments/caches/test_caches.py +++ b/test/unit/deadline_job_attachments/caches/test_caches.py @@ -17,6 +17,7 @@ HashCacheEntry, S3CheckCache, S3CheckCacheEntry, + WHOLE_FILE_RANGE_END, ) @@ -267,6 +268,297 @@ def test_enter_sqlite_import_error(self, tmpdir): ) assert hc.get_entry("/no/file", HashAlgorithm.XXH128) is None + def test_get_entry_with_byte_range(self, tmpdir): + """ + Tests that a byte range entry is returned when it exists in the cache + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + expected_entry = HashCacheEntry( + file_path="large_file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="chunk_hash_1", + last_modified_time="1234.5678", + range_start=0, + range_end=268435456, # 256MB + ) + + # WHEN + with HashCache(cache_dir) as hc: + hc.put_entry(expected_entry) + actual_entry = hc.get_entry( + "large_file.bin", HashAlgorithm.XXH128, range_start=0, range_end=268435456 + ) + + # THEN + assert actual_entry == expected_entry + assert actual_entry.range_start == 0 + assert actual_entry.range_end == 268435456 + + def test_get_entry_multiple_byte_ranges_same_file(self, tmpdir): + """ + Tests that multiple byte range entries for the same file are stored and retrieved correctly + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + chunk_size = 268435456 # 256MB + entries = [ + HashCacheEntry( + file_path="large_file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash=f"chunk_hash_{i}", + last_modified_time="1234.5678", + range_start=i * chunk_size, + range_end=(i + 1) * chunk_size, + ) + for i in range(4) # 4 chunks + ] + + # WHEN + with HashCache(cache_dir) as hc: + for entry in entries: + hc.put_entry(entry) + + # THEN - each chunk should be retrievable independently + for i, expected_entry in enumerate(entries): + actual_entry = hc.get_entry( + "large_file.bin", + HashAlgorithm.XXH128, + range_start=i * chunk_size, + range_end=(i + 1) * chunk_size, + ) + assert actual_entry == expected_entry + assert actual_entry.file_hash == f"chunk_hash_{i}" + + def test_get_entry_byte_range_not_found(self, tmpdir): + """ + Tests that None is returned when a specific byte range doesn't exist + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="chunk_hash", + last_modified_time="1234.5678", + range_start=0, + range_end=1000, + ) + + # WHEN + with HashCache(cache_dir) as hc: + hc.put_entry(entry) + + # THEN - different range should return None + assert ( + 
hc.get_entry("file.bin", HashAlgorithm.XXH128, range_start=0, range_end=2000) + is None + ) + assert ( + hc.get_entry("file.bin", HashAlgorithm.XXH128, range_start=1000, range_end=2000) + is None + ) + + def test_get_entry_whole_file_vs_byte_range_independent(self, tmpdir): + """ + Tests that whole-file hashes and byte-range hashes are stored independently + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + whole_file_entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="whole_file_hash", + last_modified_time="1234.5678", + range_start=0, + range_end=WHOLE_FILE_RANGE_END, + ) + chunk_entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="chunk_hash", + last_modified_time="1234.5678", + range_start=0, + range_end=1000, + ) + + # WHEN + with HashCache(cache_dir) as hc: + hc.put_entry(whole_file_entry) + hc.put_entry(chunk_entry) + + # THEN - both should be retrievable independently + actual_whole = hc.get_entry("file.bin", HashAlgorithm.XXH128) + actual_chunk = hc.get_entry( + "file.bin", HashAlgorithm.XXH128, range_start=0, range_end=1000 + ) + + assert actual_whole == whole_file_entry + assert actual_whole.file_hash == "whole_file_hash" + assert actual_chunk == chunk_entry + assert actual_chunk.file_hash == "chunk_hash" + + def test_get_connection_entry_with_byte_range(self, tmpdir): + """ + Tests that get_connection_entry works with byte range parameters + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + expected_entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="chunk_hash", + last_modified_time="1234.5678", + range_start=1000, + range_end=2000, + ) + + # WHEN + with HashCache(cache_dir) as hc: + hc.put_entry(expected_entry) + connection = hc.get_local_connection() + actual_entry = hc.get_connection_entry( + "file.bin", HashAlgorithm.XXH128, connection, range_start=1000, range_end=2000 + ) + + # THEN + assert actual_entry == expected_entry + + def test_hash_cache_entry_is_whole_file(self): + """ + Tests the is_whole_file() helper method on HashCacheEntry + """ + whole_file = HashCacheEntry( + file_path="file.txt", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + ) + assert whole_file.is_whole_file() is True + + chunk = HashCacheEntry( + file_path="file.txt", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=0, + range_end=1000, + ) + assert chunk.is_whole_file() is False + + # Edge case: range_start != 0 but range_end == -1 should not be whole file + weird_entry = HashCacheEntry( + file_path="file.txt", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=100, + range_end=WHOLE_FILE_RANGE_END, + ) + assert weird_entry.is_whole_file() is False + + def test_put_entry_replaces_existing_byte_range(self, tmpdir): + """ + Tests that put_entry replaces an existing entry with the same byte range + """ + # GIVEN + cache_dir = tmpdir.mkdir("cache") + original_entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="original_hash", + last_modified_time="1234.5678", + range_start=0, + range_end=1000, + ) + updated_entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="updated_hash", + last_modified_time="9999.9999", + range_start=0, + range_end=1000, + ) + + # WHEN + with HashCache(cache_dir) as hc: + 
hc.put_entry(original_entry) + hc.put_entry(updated_entry) + actual_entry = hc.get_entry( + "file.bin", HashAlgorithm.XXH128, range_start=0, range_end=1000 + ) + + # THEN + assert actual_entry.file_hash == "updated_hash" + assert actual_entry.last_modified_time == "9999.9999" + + def test_hash_cache_entry_to_dict_includes_range(self): + """ + Tests that to_dict() includes range_start and range_end + """ + entry = HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=100, + range_end=200, + ) + result = entry.to_dict() + + assert result["file_path"] == "file.bin" + assert result["hash_algorithm"] == "xxh128" + assert result["file_hash"] == "hash" + assert result["last_modified_time"] == "1234.5678" + assert result["range_start"] == 100 + assert result["range_end"] == 200 + + def test_hash_cache_entry_validates_byte_range(self): + """ + Tests that HashCacheEntry raises ValueError when range_end <= range_start for byte-range entries + """ + # Valid byte-range entry should work + HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=0, + range_end=100, + ) + + # Whole-file entry (range_end=-1) should work regardless of range_start + HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=0, + range_end=WHOLE_FILE_RANGE_END, + ) + + # Invalid: range_end == range_start + with pytest.raises(ValueError, match="range_end.*must be greater than.*range_start"): + HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=100, + range_end=100, + ) + + # Invalid: range_end < range_start + with pytest.raises(ValueError, match="range_end.*must be greater than.*range_start"): + HashCacheEntry( + file_path="file.bin", + hash_algorithm=HashAlgorithm.XXH128, + file_hash="hash", + last_modified_time="1234.5678", + range_start=200, + range_end=100, + ) + class TestS3CheckCache: """ From 1aa98bfd7f9c9324b49010e09a16ac05aef6c889 Mon Sep 17 00:00:00 2001 From: Mark Wiebe <399551+mwiebe@users.noreply.github.com> Date: Tue, 30 Dec 2025 10:49:22 -0800 Subject: [PATCH 06/10] docs: Add AGENTS.md for AI assistants to reference (#955) https://agents.md/ is an open format for guiding coding agents, supported by https://kiro.dev/ and many other agentic AI systems. This adds a basic AGENTS.md to provide a short overview of the project and commands to use with it. Signed-off-by: Mark <399551+mwiebe@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- AGENTS.md | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 AGENTS.md diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000..0f7371c63 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,148 @@ +# AGENTS.md - AWS Deadline Cloud Client + +This document provides guidance for AI agents working with this codebase. + +## Project Overview + +AWS Deadline Cloud client is a Python library and CLI tool for interacting with [AWS Deadline Cloud](https://docs.aws.amazon.com/deadline-cloud/latest/userguide/what-is-deadline-cloud.html). It supports job submission, file transfer via job attachments, and provides both CLI and GUI interfaces. 
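+
+For a quick orientation, here is a minimal Python submission sketch (illustrative only; the
+bundle path is hypothetical and farm/queue defaults are assumed to already be configured on
+the workstation):
+
+```python
+from deadline.client import api
+
+# Submits the job bundle to the default farm and queue and returns the new job id
+job_id = api.create_job_from_job_bundle(
+    job_bundle_dir="path/to/job_bundle",
+    queue_parameter_definitions=[],
+)
+print(job_id)
+```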
+ +**Package name:** `deadline` +**Python versions:** 3.8 - 3.13 +**Platforms:** Linux, Windows, macOS + +## Repository Structure + +``` +src/deadline/ +├── client/ # Main client library +│ ├── api/ # Boto3 wrappers and AWS API helpers +│ ├── cli/ # CLI entry points (deadline command) +│ ├── config/ # Configuration (~/.deadline/*) +│ ├── ui/ # Qt/PySide GUI components +│ ├── job_bundle/ # Job bundle handling and history +│ └── dataclasses/ # Data structures +├── job_attachments/ # File transfer to/from S3 +│ ├── api/ # Public job attachments API +│ ├── caches/ # Hash and S3 check caches +│ └── asset_manifests/ # Manifest handling +├── mcp/ # MCP server (public) +├── _mcp/ # MCP server (internal) +└── common/ # Shared utilities +``` + +## Key Commands + +Always use `hatch` for running builds, tests, formatting, and linting. +If running just a few tests, always add `--numprocesses=1` to the `hatch run test` +command so that it starts quicker. + +```bash +# Development +hatch build # Build wheel/sdist +hatch run test # Run unit tests +hatch run test --numprocesses=1 -k "selected_test_name" # Run just a few tests +hatch run all:test # Test all Python versions +hatch run integ:test # Integration tests (requires AWS) +hatch run lint # Check formatting +hatch run fmt # Auto-format code +hatch shell # Enter dev environment + +# CLI usage +deadline farm list +deadline bundle submit +deadline job list +deadline job get +deadline job logs --job-id +``` + +## Code Conventions + +### Module Naming +- **Private modules:** `_module.py` - internal implementation +- **Public modules:** `module.py` - part of public API +- Symbols not starting with `_` define the public contract + +### Import Style (Public Modules) +```python +import os as _os # Private import +from typing import Dict as _Dict # Private import + +class PublicClass: # Public + pass + +class _PrivateClass: # Private + pass +``` + +### Commit Messages +Use [conventional commits](https://www.conventionalcommits.org/): +- `feat:` - New features +- `fix:` - Bug fixes +- `docs:` - Documentation +- `test:` - Tests only +- `refactor:` - Code refactoring +- `perf:` - Performance improvements +- `feat!:` or `fix!:` - Breaking changes (Also include `BREAKING CHANGES:` section in message body) + +## Testing + +- **Unit tests:** `test/unit/` - Run with `hatch run test` +- **Integration tests:** `test/integ/` - Requires AWS resources +- **Squish GUI tests:** `test/squish/` - Requires Squish license +- **Docker tests:** `scripts/run_sudo_tests.sh` - For permission tests + +Tests use pytest with unittest.mock. Coverage target: 80%. + +## Public API Contracts + +Breaking changes require conventional commit syntax (`feat!:`, `fix!:`). + +**CLI contracts:** +- Subcommand names and arguments +- Default values and behaviors + +**Python API contracts:** +- All non-underscore-prefixed functions/classes in public modules +- Function signatures (adding required params is breaking) +- Default argument values + +## Qt/GUI Guidelines + +Never call AWS APIs from the main Qt thread. Use: +1. Background threads for API calls +2. Qt Signals to return results to widgets +3. Cancellation flags for thread cleanup + +See `deadline_config_dialog.py` for examples. + +## Dependencies + +Minimize new dependencies. This library is used from embedded Python within +applications where dependency conflicts are possible. 
When adding: +- Evaluate if functionality can be implemented locally +- Check library quality (maintenance, downloads, stars) +- Ensure license compatibility (Apache-2.0) +- Document in THIRD_PARTY_LICENSES + +## Key Files + +| File | Purpose | +|------|---------| +| `pyproject.toml` | Project config, dependencies | +| `CONTRIBUTING.md` | Contribution guidelines | +| `DEVELOPMENT.md` | Development setup and workflows | +| `CHANGELOG_GUIDELINES.md` | Changelog formatting rules | +| `scripts/README.md` | API change detection scripts | + +## Environment Variables + +- `DEADLINE_CONFIG_FILE_PATH` - Override config location (default: `~/.deadline/config`) +- `AWS_ENDPOINT_URL_DEADLINE` - Custom Deadline Cloud endpoint + +## Optional Features + +Install with extras: +```bash +pip install "deadline[gui]" # Qt GUI components +pip install "deadline[mcp]" # MCP server for AI assistants +``` From 2b95d8d69530fb1a3f6372e0ea1658da585450e1 Mon Sep 17 00:00:00 2001 From: Charles Moore <122481442+moorec-aws@users.noreply.github.com> Date: Wed, 31 Dec 2025 14:51:06 -0600 Subject: [PATCH 07/10] chore: add sysconfigdata to installer validation allowlist (#956) Signed-off-by: Charles Moore <122481442+moorec-aws@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- scripts/pyinstaller/README.md | 4 ++-- scripts/pyinstaller/allowlist.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/pyinstaller/README.md b/scripts/pyinstaller/README.md index c0096d89a..e9814c112 100644 --- a/scripts/pyinstaller/README.md +++ b/scripts/pyinstaller/README.md @@ -19,7 +19,7 @@ See [attributions](../attributions/README.txt) 1. `uv venv .venv` 1. `source .venv/bin/activate` (bash/zsh), or `source .venv/bin/activate.fish` (fish), or `.venv/Scripts/activate` (powershell) 1. `uv pip install -e .` -1. `uv pip install -r requirements-pyinstaller-6.txt` +1. `uv pip install -r requirements-installer.txt` 1. `python scripts/pyinstaller/make_exe.py` 1. `deactivate` @@ -40,7 +40,7 @@ if it is not desired. See `uv venv --help` for more information. 1. `uv venv .venv-validation` 1. `source .venv-validation/bin/activate` (bash/zsh), or `source .venv-validation/bin/activate.fish` (fish), or `.venv-validation/Scripts/activate` (powershell) -1. `uv pip install -r requirements-pyinstaller-6.txt` +1. `uv pip install -r requirements-installer.txt` 1. `python scripts/pyinstaller/validate.py` 1. 
`deactivate` diff --git a/scripts/pyinstaller/allowlist.py b/scripts/pyinstaller/allowlist.py index 09283a697..1006d1ba8 100644 --- a/scripts/pyinstaller/allowlist.py +++ b/scripts/pyinstaller/allowlist.py @@ -132,6 +132,8 @@ "_pydatetime", "_ios_support", "_colorize", + "_sysconfigdata__linux_x86_64-linux-gnu", + "_sysconfigdata__darwin_darwin", ], } } From a86ccce5193569403d4b5fbac865a6cfa381b73f Mon Sep 17 00:00:00 2001 From: folouise-aws <208544511+folouiseAWS@users.noreply.github.com> Date: Mon, 5 Jan 2026 08:26:35 -0800 Subject: [PATCH 08/10] fix: improve About dialog YAML key formatting to be human friendly (#950) * fix: improve About dialog YAML key formatting for nested structures --------- Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> Co-authored-by: Charles Moore <122481442+moorec-aws@users.noreply.github.com> Co-authored-by: Lucas Eckhardt <117225985+lucaseck@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .../client/ui/dialogs/_about_dialog.py | 26 +++- .../deadline_client/ui/dialogs/__init__.py | 1 + .../ui/dialogs/test_about_dialog.py | 113 ++++++++++++++++++ 3 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 test/unit/deadline_client/ui/dialogs/__init__.py create mode 100644 test/unit/deadline_client/ui/dialogs/test_about_dialog.py diff --git a/src/deadline/client/ui/dialogs/_about_dialog.py b/src/deadline/client/ui/dialogs/_about_dialog.py index 892a79cd8..517be5f8b 100644 --- a/src/deadline/client/ui/dialogs/_about_dialog.py +++ b/src/deadline/client/ui/dialogs/_about_dialog.py @@ -88,6 +88,27 @@ def _build_ui(self): self.setLayout(layout) + @staticmethod + def _make_keys_human_readable(data): + """ + Recursively replace underscores with spaces in dictionary keys. + + Args: + data: The data structure to process (dict, list, or scalar value) + + Returns: + The data structure with all dictionary keys made human-readable + """ + if isinstance(data, dict): + return { + k.replace("_", " "): _AboutDialog._make_keys_human_readable(v) + for k, v in data.items() + } + elif isinstance(data, list): + return [_AboutDialog._make_keys_human_readable(item) for item in data] + else: + return data + def _format_version_info(self) -> str: """ Combine submitter and version information for display. @@ -102,7 +123,10 @@ def _format_version_info(self) -> str: combined_info = {**submitter_dict, **env_data} - yaml_str = yaml.dump(combined_info, indent=4, sort_keys=False) + # Replace underscores with spaces in keys for user-friendly display (recursively) + friendly_info = self._make_keys_human_readable(combined_info) + + yaml_str = yaml.dump(friendly_info, indent=4, sort_keys=False) return yaml_str def _format_for_copy(self) -> str: diff --git a/test/unit/deadline_client/ui/dialogs/__init__.py b/test/unit/deadline_client/ui/dialogs/__init__.py new file mode 100644 index 000000000..8d929cc86 --- /dev/null +++ b/test/unit/deadline_client/ui/dialogs/__init__.py @@ -0,0 +1 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/test/unit/deadline_client/ui/dialogs/test_about_dialog.py b/test/unit/deadline_client/ui/dialogs/test_about_dialog.py new file mode 100644 index 000000000..1e6e500cd --- /dev/null +++ b/test/unit/deadline_client/ui/dialogs/test_about_dialog.py @@ -0,0 +1,113 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +""" +Tests for the _make_keys_human_readable function in _about_dialog.py. 
+ +These tests verify that nested dictionary keys are properly transformed +to be human-readable by replacing underscores with spaces. +""" + +import pytest + +try: + from deadline.client.ui.dialogs._about_dialog import _AboutDialog + + _make_keys_human_readable = _AboutDialog._make_keys_human_readable +except ImportError: + # The tests in this file should be skipped if Qt UI related modules cannot be loaded + pytest.importorskip("deadline.client.ui.dialogs._about_dialog") + + +@pytest.mark.parametrize( + "input_data,expected", + [ + pytest.param( + {"some_key": "value", "another_key": 123}, + {"some key": "value", "another key": 123}, + id="flat_dict", + ), + pytest.param( + {"outer_key": {"inner_key": "value", "another_inner_key": 456}}, + {"outer key": {"inner key": "value", "another inner key": 456}}, + id="nested_dict", + ), + pytest.param( + {"level_one": {"level_two": {"level_three": {"deep_key": "deep_value"}}}}, + {"level one": {"level two": {"level three": {"deep key": "deep_value"}}}}, + id="deeply_nested_dict", + ), + pytest.param( + {"items_list": [{"item_name": "first"}, {"item_name": "second"}]}, + {"items list": [{"item name": "first"}, {"item name": "second"}]}, + id="list_with_dicts", + ), + pytest.param( + {"my_list": [1, 2, 3, "four_five"]}, + {"my list": [1, 2, 3, "four_five"]}, + id="list_of_scalars", + ), + pytest.param( + {"normalkey": "value", "AnotherKey": 123}, + {"normalkey": "value", "AnotherKey": 123}, + id="keys_without_underscores", + ), + pytest.param({}, {}, id="empty_dict"), + pytest.param([], [], id="empty_list"), + ], +) +def test_make_keys_human_readable(input_data, expected): + """Test that _make_keys_human_readable correctly transforms dictionary keys.""" + result = _make_keys_human_readable(input_data) + assert result == expected + + +@pytest.mark.parametrize( + "scalar_value", + [ + pytest.param("string_value", id="string"), + pytest.param(123, id="int"), + pytest.param(45.67, id="float"), + pytest.param(True, id="bool"), + pytest.param(None, id="none"), + ], +) +def test_scalar_values_unchanged(scalar_value): + """Test that scalar values pass through unchanged.""" + result = _make_keys_human_readable(scalar_value) + assert result == scalar_value + + +def test_mixed_nested_structure(): + """Test complex structure with dicts, lists, and scalar values.""" + data = { + "deadline_dep_versions": { + "deadline_cloud": "1.0.0", + "boto3_stubs": "1.2.3", + }, + "additional_info": { + "loaded_plugins": [ + {"plugin_name": "Plugin A", "plugin_version": "1.0"}, + {"plugin_name": "Plugin B", "plugin_version": "2.0"}, + ], + "render_engine": "Cycles", + }, + "simple_value": "test", + } + + expected = { + "deadline dep versions": { + "deadline cloud": "1.0.0", + "boto3 stubs": "1.2.3", + }, + "additional info": { + "loaded plugins": [ + {"plugin name": "Plugin A", "plugin version": "1.0"}, + {"plugin name": "Plugin B", "plugin version": "2.0"}, + ], + "render engine": "Cycles", + }, + "simple value": "test", + } + + result = _make_keys_human_readable(data) + assert result == expected From d6adc34389626e37fe1d7b73606150b2177f730b Mon Sep 17 00:00:00 2001 From: David Leong <116610336+leongdl@users.noreply.github.com> Date: Thu, 8 Jan 2026 21:01:35 +0000 Subject: [PATCH 09/10] fix: Add option to bundle submit to always perform S3 head check (#957) Signed-off-by: David Leong <116610336+leongdl@users.noreply.github.com> Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- src/deadline/client/api/_submit_job_bundle.py | 11 +++ 
.../client/cli/_groups/bundle_group.py | 15 ++++ src/deadline/client/config/config_file.py | 9 ++ .../ui/dialogs/deadline_config_dialog.py | 3 + .../client/ui/translations/locales/de_DE.json | 1 + .../client/ui/translations/locales/en_US.json | 1 + .../client/ui/translations/locales/es_ES.json | 1 + .../client/ui/translations/locales/fr_FR.json | 1 + .../client/ui/translations/locales/id_ID.json | 1 + .../client/ui/translations/locales/it_IT.json | 1 + .../client/ui/translations/locales/ja_JP.json | 1 + .../client/ui/translations/locales/ko_KR.json | 1 + .../client/ui/translations/locales/pt_BR.json | 1 + .../client/ui/translations/locales/tr_TR.json | 1 + .../client/ui/translations/locales/zh_CN.json | 1 + .../client/ui/translations/locales/zh_TW.json | 1 + src/deadline/job_attachments/upload.py | 45 +++++++--- .../api/test_job_bundle_submission.py | 67 +++++++++++++++ .../deadline_client/cli/test_cli_config.py | 6 +- .../config/test_config_file.py | 1 + .../deadline_job_attachments/test_upload.py | 85 +++++++++++++++++++ 21 files changed, 242 insertions(+), 12 deletions(-) diff --git a/src/deadline/client/api/_submit_job_bundle.py b/src/deadline/client/api/_submit_job_bundle.py index 7d29d8512..08418331e 100644 --- a/src/deadline/client/api/_submit_job_bundle.py +++ b/src/deadline/client/api/_submit_job_bundle.py @@ -159,6 +159,7 @@ def _upload_attachments( upload_progress_callback: Optional[Callable], config: Optional[ConfigParser] = None, from_gui: bool = False, + force_s3_check: Optional[bool] = None, ) -> Dict[str, Any]: """ Starts the job attachments upload and handles the progress reporting callback. @@ -175,6 +176,7 @@ def _default_update_upload_progress(upload_metadata: ProgressReportMetadata) -> manifests=manifests, on_uploading_assets=upload_progress_callback, s3_check_cache_dir=config_file.get_cache_directory(), + force_s3_check=force_s3_check, ) api.get_deadline_cloud_library_telemetry_client(config=config).record_upload_summary( upload_summary, @@ -392,6 +394,7 @@ def create_job_from_job_bundle( hashing_progress_callback: Optional[Callable[[ProgressReportMetadata], bool]] = None, upload_progress_callback: Optional[Callable[[ProgressReportMetadata], bool]] = None, create_job_result_callback: Optional[Callable[[], bool]] = None, + force_s3_check: Optional[bool] = None, ) -> Optional[str]: """ Creates a [Deadline Cloud job] in the [queue] configured as default for the workstation @@ -462,6 +465,9 @@ def create_job_from_job_bundle( See hashing_progress_callback for more details. create_job_result_callback (Callable -> bool): Callbacks periodically called while waiting for the deadline.create_job result. See hashing_progress_callback for more details. + force_s3_check (bool, optional): If True, skip S3CheckCache and always do S3 HEAD + to verify job attachment existence before uploading. Use when S3 bucket contents may be out of sync with local caches. + If None (default), reads from the `settings.force_s3_check` config setting. Returns: Returns the submitted job id. If `debug_snapshot_dir` is provided then no job is submitted and it returns None. 
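For reference, a minimal caller-side sketch of the new parameter (not part of this change; the
bundle path is hypothetical and farm/queue defaults are assumed to be configured):

```python
from deadline.client import api

# force_s3_check=True bypasses the local S3 check cache and performs an S3 HEAD request for
# every attachment; passing None (or omitting it) falls back to settings.force_s3_check.
job_id = api.create_job_from_job_bundle(
    job_bundle_dir="/path/to/job_bundle",  # hypothetical path
    queue_parameter_definitions=[],
    force_s3_check=True,
)
```

The same behavior is reachable through the CLI flag added in this change
(`deadline bundle submit --force-s3-check <job_bundle_dir>`) or persistently through the
`settings.force_s3_check` configuration setting.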
@@ -498,6 +504,10 @@ def create_job_from_job_bundle( "defaults.job_attachments_file_system", config=config ) + # Read force_s3_check from config if not explicitly set by caller + if force_s3_check is None: + force_s3_check = config_file.str2bool(get_setting("settings.force_s3_check", config=config)) + queue = deadline.get_queue( farmId=farm_id, queueId=queue_id, @@ -718,6 +728,7 @@ def create_job_from_job_bundle( print_function_callback, upload_progress_callback, from_gui=from_gui, + force_s3_check=force_s3_check, ) else: attachment_settings = _snapshot_attachments( # type: ignore diff --git a/src/deadline/client/cli/_groups/bundle_group.py b/src/deadline/client/cli/_groups/bundle_group.py index d28679a12..9b7baef9e 100644 --- a/src/deadline/client/cli/_groups/bundle_group.py +++ b/src/deadline/client/cli/_groups/bundle_group.py @@ -216,6 +216,13 @@ def _interactive_confirmation_prompt(message: str, default_response: bool) -> bo " It includes the job attachments and parameters for creating the job." " You can later run the bash script in the snapshot to submit the job using AWS CLI commands.", ) +@click.option( + "--force-s3-check/--no-force-s3-check", + default=None, + help="Force verification that job attachments exist in S3 before skipping upload. " + "Use when S3 bucket contents may be out of sync with local caches. " + "Overrides the 'settings.force_s3_check' config setting.", +) @click.argument("job_bundle_dir") @_handle_error def bundle_submit( @@ -232,6 +239,7 @@ def bundle_submit( require_paths_exist, submitter_name, save_debug_snapshot, + force_s3_check, **args, ): """ @@ -245,6 +253,12 @@ def bundle_submit( # Apply the CLI args to the config config = _apply_cli_options_to_config(required_options={"farm_id", "queue_id"}, **args) + # Resolve force_s3_check: CLI flag takes precedence, otherwise use config setting + if force_s3_check is None: + force_s3_check = config_file.str2bool( + config_file.get_setting("settings.force_s3_check", config=config) + ) + hash_callback_manager = _ProgressBarCallbackManager(length=100, label="Hashing Attachments") upload_callback_manager = _ProgressBarCallbackManager(length=100, label="Uploading Attachments") @@ -280,6 +294,7 @@ def _check_create_job_wait_canceled() -> bool: submitter_name=submitter_name or "CLI", known_asset_paths=known_asset_path, debug_snapshot_dir=snapshot_tmpdir.name if snapshot_tmpdir else save_debug_snapshot, + force_s3_check=force_s3_check, ) if snapshot_tmpdir: diff --git a/src/deadline/client/config/config_file.py b/src/deadline/client/config/config_file.py index 1e037be10..318273e73 100644 --- a/src/deadline/client/config/config_file.py +++ b/src/deadline/client/config/config_file.py @@ -145,6 +145,15 @@ "default": "", "description": "The locale to use for the UI. If empty, uses the system locale.", }, + "settings.force_s3_check": { + "default": "false", + "description": ( + "Controls S3 verification behavior for job attachments. " + "When 'true', always verify files exist in S3 via HEAD request before skipping upload " + "(most reliable but slower, skips cache integrity check since every file is verified). " + "When 'false' or unset, use local cache with periodic integrity sampling against S3 (balanced default)." 
+ ), + }, } diff --git a/src/deadline/client/ui/dialogs/deadline_config_dialog.py b/src/deadline/client/ui/dialogs/deadline_config_dialog.py index 4bed4b7b7..468031100 100644 --- a/src/deadline/client/ui/dialogs/deadline_config_dialog.py +++ b/src/deadline/client/ui/dialogs/deadline_config_dialog.py @@ -376,6 +376,9 @@ def _build_general_settings_ui(self, group, layout): self.telemetry_opt_out = self._init_checkbox_setting( group, layout, "telemetry.opt_out", tr("Telemetry opt out") ) + self.force_s3_check = self._init_checkbox_setting( + group, layout, "settings.force_s3_check", tr("Always check S3 job attachments") + ) self._conflict_resolution_options = [option.name for option in FileConflictResolution] self.conflict_resolution_box = self._init_combobox_setting( diff --git a/src/deadline/client/ui/translations/locales/de_DE.json b/src/deadline/client/ui/translations/locales/de_DE.json index cedc8b283..7a5a8b205 100644 --- a/src/deadline/client/ui/translations/locales/de_DE.json +++ b/src/deadline/client/ui/translations/locales/de_DE.json @@ -12,6 +12,7 @@ "Add...": "Hinzufügen...", "All": "Alle", "All fields below are optional": "Alle folgenden Felder sind optional", + "Always check S3 job attachments": "S3-Jobanhänge immer prüfen", "Amount name": "Mengenname", "Any": "Beliebig", "Apply": "Anwenden", diff --git a/src/deadline/client/ui/translations/locales/en_US.json b/src/deadline/client/ui/translations/locales/en_US.json index 4d05dda68..59edcf3fa 100644 --- a/src/deadline/client/ui/translations/locales/en_US.json +++ b/src/deadline/client/ui/translations/locales/en_US.json @@ -12,6 +12,7 @@ "Add...": "Add...", "All": "All", "All fields below are optional": "All fields below are optional", + "Always check S3 job attachments": "Always check S3 job attachments", "Amount name": "Amount name", "Any": "Any", "Apply": "Apply", diff --git a/src/deadline/client/ui/translations/locales/es_ES.json b/src/deadline/client/ui/translations/locales/es_ES.json index 8f546ab19..dab64dca2 100644 --- a/src/deadline/client/ui/translations/locales/es_ES.json +++ b/src/deadline/client/ui/translations/locales/es_ES.json @@ -12,6 +12,7 @@ "Add...": "Agregar...", "All": "Todos", "All fields below are optional": "Todos los campos siguientes son opcionales", + "Always check S3 job attachments": "Comprobar siempre los archivos adjuntos de trabajo en S3", "Amount name": "Nombre de cantidad", "Any": "Cualquiera", "Apply": "Aplicar", diff --git a/src/deadline/client/ui/translations/locales/fr_FR.json b/src/deadline/client/ui/translations/locales/fr_FR.json index da277ec78..ca41db5f1 100644 --- a/src/deadline/client/ui/translations/locales/fr_FR.json +++ b/src/deadline/client/ui/translations/locales/fr_FR.json @@ -12,6 +12,7 @@ "Add...": "Ajouter...", "All": "Tous", "All fields below are optional": "Tous les champs ci-dessous sont facultatifs", + "Always check S3 job attachments": "Toujours vérifier les fichiers joints de tâche S3", "Amount name": "Nom de quantité", "Any": "Quelconque", "Apply": "Appliquer", diff --git a/src/deadline/client/ui/translations/locales/id_ID.json b/src/deadline/client/ui/translations/locales/id_ID.json index 44d3f672c..47cc81e39 100644 --- a/src/deadline/client/ui/translations/locales/id_ID.json +++ b/src/deadline/client/ui/translations/locales/id_ID.json @@ -12,6 +12,7 @@ "Add...": "Tambah...", "All": "Semua", "All fields below are optional": "Semua bidang di bawah ini bersifat opsional", + "Always check S3 job attachments": "Selalu periksa lampiran pekerjaan S3", "Amount name": "Nama 
jumlah", "Any": "Apa saja", "Apply": "Terapkan", diff --git a/src/deadline/client/ui/translations/locales/it_IT.json b/src/deadline/client/ui/translations/locales/it_IT.json index 94f352c13..dbdbec1f4 100644 --- a/src/deadline/client/ui/translations/locales/it_IT.json +++ b/src/deadline/client/ui/translations/locales/it_IT.json @@ -12,6 +12,7 @@ "Add...": "Aggiungi...", "All": "Tutti", "All fields below are optional": "Tutti i campi seguenti sono facoltativi", + "Always check S3 job attachments": "Controlla sempre gli allegati lavoro S3", "Amount name": "Nome quantità", "Any": "Qualsiasi", "Apply": "Applica", diff --git a/src/deadline/client/ui/translations/locales/ja_JP.json b/src/deadline/client/ui/translations/locales/ja_JP.json index 2ebc9d4fa..1897c3a7c 100644 --- a/src/deadline/client/ui/translations/locales/ja_JP.json +++ b/src/deadline/client/ui/translations/locales/ja_JP.json @@ -12,6 +12,7 @@ "Add...": "追加...", "All": "すべて", "All fields below are optional": "以下のすべてのフィールドはオプションです", + "Always check S3 job attachments": "S3 ジョブアタッチメントを常にチェック", "Amount name": "量の名前", "Any": "任意", "Apply": "適用", diff --git a/src/deadline/client/ui/translations/locales/ko_KR.json b/src/deadline/client/ui/translations/locales/ko_KR.json index 08d534692..e5be44602 100644 --- a/src/deadline/client/ui/translations/locales/ko_KR.json +++ b/src/deadline/client/ui/translations/locales/ko_KR.json @@ -12,6 +12,7 @@ "Add...": "추가...", "All": "모두", "All fields below are optional": "아래의 모든 필드는 선택 사항입니다", + "Always check S3 job attachments": "S3 작업 첨부 파일 항상 확인", "Amount name": "수량 이름", "Any": "모두", "Apply": "적용", diff --git a/src/deadline/client/ui/translations/locales/pt_BR.json b/src/deadline/client/ui/translations/locales/pt_BR.json index bbda297ad..44d9c6962 100644 --- a/src/deadline/client/ui/translations/locales/pt_BR.json +++ b/src/deadline/client/ui/translations/locales/pt_BR.json @@ -12,6 +12,7 @@ "Add...": "Adicionar...", "All": "Todos", "All fields below are optional": "Todos os campos abaixo são opcionais", + "Always check S3 job attachments": "Sempre verificar anexos de trabalho do S3", "Amount name": "Nome da quantidade", "Any": "Qualquer", "Apply": "Aplicar", diff --git a/src/deadline/client/ui/translations/locales/tr_TR.json b/src/deadline/client/ui/translations/locales/tr_TR.json index 7eda93457..29f325083 100644 --- a/src/deadline/client/ui/translations/locales/tr_TR.json +++ b/src/deadline/client/ui/translations/locales/tr_TR.json @@ -12,6 +12,7 @@ "Add...": "Ekle...", "All": "Tümü", "All fields below are optional": "Aşağıdaki tüm alanlar isteğe bağlıdır", + "Always check S3 job attachments": "S3 iş eklerini her zaman kontrol et", "Amount name": "Miktar adı", "Any": "Herhangi", "Apply": "Uygula", diff --git a/src/deadline/client/ui/translations/locales/zh_CN.json b/src/deadline/client/ui/translations/locales/zh_CN.json index 8b926638e..fabd2f52c 100644 --- a/src/deadline/client/ui/translations/locales/zh_CN.json +++ b/src/deadline/client/ui/translations/locales/zh_CN.json @@ -12,6 +12,7 @@ "Add...": "添加...", "All": "全部", "All fields below are optional": "以下所有字段均为可选", + "Always check S3 job attachments": "始终检查 S3 作业附件", "Amount name": "数量名称", "Any": "任意", "Apply": "应用", diff --git a/src/deadline/client/ui/translations/locales/zh_TW.json b/src/deadline/client/ui/translations/locales/zh_TW.json index e60a4def8..fd171c3fb 100644 --- a/src/deadline/client/ui/translations/locales/zh_TW.json +++ b/src/deadline/client/ui/translations/locales/zh_TW.json @@ -12,6 +12,7 @@ "Add...": "新增...", "All": "全部", 
"All fields below are optional": "以下所有欄位均為選用", + "Always check S3 job attachments": "一律檢查 S3 任務附件", "Amount name": "數量名稱", "Any": "任何", "Apply": "套用", diff --git a/src/deadline/job_attachments/upload.py b/src/deadline/job_attachments/upload.py index 52c4ac985..1183370a7 100644 --- a/src/deadline/job_attachments/upload.py +++ b/src/deadline/job_attachments/upload.py @@ -198,6 +198,7 @@ def upload_assets( manifest_metadata: dict[str, dict[str, str]] = dict(), manifest_file_name: Optional[str] = None, asset_root: Optional[Path] = None, + force_s3_check: Optional[bool] = None, ) -> tuple[str, str]: """ Uploads assets based off of an asset manifest, uploads the asset manifest. @@ -214,10 +215,12 @@ def upload_assets( manifest_metadata: File metadata for given manifest to be uploaded. manifest_file_name: Optional file name for given manifest to be uploaded, otherwise use default name. asset_root: The root in which asset actually in to facilitate path mapping. + force_s3_check: Controls S3 verification behavior: + - True: Skip the S3 check cache, always check whether uploads are already in S3. + - False/None: Use the S3 check cache, with periodic integrity sampling against S3 (default) Returns: - A tuple of (the partial key for the manifest on S3, the hash of input manifest). - """ + A tuple of (the partial key for the manifest on S3, the hash of input manifest).""" # Upload asset manifest (hash_alg, manifest_bytes, manifest_name) = S3AssetUploader._gather_upload_metadata( @@ -253,8 +256,10 @@ def upload_assets( extra_args=manifest_metadata, ) - # Verify S3 hash cache integrity, and reset cache if cached files are missing - if not self.verify_hash_cache_integrity( + # Verify S3 hash cache integrity, and reset cache if cached files are missing. + # Skip integrity check only when force_s3_check is True - we'll do S3 HEAD on every file anyway. + # When False or None, run the integrity check to catch stale cache entries. + if force_s3_check is not True and not self.verify_hash_cache_integrity( s3_check_cache_dir, manifest, job_attachment_settings.full_cas_prefix(), @@ -270,6 +275,7 @@ def upload_assets( s3_cas_prefix=job_attachment_settings.full_cas_prefix(), progress_tracker=progress_tracker, s3_check_cache_dir=s3_check_cache_dir, + force_s3_check=force_s3_check, ) return (partial_manifest_key, hash_data(manifest_bytes, hash_alg)) @@ -434,6 +440,7 @@ def upload_input_files( s3_cas_prefix: str, progress_tracker: Optional[ProgressTracker] = None, s3_check_cache_dir: Optional[str] = None, + force_s3_check: Optional[bool] = None, ) -> None: """ Uploads all of the files listed in the given manifest to S3 if they don't exist in the @@ -466,6 +473,7 @@ def upload_input_files( s3_cas_prefix, s3_cache, progress_tracker, + force_s3_check, ): file for file in small_file_queue } @@ -485,6 +493,7 @@ def upload_input_files( s3_cas_prefix, s3_cache, progress_tracker, + force_s3_check, ) if progress_tracker and not is_uploaded: progress_tracker.increase_skipped(1, file_size) @@ -644,23 +653,32 @@ def upload_object_to_cas( s3_cas_prefix: str, s3_check_cache: S3CheckCache, progress_tracker: Optional[ProgressTracker] = None, + force_s3_check: Optional[bool] = None, ) -> Tuple[bool, int]: """ Uploads an object to the S3 content-addressable storage (CAS) prefix. Optionally, does a head-object check and only uploads the file if it doesn't exist in S3 already. Returns a tuple (whether it has been uploaded, the file size). 
+ + Args: + force_s3_check: Controls S3 verification behavior: + - True: Skip the S3 check cache, always check whether uploads are already in S3. + - False/None: Use the S3 check cache, with periodic integrity sampling against S3 (default) """ local_path = source_root.joinpath(file.path) s3_upload_key = self._generate_s3_upload_key(file, hash_algorithm, s3_cas_prefix) is_uploaded = False - if s3_check_cache.get_connection_entry( - s3_key=f"{s3_bucket}/{s3_upload_key}", connection=s3_check_cache.get_local_connection() - ): - logger.debug( - f"skipping {local_path} because {s3_bucket}/{s3_upload_key} exists in the cache" - ) - return (is_uploaded, file.size) + # Check cache first unless force_s3_check is True (skip cache entirely) + if force_s3_check is not True: + if s3_check_cache.get_connection_entry( + s3_key=f"{s3_bucket}/{s3_upload_key}", + connection=s3_check_cache.get_local_connection(), + ): + logger.debug( + f"skipping {local_path} because {s3_bucket}/{s3_upload_key} exists in the cache" + ) + return (is_uploaded, file.size) if self.file_already_uploaded(s3_bucket, s3_upload_key): logger.debug( @@ -1499,6 +1517,7 @@ def upload_assets( on_uploading_assets: Optional[Callable[[Any], bool]] = None, s3_check_cache_dir: Optional[str] = None, manifest_write_dir: Optional[str] = None, + force_s3_check: Optional[bool] = None, ) -> tuple[SummaryStatistics, Attachments]: """ Uploads all the files for provided manifests and manifests themselves to S3. @@ -1507,6 +1526,9 @@ def upload_assets( manifests: a list of manifests that contain assets to be uploaded on_uploading_assets: a callback to be called to periodically report progress to the caller. The callback returns True if the operation should continue as normal, or False to cancel. + force_s3_check: Controls S3 verification behavior: + - True: Skip the S3 check cache, always check whether uploads are already in S3. 
+ - False/None: Use the S3 check cache, with periodic integrity sampling against S3 (default) Returns: a tuple with (1) the summary statistics of the upload operation, and @@ -1555,6 +1577,7 @@ def upload_assets( progress_tracker=progress_tracker, s3_check_cache_dir=s3_check_cache_dir, manifest_write_dir=manifest_write_dir, + force_s3_check=force_s3_check, ) manifest_properties.inputManifestPath = partial_manifest_key manifest_properties.inputManifestHash = asset_manifest_hash diff --git a/test/unit/deadline_client/api/test_job_bundle_submission.py b/test/unit/deadline_client/api/test_job_bundle_submission.py index b7781fb2f..384ff2407 100644 --- a/test/unit/deadline_client/api/test_job_bundle_submission.py +++ b/test/unit/deadline_client/api/test_job_bundle_submission.py @@ -1063,3 +1063,70 @@ def mock_continue_callback() -> bool: deadline_client=deadline_client, continue_callback=mock_continue_callback, ) + + +@pytest.mark.parametrize( + "force_s3_check_param, config_value, expected_value", + [ + pytest.param(True, "false", True, id="explicit-true-overrides-config-false"), + pytest.param(False, "true", False, id="explicit-false-overrides-config-true"), + pytest.param(None, "true", True, id="none-reads-config-true"), + pytest.param(None, "false", False, id="none-reads-config-false"), + ], +) +def test_create_job_from_job_bundle_force_s3_check( + fresh_deadline_config, + temp_job_bundle_dir, + force_s3_check_param, + config_value, + expected_value, +): + """ + Test that force_s3_check parameter is correctly resolved: + - Explicit True/False overrides config + - None falls back to config setting + """ + config.set_setting("defaults.farm_id", MOCK_FARM_ID) + config.set_setting("defaults.queue_id", MOCK_QUEUE_ID) + config.set_setting("settings.storage_profile_id", MOCK_STORAGE_PROFILE_ID) + config.set_setting("settings.force_s3_check", config_value) + + job_template_type, job_template = MOCK_JOB_TEMPLATE_CASES["MINIMAL_JSON"] + + with patch_calls_for_create_job_from_job_bundle() as mock: + mock.get_boto3_client().get_storage_profile_for_queue.return_value = ( + MOCK_GET_STORAGE_PROFILE_FOR_QUEUE_RESPONSE + ) + + # Write the template to the job bundle + with open( + os.path.join(temp_job_bundle_dir, f"template.{job_template_type.lower()}"), + "w", + encoding="utf8", + ) as f: + f.write(job_template) + + # Build kwargs, only include force_s3_check if not None + kwargs: Dict[str, Any] = { + "job_bundle_dir": temp_job_bundle_dir, + "queue_parameter_definitions": [], + } + if force_s3_check_param is not None: + kwargs["force_s3_check"] = force_s3_check_param + + # Call the function under test + response = api.create_job_from_job_bundle(**kwargs) + + # Verify the job was created + assert response == MOCK_JOB_ID + + # Verify create_job_from_job_bundle was called with the expected force_s3_check value + mock.create_job_from_job_bundle.assert_called_once() + _, call_kwargs = mock.create_job_from_job_bundle.call_args + # The wrapped function receives the resolved value, but we need to check + # what was passed to _upload_attachments. Since there are no attachments + # in this test, we verify the parameter was passed correctly to the function. 
+ if force_s3_check_param is not None: + assert call_kwargs.get("force_s3_check") == force_s3_check_param + else: + assert call_kwargs.get("force_s3_check") is None diff --git a/test/unit/deadline_client/cli/test_cli_config.py b/test/unit/deadline_client/cli/test_cli_config.py index 5e511ca4c..df9bcb70c 100644 --- a/test/unit/deadline_client/cli/test_cli_config.py +++ b/test/unit/deadline_client/cli/test_cli_config.py @@ -35,7 +35,7 @@ def test_cli_config_show_defaults(fresh_deadline_config): assert fresh_deadline_config in result.output # Assert the expected number of settings - assert len(settings.keys()) == 17 + assert len(settings.keys()) == 18 for setting_name in settings.keys(): assert setting_name in result.output @@ -103,6 +103,7 @@ def test_cli_config_show_modified_config(fresh_deadline_config): config.set_setting("settings.small_file_threshold_multiplier", "15") config.set_setting("settings.known_asset_paths", "/known/asset/path") config.set_setting("settings.locale", "ja_JP") + config.set_setting("settings.force_s3_check", "true") runner = CliRunner() result = runner.invoke(main, ["config", "show"]) @@ -114,6 +115,9 @@ def test_cli_config_show_modified_config(fresh_deadline_config): assert "~/alternate/job_history" in result.output assert result.output.count("False") == 1 assert result.output.count("True") == 1 + # "true" appears three times: once as the value for force_s3_check, + # once in the telemetry.opt_out description, and once in the force_s3_check description + assert result.output.count("true") == 3 assert "farm-82934h23k4j23kjh" in result.output assert "queue-389348u234jhk34" in result.output assert "job-239u40234jkl234nkl23" in result.output diff --git a/test/unit/deadline_client/config/test_config_file.py b/test/unit/deadline_client/config/test_config_file.py index ab670a7ac..5f3f7f14d 100644 --- a/test/unit/deadline_client/config/test_config_file.py +++ b/test/unit/deadline_client/config/test_config_file.py @@ -26,6 +26,7 @@ ("defaults.farm_id", "", "farm-82934h23k4j23kjh"), ("defaults.job_attachments_file_system", "COPIED", "VIRTUAL"), ("settings.locale", "", "ja_JP"), + ("settings.force_s3_check", "false", "true"), ] diff --git a/test/unit/deadline_job_attachments/test_upload.py b/test/unit/deadline_job_attachments/test_upload.py index cbcc65df3..d1148a5e6 100644 --- a/test/unit/deadline_job_attachments/test_upload.py +++ b/test/unit/deadline_job_attachments/test_upload.py @@ -2651,6 +2651,91 @@ def test_upload_object_to_cas_skips_upload_with_cache( assert file_size == 5 s3_cache.put_entry.assert_not_called() + @mock_aws + @pytest.mark.parametrize( + "manifest_version", + [ + ManifestVersion.v2023_03_03, + ], + ) + @pytest.mark.parametrize( + "file_exists_in_s3, expected_upload", + [ + pytest.param(True, False, id="file-exists-skip-upload"), + pytest.param(False, True, id="file-missing-do-upload"), + ], + ) + def test_upload_object_to_cas_force_s3_check_bypasses_cache( + self, + tmpdir, + farm_id, + queue_id, + manifest_version, + default_job_attachment_s3_settings, + file_exists_in_s3, + expected_upload, + ): + """ + Tests that when force_s3_check=True, the S3CheckCache is bypassed and S3 HEAD is always performed. + Verifies that upload happens only when file doesn't exist in S3. 
+ """ + # Given + asset_root = tmpdir.mkdir("test-root") + test_file = asset_root.join("test-file.txt") + test_file.write("stuff") + asset_manager = S3AssetManager( + farm_id=farm_id, + queue_id=queue_id, + job_attachment_settings=self.job_attachment_s3_settings, + asset_manifest_version=manifest_version, + ) + s3_key = f"{default_job_attachment_s3_settings.s3BucketName}/prefix/test-hash.xxh128" + test_entry = S3CheckCacheEntry(s3_key, "123.45") + s3_cache = MagicMock() + # Cache has an entry, but it should be bypassed + s3_cache.get_connection_entry.return_value = test_entry + + # When + with patch.object( + asset_manager.asset_uploader, + "_get_current_timestamp", + side_effect=["345.67"], + ), patch.object( + asset_manager.asset_uploader, + "file_already_uploaded", + return_value=file_exists_in_s3, + ) as mock_file_already_uploaded, patch.object( + asset_manager.asset_uploader, + "upload_file_to_s3", + ) as mock_upload_file_to_s3: + (is_uploaded, file_size) = asset_manager.asset_uploader.upload_object_to_cas( + file=BaseManifestPath(path="test-file.txt", hash="test-hash", size=5, mtime=1), + hash_algorithm=HashAlgorithm.XXH128, + s3_bucket=default_job_attachment_s3_settings.s3BucketName, + source_root=Path(asset_root), + s3_cas_prefix="prefix", + s3_check_cache=s3_cache, + force_s3_check=True, + ) + + # Then + assert is_uploaded == expected_upload + assert file_size == 5 + # Cache lookup should NOT have been called (bypassed due to force_s3_check) + s3_cache.get_connection_entry.assert_not_called() + # S3 HEAD should always be called when force_s3_check=True + mock_file_already_uploaded.assert_called_once_with( + default_job_attachment_s3_settings.s3BucketName, "prefix/test-hash.xxh128" + ) + # Upload should only happen if file doesn't exist in S3 + if expected_upload: + mock_upload_file_to_s3.assert_called_once() + else: + mock_upload_file_to_s3.assert_not_called() + # Cache should always be updated after HEAD/upload + expected_new_entry = S3CheckCacheEntry(s3_key, "345.67") + s3_cache.put_entry.assert_called_once_with(expected_new_entry) + def test_open_non_symlink_file_binary(self, tmp_path: Path): temp_file = tmp_path / "temp_file.txt" temp_file.write_text("this is test file") From c3bcb51e46a5408d010ba227aa154a7270a6dd27 Mon Sep 17 00:00:00 2001 From: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> Date: Tue, 16 Dec 2025 16:28:58 -0800 Subject: [PATCH 10/10] fix: storage profile visibility in Submit Dialog The storage profile selector was not showing in the Submit Dialog even when profiles were available. Two issues were fixed: 1. _update_storage_profile_visibility() only checked itemData(0), but sorts alphabetically first and has empty string data, so it always returned False. Now iterates through all items to find any with real profile data. 2. Model signals (rowsInserted, etc.) weren't firing because the base class uses block_signals when populating the combo box. Added a connection to the _list_update signal which fires after async refresh. 
Signed-off-by: Louise Fox <208544511+folouiseAWS@users.noreply.github.com> --- .../ui/widgets/shared_job_settings_tab.py | 26 ++++++++++------- .../widgets/test_shared_job_settings_tab.py | 29 +++++++++++++++++++ 2 files changed, 44 insertions(+), 11 deletions(-) diff --git a/src/deadline/client/ui/widgets/shared_job_settings_tab.py b/src/deadline/client/ui/widgets/shared_job_settings_tab.py index b4a9a5e41..a77824de3 100644 --- a/src/deadline/client/ui/widgets/shared_job_settings_tab.py +++ b/src/deadline/client/ui/widgets/shared_job_settings_tab.py @@ -528,6 +528,10 @@ def _build_ui(self): self.storage_profile_box.box.model().modelReset.connect( self._update_storage_profile_visibility ) + # Also connect to the _list_update signal to handle updates after async refresh + self.storage_profile_box._list_update.connect( + lambda *args: self._update_storage_profile_visibility() + ) # Initialize with current config config = config_file.read_config() @@ -543,18 +547,18 @@ def _set_storage_profile_visible(self, visible: bool): def _update_storage_profile_visibility(self): """Update storage profile visibility based on available profiles""" # Check if there are actual storage profiles (not just placeholder items) + # Note: "" sorts first alphabetically and has empty string as data, + # so we need to check if there are items with non-empty data count = self.storage_profile_box.box.count() - # Hide if empty or only has "" or "" placeholder - has_real_profiles = count > 0 and self.storage_profile_box.box.itemData(0) not in ( - None, - "", - ) - # Also check if it's just a refreshing placeholder - if count == 1 and self.storage_profile_box.box.itemText(0) in ( - "", - "", - ): - has_real_profiles = False + has_real_profiles = False + for i in range(count): + item_data = self.storage_profile_box.box.itemData(i) + item_text = self.storage_profile_box.box.itemText(i) + # Skip placeholder items + if item_text in ("", "") or item_data in (None, ""): + continue + has_real_profiles = True + break self._set_storage_profile_visible(has_real_profiles) def _on_farm_changed(self, index: int): diff --git a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py index 911f31173..e9ad7f75b 100644 --- a/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py +++ b/test/unit/deadline_client/ui/widgets/test_shared_job_settings_tab.py @@ -235,6 +235,35 @@ def test_storage_profile_shown_when_profiles_available( assert widget.storage_profile_box_label.isVisible() +def test_storage_profile_shown_when_none_selected_sorts_first( + deadline_cloud_settings_widget: DeadlineCloudSettingsWidget, +): + """Test that storage profile is visible even when sorts to first position. + + This tests the _list_update signal path which is used during async refresh. + The _list_update signal handler populates the combo box with block_signals, + so model signals don't fire - we need to explicitly trigger visibility update. 
+    """
+    widget = deadline_cloud_settings_widget
+
+    # Simulate the async refresh completing with sorted profiles where the
+    # placeholder item sorts first ('<' comes before letters alphabetically)
+    items_list = [
+        ("", ""),
+        ("Profile A", "sp-profile-a"),
+        ("Profile B", "sp-profile-b"),
+    ]
+
+    # Emit the _list_update signal to simulate async refresh completing
+    # This is the code path that was broken - the combo box is populated
+    # inside block_signals so model signals don't fire
+    widget.storage_profile_box._list_update.emit(1, items_list)
+
+    # Storage profile should be visible because there are real profiles
+    assert widget.storage_profile_box.isVisible()
+    assert widget.storage_profile_box_label.isVisible()
+
+
+def test_storage_profile_selection_updates_config(
+    deadline_cloud_settings_widget: DeadlineCloudSettingsWidget,
+):