# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from pytorch_lightning.plugins.training_type.single_device import SingleDevicePlugin
from pytorch_lightning.utilities import _HPU_AVAILABLE
from pytorch_lightning.utilities.apply_func import move_data_to_device
from pytorch_lightning.utilities.exceptions import MisconfigurationException


class SingleHPUPlugin(SingleDevicePlugin):
    """Plugin for training on a single Habana (HPU) device."""

    def __init__(self, device: int):
        if not _HPU_AVAILABLE:
            raise MisconfigurationException("`SingleHPUPlugin` requires HPU devices to run")

        # The integer index is currently unused: a single-device HPU run
        # always targets the default "hpu" device.
        device = torch.device("hpu")
        super().__init__(device)

        self.hpu_local_core_rank = 0
        self.hpu_global_core_rank = 0

    @property
    def on_hpu(self) -> bool:
        return True

    def connect(self, model: torch.nn.Module) -> torch.nn.Module:
        # Attach the model and move it to the HPU device before training starts.
        self._model = model
        self.model_to_device()
        return self._model

    @property
    def is_distributed(self) -> bool:
        return False

    def model_to_device(self) -> None:
        self._model.to(self.root_device)

    def pre_dispatch(self) -> None:
        # `__init__` always passes a `torch.device` to the parent plugin, so no
        # int-to-device conversion is needed here; only reset the single-device ranks.
        self.hpu_local_core_rank = 0
        self.hpu_global_core_rank = 0

    def on_save(self, checkpoint: dict) -> dict:
        """Move HPU tensors to CPU before saving the checkpoint.

        This mirrors the recommendation for XLA tensors in the XLA guide:
        https://github.com/pytorch/xla/blob/master/API_GUIDE.md#saving-and-loading-xla-tensors
        """
        return move_data_to_device(checkpoint, torch.device("cpu"))
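# A minimal usage sketch, assuming the standard `Trainer(plugins=...)` hookup
# used by other single-device plugins in this Lightning version;
# `MyLightningModule` is a hypothetical user-defined LightningModule, and
# actually running this requires an HPU-enabled PyTorch build.
#
#     from pytorch_lightning import Trainer
#
#     model = MyLightningModule()
#     trainer = Trainer(plugins=[SingleHPUPlugin(device=0)])
#     trainer.fit(model)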