diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 000000000..e21712e4b --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,25 @@ +# Codespell configuration is within pyproject.toml +--- +name: Codespell + +on: + push: + branches: [master] + pull_request: + branches: [master] + +permissions: + contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Annotate locations with typos + uses: codespell-project/codespell-problem-matcher@v1 + - name: Codespell + uses: codespell-project/actions-codespell@v2 diff --git a/bazelisk.py b/bazelisk.py index 293f6a0bd..d1fe55da5 100755 --- a/bazelisk.py +++ b/bazelisk.py @@ -177,7 +177,7 @@ def get_version_history(bazelisk_directory): if not release["prerelease"] ), # This only handles versions with numeric components, but that is fine - # since prerelease verisons have been excluded. + # since prerelease versions have been excluded. key=lambda version: tuple(int(component) for component in version.split('.')), reverse=True, diff --git a/docs/conf.py b/docs/conf.py index 6c483be79..78315c478 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -363,7 +363,7 @@ def _python_type_to_xref( 'desc': 'C++ class', }, 'absl::StatusOr': { - 'url': 'https://abseil.io/docs/cpp/guides/statuss#returning-a-status-or-a-value', + 'url': 'https://abseil.io/docs/cpp/guides/status#returning-a-status-or-a-value', 'object_type': 'class', 'desc': 'C++ class', }, diff --git a/docs/doctest_test.py b/docs/doctest_test.py index 60f0b47b0..3c2cd3169 100644 --- a/docs/doctest_test.py +++ b/docs/doctest_test.py @@ -16,7 +16,7 @@ This allows doctest examples to be conveniently updated in case of changes to the output format. 
-After commiting or staging changes, you can run this with the `--in-place` +After committing or staging changes, you can run this with the `--in-place` option and then inspect the diff. This supports top-level `await` in tests, since that provides a convenient way @@ -327,7 +327,7 @@ def _ast_asyncify(code: str, wrapper_name: str) -> ast.Module: This is derived from a similar workaround in IPython. Args: - code: Python source code to pase. + code: Python source code to parse. wrapper_name: Name to use for function. Returns: diff --git a/docs/environment.rst b/docs/environment.rst index 731f5a832..a531529be 100644 --- a/docs/environment.rst +++ b/docs/environment.rst @@ -96,7 +96,7 @@ Debugging Enables debug logging for tensorstore internal subsystems. Set to comma separated list of values, where each value is one of ``name=int`` or just ``name``. When ``all`` is, present, then verbose logging will be enabled for - each subsytem, otherwise logging is set only for those subsystems present in + each subsystem, otherwise logging is set only for those subsystems present in the list. Verbose flag values include: ``curl``, ``distributed``, ``file``, diff --git a/docs/installation.rst b/docs/installation.rst index 7247e1400..18eb6f47a 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -264,7 +264,7 @@ The following CMake generators are supported: - Ninja and Ninja Multi-Config - Makefile generators - Visual Studio generators -- Xcode (targetting arm64 only) +- Xcode (targeting arm64 only) The Ninja generator is recommended because it provides the fastest builds. 
diff --git a/docs/python/indexing.rst b/docs/python/indexing.rst index 59b703693..9f3147465 100644 --- a/docs/python/indexing.rst +++ b/docs/python/indexing.rst @@ -423,7 +423,7 @@ Ellipsis Specifying the special `Ellipsis` value (:python:`...`) is equivalent to specifying as many full slices :python:`:` as needed to consume the -remaining dimensions of the original domin not consumed by other +remaining dimensions of the original domain not consumed by other indexing terms: .. doctest:: diff --git a/docs/schema_schema.yml b/docs/schema_schema.yml index f8df171fa..b2346213f 100644 --- a/docs/schema_schema.yml +++ b/docs/schema_schema.yml @@ -80,7 +80,7 @@ definitions: - A numerical :literal:`multiplier`, represented as a `double-precision floating-point number `_. - A multiplier of ``1`` may be used to indicate a quanity equal to a + A multiplier of ``1`` may be used to indicate a quantity equal to a single base unit. - A :literal:`base_unit`, represented as a string. An empty string may be used diff --git a/examples/image_convolution.cc b/examples/image_convolution.cc index 28f7210ca..1da8c0b70 100644 --- a/examples/image_convolution.cc +++ b/examples/image_convolution.cc @@ -34,7 +34,7 @@ tensorstore::SharedArray ApplyKernel( const tensorstore::ArrayView in, const tensorstore::ArrayView kernel) { // Compute bounds for the offset. - // FIXME: It's akward that we cannot do this: + // FIXME: It's awkward that we cannot do this: // std::array k(kernel.shape()); // std::array k; @@ -80,7 +80,7 @@ tensorstore::SharedArray ApplyKernel( } }); - // Again, the most intutive way to write an array value + // Again, the most intuitive way to write an array value // is not permitted: // // dest[indices] = (sum / count); // error: no viable overloaded '=' @@ -154,7 +154,7 @@ void AffineWarpGrid(size_t xmax, size_t ymax, } // AffineWarpInverseGrid computes the inverse mapping from AffineWarpGrid, -// so it can be used to map from a destination image to a souce image. 
+// so it can be used to map from a destination image to a source image. void AffineWarpInverseGrid(size_t xmax, size_t ymax, tensorstore::span M, AffineWarpGridFunction fn) { @@ -237,7 +237,7 @@ void PrintCSVArray(tensorstore::ArrayView data) { // reference for every element. // // There is a streaming operator already, but the output is - // this is equvalent to: + // this is equivalent to: // for (int x = 0; x < data.shape()[0]; x++) // for (int y = 0; y < data.shape()[1]; y++) { // ... body ... diff --git a/pyproject.toml b/pyproject.toml index eb3d6c676..0e6d7354b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,3 +39,11 @@ version_scheme = "no-guess-dev" # Test PyPI does not support local versions. local_scheme = "no-local-version" fallback_version = "0.0.0" + +[tool.codespell] +# Ref: https://github.com/codespell-project/codespell#using-a-config-file +skip = '.git*,third_party' +check-hidden = true +# Do not bother with mixed case words -- variables, and test lines +ignore-regex = '\b[a-zA-Z]+[A-Z][a-z]*\b|\bEXPECT_.*' +ignore-words-list = 'ehr,ans' diff --git a/python/tensorstore/array_type_caster.cc b/python/tensorstore/array_type_caster.cc index 156db288e..19aee0766 100644 --- a/python/tensorstore/array_type_caster.cc +++ b/python/tensorstore/array_type_caster.cc @@ -277,7 +277,7 @@ pybind11::object GetNumpyArrayImpl(SharedArrayView value, auto obj = py::reinterpret_steal(PyArray_NewFromDescr( /*subtype=*/&PyArray_Type, /*descr=*/reinterpret_cast(py_dtype.release().ptr()), - /*nd=*/static_cast(value.rank()), + /*nd=*/static_cast(value.rank()), // codespell:ignore nd /*dims=*/shape, /*strides=*/strides, /*data=*/const_cast(value.data()), flags, nullptr)); diff --git a/python/tensorstore/keyword_arguments.h b/python/tensorstore/keyword_arguments.h index 5c919cd8c..10cd3e89b 100644 --- a/python/tensorstore/keyword_arguments.h +++ b/python/tensorstore/keyword_arguments.h @@ -23,7 +23,7 @@ /// This mechanism allows individual keyword arguments to be 
defined as /// `ParamDef` types, that specifies the name, documentation, argument type, and /// operation to perform to "apply" the argument. These `ParamDef` types can -/// then be re-used by multiple pybind11 functions while avoiding duplication. +/// then be reused by multiple pybind11 functions while avoiding duplication. /// /// Each `ParamDef` type should be a struct with the following members, and no /// non-static data members (i.e. it should be empty): diff --git a/python/tensorstore/keyword_arguments_test.cc b/python/tensorstore/keyword_arguments_test.cc index 31d1f4437..12fad92d9 100644 --- a/python/tensorstore/keyword_arguments_test.cc +++ b/python/tensorstore/keyword_arguments_test.cc @@ -88,7 +88,7 @@ Does something or other with keyword arguments. // Args: // required_arg: This is required // - // a: Specifies a. This documentaiton string is allowed + // a: Specifies a. This documentation string is allowed // to be more than one line. // b: Specifies b. // diff --git a/python/tensorstore/tests/exit_test.py b/python/tensorstore/tests/exit_test.py index 32d7efb85..da4157a5b 100644 --- a/python/tensorstore/tests/exit_test.py +++ b/python/tensorstore/tests/exit_test.py @@ -37,7 +37,7 @@ def run_during_finalization(): async def test_read(): t = ts.array([1, 2, 3], dtype=ts.int64) await asyncio.wait_for(t.read(), timeout=1) - # Normally, await won't suceed. However, await may still succeed, if it + # Normally, await won't succeed. However, await may still succeed, if it # happens that the read completed before the call to `await`. os._exit(0) diff --git a/python/tensorstore/unit.cc b/python/tensorstore/unit.cc index df6d20353..2a8fbc976 100644 --- a/python/tensorstore/unit.cc +++ b/python/tensorstore/unit.cc @@ -54,7 +54,7 @@ The quantity is specified as the combination of: - A numerical :py:obj:`.multiplier`, represented as a `double-precision floating-point number `_. 
- A multiplier of :python:`1` may be used to indicate a quanity equal to a + A multiplier of :python:`1` may be used to indicate a quantity equal to a single base unit. - A :py:obj:`.base_unit`, represented as a string. An empty string may be used diff --git a/setup.py b/setup.py index 6be44836b..1e8a2db73 100644 --- a/setup.py +++ b/setup.py @@ -160,7 +160,7 @@ def _get_action_env(): Unfortunately there is no kosher way to detect the PEP517 build environment, so this is a heuristic approach based on inspection of the PATH variable passed to the build and review of the PIP sources. The source, as reviewed, - creates a temporary directory inclusing pip-{kind}, where kind=build-env. + creates a temporary directory including pip-{kind}, where kind=build-env. See: dist-packages/pip/_internal/utils/temp_dir.py Also: https://github.com/bazelbuild/bazel/issues/18809 @@ -178,7 +178,7 @@ def _get_action_env(): if not build_env: return [] - # There may be mutliple path entries added under the build-env directory, + # There may be multiple path entries added under the build-env directory, # so remove them all. while 'pip-build-env' in os.path.dirname(build_env): build_env = os.path.dirname(build_env) @@ -216,7 +216,7 @@ def run(self): # from the PATH as bazel is already hermetic to improve cache use. action_env = _get_action_env() - # Ensure python_configure.bzl finds the correct Python verison. + # Ensure python_configure.bzl finds the correct Python version. 
os.environ['PYTHON_BIN_PATH'] = sys.executable # Ensure it is built against the version of `numpy` in the current diff --git a/tensorstore/context.cc b/tensorstore/context.cc index 51cb4ce8d..624b2ad73 100644 --- a/tensorstore/context.cc +++ b/tensorstore/context.cc @@ -392,7 +392,7 @@ class ResourceReference : public ResourceSpecImplBase { ResourceSpecImplPtr UnbindContext( const internal::ContextSpecBuilder& spec_builder) final { auto& builder_impl = *internal_context::Access::impl(spec_builder); - // Ensure the referent is not re-used as an identifier for another resource. + // Ensure the referent is not reused as an identifier for another resource. ++builder_impl.ids_[referent_]; return ResourceSpecImplPtr(this); } diff --git a/tensorstore/contiguous_layout.h b/tensorstore/contiguous_layout.h index 880837b62..6ec1152b0 100644 --- a/tensorstore/contiguous_layout.h +++ b/tensorstore/contiguous_layout.h @@ -148,7 +148,7 @@ bool PermutationMatchesOrder(span permutation, void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm, DimensionIndex* inverse_perm); -/// Normalizes `source` to a permutation if it is not already a permuation. +/// Normalizes `source` to a permutation if it is not already a permutation. /// /// \relates ContiguousLayoutPermutation template diff --git a/tensorstore/driver/downsample/downsample_util.h b/tensorstore/driver/downsample/downsample_util.h index ad59a3c2e..697c55b92 100644 --- a/tensorstore/driver/downsample/downsample_util.h +++ b/tensorstore/driver/downsample/downsample_util.h @@ -61,8 +61,8 @@ struct PropagatedIndexTransformDownsampling { /// /// is equivalent to: /// -/// transforming `b` by `propgated.transform` and then downsampling by -/// `propgated.input_downsample_factors` (and possibly "squeezing" some +/// transforming `b` by `propagated.transform` and then downsampling by +/// `propagated.input_downsample_factors` (and possibly "squeezing" some /// singleton dimensions that were added). 
/// /// Note that this function assumes downsampling is performed using a method diff --git a/tensorstore/driver/kvs_backed_chunk_driver.h b/tensorstore/driver/kvs_backed_chunk_driver.h index 9a1a124b0..c8f2bbe74 100644 --- a/tensorstore/driver/kvs_backed_chunk_driver.h +++ b/tensorstore/driver/kvs_backed_chunk_driver.h @@ -853,7 +853,7 @@ class OpenState : public MetadataOpenState { /// is compatible with the open request by calling /// `open_state->GetComponentIndex`. /// -/// - If it is, either re-uses an existing `DataCache` with a cache key that +/// - If it is, either reuses an existing `DataCache` with a cache key that /// matches `open_state->GetDataCacheKey`, or obtain a new `DataCache` from /// `open_state->GetDataCache`. /// diff --git a/tensorstore/driver/n5/schema.yml b/tensorstore/driver/n5/schema.yml index f1740c7b1..67be4bd7c 100644 --- a/tensorstore/driver/n5/schema.yml +++ b/tensorstore/driver/n5/schema.yml @@ -9,7 +9,7 @@ allOf: metadata: title: N5 array metadata. description: | - Specifies constraints on the metdata of a dataset exactly as in the + Specifies constraints on the metadata of a dataset exactly as in the `attributes.json file `_, except that all members are optional. When creating a new array, the diff --git a/tensorstore/driver/neuroglancer_precomputed/metadata.cc b/tensorstore/driver/neuroglancer_precomputed/metadata.cc index 97aa1c2da..a48ad120b 100644 --- a/tensorstore/driver/neuroglancer_precomputed/metadata.cc +++ b/tensorstore/driver/neuroglancer_precomputed/metadata.cc @@ -1481,7 +1481,7 @@ bool GetShardChunkHierarchy(const ShardingSpec& sharding_spec, ShardChunkHierarchy& hierarchy) { if (sharding_spec.hash_function != ShardingSpec::HashFunction::identity) { // For non-identity hash functions, the number of chunks per shard is not - // predicable and the shard doesn't correspond to a rectangular region + // predictable and the shard doesn't correspond to a rectangular region // anyway. 
return false; } diff --git a/tensorstore/index_interval_test.cc b/tensorstore/index_interval_test.cc index e4a05c2da..4c07aa98d 100644 --- a/tensorstore/index_interval_test.cc +++ b/tensorstore/index_interval_test.cc @@ -387,7 +387,7 @@ TEST(IndexIntervalTest, IntersectPreferringExplicit) { OIII{IndexInterval::UncheckedClosed(0, 10), false, false}), ::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false})); - // These may surprise you! explicit takes prededence over implicit! + // These may surprise you! explicit takes precedence over implicit! EXPECT_THAT( IntersectPreferringExplicit( OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex), diff --git a/tensorstore/index_space/internal/numpy_indexing_spec.cc b/tensorstore/index_space/internal/numpy_indexing_spec.cc index 6a1167c6d..1e8b15d07 100644 --- a/tensorstore/index_space/internal/numpy_indexing_spec.cc +++ b/tensorstore/index_space/internal/numpy_indexing_spec.cc @@ -281,7 +281,7 @@ void GetIndexedInputDims(const NumpyIndexingSpec& spec, } input_dims_per_intermediate_dim[intermediate_rank] = input_dim; - // Compute `indexed_input_dims` by reodering `input_dims_per_intermediate_dim` + // Compute `indexed_input_dims` by reordering `input_dims_per_intermediate_dim` // by `selected_dims`. for (const DimensionIndex intermediate_dim : selected_dims) { for (DimensionIndex diff --git a/tensorstore/index_space/internal/propagate_bounds.cc b/tensorstore/index_space/internal/propagate_bounds.cc index 34ae4e38a..3bebf33b2 100644 --- a/tensorstore/index_space/internal/propagate_bounds.cc +++ b/tensorstore/index_space/internal/propagate_bounds.cc @@ -92,7 +92,7 @@ absl::Status PropagateBoundsImpl(BoxView<> b, for (DimensionIndex b_dim = 0; b_dim < b.rank(); ++b_dim) { auto& map = maps[b_dim]; const Index output_stride = map.stride(); - // We dont't validate or propagate bounds for array-based output index maps. + // We don't validate or propagate bounds for array-based output index maps. 
if (map.method() == OutputIndexMethod::array) continue; OptionallyImplicitIndexInterval b_bounds_oi{b[b_dim], b_implicit_lower_bounds[b_dim], diff --git a/tensorstore/index_space/internal/transform_rep.h b/tensorstore/index_space/internal/transform_rep.h index 7c742fbad..888a65e2b 100644 --- a/tensorstore/index_space/internal/transform_rep.h +++ b/tensorstore/index_space/internal/transform_rep.h @@ -368,8 +368,8 @@ inline void NormalizeImplicitBounds(TransformRep& rep) { // Check that OutputIndexMap and std::string don't have a greater alignment // value than Index, as that would require more complicated logic for accessing // the variable length fields than is currently implemented. In practice these -// constraints should always be satisified. If this code needs to work on a -// platform that doesn't satisfy these contraints, the more complicated logic +// constraints should always be satisfied. If this code needs to work on a +// platform that doesn't satisfy these constraints, the more complicated logic // could be implemented. static_assert(alignof(OutputIndexMap) <= sizeof(Index), "Platform has unsupported alignment."); diff --git a/tensorstore/internal/cache/async_cache.h b/tensorstore/internal/cache/async_cache.h index 1e1bb2f14..368a7a790 100644 --- a/tensorstore/internal/cache/async_cache.h +++ b/tensorstore/internal/cache/async_cache.h @@ -378,7 +378,7 @@ class AsyncCache : public Cache { /// \param transaction[in,out] Transaction associated with the entry. If /// non-null, must specify an explicit transaction, and an associated /// transaction node will be created if one does not already exist. In - /// this case, the `tranaction` pointer itself will not be modified. An + /// this case, the `transaction` pointer itself will not be modified. An /// implicit transaction node associated with a new implicit transaction /// is requested by specifying `transaction` initially equally to /// `nullptr`. 
/// Upon return, `transaction` will hold an open transaction diff --git a/tensorstore/internal/cache/cache_test.cc b/tensorstore/internal/cache/cache_test.cc index 2969375f5..bba503a4c 100644 --- a/tensorstore/internal/cache/cache_test.cc +++ b/tensorstore/internal/cache/cache_test.cc @@ -70,7 +70,7 @@ class TestCache : public Cache { std::deque entry_allocate_log; // Log of calls to DoDeleteEntry. Contains the cache key and entry key. std::deque> entry_destroy_log; - // Log of calls to GetTestCache (defined below). ontains the cache key. + // Log of calls to GetTestCache (defined below). Contains the cache key. std::deque cache_allocate_log; // Log of calls to TestCache destructor. Contains the cache key. std::deque cache_destroy_log; diff --git a/tensorstore/internal/container/compressed_tuple_test.cc b/tensorstore/internal/container/compressed_tuple_test.cc index ec016b81c..92c4c7da7 100644 --- a/tensorstore/internal/container/compressed_tuple_test.cc +++ b/tensorstore/internal/container/compressed_tuple_test.cc @@ -308,7 +308,7 @@ TEST(CompressedTupleTest, Nested) { std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), &y.get<2>().get<1>().get<0>()}; #ifdef _MSC_VER - // MSVC has a bug where many instances of the same base class are layed out in + // MSVC has a bug where many instances of the same base class are laid out in // the same address when using __declspec(empty_bases). // This will be fixed in a future version of MSVC. int expected = 1; diff --git a/tensorstore/internal/curl/curl_factory.h b/tensorstore/internal/curl/curl_factory.h index 0818a51fa..f75a3f955 100644 --- a/tensorstore/internal/curl/curl_factory.h +++ b/tensorstore/internal/curl/curl_factory.h @@ -23,7 +23,7 @@ namespace internal_http { /// CurlHandleFactory creates and cleans up CURL* (CurlPtr) handles /// and CURLM* (CurlMulti) handles. 
/// -/// NOTE: These methods are virtual so that a curl factory can re-use +/// NOTE: These methods are virtual so that a curl factory can reuse /// curl handles. class CurlHandleFactory { public: diff --git a/tensorstore/internal/file_io_concurrency_resource.cc b/tensorstore/internal/file_io_concurrency_resource.cc index 8923fa950..3a7dc1252 100644 --- a/tensorstore/internal/file_io_concurrency_resource.cc +++ b/tensorstore/internal/file_io_concurrency_resource.cc @@ -27,7 +27,7 @@ namespace { struct FileIoConcurrencyResourceTraits : public ConcurrencyResourceTraits, public ContextResourceTraits { - // TODO(jbms): use beter method of picking concurrency limit + // TODO(jbms): use better method of picking concurrency limit FileIoConcurrencyResourceTraits() : ConcurrencyResourceTraits( std::max(size_t(4), size_t(std::thread::hardware_concurrency()))) {} diff --git a/tensorstore/internal/image/image_reader_test.cc b/tensorstore/internal/image/image_reader_test.cc index 238ae2db4..e281342e1 100644 --- a/tensorstore/internal/image/image_reader_test.cc +++ b/tensorstore/internal/image/image_reader_test.cc @@ -207,7 +207,7 @@ TEST_P(ReaderTest, ReadImageTruncated) { } // Most images generated via tiffcp . -// Query image paramters using tiffinfo +// Query image parameters using tiffinfo std ::vector GetD75_08_Values() { return { // upper-left corner: hw=0,0 => 151,75,83 diff --git a/tensorstore/internal/image/image_writer_test.cc b/tensorstore/internal/image/image_writer_test.cc index b77b337ce..c18fea248 100644 --- a/tensorstore/internal/image/image_writer_test.cc +++ b/tensorstore/internal/image/image_writer_test.cc @@ -234,7 +234,7 @@ TEST_P(WriterTest, RoundTrip) { double rmse = ComputeRMSE(decoded.data(), source.data(), source.size()); - /// When RMSE is not 0, verify that the actual value is witin 5%. + /// When RMSE is not 0, verify that the actual value is within 5%. 
if (GetParam().rmse_error_limit == 0) { EXPECT_EQ(0, rmse) << "\nA: " << source_info << " " << "\nB: " << decoded_info; diff --git a/tensorstore/internal/json_binding/json_binding.h b/tensorstore/internal/json_binding/json_binding.h index e3ef0fb5d..84c512344 100644 --- a/tensorstore/internal/json_binding/json_binding.h +++ b/tensorstore/internal/json_binding/json_binding.h @@ -484,7 +484,7 @@ constexpr auto Projection(Proj projection, Binder binder = DefaultBinder<>) { }; } -/// Binder adapter that projects the parsed representation using gettter/setter +/// Binder adapter that projects the parsed representation using getter/setter /// functions. /// /// Commonly this is used with `Member`, in order to bind a diff --git a/tensorstore/internal/riegeli/find.cc b/tensorstore/internal/riegeli/find.cc index 5fee2ea22..02b201de2 100644 --- a/tensorstore/internal/riegeli/find.cc +++ b/tensorstore/internal/riegeli/find.cc @@ -34,7 +34,7 @@ bool StartsWith(riegeli::Reader &reader, std::string_view needle) { memcmp(reader.cursor(), needle.data(), needle.size()) == 0; } -/// Seeks for the first occurence of data string starting from the current pos. +/// Seeks for the first occurrence of data string starting from the current pos. /// This works well enough for ZIP archives, since the tags do not have /// internal repetition. bool FindFirst(riegeli::Reader &reader, std::string_view needle) { diff --git a/tensorstore/internal/riegeli/find.h b/tensorstore/internal/riegeli/find.h index 26b1d66d8..589001a79 100644 --- a/tensorstore/internal/riegeli/find.h +++ b/tensorstore/internal/riegeli/find.h @@ -30,7 +30,7 @@ bool StartsWith(riegeli::Reader &reader, std::string_view needle); /// FindFirst() /// -/// Seeks for the first occurence of data string starting from the current +/// Seeks for the first occurrence of data string starting from the current /// pos. 
Implementation note: This implements a naive approach, not /// Knuth-Morris-Pratt, and is intended to be used for relatively short /// strings. @@ -38,7 +38,7 @@ bool FindFirst(riegeli::Reader &reader, std::string_view needle); /// FindLast() /// -/// Seeks for the last occurence of data string starting from reader. +/// Seeks for the last occurrence of data string starting from reader. bool FindLast(riegeli::Reader &reader, std::string_view needle); } // namespace internal diff --git a/tensorstore/internal/thread/schedule_at.h b/tensorstore/internal/thread/schedule_at.h index a09ef156a..90b80474e 100644 --- a/tensorstore/internal/thread/schedule_at.h +++ b/tensorstore/internal/thread/schedule_at.h @@ -22,7 +22,7 @@ namespace tensorstore { namespace internal { -/// Schedule an executor tast to run near a target time. +/// Schedule an executor task to run near a target time. /// Long-running tasks should use WithExecutor() to avoid blocking the thread. /// /// \ingroup async diff --git a/tensorstore/internal/utf8_test.cc b/tensorstore/internal/utf8_test.cc index 2e70fc723..8343fcfb2 100644 --- a/tensorstore/internal/utf8_test.cc +++ b/tensorstore/internal/utf8_test.cc @@ -50,7 +50,7 @@ TEST(IsValidUtf8Test, Empty) { TEST(IsValidUtf8Test, Ascii) { EXPECT_TRUE(IsValidUtf8("ascii")); - // Singe NUL byte + // Single NUL byte EXPECT_TRUE(IsValidUtf8(std::string_view("\0", 1))); } diff --git a/tensorstore/kvstore/file/file_key_value_store.cc b/tensorstore/kvstore/file/file_key_value_store.cc index 80369b86a..2c0eed61f 100644 --- a/tensorstore/kvstore/file/file_key_value_store.cc +++ b/tensorstore/kvstore/file/file_key_value_store.cc @@ -510,7 +510,7 @@ class BatchReadTask final exclusive_max = std::max(exclusive_max, byte_range.exclusive_max); total_size += byte_range.size(); } - // Normalize the minium bound to be a multiple of the page size. + // Normalize the minimum bound to be a multiple of the page size. 
if (inclusive_min < internal_os::GetDefaultPageSize()) { inclusive_min = 0; } else { diff --git a/tensorstore/kvstore/gcs/gcs_resource.h b/tensorstore/kvstore/gcs/gcs_resource.h index 930070fdd..3924822bc 100644 --- a/tensorstore/kvstore/gcs/gcs_resource.h +++ b/tensorstore/kvstore/gcs/gcs_resource.h @@ -30,7 +30,7 @@ namespace internal_storage_gcs { /// Optionally specifies a project to which all requests are billed. /// /// If not specified, requests to normal buckets are billed to the project -/// that owns the bucket, and requests to "requestor pays"-enabled buckets +/// that owns the bucket, and requests to "requester pays"-enabled buckets /// fail. struct GcsUserProjectResource : public internal::ContextResourceTraits { diff --git a/tensorstore/kvstore/gcs/schema.yml b/tensorstore/kvstore/gcs/schema.yml index c46d85a07..3e1f89a6f 100644 --- a/tensorstore/kvstore/gcs/schema.yml +++ b/tensorstore/kvstore/gcs/schema.yml @@ -13,7 +13,7 @@ allOf: title: Google Cloud Storage bucket to use. description: | The Google Cloud account that is used must have appropriate permissions - on the bucket. If the bucket has `Requestor Pays + on the bucket. If the bucket has `Requester Pays `_ enabled, either additional permissions are required or a separate billing project must be specified using `Context.gcs_user_project`. @@ -82,7 +82,7 @@ definitions: description: | Specifies a Google Cloud project to bill for Google Cloud Storage requests. If a `project_id` is not specified, requests are - billed to the project that owns the bucket by default. For `Requestor + billed to the project that owns the bucket by default. For `Requester Pays `_ buckets, however, requests without a `.project_id` specified will fail unless the Google Cloud account has additional permissions. 
diff --git a/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc b/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc index 9f4404e95..08681b145 100644 --- a/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc +++ b/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc @@ -548,7 +548,7 @@ struct ReadTask : public RateLimiterNode, options.generation_conditions.if_equal); // Assume that if the user_project field is set, that we want to provide - // it on the uri for a requestor pays bucket. + // it on the uri for a requester pays bucket. AddUserProjectParam(&media_url, true, owner->encoded_user_project()); AddUniqueQueryParameterToDisableCaching(media_url); @@ -751,7 +751,7 @@ struct WriteTask : public RateLimiterNode, options.generation_conditions.if_equal); // Assume that if the user_project field is set, that we want to provide - // it on the uri for a requestor pays bucket. + // it on the uri for a requester pays bucket. AddUserProjectParam(&upload_url, true, owner->encoded_user_project()); auto maybe_auth_header = owner->GetAuthHeader(); @@ -906,7 +906,7 @@ struct DeleteTask : public RateLimiterNode, options.generation_conditions.if_equal); // Assume that if the user_project field is set, that we want to provide - // it on the uri for a requestor pays bucket. + // it on the uri for a requester pays bucket. AddUserProjectParam(&delete_url, has_query, owner->encoded_user_project()); auto maybe_auth_header = owner->GetAuthHeader(); diff --git a/tensorstore/kvstore/gcs_http/gcs_mock.h b/tensorstore/kvstore/gcs_http/gcs_mock.h index 9c537aac8..f8737ccc1 100644 --- a/tensorstore/kvstore/gcs_http/gcs_mock.h +++ b/tensorstore/kvstore/gcs_http/gcs_mock.h @@ -57,7 +57,7 @@ class GCSMockStorageBucket { /// /// \param bucket The bucket name. 
/// \param requestor_pays_project_id If not `std::nullopt`, this bucket - /// behaves as requestor pays and furthermore validates that the + /// behaves as requester pays and furthermore validates that the /// requestor_pays project id is equal to the specified value. The check /// for an exact project id is a mock version of the actual check done by /// GCS that the specified project ID has billing enabled. diff --git a/tensorstore/kvstore/ocdbt/distributed/cooperator_submit_mutation_batch.cc b/tensorstore/kvstore/ocdbt/distributed/cooperator_submit_mutation_batch.cc index 582e94fbc..91873488c 100644 --- a/tensorstore/kvstore/ocdbt/distributed/cooperator_submit_mutation_batch.cc +++ b/tensorstore/kvstore/ocdbt/distributed/cooperator_submit_mutation_batch.cc @@ -207,7 +207,7 @@ struct SubmitMutationBatchOperation << "] SendToPeer: " << state->node_identifier; auto* state_ptr = state.get(); // Construct a new `grpc::ClientContext` for this RPC request. It is not - // permitted to re-use a `ClientContext` for multiple RPC requests. + // permitted to reuse a `ClientContext` for multiple RPC requests. state->client_context.emplace(); state->request.Clear(); state->request.set_lease_key(state->lease_node->key); diff --git a/tensorstore/kvstore/ocdbt/distributed/coordinator.proto b/tensorstore/kvstore/ocdbt/distributed/coordinator.proto index 7d20a84fc..bc6305c19 100644 --- a/tensorstore/kvstore/ocdbt/distributed/coordinator.proto +++ b/tensorstore/kvstore/ocdbt/distributed/coordinator.proto @@ -52,7 +52,7 @@ message LeaseResponse { // Address (hostname:port) of the owner. optional bytes owner = 1; - // Indicates if the requestor is the owner. + // Indicates if the requester is the owner. optional bool is_owner = 2; // Expiration time of the lease. 
diff --git a/tensorstore/kvstore/ocdbt/index.rst b/tensorstore/kvstore/ocdbt/index.rst index 3f3acd58d..f6262e32e 100644 --- a/tensorstore/kvstore/ocdbt/index.rst +++ b/tensorstore/kvstore/ocdbt/index.rst @@ -287,7 +287,7 @@ Zstd compression configuration .. _ocdbt-config-zstd-level: ``level`` - Compresion level to use when writing. + Compression level to use when writing. .. _ocdbt-manifest-version-tree: @@ -520,7 +520,7 @@ Version tree leaf node entries format (``height = 0``) The same encoded representation is used for both the entries of a leaf :ref:`version tree node` and for the :ref:`ocdbt-manifest-version-tree-inline-versions` specified in the -:ref:`manfiest`. +:ref:`manifest`. .. |data_file_id_format| replace:: |varint| @@ -577,7 +577,7 @@ The same encoded representation is used for both the entries of a leaf The same computation of ``num_versions`` applies to both leaf node entries included in a :ref:`version tree node`, and :ref:`ocdbt-manifest-version-tree-inline-versions` included in the - :ref:`manfiest`. In the former case, the and + :ref:`manifest`. In the former case, the and :ref:`ocdbt-version-tree-version-tree-arity-log2` value is obtained from the version node. In the latter case, the :ref:`ocdbt-config-version-tree-arity-log2` value is taken from the @@ -649,7 +649,7 @@ Interior version tree node entries (``height > 0``) The same encoded representation is used for both the entries of an interior :ref:`version tree node` and for the :ref:`ocdbt-manifest-version-tree-version-nodes` version tree nodes specified in -the :ref:`manfiest`, but the interpretation +the :ref:`manifest`, but the interpretation differs, as described below. +----------------------------------------------------+--------------------------+-----------------------------------------------+ @@ -753,7 +753,7 @@ additional field: :ref:`ocdbt-manifest-version-tree-version-nodes` in the manifest. The heights must be decreasing, i.e. 
``entry_height[i] > entry_height[j]`` if ``i < j``. - When the encoded representaiton is used to specify the entries of an interior + When the encoded representation is used to specify the entries of an interior version tree node, this field is not present and instead, for the purpose of this specification, ``entry_height[i]`` is implicitly equal to ``height - 1``, where :ref:`ocdbt-version-tree-height` is obtained from the version tree node. diff --git a/tensorstore/kvstore/ocdbt/io/manifest_cache.cc b/tensorstore/kvstore/ocdbt/io/manifest_cache.cc index c5752f2ee..c20be2433 100644 --- a/tensorstore/kvstore/ocdbt/io/manifest_cache.cc +++ b/tensorstore/kvstore/ocdbt/io/manifest_cache.cc @@ -552,7 +552,7 @@ void ListAndReadNumberedManifests( if (cached_manifest && cached_manifest->latest_generation() == generation_number) { - // No newer version is present, just re-use cached manifest. + // No newer version is present, just reuse cached manifest. ABSL_LOG_IF(INFO, ocdbt_logging) << "Using cached numbered manifest: " << generation_number; numbered_manifest->manifest = std::move(cached_manifest); diff --git a/tensorstore/kvstore/ocdbt/non_distributed/btree_writer.cc b/tensorstore/kvstore/ocdbt/non_distributed/btree_writer.cc index 1dddc113a..d8baed17b 100644 --- a/tensorstore/kvstore/ocdbt/non_distributed/btree_writer.cc +++ b/tensorstore/kvstore/ocdbt/non_distributed/btree_writer.cc @@ -379,7 +379,7 @@ Future NonDistributedBtreeWriter::CopySubtree( CopySubtreeOptions&& options) { // TODO(jbms): Currently this implementation avoids copying indirect values, // but never reuses B+tree nodes. A more efficient implementation that - // re-uses B+tree nodes in many cases is possible. + // reuses B+tree nodes in many cases is possible. 
ABSL_LOG_IF(INFO, ocdbt_logging) << "CopySubtree: " << options.node << ", height=" << static_cast(options.node_height) diff --git a/tensorstore/kvstore/ocdbt/non_distributed/staged_mutations.cc b/tensorstore/kvstore/ocdbt/non_distributed/staged_mutations.cc index c508caf6c..2038c86f3 100644 --- a/tensorstore/kvstore/ocdbt/non_distributed/staged_mutations.cc +++ b/tensorstore/kvstore/ocdbt/non_distributed/staged_mutations.cc @@ -179,7 +179,7 @@ void InsertWriteEntry(StagedMutations& staged, } existing_entry->key_ = KeyRange::Successor(entry->key_); if (existing_entry->key_ != existing_entry->exclusive_max_) { - // "Right" interval is non-empty. Re-use the existing entry for the + // "Right" interval is non-empty. Reuse the existing entry for the // right interval. staged.entries.Insert({entry, MutationEntryTree::kRight}, *existing_entry); existing_entry->superseded_ = std::move(split_result.trees[1]); diff --git a/tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.cc b/tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.cc index b2899b745..787287b53 100644 --- a/tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.cc +++ b/tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.cc @@ -407,7 +407,7 @@ Future AddCopySubtree( BtreeWriter::CopySubtreeOptions&& options) { // TODO(jbms): Currently this implementation avoids copying indirect values, // but never reuses B+tree nodes. A more efficient implementation that - // re-uses B+tree nodes in many cases is possible. + // reuses B+tree nodes in many cases is possible. ABSL_LOG_IF(INFO, ocdbt_logging) << "CopySubtree: " << options.node << ", height=" << static_cast(options.node_height) diff --git a/tensorstore/kvstore/s3/index.rst b/tensorstore/kvstore/s3/index.rst index 7f81db9ca..ea4232fce 100644 --- a/tensorstore/kvstore/s3/index.rst +++ b/tensorstore/kvstore/s3/index.rst @@ -9,7 +9,7 @@ Keys directly correspond to paths within an S3 bucket. .. 
warning:: The ``s3`` key-value store driver does not provide all the atomicity - guarantees required by tensorstore. On AWS, specfically, DELETE is not + guarantees required by tensorstore. On AWS, specifically, DELETE is not atomic, which leads to race conditions. On other S3-compatible object stores even PUT may not be atomic. diff --git a/tensorstore/kvstore/s3/s3_request_builder_test.cc b/tensorstore/kvstore/s3/s3_request_builder_test.cc index c8661232e..4f1fc067d 100644 --- a/tensorstore/kvstore/s3/s3_request_builder_test.cc +++ b/tensorstore/kvstore/s3/s3_request_builder_test.cc @@ -120,7 +120,7 @@ e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 TEST(S3RequestBuilderTest, AWS4SignatureGetExample) { // https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html - // These values from worked exapmle in "Example: GET Object" Section + // These values from worked example in "Example: GET Object" Section const auto credentials = AwsCredentials::Make( "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", ""); diff --git a/tensorstore/kvstore/transaction.cc b/tensorstore/kvstore/transaction.cc index 680c42316..423b27c92 100644 --- a/tensorstore/kvstore/transaction.cc +++ b/tensorstore/kvstore/transaction.cc @@ -1097,7 +1097,7 @@ MultiPhaseMutation::ReadModifyWriteStatus MultiPhaseMutation::ReadModifyWrite( } existing_entry->key_ = KeyRange::Successor(entry->key_); if (existing_entry->key_ != existing_entry->exclusive_max_) { - // "Right" interval is non-empty. Re-use the existing entry for the + // "Right" interval is non-empty. Reuse the existing entry for the // right interval. 
single_phase_mutation.entries_.Insert({entry, MutationEntryTree::kRight}, *existing_entry); diff --git a/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc b/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc index 498e09239..9bed098e6 100644 --- a/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc +++ b/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc @@ -792,7 +792,7 @@ class ShardedKeyValueStore // Constructs the key value store. // // Args: - // params: Parmaeters for opening. + // params: Parameters for opening. // shared_cache_key: Cache key to use. Set when opened directly from a JSON // spec, but not used when opened by the zarr v3 driver. explicit ShardedKeyValueStore(ShardedKeyValueStoreParameters&& params, diff --git a/tensorstore/kvstore/zip/zip_dir_cache.cc b/tensorstore/kvstore/zip/zip_dir_cache.cc index 202fbda5f..a6ed381f2 100644 --- a/tensorstore/kvstore/zip/zip_dir_cache.cc +++ b/tensorstore/kvstore/zip/zip_dir_cache.cc @@ -180,7 +180,7 @@ struct ReadDirectoryOp !ready.value().has_value()) { // Any non-value is an error. entry_->ReadError( - absl::InvalidArgumentError("Faild to read ZIP directory")); + absl::InvalidArgumentError("Failed to read ZIP directory")); return; } diff --git a/tensorstore/transaction_impl.h b/tensorstore/transaction_impl.h index f8f77f667..928c5a626 100644 --- a/tensorstore/transaction_impl.h +++ b/tensorstore/transaction_impl.h @@ -111,7 +111,7 @@ namespace internal { /// `OpenTransactionPtr` smart pointer is destroyed. /// /// - If the chunk was already modified in the transaction by a prior write, -/// the existing transaction node will be re-used. The existing +/// the existing transaction node will be reused. The existing /// transaction node is located in the red-black tree of transaction nodes /// (keyed by their associated `TransactionState` pointers) stored within /// the `AsyncCache::Entry`. 
@@ -182,7 +182,7 @@ namespace internal { /// calling `GetTransactionNode`. /// /// - If the chunk has an existing implicit transaction node that is still -/// open (i.e. not already being committed), it is re-used. +/// open (i.e. not already being committed), it is reused. /// /// - Otherwise, a new implicit transaction node is created, but not yet /// associated with a transaction. When the new transaction node is @@ -213,8 +213,8 @@ namespace internal { /// node. /// /// - If there is already an open implicit transaction node associated with -/// the shard that contains the chunk, it is re-used, and the associated -/// implicit transaction is re-used by the upstream transaction node in the +/// the shard that contains the chunk, it is reused, and the associated +/// implicit transaction is reused by the upstream transaction node in the /// `ChunkCache`. /// /// - Otherwise, a new implicit transaction node is created for the shard, diff --git a/tensorstore/util/future_impl.h b/tensorstore/util/future_impl.h index ee9748564..961f51e1d 100644 --- a/tensorstore/util/future_impl.h +++ b/tensorstore/util/future_impl.h @@ -1377,7 +1377,7 @@ class FutureLink promise, assert(promise.ready()); return {}; case FutureErrorPropagationResult::kReady: - // The Policy is immediately invokable, avoid creating a link and invoke + // The Policy is immediately invocable, avoid creating a link and invoke // the callback directly. The callback should set the promise value. std::forward(callback)( std::move(promise), diff --git a/tensorstore/util/result.h b/tensorstore/util/result.h index 3b6baf3b3..fe9614915 100644 --- a/tensorstore/util/result.h +++ b/tensorstore/util/result.h @@ -103,7 +103,7 @@ using std::in_place_t; /// function which may fail. 
/// /// Initialization with a non-error `absl::Status` is only allowed for -/// `Result`, otherwise non-error `absl::Status` initilization is +/// `Result`, otherwise non-error `absl::Status` initialization is /// nonsensical because it does not provide a value. /// /// Conversion from `Result` to `Result` is allowed; the status diff --git a/tensorstore/util/result_test.cc b/tensorstore/util/result_test.cc index c8460bdd9..380edbe4e 100644 --- a/tensorstore/util/result_test.cc +++ b/tensorstore/util/result_test.cc @@ -346,7 +346,7 @@ struct Aggregate { }; TEST(ResultTest, Aggregate) { - // NOTE: c++ does not treate Aggregate initialization as a constructor, + // NOTE: c++ does not treat Aggregate initialization as a constructor, // so we cannot use emplace or in_place_t constructors here. Result a(Aggregate{1, 2, 3}); ASSERT_TRUE(a.has_value()); diff --git a/tools/cmake/TensorstoreDebugHelpers.cmake b/tools/cmake/TensorstoreDebugHelpers.cmake index bfe0e96b9..512b2addf 100644 --- a/tools/cmake/TensorstoreDebugHelpers.cmake +++ b/tools/cmake/TensorstoreDebugHelpers.cmake @@ -80,7 +80,7 @@ function(dump_cmake_targets directory) endfunction() -# Get all propreties that cmake supports +# Get all properties that cmake supports if(NOT CMAKE_PROPERTY_LIST) execute_process(COMMAND cmake --help-property-list OUTPUT_VARIABLE CMAKE_PROPERTY_LIST) diff --git a/tools/cmake/bazel_to_cmake/emit_cc.py b/tools/cmake/bazel_to_cmake/emit_cc.py index 49b0913be..c4abecdfa 100644 --- a/tools/cmake/bazel_to_cmake/emit_cc.py +++ b/tools/cmake/bazel_to_cmake/emit_cc.py @@ -269,7 +269,7 @@ def construct_cc_includes( if include_prefix is not None: # The prefix to add to the paths of the headers of this rule. # - # "When set, the headers in the hdrs attribute of this rule are accessable + # "When set, the headers in the hdrs attribute of this rule are accessible # at is the value of this attribute prepended to their repository-relative # path." 
# diff --git a/tools/cmake/bazel_to_cmake/evaluation.py b/tools/cmake/bazel_to_cmake/evaluation.py index e72f3b82a..56958a48b 100644 --- a/tools/cmake/bazel_to_cmake/evaluation.py +++ b/tools/cmake/bazel_to_cmake/evaluation.py @@ -523,7 +523,7 @@ def load_library(self, target_id: TargetId) -> Dict[str, Any]: Loads it if not already loaded in the current phase. Libraries are loaded separately for the workspace loading and package loading phases. - 1. If the target has been overriden by a call to `register_bzl_library` or + 1. If the target has been overridden by a call to `register_bzl_library` or has been added as an ignored library to the workspace, the overridden implementation will be used. diff --git a/tools/cmake/bazel_to_cmake/native_aspect_proto.py b/tools/cmake/bazel_to_cmake/native_aspect_proto.py index 7edbb7130..89c84e20f 100644 --- a/tools/cmake/bazel_to_cmake/native_aspect_proto.py +++ b/tools/cmake/bazel_to_cmake/native_aspect_proto.py @@ -171,7 +171,7 @@ def _generated_files(): # Single Protoaspect details ############################################################################## # -# Bazel proto_libraries() can reference soruce files multiple times, so +# Bazel proto_libraries() can reference source files multiple times, so # the aspect actually needs to generate a target per source file. # def _assert_is_proto(src: TargetId): @@ -376,7 +376,7 @@ def aspect_genproto_library_target( proto_library_provider = proto_target_info.get(ProtoLibraryProvider) assert proto_library_provider is not None - # Resovle aspect deps, excluding self. + # Resolve aspect deps, excluding self. 
aspect_deps: Set[TargetId] = set() aspect_deps.update(plugin_settings.aspectdeps(proto_target)) for d in proto_library_provider.deps: diff --git a/tools/cmake/bazel_to_cmake/variable_substitution.py b/tools/cmake/bazel_to_cmake/variable_substitution.py index 85b11f67e..6e9369f1d 100644 --- a/tools/cmake/bazel_to_cmake/variable_substitution.py +++ b/tools/cmake/bazel_to_cmake/variable_substitution.py @@ -254,7 +254,7 @@ def apply_location_substitutions( relative_to: str, add_dependencies: Optional[List[CMakeTarget]] = None, ) -> str: - """Substitues $(location) references in `cmd`. + """Substitutes $(location) references in `cmd`. https://bazel.build/reference/be/make-variables#predefined_label_variables