Commit 13525d2

Fix misc spelling
1 parent 3358f6b commit 13525d2

29 files changed, +72 -72 lines changed

docker/resources/systemctl.py

Lines changed: 6 additions & 6 deletions
@@ -1949,7 +1949,7 @@ def do_start_unit_from(self, conf):
  service_result = "failed"
  break
  if service_result in [ "success" ] and mainpid:
- logg.debug("okay, wating on socket for %ss", timeout)
+ logg.debug("okay, waiting on socket for %ss", timeout)
  results = self.wait_notify_socket(notify, timeout, mainpid)
  if "MAINPID" in results:
  new_pid = results["MAINPID"]
@@ -2756,7 +2756,7 @@ def get_substate_from(self, conf):
  else:
  return "dead"
  def is_failed_modules(self, *modules):
- """ [UNIT]... -- check if these units are in failes state
+ """ [UNIT]... -- check if these units are in failed state
  implements True if any is-active = True """
  units = []
  results = []
@@ -3618,7 +3618,7 @@ def syntax_check_service(self, conf):
  + "\n\t\t\tUse ' ; ' for multiple commands (ExecReloadPost or ExedReloadPre do not exist)", unit)
  if len(usedExecReload) > 0 and "/bin/kill " in usedExecReload[0]:
  logg.warning(" %s: the use of /bin/kill is not recommended for ExecReload as it is asychronous."
- + "\n\t\t\tThat means all the dependencies will perform the reload simultanously / out of order.", unit)
+ + "\n\t\t\tThat means all the dependencies will perform the reload simultaneously / out of order.", unit)
  if conf.getlist("Service", "ExecRestart", []): #pragma: no cover
  logg.error(" %s: there no such thing as an ExecRestart (ignored)", unit)
  if conf.getlist("Service", "ExecRestartPre", []): #pragma: no cover
@@ -3854,7 +3854,7 @@ def enabled_default_system_services(self, sysv = "S", default_target = None, ign
  default_services.append(unit)
  for folder in [ self.rc3_root_folder() ]:
  if not os.path.isdir(folder):
- logg.warning("non-existant %s", folder)
+ logg.warning("non-existent %s", folder)
  continue
  for unit in sorted(os.listdir(folder)):
  path = os.path.join(folder, unit)
@@ -3960,7 +3960,7 @@ def init_modules(self, *modules):
  it was never enabled in the system.
  /// SPECIAL: when using --now then only the init-loop is started,
  with the reap-zombies function and waiting for an interrupt.
- (and no unit is started/stoppped wether given or not).
+ (and no unit is started/stopped wether given or not).
  """
  if self._now:
  return self.init_loop_until_stop([])
@@ -4387,7 +4387,7 @@ def logg_debug(*msg): pass
  _o.add_option("--reverse", action="store_true",
  help="Show reverse dependencies with 'list-dependencies' (ignored)")
  _o.add_option("--job-mode", metavar="MODE",
- help="Specifiy how to deal with already queued jobs, when queuing a new job (ignored)")
+ help="Specify how to deal with already queued jobs, when queuing a new job (ignored)")
  _o.add_option("--show-types", action="store_true",
  help="When showing sockets, explicitly show their type (ignored)")
  _o.add_option("-i","--ignore-inhibitors", action="store_true",

docs/configuration-discovery-pseudo-gtid.md

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ See [Pseudo GTID](pseudo-gtid.md)
  "AutoPseudoGTID": true,
  }
  ```
- And you may ignore any other Pseudo-GTID related configuration (they will all be implicitly overriden by `orchestrator`).
+ And you may ignore any other Pseudo-GTID related configuration (they will all be implicitly overridden by `orchestrator`).

  You will further need to grant the following on your MySQL servers:
  ```sql

docs/deployment-shared-backend.md

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ To interact with orchestrator from shell/automation/scripts, you may choose to:
  - The [orchestrator command line](executing-via-command-line.md).
  - Deploy the `orchestrator` binary (you may use the `orchestrator-cli` distributed package) on any box from which you wish to interact with `orchestrator`.
  - Create `/etc/orchestrator.conf.json` on those boxes, populate with credentials. This file should generally be the same as for the `orchestrator` service boxes. If you're unsure, use exact same file content.
- - The `orchestrator` binary will access the shared backend DB. Make sure to give it access. Typicaly this will be port `3306`.
+ - The `orchestrator` binary will access the shared backend DB. Make sure to give it access. Typically this will be port `3306`.

  It is OK to run `orchestrator` CLI even while the `orchestrator` service is operating, since they will all coordinate on the same backend DB.
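Since the hunk above mentions populating `/etc/orchestrator.conf.json` with credentials for the shared backend DB, here is a minimal sketch of the backend-related settings such a file typically carries (key names assume orchestrator's standard `MySQLOrchestrator*` configuration variables; host, schema and credentials are placeholders, not values from this commit):

```json
{
  "MySQLOrchestratorHost": "shared-backend-db.example.com",
  "MySQLOrchestratorPort": 3306,
  "MySQLOrchestratorDatabase": "orchestrator",
  "MySQLOrchestratorUser": "orchestrator_srv",
  "MySQLOrchestratorPassword": "change-me"
}
```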

docs/developers.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Developers
22

3-
To build, test and contribute to `orchestrator`, please refer t othe following pages:
3+
To build, test and contribute to `orchestrator`, please refer to the following pages:
44

55
- [Understanding CI](ci.md)
66
- [Building and testing](build.md)

docs/docker.md

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ file is bind mounted into container at `/etc/orchestrator.conf.json`
  * `ORC_USER`: defaults to `orc_server_user`
  * `ORC_PASSWORD`: defaults to `orc_server_password`

- To set these variables you could add these to an environment file where you add them like `key=value` (one pair per line). You can then pass this enviroment file to the docker command adding `--env-file=path/to/env-file` to the `docker run` command.
+ To set these variables you could add these to an environment file where you add them like `key=value` (one pair per line). You can then pass this environment file to the docker command adding `--env-file=path/to/env-file` to the `docker run` command.

  ## Create package files
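To illustrate the `key=value` env-file mechanism the hunk above describes, a minimal sketch (the file name, image name and password value are placeholders, not part of this commit; the variable names come from the snippet above):

```sh
# orchestrator.env -- one key=value pair per line
ORC_USER=orc_server_user
ORC_PASSWORD=s3cr3t

# pass the file to the container at startup
docker run --env-file=./orchestrator.env <your-orchestrator-image>
```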

docs/high-availability.md

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ This setup provides semi HA for `orchestrator`. Two variations available:
  - The proxy always directs to same server (e.g. `first` algorithm for `HAProxy`) unless that server is dead.
  - Death of the active master causes `orchestrator` to talk to other master, which may be somewhat behind. `orchestrator` will typically self reapply the missing changes by nature of its continuous discovery.
  - `orchestrator` queries guarantee `STATEMENT` based replication will not cause duplicate errors, and master-master setup will always achieve consistency.
- - `orchestrator` will be able to recover the death of a backend master even if in the middle of runnign a recovery (recovery will re-initiate on alternate master)
+ - `orchestrator` will be able to recover the death of a backend master even if in the middle of running a recovery (recovery will re-initiate on alternate master)
  - **Split brain is possible**. Depending on your setup, physical locations, type of proxy, there can be different `orchestrator` service nodes speaking to different backend `MySQL` servers. This scenario can lead two two `orchestrator` services which consider themselves as "active", both of which will run failovers independently, which would lead to topology corruption.

  To access your `orchestrator` service you may speak to any healthy node.
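For reference, the `first` balancing algorithm the hunk above refers to would look roughly like this in an `HAProxy` configuration (listener port, server names and addresses are placeholders):

```
listen orchestrator-backend-mysql
    bind *:3306
    mode tcp
    balance first
    server backend-db-1 10.0.0.1:3306 check
    server backend-db-2 10.0.0.2:3306 check
```

With `balance first`, connections go to the first listed server that is still accepting them, so all `orchestrator` nodes keep using the same backend master until it becomes unavailable.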

docs/risks.md

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ Most of the time `orchestrator` only reads status from your topologies. Default
  You may use `orchestrator` to refactor your topologies: move replicas around and change the replication tree. `orchestrator` will do its best to:

  1. Make sure you only move an instance to a location where it is valid for it to replicate (e.g. that you don't put a 5.5 server below a 5.6 server)
- 2. Make sure you move an instance at the right time (ie the instance and whichever affected servers are not lagging badly, so that operation can compeletely in a timely manner).
+ 2. Make sure you move an instance at the right time (ie the instance and whichever affected servers are not lagging badly, so that operation can completely in a timely manner).
  3. Do the math correctly: stop the replica at the right time, roll it forward to the right position, `CHANGE MASTER` to the correct location & position.

  The above is well tested.

docs/using-the-web-api.md

Lines changed: 3 additions & 3 deletions
@@ -139,11 +139,11 @@ The structure of an Instance evolves and documentation will always fall behind.
  * `ReplicationLagSeconds`: when `ReplicationLagQuery` provided, the computed replica lag; otherwise same as `SecondsBehindMaster`
  * `Replicas`: list of MySQL replicas _hostname & port_)
  * `ClusterName`: name of cluster this instance is associated with; uniquely identifies cluster
- * `DataCenter`: (metadata) name of data center, infered by `DataCenterPattern` config variable
- * `PhysicalEnvironment`: (metadata) name of environment, infered by `PhysicalEnvironmentPattern` config variable
+ * `DataCenter`: (metadata) name of data center, inferred by `DataCenterPattern` config variable
+ * `PhysicalEnvironment`: (metadata) name of environment, inferred by `PhysicalEnvironmentPattern` config variable
  * `ReplicationDepth`: distance from the master (master is `0`, direct replica is `1` and so on)
  * `IsCoMaster`: true when this instanceis part of a master-master pair
- * `IsLastCheckValid`: whether last attempt at reading this instane succeeeded
+ * `IsLastCheckValid`: whether last attempt at reading this instance succeeeded
  * `IsUpToDate`: whether this data is up to date
  * `IsRecentlyChecked`: whether a read attempt on this instance has been recently made
  * `SecondsSinceLastSeen`: time elapsed since last successfully accessed this instance
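To make the field list above concrete, a hand-written sketch of the kind of values these fields carry (illustrative only, not literal API output; numeric fields such as `ReplicationLagSeconds` and `SecondsSinceLastSeen` may be serialized as nullable wrappers and are omitted here):

```json
{
  "ClusterName": "some.master.example.com:3306",
  "DataCenter": "dc1",
  "PhysicalEnvironment": "production",
  "ReplicationDepth": 1,
  "IsCoMaster": false,
  "IsLastCheckValid": true,
  "IsUpToDate": true,
  "IsRecentlyChecked": true
}
```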

go/agent/agent_dao.go

Lines changed: 3 additions & 3 deletions
@@ -63,7 +63,7 @@ func InitHttpClient() {
  httpClient = &http.Client{Transport: httpTransport}
  }

- // httpGet is a convenience method for getting http response from URL, optionaly skipping SSL cert verification
+ // httpGet is a convenience method for getting http response from URL, optionally skipping SSL cert verification
  func httpGet(url string) (resp *http.Response, err error) {
  return httpClient.Get(url)
  }
@@ -683,7 +683,7 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err

  seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Checking MySQL status on target %s", targetHostname), "")
  if targetAgent.MySQLRunning {
- return updateSeedStateEntry(seedStateId, errors.New("MySQL is running on target host. Cowardly refusing to proceeed. Please stop the MySQL service"))
+ return updateSeedStateEntry(seedStateId, errors.New("MySQL is running on target host. Cowardly refusing to proceed. Please stop the MySQL service"))
  }

  seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Looking up available snapshots on source %s", sourceHostname), "")
@@ -711,7 +711,7 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err
  return updateSeedStateEntry(seedStateId, err)
  }

- seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Aquiring target host datadir free space on %s", targetHostname), "")
+ seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Acquiring target host datadir free space on %s", targetHostname), "")
  targetAgent, err = GetAgent(targetHostname)
  if err != nil {
  return updateSeedStateEntry(seedStateId, err)

go/app/command_help.go

Lines changed: 13 additions & 13 deletions
@@ -208,7 +208,7 @@ func init() {
  CommandHelp["match-replicas"] = `
  Matches all replicas of a given instance under another (destination) instance. This is a (faster) shortcut
  to matching said replicas one by one under the destination instance. In fact, this bulk operation is highly
- optimized and can execute in orders of magnitue faster, depeding on the nu,ber of replicas involved and their
+ optimized and can execute in orders of magnitue faster, depending on the nu,ber of replicas involved and their
  respective position behind the instance (the more replicas, the more savings).
  The instance itself may be crashed or inaccessible. It is not contacted throughout the operation. Examples:

@@ -254,7 +254,7 @@ func init() {
  local master of its siblings, using Pseudo-GTID. It is uncertain that there *is* a replica that will be able to
  become master to all its siblings. But if there is one, orchestrator will pick such one. There are many
  constraints, most notably the replication positions of all replicas, whether they use log_slave_updates, and
- otherwise version compatabilities etc.
+ otherwise version compatibilities etc.
  As many replicas that can be regrouped under promoted slves are operated on. The rest are untouched.
  This command is useful in the event of a crash. For example, in the event that a master dies, this operation
  can promote a candidate replacement and set up the remaining topology to correctly replicate from that
@@ -324,7 +324,7 @@ func init() {
  Undo a detach-replica operation. Reverses the binlog change into the original values, and
  resumes replication. Example:

- orchestrator -c reattach-replica -i detahced.replica.whose.replication.will.amend.com
+ orchestrator -c reattach-replica -i detached.replica.whose.replication.will.amend.com

  Issuing this on an attached (i.e. normal) replica will do nothing.
  `
@@ -340,7 +340,7 @@ func init() {
  Undo a detach-replica-master-host operation. Reverses the hostname change into the original value, and
  resumes replication. Example:

- orchestrator -c reattach-replica-master-host -i detahced.replica.whose.replication.will.amend.com
+ orchestrator -c reattach-replica-master-host -i detached.replica.whose.replication.will.amend.com

  Issuing this on an attached (i.e. normal) replica will do nothing.
  `
@@ -397,7 +397,7 @@ func init() {
  Get binlog file:pos of entry given by --pattern (exact full match, not a regular expression) in a given instance.
  This will search the instance's binary logs starting with most recent, and terminate as soon as an exact match is found.
  The given input is not a regular expression. It must fully match the entry (not a substring).
- This is most useful when looking for uniquely identifyable values, such as Pseudo-GTID. Example:
+ This is most useful when looking for uniquely identifiable values, such as Pseudo-GTID. Example:

  orchestrator -c find-binlog-entry -i instance.to.search.on.com --pattern "insert into my_data (my_column) values ('distinct_value_01234_56789')"

@@ -480,7 +480,7 @@ func init() {
  -i not given, implicitly assumed local hostname

  Instance must be already known to orchestrator. Topology is generated by orchestrator's mapping
- and not from synchronuous investigation of the instances. The generated topology may include
+ and not from synchronous investigation of the instances. The generated topology may include
  instances that are dead, or whose replication is broken.
  `
  CommandHelp["all-instances"] = `
@@ -612,7 +612,7 @@ func init() {
  assuming some_alias is a known cluster alias (see ClusterNameToAlias or DetectClusterAliasQuery configuration)
  `
  CommandHelp["instance-status"] = `
- Output short status on a given instance (name, replication status, noteable configuration). Example2:
+ Output short status on a given instance (name, replication status, notable configuration). Example2:

  orchestrator -c instance-status -i instance.to.investigate.com

@@ -631,7 +631,7 @@ func init() {

  CommandHelp["discover"] = `
  Request that orchestrator cotacts given instance, reads its status, and upsert it into
- orchestrator's respository. Examples:
+ orchestrator's repository. Examples:

  orchestrator -c discover -i instance.to.discover.com:3306

@@ -655,7 +655,7 @@ func init() {
  `
  CommandHelp["begin-maintenance"] = `
  Request a maintenance lock on an instance. Topology changes require placing locks on the minimal set of
- affected instances, so as to avoid an incident of two uncoordinated operations on a smae instance (leading
+ affected instances, so as to avoid an incident of two uncoordinated operations on a same instance (leading
  to possible chaos). Locks are placed in the backend database, and so multiple orchestrator instances are safe.
  Operations automatically acquire locks and release them. This command manually acquires a lock, and will
  block other operations on the instance until lock is released.
@@ -680,7 +680,7 @@ func init() {
  Mark an instance as downtimed. A downtimed instance is assumed to be taken care of, and recovery-analysis does
  not apply for such an instance. As result, no recommendation for recovery, and no automated-recovery are issued
  on a downtimed instance.
- Downtime is different than maintanence in that it places no lock (mainenance uses an exclusive lock on the instance).
+ Downtime is different than maintenance in that it places no lock (mainenance uses an exclusive lock on the instance).
  It is OK to downtime an instance that is already downtimed -- the new begin-downtime command will override whatever
  previous downtime attributes there were on downtimes instance.
  Note that orchestrator automatically assumes downtime to be expired after MaintenanceExpireMinutes (hard coded value).
@@ -801,17 +801,17 @@ func init() {
  CommandHelp["register-candidate"] = `
  Indicate that a specific instance is a preferred candidate for master promotion. Upon a dead master
  recovery, orchestrator will do its best to promote instances that are marked as candidates. However
- orchestrator cannot guarantee this will always work. Issues like version compatabilities, binlog format
+ orchestrator cannot guarantee this will always work. Issues like version compatibilities, binlog format
  etc. are limiting factors.
  You will want to mark an instance as a candidate when: it is replicating directly from the master, has
  binary logs and log_slave_updates is enabled, uses same binlog_format as its siblings, compatible version
  as its siblings. If you're using DataCenterPattern & PhysicalEnvironmentPattern (see configuration),
  you would further wish to make sure you have a candidate in each data center.
  Orchestrator first promotes the best-possible replica, and only then replaces it with your candidate,
- and only if both in same datcenter and physical enviroment.
+ and only if both in same datcenter and physical environment.
  An instance needs to continuously be marked as candidate, so as to make sure orchestrator is not wasting
  time with stale instances. Orchestrator periodically clears candidate-registration for instances that have
- not been registeres for over CandidateInstanceExpireMinutes (see config).
+ not been registers for over CandidateInstanceExpireMinutes (see config).
  Example:

  orchestrator -c register-candidate -i candidate.instance.com