@@ -6,40 +6,7 @@ Usage is best described by example.
Simple Usage
............

-.. code:: python
-
-    import asyncio
-    from aiohttp import ClientSession
-    from arq import Actor, BaseWorker, concurrent
-
-    class Downloader(Actor):
-        def __init__(self, **kwargs):
-            super().__init__(**kwargs)
-            self.session = ClientSession(loop=self.loop)
-
-        @concurrent
-        async def download_content(self, url):
-            async with self.session.get(url) as response:
-                content = await response.read()
-                print('{}: {:.80}...'.format(url, content.decode()))
-            return len(content)
-
-        async def close(self):
-            await super().close()
-            self.session.close()
-
-    class Worker(BaseWorker):
-        shadows = [Downloader]
-
-    async def download_lots():
-        d = Downloader()
-        for url in ('https://facebook.com', 'https://microsoft.com', 'https://github.com'):
-            await d.download_content(url)
-        await d.close()
-
-    if __name__ == '__main__':
-        loop = asyncio.get_event_loop()
-        loop.run_until_complete(download_lots())
+.. literalinclude:: demo.py

(This script is complete; it should run "as is", both to enqueue jobs and run them)
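
Concretely, that means two invocations of the same file; a sketch, assuming the script is saved as ``demo.py`` and the worker is started via the *arq* CLI as in the health check example below::

    python demo.py    # "default" mode: enqueues the download jobs
    arq demo.py       # runs the worker, which performs the jobs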
@@ -55,6 +22,45 @@ For details on the *arq* CLI::

    arq --help

+Startup & Shutdown coroutines
+.............................
+
+The ``startup`` and ``shutdown`` coroutines are provided as a convenient way to run logic as actors start and finish.
+However, it's important to note that these methods **are not called by default when actors are initialised or closed**.
+They are, however, called when the actor is started and closed on the worker, eg. in "shadow" mode, see above.
+In other words: if you need these coroutines to be called when using an actor in your code, that's your responsibility.
+
+For example, in the above code there's no need for ``self.session`` when using the actor in "default" mode, eg. when
+called with ``python demo.py``, so neither ``startup`` nor ``shutdown`` is called.
+
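A minimal sketch of taking on that responsibility yourself, assuming ``demo.py`` defines the ``Downloader`` actor with ``startup`` and ``shutdown`` coroutines:

.. code:: python

    import asyncio
    from demo import Downloader

    async def use_downloader():
        d = Downloader()
        # startup is not run for us outside the worker, so call it explicitly
        await d.startup()
        try:
            ...  # use the actor, eg. call methods which need self.session
        finally:
            await d.shutdown()
            await d.close()

    if __name__ == '__main__':
        asyncio.get_event_loop().run_until_complete(use_downloader())
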
+Health checks
+.............
+
+*arq* will automatically record some info about its current state in redis every ``health_check_interval`` seconds,
+see :attr:`arq.worker.BaseWorker.health_check_interval`. That key/value will expire after ``health_check_interval + 1``
+seconds, so if the key exists you can be sure *arq* is alive and kicking (technically, you can only be sure it
+was alive and kicking within the last ``health_check_interval`` seconds).
+
+You can run a health check with the CLI (assuming you're using the above example)::
+
+    arq --check demo.py
+
+The command will output the value of the health check if found,
+then exit ``0`` if the key was found and ``1`` if it was not.
+
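Because the result is also expressed in the exit code, the check slots into standard liveness tooling unchanged. For instance, a Dockerfile might use it like this (an illustrative example, not something from the *arq* docs)::

    HEALTHCHECK --interval=60s --timeout=5s CMD arq --check demo.py
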
+A health check value takes the following form::
+
+    Feb-20_11:02:40 j_complete=0 j_failed=0 j_timedout=0 j_ongoing=0 q_high=0 q_dft=0 q_low=0
+
+Where the values have the following meaning:
+
+* ``j_complete`` the number of jobs completed
+* ``j_failed`` the number of jobs which have failed, eg. raised an exception
+* ``j_timedout`` the number of jobs which have timed out, eg. exceeded :attr:`arq.worker.BaseWorker.timeout_seconds`
+  and been cancelled
+* ``j_ongoing`` the number of jobs currently being performed
+* ``q_*`` the number of pending jobs in each queue
+
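If you'd rather consume those counters programmatically than read them off the command line, the format is simple to split; a hypothetical helper, not part of *arq*:

.. code:: python

    def parse_health(line):
        # split 'Feb-20_11:02:40 j_complete=0 ...' into a dict of counters
        timestamp, *pairs = line.split()
        info = {'time': timestamp}
        for pair in pairs:
            key, _, value = pair.partition('=')
            info[key] = int(value)
        return info

    line = 'Feb-20_11:02:40 j_complete=0 j_failed=0 j_timedout=0 j_ongoing=0 q_high=0 q_dft=0 q_low=0'
    assert parse_health(line)['q_dft'] == 0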

Multiple Queues
...............
@@ -123,6 +129,9 @@ document and record.

    # jobs may not take more than 10 seconds, default 60
    timeout_seconds = 10

+    # number of seconds between health checks, default 60
+    health_check_interval = 30
+
    def logging_config(self, verbose):
        conf = super().logging_config(verbose)
        # alter logging setup to set arq.jobs level to WARNING