alerts.go
package alertmanager
import (
"bytes"
"context"
"database/sql"
"fmt"
"math"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/dustin/go-humanize"
"github.com/samber/lo"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/curio/api"
"github.com/filecoin-project/curio/build"
"github.com/filecoin-project/curio/deps/config"
"github.com/filecoin-project/curio/harmony/harmonydb"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
cliutil "github.com/filecoin-project/lotus/cli/util"
)
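// AlertNow holds a database handle and the local machine name. It is used to raise an
// alert immediately by writing a row to the alerts table, which NowCheck later picks up.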
type AlertNow struct {
db *harmonydb.DB
name string
}
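// NewAlertNow creates an AlertNow for the given database handle and machine name.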
func NewAlertNow(db *harmonydb.DB, name string) *AlertNow {
return &AlertNow{
db: db,
name: name,
}
}
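// AddAlert inserts a single row into the alerts table so the next NowCheck run reports it.
// Errors are logged rather than returned.
//
// A minimal usage sketch (the db handle and the machine name "machine-01" are placeholders,
// not values defined in this file):
//
//	now := NewAlertNow(db, "machine-01")
//	now.AddAlert("manual test alert")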
func (n *AlertNow) AddAlert(msg string) {
_, err := n.db.Exec(context.Background(), "INSERT INTO alerts (machine_name, message) VALUES ($1, $2)", n.name, msg)
if err != nil {
log.Errorf("Failed to add alert: %s", err)
}
}
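// NowCheck gathers all rows from the alerts table, joins them into this check's alert
// string, and deletes the processed rows so each alert is reported only once.
// The alert map key is "NowCheck".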
func NowCheck(al *alerts) {
Name := "NowCheck"
al.alertMap[Name] = &alertOut{}
type NowType struct {
ID int `db:"id"`
Name string `db:"machine_name"`
Message string `db:"message"`
}
var nowAlerts []NowType
err := al.db.Select(al.ctx, &nowAlerts, `
SELECT id, machine_name, message
FROM alerts`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting now alerts: %w", err)
return
}
defer func() {
if err == nil {
ids := lo.Map(nowAlerts, func(n NowType, _ int) int {
return n.ID
})
_, err = al.db.Exec(al.ctx, "DELETE FROM alerts where id = ANY($1)", ids)
if err != nil {
log.Errorf("Failed to delete alerts: %s", err)
}
}
}()
if len(nowAlerts) > 0 {
al.alertMap[Name].alertString = strings.Join(lo.Map(nowAlerts, func(n NowType, _ int) string {
return fmt.Sprintf("Machine %s: %s", n.Name, n.Message)
}), " ")
}
}
// balanceCheck retrieves the machine details from the database and performs balance checks on unique addresses.
// It populates the alert map with any errors encountered during the process and with any alerts related to low wallet balance and missing wallets.
// The alert map key is "Balance Check".
// It queries the database for the configuration of each layer and decodes it using the toml.Decode function.
// It then iterates over the addresses in the configuration and curates a list of unique addresses.
// If an address is not found in the chain node, it adds an alert to the alert map.
// If the balance of an address is below MinimumWalletBalance, it adds an alert to the alert map.
// If there are any errors encountered during the process, the err field of the alert map is populated.
func balanceCheck(al *alerts) {
Name := "Balance Check"
al.alertMap[Name] = &alertOut{}
var ret string
uniqueAddrs, _, err := al.getAddresses()
if err != nil {
al.alertMap[Name].err = err
return
}
for _, addr := range uniqueAddrs {
keyAddr, err := al.api.StateAccountKey(al.ctx, addr, types.EmptyTSK)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting account key: %w", err)
return
}
has, err := al.api.WalletHas(al.ctx, keyAddr)
if err != nil {
al.alertMap[Name].err = err
return
}
if !has {
ret += fmt.Sprintf("Wallet %s was not found in chain node. ", keyAddr)
}
balance, err := al.api.WalletBalance(al.ctx, addr)
if err != nil {
al.alertMap[Name].err = err
return
}
if abi.TokenAmount(al.cfg.MinimumWalletBalance).GreaterThanEqual(balance) {
ret += fmt.Sprintf("Balance for wallet %s (%s) is below the configured MinimumWalletBalance. ", addr, keyAddr)
}
}
if ret != "" {
al.alertMap[Name].alertString = ret
}
}
// taskFailureCheck retrieves the task failure counts from the database for a specific time period.
// It then checks for specific sealing tasks and tasks with more than 5 failures to generate alerts.
func taskFailureCheck(al *alerts) {
Name := "TaskFailures"
al.alertMap[Name] = &alertOut{}
type taskFailure struct {
Machine string `db:"completed_by_host_and_port"`
Name string `db:"name"`
Failures int `db:"failed_count"`
}
var taskFailures []taskFailure
err := al.db.Select(al.ctx, &taskFailures, `
SELECT completed_by_host_and_port, name, COUNT(*) AS failed_count
FROM harmony_task_history
WHERE result = FALSE
AND work_end >= NOW() - $1::interval
GROUP BY completed_by_host_and_port, name
ORDER BY completed_by_host_and_port, name;`, fmt.Sprintf("%f Minutes", AlertMangerInterval.Minutes()))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting failed task count: %w", err)
return
}
mmap := make(map[string]int)
tmap := make(map[string]int)
if len(taskFailures) > 0 {
for _, tf := range taskFailures {
_, ok := tmap[tf.Name]
if !ok {
tmap[tf.Name] = tf.Failures
} else {
tmap[tf.Name] += tf.Failures
}
_, ok = mmap[tf.Machine]
if !ok {
mmap[tf.Machine] = tf.Failures
} else {
mmap[tf.Machine] += tf.Failures
}
}
}
sealingTasks := []string{"SDR", "TreeD", "TreeRC", "PreCommitSubmit", "PoRep", "Finalize", "MoveStorage", "CommitSubmit", "WdPost", "ParkPiece"}
contains := func(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// Alert on any sealing pipeline failure. Other tasks need more than 5 failures to trigger an alert
for name, count := range tmap {
if contains(sealingTasks, name) {
al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count)
continue
}
if count > 5 {
al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count)
}
}
// Alert if a machine has more than 5 task failures
for name, count := range mmap {
if count > 5 {
al.alertMap[Name].alertString += fmt.Sprintf("Machine: %s, Failures: %d. ", name, count)
}
}
}
// permanentStorageCheck retrieves the storage details from the database and checks if there is sufficient space for sealing sectors.
// It queries the database for the available storage for all storage paths that can store data.
// It queries the database for sectors being sealed that have not been finalized yet.
// For each sector, it calculates the required space for sealing based on the sector size.
// It checks if there is enough available storage for each sector and updates the sectorMap accordingly.
// If any sectors are unaccounted for, it calculates the total missing space and adds an alert to the alert map.
func permanentStorageCheck(al *alerts) {
Name := "PermanentStorageSpace"
al.alertMap[Name] = &alertOut{}
// Get all storage paths that can store data (permanent storage)
type storage struct {
ID string `db:"storage_id"`
Available int64 `db:"available"`
}
var storages []storage
err := al.db.Select(al.ctx, &storages, `
SELECT storage_id, available
FROM storage_path
WHERE can_store = TRUE;`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting storage details: %w", err)
return
}
type sector struct {
Miner abi.ActorID `db:"sp_id"`
Number abi.SectorNumber `db:"sector_number"`
Proof abi.RegisteredSealProof `db:"reg_seal_proof"`
}
var sectors []sector
err = al.db.Select(al.ctx, &sectors, `
SELECT sp_id, sector_number, reg_seal_proof
FROM sectors_sdr_pipeline
WHERE after_move_storage = FALSE;`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting sectors being sealed: %w", err)
return
}
type sm struct {
s sector
size int64
}
sectorMap := make(map[sm]bool)
for _, sec := range sectors {
space := int64(0)
sec := sec
sectorSize, err := sec.Proof.SectorSize()
if err != nil {
space = int64(64<<30)*2 + int64(200<<20) // Assume 64 GiB sector
} else {
space = int64(sectorSize)*2 + int64(200<<20) // sealed + unsealed + cache
}
key := sm{s: sec, size: space}
sectorMap[key] = false
for i := range storages {
if storages[i].Available >= space {
storages[i].Available -= space
sectorMap[key] = true
break
}
}
}
missingSpace := big.NewInt(0)
for sec, accounted := range sectorMap {
if !accounted {
missingSpace = big.Add(missingSpace, big.NewInt(sec.size))
}
}
if missingSpace.GreaterThan(big.NewInt(0)) {
al.alertMap[Name].alertString = fmt.Sprintf("Insufficient storage space for sealing sectors. Additional %s required.", humanize.Bytes(missingSpace.Uint64()))
}
}
// getAddresses retrieves machine details from the database and derives the set of unique configuration layers in use.
// For each layer it decodes the stored TOML configuration and collects the PreCommitControl, CommitControl and
// TerminateControl addresses along with the configured MinerAddresses.
// Worker and control addresses for each miner are then added from chain state.
// It returns the unique wallet addresses and the unique miner addresses.
func (al *alerts) getAddresses() ([]address.Address, []address.Address, error) {
// MachineDetails represents the structure of data received from the SQL query.
type machineDetail struct {
ID int
HostAndPort string
Layers string
}
var machineDetails []machineDetail
// Get all layers in use
err := al.db.Select(al.ctx, &machineDetails, `
SELECT m.id, m.host_and_port, d.layers
FROM harmony_machines m
LEFT JOIN harmony_machine_details d ON m.id = d.machine_id;`)
if err != nil {
return nil, nil, xerrors.Errorf("getting config layers for all machines: %w", err)
}
// Collect the unique set of configuration layers in use across all machines
layerMap := make(map[string]bool)
var uniqueLayers []string
for _, machine := range machineDetails {
machine := machine
// Split the Layers field into individual layers
layers := strings.Split(machine.Layers, ",")
for _, layer := range layers {
layer = strings.TrimSpace(layer)
if _, exists := layerMap[layer]; !exists && layer != "" {
layerMap[layer] = true
uniqueLayers = append(uniqueLayers, layer)
}
}
}
addrMap := make(map[string]struct{})
minerMap := make(map[string]struct{})
// Get all unique addresses
for _, layer := range uniqueLayers {
text := ""
cfg := config.DefaultCurioConfig()
err := al.db.QueryRow(al.ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
if err != nil {
if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
return nil, nil, xerrors.Errorf("missing layer '%s' ", layer)
}
return nil, nil, xerrors.Errorf("could not read layer '%s': %w", layer, err)
}
err = config.FixTOML(text, cfg)
if err != nil {
return nil, nil, err
}
_, err = toml.Decode(text, cfg)
if err != nil {
return nil, nil, xerrors.Errorf("could not read layer, bad toml %s: %w", layer, err)
}
for i := range cfg.Addresses {
prec := cfg.Addresses[i].PreCommitControl
com := cfg.Addresses[i].CommitControl
term := cfg.Addresses[i].TerminateControl
miners := cfg.Addresses[i].MinerAddresses
for j := range prec {
if prec[j] != "" {
addrMap[prec[j]] = struct{}{}
}
}
for j := range com {
if com[j] != "" {
addrMap[com[j]] = struct{}{}
}
}
for j := range term {
if term[j] != "" {
addrMap[term[j]] = struct{}{}
}
}
for j := range miners {
if miners[j] != "" {
minerMap[miners[j]] = struct{}{}
}
}
}
}
var wallets, minerAddrs []address.Address
// Get control and wallet addresses from chain
for m := range minerMap {
maddr, err := address.NewFromString(m)
if err != nil {
return nil, nil, err
}
info, err := al.api.StateMinerInfo(al.ctx, maddr, types.EmptyTSK)
if err != nil {
return nil, nil, err
}
minerAddrs = append(minerAddrs, maddr)
addrMap[info.Worker.String()] = struct{}{}
for _, w := range info.ControlAddresses {
if _, ok := addrMap[w.String()]; !ok {
addrMap[w.String()] = struct{}{}
}
}
}
for w := range addrMap {
waddr, err := address.NewFromString(w)
if err != nil {
return nil, nil, err
}
wallets = append(wallets, waddr)
}
return wallets, minerAddrs, nil
}
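// wdPostCheck verifies that a WindowPost was submitted for every deadline and partition that
// became due during the last AlertMangerInterval. It walks the chain back from the current head
// to build the expected deadline/partition set per miner, matches it against the wdpost_proofs
// table, and alerts on unknown jobs, skipped sectors and missing submissions.
// The alert map key is "WindowPost".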
func wdPostCheck(al *alerts) {
Name := "WindowPost"
al.alertMap[Name] = &alertOut{}
head, err := al.api.ChainHead(al.ctx)
if err != nil {
al.alertMap[Name].err = err
return
}
// Calculate from epoch for last AlertMangerInterval
from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1
if from < 0 {
from = 0
}
_, miners, err := al.getAddresses()
if err != nil {
al.alertMap[Name].err = err
return
}
h := head
// Map[Miner Address]Map[DeadlineIdx][]Partitions
msgCheck := make(map[address.Address]map[uint64][]bool)
// Walk back all tipset from current height to from height and find all deadlines and their partitions
for h.Height() >= from {
for _, maddr := range miners {
deadlineInfo, err := al.api.StateMinerProvingDeadline(al.ctx, maddr, h.Key())
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner deadline: %w", err)
return
}
partitions, err := al.api.StateMinerPartitions(al.ctx, maddr, deadlineInfo.Index, h.Key())
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner partitions: %w", err)
return
}
if _, ok := msgCheck[maddr]; !ok {
msgCheck[maddr] = make(map[uint64][]bool)
}
if _, ok := msgCheck[maddr][deadlineInfo.Index]; !ok {
ps := make([]bool, len(partitions))
msgCheck[maddr][deadlineInfo.Index] = ps
}
}
h, err = al.api.ChainGetTipSet(al.ctx, h.Parents())
if err != nil {
al.alertMap[Name].err = err
return
}
}
// Get all wdPost tasks from DB between from and head
var wdDetails []struct {
Miner int64 `db:"sp_id"`
Deadline int64 `db:"deadline"`
Partition int64 `db:"partition"`
Epoch abi.ChainEpoch `db:"submit_at_epoch"`
Proof []byte `db:"proof_params"`
}
err = al.db.Select(al.ctx, &wdDetails, `
SELECT sp_id, submit_at_epoch, proof_params, partition, deadline
FROM wdpost_proofs
WHERE submit_at_epoch > $1;`, from)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting windowPost details from database: %w", err)
return
}
if len(wdDetails) < 1 {
return
}
// For all tasks between from and head, match how many we posted successfully
for _, detail := range wdDetails {
addr, err := address.NewIDAddress(uint64(detail.Miner))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner address: %w", err)
return
}
if _, ok := msgCheck[addr][uint64(detail.Deadline)]; !ok {
al.alertMap[Name].alertString += fmt.Sprintf("unknown WindowPost jobs for miner %s deadline %d partition %d found. ", addr.String(), detail.Deadline, detail.Partition)
continue
}
// If entry for a partition is found we should mark it as processed
msgCheck[addr][uint64(detail.Deadline)][detail.Partition] = true
// Check if we skipped any sectors
var postOut miner.SubmitWindowedPoStParams
err = postOut.UnmarshalCBOR(bytes.NewReader(detail.Proof))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("unmarshaling windowPost proof params: %w", err)
return
}
for i := range postOut.Partitions {
c, err := postOut.Partitions[i].Skipped.Count()
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting skipped sector count: %w", err)
return
}
if c > 0 {
al.alertMap[Name].alertString += fmt.Sprintf("Skipped %d sectors in deadline %d partition %d. ", c, postOut.Deadline, postOut.Partitions[i].Index)
}
}
}
// Check if we missed any deadline/partitions
for maddr, deadlines := range msgCheck {
for deadlineIndex, ps := range deadlines {
for idx := range ps {
if !ps[idx] {
al.alertMap[Name].alertString += fmt.Sprintf("No WindowPost jobs found for miner %s deadline %d paritions %d. ", maddr.String(), deadlineIndex, idx)
}
}
}
}
}
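// wnPostCheck verifies WinningPost activity over the last AlertMangerInterval. It alerts when no
// mining tasks were created at all, when the number of mining tasks deviates from the expected
// one-task-per-epoch-per-miner count by more than a small slack, and when a block we won was not
// included in the chain. The alert map key is "WinningPost".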
func wnPostCheck(al *alerts) {
Name := "WinningPost"
al.alertMap[Name] = &alertOut{}
head, err := al.api.ChainHead(al.ctx)
if err != nil {
al.alertMap[Name].err = err
return
}
// Calculate from epoch for last AlertMangerInterval
from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1
if from < 0 {
from = 0
}
var wnDetails []struct {
Miner int64 `db:"sp_id"`
Block string `db:"mined_cid"`
Epoch abi.ChainEpoch `db:"epoch"`
Included bool `db:"included"`
}
// Get all DB entries where we won the election in last AlertMangerInterval
err = al.db.Select(al.ctx, &wnDetails, `
SELECT sp_id, mined_cid, epoch, included
FROM mining_tasks
WHERE epoch > $1 AND won = TRUE
ORDER BY epoch;`, from)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting winningPost details from database: %w", err)
return
}
// Get count of all mining tasks in DB in last AlertMangerInterval
var count int64
err = al.db.QueryRow(al.ctx, `
SELECT COUNT(*)
FROM mining_tasks
WHERE epoch > $1;`, from).Scan(&count)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting winningPost count details from database: %w", err)
return
}
// If we have no task created for any miner ID, this is a serious issue
if count == 0 {
al.alertMap[Name].alertString += "No winningPost tasks found in the last " + humanize.Time(time.Now().Add(-AlertMangerInterval))
return
}
// Calculate how many tasks should be in DB for AlertMangerInterval (epochs) as each epoch should have 1 task
expected := int64(math.Ceil(AlertMangerInterval.Seconds() / float64(build.BlockDelaySecs)))
if (head.Height() - abi.ChainEpoch(expected)) < 0 {
expected = int64(head.Height())
}
_, miners, err := al.getAddresses()
if err != nil {
al.alertMap[Name].err = err
return
}
const slack = 4
slackTasks := slack * int64(len(miners))
expected = expected * int64(len(miners)) // Multiply epochs by number of miner IDs
if count < expected-slackTasks || count > expected+slackTasks {
al.alertMap[Name].alertString += fmt.Sprintf("Expected %d WinningPost task and found %d in DB. ", expected, count)
}
if len(wnDetails) < 1 {
return
}
// Report any block which we submitted but was not included in the chain
for _, wn := range wnDetails {
if !wn.Included {
al.alertMap[Name].alertString += fmt.Sprintf("Epoch %d: does not contain our block %s. ", wn.Epoch, wn.Block)
}
}
}
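// chainSyncCheck reads every configuration layer from the database, extracts the configured chain
// API endpoints, and checks the sync status of each unique endpoint. Endpoints between roughly 1.5
// and 5 epochs behind are logged as slow; endpoints more than 5 epochs behind produce an alert.
// The alert map key is "ChainSync".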
func chainSyncCheck(al *alerts) {
Name := "ChainSync"
al.alertMap[Name] = &alertOut{}
type minimalApiInfo struct {
Apis struct {
ChainApiInfo []string
}
}
rpcInfos := map[string]minimalApiInfo{} // config name -> api info
confNameToAddr := map[string]string{} // config name -> api address
// Get all config from DB
rows, err := al.db.Query(al.ctx, `SELECT title, config FROM harmony_config`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting db configs: %w", err)
return
}
configs := make(map[string]string)
for rows.Next() {
var title, cfg string
if err := rows.Scan(&title, &cfg); err != nil {
al.alertMap[Name].err = xerrors.Errorf("scanning db configs: %w", err)
return
}
configs[title] = cfg
}
// Parse all configs minimal to get API
for name, tomlStr := range configs {
var info minimalApiInfo
if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil {
al.alertMap[Name].err = xerrors.Errorf("unmarshaling %s config: %w", name, err)
continue
}
if len(info.Apis.ChainApiInfo) == 0 {
continue
}
rpcInfos[name] = info
for _, addr := range info.Apis.ChainApiInfo {
ai := cliutil.ParseApiInfo(addr)
confNameToAddr[name] = ai.Addr
}
}
dedup := map[string]bool{} // for dedup by address
// For each unique API (chain), check if in sync
for _, info := range rpcInfos {
ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0])
if dedup[ai.Addr] {
continue
}
dedup[ai.Addr] = true
addr, err := ai.DialArgs("v1")
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("could not get DialArgs: %w", err)
continue
}
var res api.ChainStruct
closer, err := jsonrpc.NewMergeClient(al.ctx, addr, "Filecoin",
api.GetInternalStructs(&res), ai.AuthHeader(), []jsonrpc.Option{jsonrpc.WithErrors(jsonrpc.NewErrors())}...)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("error creating jsonrpc client: %v", err)
continue
}
defer closer()
full := &res
head, err := full.ChainHead(al.ctx)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("ChainHead: %w", err)
continue
}
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
continue
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
log.Debugf("Chain Sync status: %s: slow (%s behind)", addr, time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))
default:
al.alertMap[Name].alertString += fmt.Sprintf("behind (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))
}
}
}