diff --git a/node/pegnet/pegnet.go b/node/pegnet/pegnet.go index 0ee53b7..0caf18f 100644 --- a/node/pegnet/pegnet.go +++ b/node/pegnet/pegnet.go @@ -35,7 +35,7 @@ func (p *Pegnet) Init() error { // TODO: Come up with actual migrations. // until then, we can just bump this version number // and make the database reset when we need to. - path += ".v3" + path += ".v4" // Ensure the path exists dir := filepath.Dir(path) @@ -76,6 +76,9 @@ func (p *Pegnet) createTables() error { createTableWinners, createTableTransactions, createTableTransactionBatchHolding, + createTableTxHistoryBatch, + createTableTxHistoryTx, + createTableTxHistoryLookup, } { if _, err := p.DB.Exec(sql); err != nil { return err diff --git a/node/pegnet/txhistory.go b/node/pegnet/txhistory.go new file mode 100644 index 0000000..1de28a8 --- /dev/null +++ b/node/pegnet/txhistory.go @@ -0,0 +1,325 @@ +package pegnet + +import ( + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/Factom-Asset-Tokens/factom" + "github.com/pegnet/pegnet/modules/grader" + "github.com/pegnet/pegnetd/fat/fat2" +) + +// HistoryTransaction is a flattened entry of the history table structure. 
+// It contains several actions: transfers, conversions, coinbases, and fct burns +type HistoryTransaction struct { + Hash *factom.Bytes32 `json:"hash"` + Height int64 `json:"height"` + Timestamp time.Time `json:"timestamp"` + Executed int32 `json:"executed"` + TxIndex int `json:"txindex"` + TxAction HistoryAction `json:"txaction"` + + FromAddress *factom.FAAddress `json:"fromaddress"` + FromAsset string `json:"fromasset"` + FromAmount int64 `json:"fromamount"` + ToAsset string `json:"toasset,omitempty"` + ToAmount int64 `json:"toamount,omitempty"` + Outputs []HistoryTransactionOutput `json:"outputs,omitempty"` +} + +// HistoryTransactionOutput is an entry of a transfer's outputs +type HistoryTransactionOutput struct { + Address factom.FAAddress `json:"address"` + Amount int64 `json:"amount"` +} + +// in the context of tables, `history_txbatch` is the table that holds the unique reference hash +// and `transaction` is the table that holds the actions associated with that unique reference hash +// `lookup` is an outside reference that indexes the addresses involved in the actions +// +// associations are: +// * history_txbatch : history_transaction is `1:n` +// * history_transaction : lookup is `1:n` +// * lookup : (transaction.outputs + transaction.inputs) is `1:n` (unique addresses only) +const createTableTxHistoryBatch = `CREATE TABLE IF NOT EXISTS "pn_history_txbatch" ( + "history_id" INTEGER PRIMARY KEY, + "entry_hash" BLOB NOT NULL, + "height" INTEGER NOT NULL, -- height the tx is in + "blockorder" INTEGER NOT NULL, + "timestamp" INTEGER NOT NULL, + "executed" INTEGER NOT NULL, -- -1 if failed, 0 if pending, height it was applied at otherwise + + UNIQUE("entry_hash", "height") +); +CREATE INDEX IF NOT EXISTS "idx_history_txbatch_entry_hash" ON "pn_history_txbatch"("entry_hash"); +CREATE INDEX IF NOT EXISTS "idx_history_txbatch_timestamp" ON "pn_history_txbatch"("timestamp"); +CREATE INDEX IF NOT EXISTS "idx_history_txbatch_height" ON 
"pn_history_txbatch"("height"); +` + +const createTableTxHistoryTx = `CREATE TABLE IF NOT EXISTS "pn_history_transaction" ( + "entry_hash" BLOB NOT NULL, + "tx_index" INTEGER NOT NULL, -- the batch index + "action_type" INTEGER NOT NULL, + "from_address" BLOB NOT NULL, + "from_asset" STRING NOT NULL, + "from_amount" INTEGER NOT NULL, + "to_asset" STRING NOT NULL, -- used for NOT transfers + "to_amount" INTEGER NOT NULL, -- used for NOT transfers + "outputs" BLOB NOT NULL, -- used for transfers only + + PRIMARY KEY("entry_hash", "tx_index"), + FOREIGN KEY("entry_hash") REFERENCES "pn_history_txbatch" +); +CREATE INDEX IF NOT EXISTS "idx_history_transaction_entry_hash" ON "pn_history_transaction"("entry_hash"); +` + +const createTableTxHistoryLookup = `CREATE TABLE IF NOT EXISTS "pn_history_lookup" ( + "entry_hash" INTEGER NOT NULL, + "tx_index" INTEGER NOT NULL, + "address" BLOB NOT NULL, + + PRIMARY KEY("entry_hash", "tx_index", "address"), + FOREIGN KEY("entry_hash", "tx_index") REFERENCES "pn_history_transaction" +); +CREATE INDEX IF NOT EXISTS "idx_history_lookup_address" ON "pn_history_lookup"("address"); +CREATE INDEX IF NOT EXISTS "idx_history_lookup_entry_index" ON "pn_history_lookup"("entry_hash", "tx_index");` + +// only add a lookup reference if one doesn't already exist +const insertLookupQuery = `INSERT INTO pn_history_lookup (entry_hash, tx_index, address) VALUES (?, ?, ?) 
ON CONFLICT DO NOTHING;` + +func (p *Pegnet) historySelectHelper(field string, data interface{}, options HistoryQueryOptions) ([]HistoryTransaction, int, error) { + countQuery, dataQuery, err := historyQueryBuilder(field, options) + if err != nil { // developer error + return nil, 0, err + } + + var count int + err = p.DB.QueryRow(countQuery, data).Scan(&count) + if err != nil { + return nil, 0, err + } + + if count == 0 { + return nil, 0, nil + } + + if options.Offset > count { + return nil, 0, fmt.Errorf("offset too big") + } + + rows, err := p.DB.Query(dataQuery, data) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + actions, err := turnRowsIntoHistoryTransactions(rows) + return actions, count, err +} + +// SelectTransactionHistoryActionsByHash returns the specified amount of transactions based on the hash. +// Hash can be an entry hash from the opr and transaction chains, or a transaction hash from an fblock. +func (p *Pegnet) SelectTransactionHistoryActionsByHash(hash *factom.Bytes32, options HistoryQueryOptions) ([]HistoryTransaction, int, error) { + return p.historySelectHelper("entry_hash", hash[:], options) +} + +// SelectTransactionHistoryActionsByAddress uses the lookup table to retrieve all transactions that have +// the specified address in either inputs or outputs +func (p *Pegnet) SelectTransactionHistoryActionsByAddress(addr *factom.FAAddress, options HistoryQueryOptions) ([]HistoryTransaction, int, error) { + return p.historySelectHelper("address", addr[:], options) +} + +// SelectTransactionHistoryActionsByHeight returns all transactions that were **entered** at the specified height. 
+func (p *Pegnet) SelectTransactionHistoryActionsByHeight(height uint32, options HistoryQueryOptions) ([]HistoryTransaction, int, error) { + return p.historySelectHelper("height", height, options) +} + +// SelectTransactionHistoryStatus returns the status of a transaction: +// `-1` for a failed transaction, `0` for a pending transactions, +// `height` for the block in which it was applied otherwise +func (p *Pegnet) SelectTransactionHistoryStatus(hash *factom.Bytes32) (uint32, uint32, error) { + var height, executed uint32 + err := p.DB.QueryRow("SELECT height, executed FROM pn_history_txbatch WHERE entry_hash = ?", hash[:]).Scan(&height, &executed) + if err != nil { + if err == sql.ErrNoRows { + return 0, 0, nil + } + return 0, 0, err + } + return height, executed, nil +} + +// SetTransactionHistoryExecuted updates a transaction's executed status +func (p *Pegnet) SetTransactionHistoryExecuted(tx *sql.Tx, txbatch *fat2.TransactionBatch, executed int64) error { + stmt, err := tx.Prepare(`UPDATE "pn_history_txbatch" SET executed = ? WHERE entry_hash = ?`) + if err != nil { + return err + } + _, err = stmt.Exec(executed, txbatch.Entry.Hash[:]) + if err != nil { + return err + } + return nil +} + +// SetTransactionHistoryConvertedAmount updates a conversion with the actual conversion value. +// This is done in the same SQL Transaction as updating its executed status +func (p *Pegnet) SetTransactionHistoryConvertedAmount(tx *sql.Tx, txbatch *fat2.TransactionBatch, index int, amount int64) error { + stmt, err := tx.Prepare(`UPDATE "pn_history_transaction" SET to_amount = ? WHERE entry_hash = ? 
AND tx_index = ?`) + if err != nil { + return err + } + _, err = stmt.Exec(amount, txbatch.Entry.Hash[:], index) + if err != nil { + return err + } + return nil +} + +// InsertTransactionHistoryTxBatch inserts a transaction from the transaction chain into the history system +func (p *Pegnet) InsertTransactionHistoryTxBatch(tx *sql.Tx, blockorder int, txbatch *fat2.TransactionBatch, height uint32) error { + stmt, err := tx.Prepare(`INSERT INTO "pn_history_txbatch" + (entry_hash, height, blockorder, timestamp, executed) VALUES + (?, ?, ?, ?, ?)`) + if err != nil { + return err + } + _, err = stmt.Exec(txbatch.Entry.Hash[:], height, blockorder, txbatch.Entry.Timestamp.Unix(), 0) + if err != nil { + return err + } + + txStatement, err := tx.Prepare(`INSERT INTO "pn_history_transaction" + (entry_hash, tx_index, action_type, from_address, from_asset, from_amount, to_asset, to_amount, outputs) VALUES + (?, ?, ?, ?, ?, ?, ?, ?, ?)`) + if err != nil { + return err + } + + lookup, err := tx.Prepare(insertLookupQuery) + if err != nil { + return err + } + + for index, action := range txbatch.Transactions { + var typ HistoryAction + if action.IsConversion() { + typ = Conversion + } else { + typ = Transfer + } + + if _, err = lookup.Exec(txbatch.Entry.Hash[:], index, action.Input.Address[:]); err != nil { + return err + } + + if action.IsConversion() { + _, err = txStatement.Exec(txbatch.Entry.Hash[:], index, typ, + action.Input.Address[:], action.Input.Type.String(), action.Input.Amount, // from + action.Conversion.String(), 0, "") // to + if err != nil { + return err + } + } else { + // json encode the outputs + outputs := make([]HistoryTransactionOutput, len(action.Transfers)) + for i, transfer := range action.Transfers { + outputs[i] = HistoryTransactionOutput{Address: transfer.Address, Amount: int64(transfer.Amount)} + if _, err = lookup.Exec(txbatch.Entry.Hash[:], index, transfer.Address[:]); err != nil { + return err + } + } + var outputData []byte + if outputData, err = 
json.Marshal(outputs); err != nil { + return err + } + + if _, err = txStatement.Exec(txbatch.Entry.Hash[:], index, typ, + action.Input.Address[:], action.Input.Type.String(), action.Input.Amount, + "", 0, outputData); err != nil { + return err + } + } + } + + return nil +} + +// InsertFCTBurn inserts a payout for an FCT burn into the system. +// Note that from_asset and to_asset are hardcoded +func (p *Pegnet) InsertFCTBurn(tx *sql.Tx, fBlockHash *factom.Bytes32, burn *factom.FactoidTransaction, height uint32) error { + stmt, err := tx.Prepare(`INSERT INTO "pn_history_txbatch" + (entry_hash, height, blockorder, timestamp, executed) VALUES + (?, ?, ?, ?, ?)`) + if err != nil { + return err + } + + lookup, err := tx.Prepare(insertLookupQuery) + if err != nil { + return err + } + + _, err = stmt.Exec(burn.TransactionID[:], height, -1, burn.FactoidTransactionHeader.Timestamp.Unix(), height) + if err != nil { + return err + } + + burnStatement, err := tx.Prepare(`INSERT INTO "pn_history_transaction" + (entry_hash, tx_index, action_type, from_address, from_asset, from_amount, to_asset, to_amount, outputs) VALUES + (?, ?, ?, ?, ?, ?, ?, ?, ?)`) + if err != nil { + return err + } + + if _, err = burnStatement.Exec(burn.TransactionID[:], 0, FCTBurn, burn.FCTInputs[0].Address[:], "FCT", burn.FCTInputs[0].Amount, "pFCT", burn.FCTInputs[0].Amount, ""); err != nil { + return err + } + + if _, err = lookup.Exec(burn.TransactionID[:], 0, burn.FCTInputs[0].Address[:]); err != nil { + return err + } + + return nil +} + +// InsertCoinbase inserts the payouts from mining into the history system. 
+// There is one transaction per winning OPR, with the entry hash pointing to that specific opr +func (p *Pegnet) InsertCoinbase(tx *sql.Tx, winner *grader.GradingOPR, addr []byte, timestamp time.Time) error { + stmt, err := tx.Prepare(`INSERT INTO "pn_history_txbatch" + (entry_hash, height, blockorder, timestamp, executed) VALUES + (?, ?, ?, ?, ?)`) + if err != nil { + return err + } + + lookup, err := tx.Prepare(insertLookupQuery) + if err != nil { + return err + } + + _, err = stmt.Exec(winner.EntryHash, winner.OPR.GetHeight(), 0, timestamp.Unix(), winner.OPR.GetHeight()) + if err != nil { + return err + } + + coinbaseStatement, err := tx.Prepare(`INSERT INTO "pn_history_transaction" + (entry_hash, tx_index, action_type, from_address, from_asset, from_amount, to_asset, to_amount, outputs) VALUES + (?, ?, ?, ?, ?, ?, ?, ?, ?)`) + if err != nil { + return err + } + + _, err = coinbaseStatement.Exec(winner.EntryHash, 0, Coinbase, addr, "", 0, "PEG", winner.Payout(), "") + if err != nil { + return err + } + + if _, err = lookup.Exec(winner.EntryHash, 0, addr); err != nil { + return err + } + + return nil +} diff --git a/node/pegnet/txhistory_util.go b/node/pegnet/txhistory_util.go new file mode 100644 index 0000000..8bc5b3a --- /dev/null +++ b/node/pegnet/txhistory_util.go @@ -0,0 +1,143 @@ +package pegnet + +import ( + "database/sql" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/Factom-Asset-Tokens/factom" +) + +// HistoryAction are the different types of actions inside the history +type HistoryAction int32 + +const ( + // Invalid is used for debugging + Invalid HistoryAction = iota + // Transfer is a 1:n transfer of pegged assets from one address to another + Transfer + // Conversion is a conversion of pegged assets + Conversion + // Coinbase is a miner reward payout + Coinbase + // FCTBurn is a pFCT payout for burning FCT on factom + FCTBurn +) + +// QueryLimit is the amount of transactions to return in one query +const QueryLimit = 
50 + +func historyActionPicker(tx, conv, coin, burn bool) []string { + if tx == conv && conv == coin && coin == burn { + return nil + } + + var actions []string + if tx { + actions = append(actions, strconv.Itoa(int(Transfer))) + } + if conv { + actions = append(actions, strconv.Itoa(int(Conversion))) + } + if coin { + actions = append(actions, strconv.Itoa(int(Coinbase))) + } + if burn { + actions = append(actions, strconv.Itoa(int(FCTBurn))) + } + + return actions +} + +// HistoryQueryOptions contains the data of what to query for the query builder +type HistoryQueryOptions struct { + Offset int + Desc bool + Transfer bool + Conversion bool + Coinbase bool + FCTBurn bool +} + +const historyQueryFields = "batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed," + + "tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs," + + "tx.to_asset, tx.to_amount" + +// historyQueryBuilder generates a count and data query for the given options +func historyQueryBuilder(field string, options HistoryQueryOptions) (string, string, error) { + order := "ORDER BY batch.history_id ASC" + if options.Desc { + order = "ORDER BY batch.history_id DESC" + } + + limit := fmt.Sprintf("LIMIT %d OFFSET %d", QueryLimit, options.Offset) + + types := historyActionPicker(options.Transfer, options.Conversion, options.Coinbase, options.FCTBurn) + + var from, where, fromCount, whereCount string + switch field { + case "address": + if types != nil { + fromCount = "pn_history_lookup lookup, pn_history_transaction tx" + whereCount = "lookup.address = ? AND lookup.entry_hash = tx.entry_hash AND lookup.tx_index = tx.tx_index" + } else { + fromCount = "pn_history_lookup" + whereCount = "address = ?" + } + from = "pn_history_lookup lookup, pn_history_txbatch batch, pn_history_transaction tx" + where = "lookup.address = ? 
AND lookup.entry_hash = tx.entry_hash AND lookup.tx_index = tx.tx_index AND batch.entry_hash = tx.entry_hash" + case "entry_hash": + fallthrough + case "height": + from = "pn_history_txbatch batch, pn_history_transaction tx" + where = fmt.Sprintf("batch.entry_hash = tx.entry_hash AND batch.%s = ?", field) + fromCount = from + whereCount = where + default: + return "", "", fmt.Errorf("developer error - unimplemented history query builder field") + } + + if types != nil { + where = fmt.Sprintf("(%s) AND tx.action_type IN(%s)", where, strings.Join(types, ",")) + whereCount = fmt.Sprintf("(%s) AND tx.action_type IN(%s)", whereCount, strings.Join(types, ",")) + } + + return fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE %s", fromCount, whereCount), + fmt.Sprintf("SELECT %s FROM %s WHERE %s %s %s", historyQueryFields, from, where, order, limit), nil +} + +// helper function for sql results of a query builder's data query +func turnRowsIntoHistoryTransactions(rows *sql.Rows) ([]HistoryTransaction, error) { + var actions []HistoryTransaction + for rows.Next() { + var tx HistoryTransaction + var ts, id int64 + var hash, from, outputs []byte + err := rows.Scan( + &id, &hash, &tx.Height, &ts, &tx.Executed, // history + &tx.TxIndex, &tx.TxAction, &from, &tx.FromAsset, &tx.FromAmount, // action + &outputs, &tx.ToAsset, &tx.ToAmount) // data + if err != nil { + return nil, err + } + tx.Hash = factom.NewBytes32(hash) + tx.Timestamp = time.Unix(ts, 0) + var addr factom.FAAddress + addr = factom.FAAddress(*factom.NewBytes32(from)) + tx.FromAddress = &addr + + if tx.TxAction == Transfer { + var output []HistoryTransactionOutput + if err = json.Unmarshal(outputs, &output); err != nil { // should never fail unless database data is corrupt + return nil, fmt.Errorf("database corruption %d %v", id, err) + } + tx.Outputs = output + } + + actions = append(actions, tx) + } + return actions, nil +} diff --git a/node/pegnet/txhistory_util_test.go b/node/pegnet/txhistory_util_test.go new file mode 
100644 index 0000000..9adec13 --- /dev/null +++ b/node/pegnet/txhistory_util_test.go @@ -0,0 +1,83 @@ +package pegnet + +import ( + "reflect" + "testing" +) + +func TestHistoryQueryBuilder(t *testing.T) { + type args struct { + field string + options HistoryQueryOptions + } + tests := []struct { + name string + args args + want string + want1 string + wantErr bool + }{ // only a single typed arg suffices since result of types is tested separately below + {"empty", args{"", HistoryQueryOptions{}}, "", "", true}, + {"wrong field", args{"bad", HistoryQueryOptions{}}, "", "", true}, + {"entry hash, default args", args{"entry_hash", HistoryQueryOptions{}}, "SELECT COUNT(*) FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ?", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ? ORDER BY batch.history_id ASC LIMIT 50 OFFSET 0", false}, + {"entry hash, offset", args{"entry_hash", HistoryQueryOptions{Offset: 123}}, "SELECT COUNT(*) FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ?", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ? 
ORDER BY batch.history_id ASC LIMIT 50 OFFSET 123", false}, + {"entry hash, descending", args{"entry_hash", HistoryQueryOptions{Desc: true}}, "SELECT COUNT(*) FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ?", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.entry_hash = ? ORDER BY batch.history_id DESC LIMIT 50 OFFSET 0", false}, + {"entry hash, typed", args{"entry_hash", HistoryQueryOptions{FCTBurn: true, Coinbase: true}}, "SELECT COUNT(*) FROM pn_history_txbatch batch, pn_history_transaction tx WHERE (batch.entry_hash = tx.entry_hash AND batch.entry_hash = ?) AND tx.action_type IN(3,4)", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_txbatch batch, pn_history_transaction tx WHERE (batch.entry_hash = tx.entry_hash AND batch.entry_hash = ?) AND tx.action_type IN(3,4) ORDER BY batch.history_id ASC LIMIT 50 OFFSET 0", false}, + {"height, default args", args{"height", HistoryQueryOptions{}}, "SELECT COUNT(*) FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.height = ?", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_txbatch batch, pn_history_transaction tx WHERE batch.entry_hash = tx.entry_hash AND batch.height = ? 
ORDER BY batch.history_id ASC LIMIT 50 OFFSET 0", false}, + {"address, default args", args{"address", HistoryQueryOptions{}}, "SELECT COUNT(*) FROM pn_history_lookup WHERE address = ?", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_lookup lookup, pn_history_txbatch batch, pn_history_transaction tx WHERE lookup.address = ? AND lookup.entry_hash = tx.entry_hash AND lookup.tx_index = tx.tx_index AND batch.entry_hash = tx.entry_hash ORDER BY batch.history_id ASC LIMIT 50 OFFSET 0", false}, + {"address, typed", args{"address", HistoryQueryOptions{Conversion: true, Transfer: true}}, "SELECT COUNT(*) FROM pn_history_lookup lookup, pn_history_transaction tx WHERE (lookup.address = ? AND lookup.entry_hash = tx.entry_hash AND lookup.tx_index = tx.tx_index) AND tx.action_type IN(1,2)", "SELECT batch.history_id, batch.entry_hash, batch.height, batch.timestamp, batch.executed,tx.tx_index, tx.action_type, tx.from_address, tx.from_asset, tx.from_amount, tx.outputs,tx.to_asset, tx.to_amount FROM pn_history_lookup lookup, pn_history_txbatch batch, pn_history_transaction tx WHERE (lookup.address = ? 
AND lookup.entry_hash = tx.entry_hash AND lookup.tx_index = tx.tx_index AND batch.entry_hash = tx.entry_hash) AND tx.action_type IN(1,2) ORDER BY batch.history_id ASC LIMIT 50 OFFSET 0", false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := historyQueryBuilder(tt.args.field, tt.args.options) + if (err != nil) != tt.wantErr { + t.Errorf("HistoryQueryBuilder() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("HistoryQueryBuilder() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("HistoryQueryBuilder() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_historyActionPicker(t *testing.T) { + type args struct { + tx bool + conv bool + coin bool + burn bool + } + tests := []struct { + name string + args args + want []string + }{ + {"set-0", args{false, false, false, false}, nil}, + {"set-1", args{false, false, false, true}, []string{"4"}}, + {"set-2", args{false, false, true, false}, []string{"3"}}, + {"set-3", args{false, false, true, true}, []string{"3", "4"}}, + {"set-4", args{false, true, false, false}, []string{"2"}}, + {"set-5", args{false, true, false, true}, []string{"2", "4"}}, + {"set-6", args{false, true, true, false}, []string{"2", "3"}}, + {"set-7", args{false, true, true, true}, []string{"2", "3", "4"}}, + {"set-8", args{true, false, false, false}, []string{"1"}}, + {"set-9", args{true, false, false, true}, []string{"1", "4"}}, + {"set-10", args{true, false, true, false}, []string{"1", "3"}}, + {"set-11", args{true, false, true, true}, []string{"1", "3", "4"}}, + {"set-12", args{true, true, false, false}, []string{"1", "2"}}, + {"set-13", args{true, true, false, true}, []string{"1", "2", "4"}}, + {"set-14", args{true, true, true, false}, []string{"1", "2", "3"}}, + {"set-15", args{true, true, true, true}, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := historyActionPicker(tt.args.tx, tt.args.conv, 
tt.args.coin, tt.args.burn); !reflect.DeepEqual(got, tt.want) { + t.Errorf("historyActionPicker() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/node/sync.go b/node/sync.go index 32d15e7..b9d683a 100644 --- a/node/sync.go +++ b/node/sync.go @@ -209,7 +209,7 @@ func (d *Pegnetd) SyncBlock(ctx context.Context, tx *sql.Tx, height uint32) erro // 4) Apply effects of graded OPR Block (PEG rewards, if any) // These funds will be available for transactions and conversions executed in the next block if gradedBlock != nil { - if err := d.ApplyGradedOPRBlock(tx, gradedBlock); err != nil { + if err := d.ApplyGradedOPRBlock(tx, gradedBlock, dblock.Timestamp); err != nil { return err } } @@ -305,7 +305,7 @@ func (d *Pegnetd) ApplyTransactionBatchesInHolding(ctx context.Context, sqlTx *s // and applys the balance updates for all transaction batches able to be executed // immediately. If an error is returned, the sql.Tx should be rolled back by the caller. func (d *Pegnetd) ApplyTransactionBlock(sqlTx *sql.Tx, eblock *factom.EBlock) error { - for _, entry := range eblock.Entries { + for blockorder, entry := range eblock.Entries { txBatch := fat2.NewTransactionBatch(entry) err := txBatch.UnmarshalEntry() if err != nil { @@ -328,6 +328,10 @@ func (d *Pegnetd) ApplyTransactionBlock(sqlTx *sql.Tx, eblock *factom.EBlock) er } // At this point, we know that the transaction batch is valid and able to be executed. + if err = d.Pegnet.InsertTransactionHistoryTxBatch(sqlTx, blockorder, txBatch, eblock.Height); err != nil { + return err + } + // A transaction batch that contains conversions must be put into holding to be executed // in a future block. This prevents gaming of conversions where an actor // can know the exchange rates of the future ahead of time. 
@@ -342,6 +346,8 @@ func (d *Pegnetd) ApplyTransactionBlock(sqlTx *sql.Tx, eblock *factom.EBlock) er // No conversions in the batch, it can be applied immediately if err = d.applyTransactionBatch(sqlTx, txBatch, nil, eblock.Height); err != nil && err != pegnet.InsufficientBalanceErr { return err + } else if err == pegnet.InsufficientBalanceErr { + d.Pegnet.SetTransactionHistoryExecuted(sqlTx, txBatch, -1) } } return nil @@ -426,11 +432,19 @@ func (d *Pegnetd) applyTransactionBatch(sqlTx *sql.Tx, txBatch *fat2.Transaction return err } + if err = d.Pegnet.SetTransactionHistoryExecuted(sqlTx, txBatch, int64(currentHeight)); err != nil { + return err + } + if tx.IsConversion() { outputAmount, err := conversions.Convert(int64(tx.Input.Amount), rates[tx.Input.Type], rates[tx.Conversion]) if err != nil { return err } + + if err = d.Pegnet.SetTransactionHistoryConvertedAmount(sqlTx, txBatch, txIndex, outputAmount); err != nil { + return err + } _, err = d.Pegnet.AddToBalance(sqlTx, &tx.Input.Address, tx.Conversion, uint64(outputAmount)) if err != nil { return err @@ -467,7 +481,7 @@ func (d *Pegnetd) ApplyFactoidBlock(ctx context.Context, tx *sql.Tx, dblock *fac } var totalBurned uint64 - var burns []factom.FactoidTransactionIO + var burns []*factom.FactoidTransaction // Register all burns. 
Burns have a few requirements // - Only 1 output, and that output must be the EC burn address @@ -501,7 +515,7 @@ func (d *Pegnetd) ApplyFactoidBlock(ctx context.Context, tx *sql.Tx, dblock *fac in := tx.FCTInputs[0] totalBurned += in.Amount - burns = append(burns, in) + burns = append(burns, tx) } var _ = burns @@ -512,8 +526,12 @@ func (d *Pegnetd) ApplyFactoidBlock(ctx context.Context, tx *sql.Tx, dblock *fac // All burns are FCT inputs for i := range burns { var add factom.FAAddress - copy(add[:], burns[i].Address[:]) - if _, err := d.Pegnet.AddToBalance(tx, &add, fat2.PTickerFCT, burns[i].Amount); err != nil { + copy(add[:], burns[i].FCTInputs[0].Address[:]) + if _, err := d.Pegnet.AddToBalance(tx, &add, fat2.PTickerFCT, burns[i].FCTInputs[0].Amount); err != nil { + return err + } + + if err := d.Pegnet.InsertFCTBurn(tx, fblock.KeyMR, burns[i], dblock.Height); err != nil { return err } } @@ -523,7 +541,7 @@ func (d *Pegnetd) ApplyFactoidBlock(ctx context.Context, tx *sql.Tx, dblock *fac // ApplyGradedOPRBlock pays out PEG to the winners of the given GradedBlock. // If an error is returned, the sql.Tx should be rolled back by the caller. 
-func (d *Pegnetd) ApplyGradedOPRBlock(tx *sql.Tx, gradedBlock grader.GradedBlock) error { +func (d *Pegnetd) ApplyGradedOPRBlock(tx *sql.Tx, gradedBlock grader.GradedBlock, timestamp time.Time) error { winners := gradedBlock.Winners() for i := range winners { addr, err := factom.NewFAAddress(winners[i].OPR.GetAddress()) @@ -541,6 +559,10 @@ func (d *Pegnetd) ApplyGradedOPRBlock(tx *sql.Tx, gradedBlock grader.GradedBlock if _, err := d.Pegnet.AddToBalance(tx, &addr, fat2.PTickerPEG, uint64(winners[i].Payout())); err != nil { return err } + + if err := d.Pegnet.InsertCoinbase(tx, winners[i], addr[:], timestamp); err != nil { + return err + } } return nil } diff --git a/srv/methods.go b/srv/methods.go index 0b9afc2..68da629 100644 --- a/srv/methods.go +++ b/srv/methods.go @@ -34,14 +34,17 @@ import ( "github.com/pegnet/pegnetd/config" "github.com/pegnet/pegnetd/fat/fat2" "github.com/pegnet/pegnetd/node" + "github.com/pegnet/pegnetd/node/pegnet" ) func (s *APIServer) jrpcMethods() jrpc.MethodMap { return jrpc.MethodMap{ - "get-transaction": s.getTransaction(false), - "get-transaction-entry": s.getTransaction(true), - "get-pegnet-balances": s.getPegnetBalances, - "get-pegnet-issuance": s.getPegnetIssuance, + "get-transactions": s.getTransactions, + "get-transaction-status": s.getTransactionStatus, + "get-transaction": s.getTransaction(false), + "get-transaction-entry": s.getTransaction(true), + "get-pegnet-balances": s.getPegnetBalances, + "get-pegnet-issuance": s.getPegnetIssuance, "send-transaction": s.sendTransaction, @@ -52,11 +55,95 @@ func (s *APIServer) jrpcMethods() jrpc.MethodMap { } +type ResultGetTransactionStatus struct { + Height uint32 `json:"height"` + Executed uint32 `json:"executed"` +} + +func (s *APIServer) getTransactionStatus(data json.RawMessage) interface{} { + params := ParamsGetPegnetTransactionStatus{} + _, _, err := validate(data, ¶ms) + if err != nil { + return err + } + + height, executed, err := 
s.Node.Pegnet.SelectTransactionHistoryStatus(params.Hash) + if err != nil { + return jrpc.InvalidParams(err.Error()) + } + + if height == 0 { + return ErrorTransactionNotFound + } + + var res ResultGetTransactionStatus + res.Height = height + res.Executed = executed + + return res +} + +// ResultGetTransactions returns history entries. +// `Actions` contains []pegnet.HistoryTransaction. +// `Count` is the total number of possible transactions +// `NextOffset` returns the offset to use to get the next set of records. +// 0 means no more records available +type ResultGetTransactions struct { + Actions interface{} `json:"actions"` + Count int `json:"count"` + NextOffset int `json:"nextoffset"` +} + +func (s *APIServer) getTransactions(data json.RawMessage) interface{} { + params := ParamsGetPegnetTransaction{} + _, _, err := validate(data, ¶ms) + if err != nil { + return err + } + + // using a separate options struct due to golang's circular import restrictions + var options pegnet.HistoryQueryOptions + options.Offset = params.Offset + options.Desc = params.Desc + options.Transfer = params.Transfer + options.Conversion = params.Conversion + options.Coinbase = params.Coinbase + options.FCTBurn = params.Burn + + var actions []pegnet.HistoryTransaction + var count int + + if params.Hash != nil { + actions, count, err = s.Node.Pegnet.SelectTransactionHistoryActionsByHash(params.Hash, options) + } else if params.Address != "" { + addr, _ := factom.NewFAAddress(params.Address) // verified in param + actions, count, err = s.Node.Pegnet.SelectTransactionHistoryActionsByAddress(&addr, options) + } else { + actions, count, err = s.Node.Pegnet.SelectTransactionHistoryActionsByHeight(uint32(params.Height), options) + } + + if err != nil { + return jrpc.InvalidParams(err.Error()) + } + + if len(actions) == 0 { + return ErrorTransactionNotFound + } + + var res ResultGetTransactions + res.Count = count + if params.Offset+len(actions) < count { + res.NextOffset = params.Offset + 
len(actions) + } + res.Actions = actions + + return res +} + type ResultGetTransaction struct { Hash *factom.Bytes32 `json:"entryhash"` Timestamp int64 `json:"timestamp"` - TxIndex uint64 `json:"txindex,omitempty"` - Tx interface{} `json:"data"` + Tx interface{} `json:"actions"` } func (s *APIServer) getTransaction(getEntry bool) jrpc.MethodFunc { diff --git a/srv/params.go b/srv/params.go index a528160..8ed64a9 100644 --- a/srv/params.go +++ b/srv/params.go @@ -91,6 +91,63 @@ func (ParamsGetPegnetRates) ValidChainID() *factom.Bytes32 { return nil } +type ParamsGetPegnetTransactionStatus struct { + Hash *factom.Bytes32 `json:"entryhash,omitempty"` +} + +func (p ParamsGetPegnetTransactionStatus) HasIncludePending() bool { return false } +func (p ParamsGetPegnetTransactionStatus) IsValid() error { + if p.Hash == nil { + return jrpc.InvalidParams(`required: "entryhash"`) + } + return nil +} +func (p ParamsGetPegnetTransactionStatus) ValidChainID() *factom.Bytes32 { + return nil +} + +// ParamsGetPegnetTransaction are the parameters for retrieving transactions from +// the history system. +// You need to specify exactly one of either `hash`, `address`, or `height`. +// `offset` is the value from a previous query's `nextoffset`. 
+// `desc` returns transactions in newest->oldest order +type ParamsGetPegnetTransaction struct { + Hash *factom.Bytes32 `json:"entryhash,omitempty"` + Address string `json:"address,omitempty"` + Height int `json:"height,omitempty"` + Offset int `json:"offset,omitempty"` + Desc bool `json:"desc,omitempty"` + Transfer bool `json:"transfer,omitempty"` + Conversion bool `json:"conversion,omitempty"` + Coinbase bool `json:"coinbase,omitempty"` + Burn bool `json:"burn,omitempty"` +} + +func (p ParamsGetPegnetTransaction) HasIncludePending() bool { return false } +func (p ParamsGetPegnetTransaction) IsValid() error { + if p.Offset < 0 { + return jrpc.InvalidParams(`offset must be >= 0`) + } + if p.Hash != nil && p.Address != "" && p.Height > 0 { + return jrpc.InvalidParams(`required: only set hash or address`) + } + if p.Hash != nil { + return nil + } else if p.Address != "" { + _, err := factom.NewFAAddress(p.Address) + if err != nil { + return err + } + return nil + } else if p.Height > 0 { + return nil + } + return jrpc.InvalidParams(`required: "entryhash" or "address"`) +} +func (p ParamsGetPegnetTransaction) ValidChainID() *factom.Bytes32 { + return nil +} + type ParamsGetPegnetBalances struct { Address *factom.FAAddress `json:"address,omitempty"` }