-
Notifications
You must be signed in to change notification settings - Fork 19
/
ledger.go
345 lines (293 loc) · 11.9 KB
/
ledger.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
// Copyright (c) 2022 Gobalsky Labs Limited
//
// Use of this software is governed by the Business Source License included
// in the LICENSE.DATANODE file and at https://www.mariadb.com/bsl11.
//
// Change Date: 18 months from the later of the date of the first publicly
// available Distribution of this version of the repository, and 25 June 2022.
//
// On the date above, in accordance with the Business Source License, use
// of this software will be governed by version 3 or later of the GNU General
// Public License.
package sqlstore
import (
"context"
"fmt"
"io"
"time"
"github.com/georgysavva/scany/pgxscan"
"github.com/shopspring/decimal"
"code.vegaprotocol.io/vega/core/types"
"code.vegaprotocol.io/vega/datanode/entities"
"code.vegaprotocol.io/vega/datanode/metrics"
"code.vegaprotocol.io/vega/libs/ptr"
"code.vegaprotocol.io/vega/logging"
v2 "code.vegaprotocol.io/vega/protos/data-node/api/v2"
)
// aggregateLedgerEntriesOrdering fixes the cursor-pagination ordering for
// aggregated ledger entries: ascending by block time (vega_time).
var aggregateLedgerEntriesOrdering = TableOrdering{
	ColumnOrdering{Name: "vega_time", Sorting: ASC},
}
// Ledger provides SQL-backed storage for ledger entries. Entries are staged
// in memory via Add and written to the database in batches on Flush.
type Ledger struct {
	*ConnectionSource
	// batcher accumulates entries for a single batched insert on Flush.
	batcher ListBatcher[entities.LedgerEntry]
	// pending holds entries added since the last Flush; each is assigned its
	// unique ledger_entry_time just before being handed to the batcher.
	pending []entities.LedgerEntry
}
// NewLedger returns a Ledger store backed by the given connection source.
func NewLedger(connectionSource *ConnectionSource) *Ledger {
	return &Ledger{
		ConnectionSource: connectionSource,
		batcher:          NewListBatcher[entities.LedgerEntry]("ledger", entities.LedgerEntryColumns),
	}
}
// Flush stamps every pending ledger entry with a unique ledger_entry_time and
// writes the whole batch to the database, returning the flushed entries.
//
// The entry time is derived from (vega_time, position-in-batch) because the
// block event sequence number alone is not unique: a single transfer event can
// produce several ledger entries.
func (ls *Ledger) Flush(ctx context.Context) ([]entities.LedgerEntry, error) {
	defer metrics.StartSQLQuery("Ledger", "Flush")()
	for idx := range ls.pending {
		entry := ls.pending[idx]
		entry.LedgerEntryTime = entities.CreateLedgerEntryTime(entry.VegaTime, idx)
		ls.batcher.Add(entry)
	}
	ls.pending = nil
	return ls.batcher.Flush(ctx, ls.Connection)
}
// Add stages a ledger entry to be persisted on the next Flush. The returned
// error is always nil; it exists to satisfy the store interface.
func (ls *Ledger) Add(entry entities.LedgerEntry) error {
	ls.pending = append(ls.pending, entry)
	return nil
}
// GetByLedgerEntryTime fetches the single ledger entry whose unique
// ledger_entry_time equals the given timestamp.
func (ls *Ledger) GetByLedgerEntryTime(ctx context.Context, ledgerEntryTime time.Time) (entities.LedgerEntry, error) {
	// Fix: the metric was previously labelled "GetByID" (copy-paste slip),
	// which misattributed this query in SQL metrics; every other method here
	// labels the metric after itself.
	defer metrics.StartSQLQuery("Ledger", "GetByLedgerEntryTime")()
	le := entities.LedgerEntry{}
	return le, ls.wrapE(pgxscan.Get(ctx, ls.Connection, &le,
		`SELECT ledger_entry_time, quantity, tx_hash, vega_time, transfer_time, type, account_from_balance, account_to_balance
		FROM ledger WHERE ledger_entry_time =$1`,
		ledgerEntryTime))
}
// GetAll returns every row of the ledger table.
func (ls *Ledger) GetAll(ctx context.Context) ([]entities.LedgerEntry, error) {
	defer metrics.StartSQLQuery("Ledger", "GetAll")()
	entries := []entities.LedgerEntry{}
	err := pgxscan.Select(ctx, ls.Connection, &entries, `
		SELECT ledger_entry_time, quantity, tx_hash, vega_time, transfer_time, type, account_from_balance, account_to_balance
		FROM ledger`)
	return entries, err
}
// GetByTxHash returns all ledger entries produced by the transaction with the
// given hash.
func (ls *Ledger) GetByTxHash(ctx context.Context, txHash entities.TxHash) ([]entities.LedgerEntry, error) {
	defer metrics.StartSQLQuery("Ledger", "GetByTxHash")()
	entries := []entities.LedgerEntry{}
	err := pgxscan.Select(ctx, ls.Connection, &entries, `
		SELECT ledger_entry_time, account_from_id, account_to_id, quantity, tx_hash, vega_time, transfer_time, type, account_from_balance, account_to_balance
		FROM ledger WHERE tx_hash=$1`,
		txHash,
	)
	return entries, err
}
// Query sums the ledger entries of the subset of accounts selected by filter
// and returns the result as a time series: one AggregatedLedgerEntry per
// vega_time at which the aggregate changes, paginated by the given cursor.
//
// The filter must name at most one party on each of the sending and receiving
// sides; otherwise prepareQuery reports an error. Entries can be filtered by:
//   - the sending account (market_id, asset_id, account_type)
//   - the receiving account (market_id, asset_id, account_type)
//   - both the sending AND receiving account
//   - the transfer type (combined with the above, or on its own)
func (ls *Ledger) Query(
	ctx context.Context,
	filter *entities.LedgerEntryFilter,
	dateRange entities.DateRange,
	pagination entities.CursorPagination,
) (*[]entities.AggregatedLedgerEntry, entities.PageInfo, error) {
	var pageInfo entities.PageInfo

	withQuery, selectQuery, args, err := ls.prepareQuery(filter, dateRange)
	if err != nil {
		return nil, pageInfo, err
	}

	pageQuery, args, err := PaginateQuery[entities.AggregatedLedgerEntriesCursor](
		selectQuery, args, aggregateLedgerEntriesOrdering, pagination)
	if err != nil {
		return nil, pageInfo, err
	}

	defer metrics.StartSQLQuery("Ledger", "Query")()
	rows, err := ls.Connection.Query(ctx, fmt.Sprintf("%s %s", withQuery, pageQuery), args...)
	if err != nil {
		return nil, pageInfo, fmt.Errorf("querying ledger entries: %w", err)
	}
	defer rows.Close()

	// Scan into the intermediate ledgerEntriesScanned type first; some column
	// types are not accepted directly by the scanner (see parseScanned).
	var scanned []ledgerEntriesScanned
	if err = pgxscan.ScanAll(&scanned, rows); err != nil {
		return nil, pageInfo, fmt.Errorf("scanning ledger entries: %w", err)
	}

	paged, pageInfo := entities.PageEntities[*v2.AggregatedLedgerEntriesEdge](parseScanned(scanned), pagination)
	return &paged, pageInfo, nil
}
// Export streams, as CSV, every ledger entry sent from accounts belonging to
// the given party, optionally restricted to a single asset and a date range.
// The output (with a header row) is written to writer via Postgres COPY.
// An empty partyID is rejected with ErrLedgerEntryExportForParty.
func (ls *Ledger) Export(
	ctx context.Context,
	partyID string,
	assetID *string,
	dateRange entities.DateRange,
	writer io.Writer,
) error {
	if partyID == "" {
		return ErrLedgerEntryExportForParty
	}

	// Export is keyed on the sending side: filter on accounts the party owns.
	filter := &entities.LedgerEntryFilter{
		FromAccountFilter: entities.AccountFilter{
			PartyIDs: []entities.PartyID{entities.PartyID(partyID)},
		},
	}
	if assetID != nil {
		filter.FromAccountFilter.AssetID = entities.AssetID(ptr.UnBox(assetID))
	}

	withQuery, selectQuery, args, err := ls.prepareQuery(filter, dateRange)
	if err != nil {
		return err
	}

	copyQuery := fmt.Sprintf("copy (%s %s) to STDOUT (FORMAT csv, HEADER)", withQuery, selectQuery)
	tag, err := ls.Connection.CopyTo(ctx, writer, copyQuery, args...)
	if err != nil {
		return fmt.Errorf("copying to stdout: %w", err)
	}
	ls.log.Debug("copy to CSV", logging.Int64("rows affected", tag.RowsAffected()))
	return nil
}
// prepareQuery builds the two halves of the aggregation query: the WITH/CTE
// part produced by createDynamicQuery, and the SELECT over the "entries" CTE
// with an optional vega_time range predicate. It returns both query strings
// plus the accumulated bind arguments.
func (*Ledger) prepareQuery(filter *entities.LedgerEntryFilter, dateRange entities.DateRange) (string, string, []any, error) {
	filterQueries, args, err := filterLedgerEntriesQuery(filter)
	if err != nil {
		return "", "", nil, fmt.Errorf("filtering ledger entries: %w", err)
	}

	// Build the optional date predicate; start is inclusive, end exclusive.
	dateClause := ""
	if dateRange.Start != nil {
		dateClause = "WHERE vega_time >= " + nextBindVar(&args, dateRange.Start.Format(time.RFC3339))
	}
	if dateRange.End != nil {
		prefix := "WHERE "
		if dateClause != "" {
			prefix = dateClause + " AND"
		}
		dateClause = prefix + " vega_time < " + nextBindVar(&args, dateRange.End.Format(time.RFC3339))
	}

	dynamicQuery := createDynamicQuery(filterQueries, filter.CloseOnAccountFilters)
	whereQuery := fmt.Sprintf(`SELECT
		vega_time, quantity, transfer_type, asset_id,
		account_from_market_id, account_from_party_id, account_from_account_type,
		account_to_market_id, account_to_party_id, account_to_account_type,
		account_from_balance, account_to_balance
	FROM entries
	%s`, dateClause)
	return dynamicQuery, whereQuery, args, nil
}
// ledgerEntriesScanned is a local type used as a mediator between the pgxscan
// scanner and the AggregatedLedgerEntry type. It is needed to manually
// transfer values into data types that are not accepted by the scanner.
// Field names map to the column aliases produced by createDynamicQuery.
type ledgerEntriesScanned struct {
	VegaTime               time.Time
	Quantity               decimal.Decimal
	TransferType           entities.LedgerMovementType
	AssetID                entities.AssetID
	AccountFromPartyID     entities.PartyID
	AccountToPartyID       entities.PartyID
	AccountFromAccountType types.AccountType
	AccountToAccountType   types.AccountType
	AccountFromMarketID    entities.MarketID
	AccountToMarketID      entities.MarketID
	AccountFromBalance     decimal.Decimal
	AccountToBalance       decimal.Decimal
}
// parseScanned converts rows scanned from the database into
// AggregatedLedgerEntry values. Pointer fields alias the elements of the
// input slice (except TransferType, which points at a per-row copy).
// It always returns a non-nil slice.
func parseScanned(scanned []ledgerEntriesScanned) []entities.AggregatedLedgerEntry {
	// Pre-size the result; the former `if len(scanned) > 0` guard was
	// redundant (ranging over an empty slice is a no-op, and an empty,
	// non-nil slice is still returned).
	ledgerEntries := make([]entities.AggregatedLedgerEntry, 0, len(scanned))
	for i := range scanned {
		// Copy the transfer type so the pointer does not alias the input row.
		tt := scanned[i].TransferType
		ledgerEntries = append(ledgerEntries, entities.AggregatedLedgerEntry{
			VegaTime:            scanned[i].VegaTime,
			Quantity:            scanned[i].Quantity,
			AssetID:             &scanned[i].AssetID,
			FromAccountPartyID:  &scanned[i].AccountFromPartyID,
			ToAccountPartyID:    &scanned[i].AccountToPartyID,
			FromAccountType:     &scanned[i].AccountFromAccountType,
			ToAccountType:       &scanned[i].AccountToAccountType,
			FromAccountMarketID: &scanned[i].AccountFromMarketID,
			ToAccountMarketID:   &scanned[i].AccountToMarketID,
			FromAccountBalance:  scanned[i].AccountFromBalance,
			ToAccountBalance:    scanned[i].AccountToBalance,
			TransferType:        &tt,
		})
	}
	return ledgerEntries
}
// createDynamicQuery assembles the WITH clause for the ledger-entry
// aggregation, choosing the CTE name and WHERE predicate from which filters
// are present:
//   - no filtering at all
//   - filtering on the sending account only
//   - filtering on the receiving account only
//   - filtering on the sending AND receiving account (AND-ed when
//     closeOnAccountFilters is set, otherwise OR-ed)
//   - filtering on the transfer type (on top of the above, or standalone)
func createDynamicQuery(filterQueries [3]string, closeOnAccountFilters entities.CloseOnLimitOperation) string {
	const template = `
	%s AS (
		SELECT
			ledger.vega_time AS vega_time, ledger.quantity, ledger.type AS transfer_type,
			ledger.account_from_id, ledger.account_to_id,
			ledger.account_from_balance, ledger.account_to_balance,
			account_from.asset_id AS asset_id,
			account_from.party_id AS account_from_party_id,
			account_from.market_id AS account_from_market_id,
			account_from.type AS account_from_account_type,
			account_to.party_id AS account_to_party_id,
			account_to.market_id AS account_to_market_id,
			account_to.type AS account_to_account_type
		FROM ledger
		INNER JOIN accounts AS account_from
			ON ledger.account_from_id=account_from.id
		INNER JOIN accounts AS account_to
			ON ledger.account_to_id=account_to.id),
	entries AS (
		SELECT vega_time, quantity, transfer_type, asset_id,
			account_from_market_id, account_from_party_id, account_from_account_type,
			account_to_market_id, account_to_party_id, account_to_account_type,
			account_from_balance, account_to_balance
		FROM %s
		%s
	)
	`

	fromFilter, toFilter, typeFilter := filterQueries[0], filterQueries[1], filterQueries[2]

	tableName := ""
	whereClause := ""
	switch {
	case fromFilter != "" && toFilter != "":
		if closeOnAccountFilters {
			tableName = "ledger_entries_closed_on_account_filters"
			whereClause = fmt.Sprintf("WHERE (%s) AND (%s)", fromFilter, toFilter)
		} else {
			tableName = "ledger_entries_open_on_account_filters"
			whereClause = fmt.Sprintf("WHERE (%s) OR (%s)", fromFilter, toFilter)
		}
	case fromFilter != "":
		tableName = "ledger_entries_from_account_filter"
		whereClause = fmt.Sprintf("WHERE %s", fromFilter)
	case toFilter != "":
		tableName = "ledger_entries_to_account_filter"
		whereClause = fmt.Sprintf("WHERE %s", toFilter)
	}

	// The transfer-type filter overrides the CTE name and extends (or
	// provides) the predicate.
	if typeFilter != "" {
		tableName = "ledger_entries_transfer_type_filter"
		if whereClause == "" {
			whereClause = fmt.Sprintf("WHERE %s", typeFilter)
		} else {
			whereClause = fmt.Sprintf("%s AND (%s)", whereClause, typeFilter)
		}
	}

	return fmt.Sprintf(`WITH %s`, fmt.Sprintf(template, tableName, tableName, whereClause))
}