/
libindex.go
340 lines (319 loc) · 12.4 KB
/
libindex.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package engine
import (
"strings"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/guardian"
"github.com/cgrates/cgrates/utils"
)
var (
	// filterIndexType holds the only filter rule types that are indexable:
	// exact matches (*string) and prefix matches (*prefix). All other rule
	// types are skipped when (re)building filter indexes.
	filterIndexType = utils.StringMap{
		utils.MetaString: true,
		utils.MetaPrefix: true,
	}
)
// UpdateFilterIndexes will update the indexes for every reference of a filter that exists in a profile.
// Every profile that contains the filters from oldFltr will be updated with the new values for newFltr.
// oldFltr and newFltr has the same tenant and ID.
func UpdateFilterIndexes(dm *DataManager, tnt string, oldFltr *Filter, newFltr *Filter) (err error) {
	// we need the rules in order to compute the new indexes
	oldRules := utils.StringMap{}    // indexable rules kept by both the old and the new filter
	newRules := utils.StringMap{}    // indexable rules from the new filter
	removeRules := utils.StringMap{} // rules present in the old filter but absent from the new one; their indexes must be removed
	// first we collect the indexable rules from the new filter
	for _, fltr := range newFltr.Rules {
		if !filterIndexType.HasKey(fltr.Type) { // we do not consider other types, just *string and *prefix
			continue
		}
		isElementDyn := strings.HasPrefix(fltr.Element, utils.DynamicDataPrefix)
		for _, value := range fltr.Values {
			var idxKey string
			if isElementDyn {
				// we do not index element:value both of dynamic types e.g. *string:~*req.Account:~*req.Destination
				if strings.HasPrefix(value, utils.DynamicDataPrefix) {
					continue
				}
				idxKey = utils.ConcatenatedKey(fltr.Type, fltr.Element, value)
			} else if strings.HasPrefix(value, utils.DynamicDataPrefix) {
				// static element with dynamic value: index with the dynamic part first
				idxKey = utils.ConcatenatedKey(fltr.Type, value, fltr.Element)
			} else {
				continue // none of the element or value are dynamic, so we do not index
			}
			newRules[idxKey] = true
		}
	}
	// now we check the rules from the old filter,
	// compare them against the new rules and decide which rules need to be removed
	for _, fltr := range oldFltr.Rules {
		if !filterIndexType.HasKey(fltr.Type) { // we do not consider other types, just *string and *prefix
			continue
		}
		isElementDyn := strings.HasPrefix(fltr.Element, utils.DynamicDataPrefix)
		for _, value := range fltr.Values {
			var idxKey string
			if isElementDyn {
				// we do not index element:value both of dynamic types e.g. *string:~*req.Account:~*req.Destination
				if strings.HasPrefix(value, utils.DynamicDataPrefix) {
					continue
				}
				idxKey = utils.ConcatenatedKey(fltr.Type, fltr.Element, value)
			} else if strings.HasPrefix(value, utils.DynamicDataPrefix) {
				idxKey = utils.ConcatenatedKey(fltr.Type, value, fltr.Element)
			} else {
				continue // none of the element or value are dynamic, so we do not index
			}
			if !newRules.HasKey(idxKey) {
				removeRules[idxKey] = true // old rule dropped by the new filter
			} else {
				oldRules[idxKey] = true // rule survives the update unchanged
			}
		}
	}
	needsRebuild := len(removeRules) != 0 // any dropped rule forces a rebuild
	if !needsRebuild { // otherwise rebuild only if the new filter introduced rules the old one did not have
		for key := range newRules {
			if needsRebuild = !oldRules.HasKey(key); needsRebuild {
				break
			}
		}
		if !needsRebuild {
			return // nothing to change
		}
	}
	tntFltrID := utils.ConcatenatedKey(newFltr.Tenant, newFltr.ID)
	// lock the reverse indexes of this filter for the whole rebuild
	refID := guardian.Guardian.GuardIDs(utils.EmptyString,
		config.CgrConfig().GeneralCfg().LockingTimeout, utils.CacheReverseFilterIndexes+tntFltrID)
	defer guardian.Guardian.UnguardIDs(refID)
	var rcvIndexes map[string]utils.StringMap
	// get all the reverse indexes for the specific filter from db
	if rcvIndexes, err = dm.GetFilterIndexes(utils.PrefixToIndexCache[utils.ReverseFilterIndexes], tntFltrID,
		utils.EmptyString, nil); err != nil {
		if err != utils.ErrNotFound {
			return // real DB error
		}
		err = nil // if the error is NOT_FOUND, it means that no indexes were found for this filter, so no need to update
		return
	}
	removeIndexKeys := removeRules.Slice()
	for idxItmType, index := range rcvIndexes {
		// non-meta keys carry a suffix after the cache name
		// (NOTE(review): presumably tenant/context — confirm); keep only the cache identifier
		if !strings.HasPrefix(idxItmType, utils.Meta) {
			idxItmType = strings.Split(idxItmType, utils.CONCATENATED_KEY_SEP)[0]
		}
		switch idxItmType {
		case utils.CacheChargerFilterIndexes:
			// remove the indexes from this filter for this partition
			if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheChargerProfiles,
				tnt, removeIndexKeys, index); err != nil {
				return
			}
			// we removed the old reverse indexes, now we have to compute the new ones
			chargerIDs := index.Slice()
			if _, err = ComputeChargerIndexes(dm, newFltr.Tenant, &chargerIDs,
				utils.NonTransactional); err != nil {
				return err
			}
		case utils.CacheThresholdFilterIndexes:
			// remove the indexes from this filter for this partition
			if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheThresholdProfiles,
				tnt, removeIndexKeys, index); err != nil {
				return
			}
			// we removed the old reverse indexes, now we have to compute the new ones
			thresholdIDs := index.Slice()
			if _, err = ComputeThresholdIndexes(dm, newFltr.Tenant, &thresholdIDs,
				utils.NonTransactional); err != nil {
				return err
			}
		case utils.CacheResourceFilterIndexes:
			// remove the indexes from this filter for this partition
			if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheResourceProfiles,
				tnt, removeIndexKeys, index); err != nil {
				return
			}
			// we removed the old reverse indexes, now we have to compute the new ones
			resourceIDs := index.Slice()
			if _, err = ComputeResourceIndexes(dm, newFltr.Tenant, &resourceIDs,
				utils.NonTransactional); err != nil {
				return err
			}
		case utils.CacheSupplierFilterIndexes:
			// remove the indexes from this filter for this partition
			if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheSupplierProfiles,
				tnt, removeIndexKeys, index); err != nil {
				return
			}
			// we removed the old reverse indexes, now we have to compute the new ones
			supplierIDs := index.Slice()
			if _, err = ComputeSupplierIndexes(dm, newFltr.Tenant, &supplierIDs,
				utils.NonTransactional); err != nil {
				return err
			}
		case utils.CacheStatFilterIndexes:
			// remove the indexes from this filter for this partition
			if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheStatQueueProfiles,
				tnt, removeIndexKeys, index); err != nil {
				return
			}
			// we removed the old reverse indexes, now we have to compute the new ones
			statQueueIDs := index.Slice()
			if _, err = ComputeStatIndexes(dm, newFltr.Tenant, &statQueueIDs,
				utils.NonTransactional); err != nil {
				return err
			}
		case utils.CacheAttributeFilterIndexes:
			// attribute profiles are partitioned per context, so each profile must be
			// processed per context rather than once per partition
			attributeIDs := index.Slice()
			for _, attrID := range attributeIDs {
				var ap *AttributeProfile
				if ap, err = dm.GetAttributeProfile(newFltr.Tenant, attrID,
					true, false, utils.NonTransactional); err != nil {
					return
				}
				for _, ctx := range ap.Contexts {
					tntCtx := utils.ConcatenatedKey(newFltr.Tenant, ctx)
					if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheAttributeProfiles,
						tntCtx, // remove the indexes for the filter
						removeIndexKeys, index); err != nil {
						return
					}
					if _, err = ComputeAttributeIndexes(dm, newFltr.Tenant, ctx, &[]string{attrID},
						utils.NonTransactional); err != nil {
						return err
					}
				}
			}
		case utils.CacheDispatcherFilterIndexes:
			// dispatcher profiles are partitioned per subsystem, mirroring the attribute case
			dispatcherIDs := index.Slice()
			for _, dspID := range dispatcherIDs {
				var dpp *DispatcherProfile
				if dpp, err = dm.GetDispatcherProfile(newFltr.Tenant, dspID,
					true, false, utils.NonTransactional); err != nil {
					return
				}
				for _, subsys := range dpp.Subsystems {
					tntSubsys := utils.ConcatenatedKey(newFltr.Tenant, subsys)
					if err = removeFilterIndexesForFilter(dm, idxItmType, utils.CacheDispatcherProfiles,
						tntSubsys, // remove the indexes for the filter
						removeIndexKeys, index); err != nil {
						return
					}
					if _, err = ComputeDispatcherIndexes(dm, newFltr.Tenant, subsys, &[]string{dspID},
						utils.NonTransactional); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
// removeFilterIndexesForFilter removes the itemID for the index keys
// used to remove the old indexes when a filter is updated
func removeFilterIndexesForFilter(dm *DataManager, idxItmType, cacheItmType, tnt string,
removeIndexKeys []string, itemIDs utils.StringMap) (err error) {
refID := guardian.Guardian.GuardIDs(utils.EmptyString,
config.CgrConfig().GeneralCfg().LockingTimeout, idxItmType+tnt)
defer guardian.Guardian.UnguardIDs(refID)
for _, idxKey := range removeIndexKeys { // delete old filters indexes for this item
var remIndx map[string]utils.StringMap
if remIndx, err = dm.GetFilterIndexes(idxItmType, tnt,
utils.EmptyString, nil); err != nil {
if err != utils.ErrNotFound {
return
}
err = nil
continue
}
for idx := range itemIDs {
delete(remIndx[idxKey], idx)
}
fltrIndexer := NewFilterIndexer(dm, utils.CacheInstanceToPrefix[cacheItmType], tnt)
fltrIndexer.indexes = remIndx
if err = fltrIndexer.StoreIndexes(true, utils.NonTransactional); err != nil {
return
}
}
return
}
// addReverseFilterIndexForFilter will add a reference for the filter in reverse filter indexes
func addReverseFilterIndexForFilter(dm *DataManager, idxItmType, tnt,
itemID string, filterIDs []string) (err error) {
for _, fltrID := range filterIDs {
if strings.HasPrefix(fltrID, utils.Meta) { // we do not reverse for inline filters
continue
}
tntFltrID := utils.ConcatenatedKey(tnt, fltrID)
refID := guardian.Guardian.GuardIDs(utils.EmptyString,
config.CgrConfig().GeneralCfg().LockingTimeout, utils.CacheReverseFilterIndexes+tntFltrID)
var indexes map[string]utils.StringMap
if indexes, err = dm.GetFilterIndexes(utils.PrefixToIndexCache[utils.ReverseFilterIndexes], tntFltrID,
utils.EmptyString, nil); err != nil {
if err != utils.ErrNotFound {
guardian.Guardian.UnguardIDs(refID)
return
}
err = nil
indexes = map[string]utils.StringMap{
idxItmType: make(map[string]bool), // not found in database any reverse, we declare them to add in the next steps
}
}
if indexes[idxItmType] == nil {
indexes[idxItmType] = make(utils.StringMap)
}
indexes[idxItmType].Copy(map[string]bool{
itemID: true,
})
indexerKey := utils.ConcatenatedKey(tnt, fltrID)
fltrIndexer := NewFilterIndexer(dm, utils.ReverseFilterIndexes, indexerKey)
fltrIndexer.indexes = indexes
if err = fltrIndexer.StoreIndexes(true, utils.NonTransactional); err != nil { // it will remove from cache the old ones
guardian.Guardian.UnguardIDs(refID)
return
}
guardian.Guardian.UnguardIDs(refID)
}
return
}
// removeReverseFilterIndexForFilter will remove a reference for the filter in reverse filter indexes
func removeReverseFilterIndexForFilter(dm *DataManager, idxItmType, tnt, itemID string, filterIDs []string) (err error) {
for _, fltrID := range filterIDs {
if strings.HasPrefix(fltrID, utils.Meta) { // we do not reverse for inline filters
continue
}
tntFltrID := utils.ConcatenatedKey(tnt, fltrID)
refID := guardian.Guardian.GuardIDs(utils.EmptyString,
config.CgrConfig().GeneralCfg().LockingTimeout, utils.CacheReverseFilterIndexes+tntFltrID)
var indexes map[string]utils.StringMap
if indexes, err = dm.GetFilterIndexes(utils.PrefixToIndexCache[utils.ReverseFilterIndexes], tntFltrID,
utils.EmptyString, nil); err != nil {
guardian.Guardian.UnguardIDs(refID)
if err != utils.ErrNotFound {
return
}
err = nil
continue // already removed
}
delete(indexes[idxItmType], itemID) // delete index from map
indexerKey := utils.ConcatenatedKey(tnt, fltrID)
fltrIndexer := NewFilterIndexer(dm, utils.ReverseFilterIndexes, indexerKey)
fltrIndexer.indexes = indexes
if err = fltrIndexer.StoreIndexes(true, utils.NonTransactional); err != nil {
guardian.Guardian.UnguardIDs(refID)
return
}
guardian.Guardian.UnguardIDs(refID)
}
return
}