-
Notifications
You must be signed in to change notification settings - Fork 62
/
dynamodb.jl
3202 lines (2998 loc) · 157 KB
/
dynamodb.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: dynamodb
using AWS.Compat
using AWS.UUIDs
"""
batch_execute_statement(statements)
batch_execute_statement(statements, params::Dict{String,<:Any})
This operation allows you to perform batch reads or writes on data stored in DynamoDB,
using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality
condition on all key attributes. This enforces that each SELECT statement in a batch
returns at most a single item. The entire batch must consist of either read statements or
write statements, you cannot mix both in one batch. A HTTP 200 response does not mean
that all statements in the BatchExecuteStatement succeeded. Error details for individual
statements can be found under the Error field of the BatchStatementResponse for each
statement.
# Arguments
- `statements`: The list of PartiQL statements representing the batch to run.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ReturnConsumedCapacity"`:
"""
# Required-arguments form: issue a BatchExecuteStatement call with only `Statements`.
function batch_execute_statement(
    Statements; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Build the minimal request payload from the required argument.
    payload = Dict{String,Any}("Statements" => Statements)
    return dynamodb(
        "BatchExecuteStatement",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function batch_execute_statement(
    Statements,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}("Statements" => Statements)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "BatchExecuteStatement",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
batch_get_item(request_items)
batch_get_item(request_items, params::Dict{String,<:Any})
The BatchGetItem operation returns the attributes of one or more items from one or more
tables. You identify requested items by primary key. A single operation can retrieve up to
16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial
result if the response size limit is exceeded, the table's provisioned throughput is
exceeded, or an internal processing failure occurs. If a partial result is returned, the
operation returns a value for UnprocessedKeys. You can use this value to retry the
operation starting with the next item to get. If you request more than 100 items,
BatchGetItem returns a ValidationException with the message \"Too many items requested for
the BatchGetItem call.\" For example, if you ask to retrieve 100 items, but each
individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16
MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next
page of results. If desired, your application can include its own logic to assemble the
pages of results into one dataset. If none of the items can be processed due to
insufficient provisioned throughput on all of the tables in the request, then BatchGetItem
returns a ProvisionedThroughputExceededException. If at least one of the items is
successfully processed, then BatchGetItem completes successfully, while returning the keys
of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you
should retry the batch operation on those items. However, we strongly recommend that you
use an exponential backoff algorithm. If you retry the batch operation immediately, the
underlying read or write requests can still fail due to throttling on the individual
tables. If you delay the batch operation using exponential backoff, the individual requests
in the batch are much more likely to succeed. For more information, see Batch Operations
and Error Handling in the Amazon DynamoDB Developer Guide. By default, BatchGetItem
performs eventually consistent reads on every table in the request. If you want strongly
consistent reads instead, you can set ConsistentRead to true for any or all tables. In
order to minimize response latency, BatchGetItem retrieves items in parallel. When
designing your application, keep in mind that DynamoDB does not return items in any
particular order. To help parse the response by item, include the primary key values for
the items in your request in the ProjectionExpression parameter. If a requested item does
not exist, it is not returned in the result. Requests for nonexistent items consume the
minimum read capacity units according to the type of read. For more information, see
Working with Tables in the Amazon DynamoDB Developer Guide.
# Arguments
- `request_items`: A map of one or more table names and, for each table, a map that
describes one or more items to retrieve from that table. Each table name can be used only
once per BatchGetItem request. Each element in the map of items to retrieve consists of the
following: ConsistentRead - If true, a strongly consistent read is used; if false (the
default), an eventually consistent read is used. ExpressionAttributeNames - One or more
substitution tokens for attribute names in the ProjectionExpression parameter. The
following are some use cases for using ExpressionAttributeNames: To access an attribute
whose name conflicts with a DynamoDB reserved word. To create a placeholder for repeating
occurrences of an attribute name in an expression. To prevent special characters in an
attribute name from being misinterpreted in an expression. Use the # character in an
expression to dereference an attribute name. For example, consider the following attribute
name: Percentile The name of this attribute conflicts with a reserved word, so it
cannot be used directly in an expression. (For the complete list of reserved words, see
Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could
specify the following for ExpressionAttributeNames: {\"#P\":\"Percentile\"} You could
then use this substitution in an expression, as in this example: #P = :val Tokens
that begin with the : character are expression attribute values, which are placeholders for
the actual value at runtime. For more information about expression attribute names, see
Accessing Item Attributes in the Amazon DynamoDB Developer Guide. Keys - An array of
primary key attribute values that define specific items in the table. For each primary key,
you must provide all of the key attributes. For example, with a simple primary key, you
only need to provide the partition key value. For a composite key, you must provide both
the partition key value and the sort key value. ProjectionExpression - A string that
identifies one or more attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in the expression must be
separated by commas. If no attribute names are specified, then all attributes are returned.
If any of the requested attributes are not found, they do not appear in the result. For
more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
AttributesToGet - This is a legacy parameter. Use ProjectionExpression instead. For more
information, see AttributesToGet in the Amazon DynamoDB Developer Guide.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ReturnConsumedCapacity"`:
"""
# Required-arguments form: issue a BatchGetItem call with only `RequestItems`.
function batch_get_item(RequestItems; aws_config::AbstractAWSConfig=global_aws_config())
    # Build the minimal request payload from the required argument.
    payload = Dict{String,Any}("RequestItems" => RequestItems)
    return dynamodb(
        "BatchGetItem", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function batch_get_item(
    RequestItems,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}("RequestItems" => RequestItems)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "BatchGetItem", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
batch_write_item(request_items)
batch_write_item(request_items, params::Dict{String,<:Any})
The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single
call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up
to 25 item put or delete operations. While individual items can be up to 400 KB once
stored, it's important to note that an item's representation might be greater than 400KB
while being sent in DynamoDB's JSON format for the API call. For more details on this
distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you
perform a BatchWriteItem operation on an existing item, that item's values will be
overwritten by the operation and it will appear like it was updated. To update items, we
recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations
specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any
requested operations fail because the table's provisioned throughput is exceeded or an
internal processing failure occurs, the failed operations are returned in the
UnprocessedItems response parameter. You can investigate and optionally resend the
requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check
for unprocessed items and submit a new BatchWriteItem request with those unprocessed items
until all items have been processed. If none of the items can be processed due to
insufficient provisioned throughput on all of the tables in the request, then
BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any
unprocessed items, you should retry the batch operation on those items. However, we
strongly recommend that you use an exponential backoff algorithm. If you retry the batch
operation immediately, the underlying read or write requests can still fail due to
throttling on the individual tables. If you delay the batch operation using exponential
backoff, the individual requests in the batch are much more likely to succeed. For more
information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer
Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data,
such as from Amazon EMR, or copy data from another database into DynamoDB. In order to
improve performance with these large-scale operations, BatchWriteItem does not behave in
the same way as individual PutItem and DeleteItem calls would. For example, you cannot
specify conditions on individual put and delete requests, and BatchWriteItem does not
return deleted items in the response. If you use a programming language that supports
concurrency, you can use threads to write items in parallel. Your application must include
the necessary logic to manage the threads. With languages that don't support threading, you
must update or delete the specified items one at a time. In both situations, BatchWriteItem
performs the specified put and delete operations in parallel, giving you the power of the
thread pool approach without having to introduce complexity into your application. Parallel
processing reduces latency, but each specified put and delete request consumes the same
number of write capacity units whether it is processed in parallel or not. Delete
operations on nonexistent items consume one write capacity unit. If one or more of the
following is true, DynamoDB rejects the entire batch write operation: One or more tables
specified in the BatchWriteItem request does not exist. Primary key attributes specified
on an item in the request do not match those in the corresponding table's primary key
schema. You try to perform multiple operations on the same item in the same
BatchWriteItem request. For example, you cannot put and delete the same item in the same
BatchWriteItem request. Your request contains at least two items with identical hash
and range keys (which essentially is two put operations). There are more than 25
requests in the batch. Any individual item in a batch exceeds 400 KB. The total request
size exceeds 16 MB.
# Arguments
- `request_items`: A map of one or more table names and, for each table, a list of
operations to be performed (DeleteRequest or PutRequest). Each element in the map consists
of the following: DeleteRequest - Perform a DeleteItem operation on the specified item.
The item to be deleted is identified by a Key subelement: Key - A map of primary key
attribute values that uniquely identify the item. Each entry in this map consists of an
attribute name and an attribute value. For each primary key, you must provide all of the
key attributes. For example, with a simple primary key, you only need to provide a value
for the partition key. For a composite primary key, you must provide values for both the
partition key and the sort key. PutRequest - Perform a PutItem operation on the
specified item. The item to be put is identified by an Item subelement: Item - A map of
attributes and their values. Each entry in this map consists of an attribute name and an
attribute value. Attribute values must not be null; string and binary type attributes must
have lengths greater than zero; and set type attributes must not be empty. Requests that
contain empty values are rejected with a ValidationException exception. If you specify any
attributes that are part of an index key, then the data types for those attributes must
match those of the schema in the table's attribute definition.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ReturnConsumedCapacity"`:
- `"ReturnItemCollectionMetrics"`: Determines whether item collection metrics are returned.
If set to SIZE, the response includes statistics about item collections, if any, that were
modified during the operation are returned in the response. If set to NONE (the default),
no statistics are returned.
"""
# Required-arguments form: issue a BatchWriteItem call with only `RequestItems`.
function batch_write_item(RequestItems; aws_config::AbstractAWSConfig=global_aws_config())
    # Build the minimal request payload from the required argument.
    payload = Dict{String,Any}("RequestItems" => RequestItems)
    return dynamodb(
        "BatchWriteItem", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function batch_write_item(
    RequestItems,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}("RequestItems" => RequestItems)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "BatchWriteItem", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
create_backup(backup_name, table_name)
create_backup(backup_name, table_name, params::Dict{String,<:Any})
Creates a backup for an existing table. Each time you create an on-demand backup, the
entire table data is backed up. There is no limit to the number of on-demand backups that
can be taken. When you create an on-demand backup, a time marker of the request is
cataloged, and the backup is created asynchronously, by applying all changes until the time
of the request to the last full table snapshot. Backup requests are processed
instantaneously and become available for restore within minutes. You can call CreateBackup
at a maximum rate of 50 times per second. All backups in DynamoDB work without consuming
any provisioned throughput on the table. If you submit a backup request on 2018-12-14 at
14:25:00, the backup is guaranteed to contain all data committed to the table up to
14:24:00, and data committed after 14:26:00 will not be. The backup might contain data
modifications made between 14:24:00 and 14:26:00. On-demand backup does not support causal
consistency. Along with data, the following are also included on the backups: Global
secondary indexes (GSIs) Local secondary indexes (LSIs) Streams Provisioned read and
write capacity
# Arguments
- `backup_name`: Specified name for the backup.
- `table_name`: The name of the table.
"""
# Required-arguments form: issue a CreateBackup call with `BackupName` and `TableName`.
function create_backup(
    BackupName, TableName; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Build the minimal request payload from the required arguments.
    payload = Dict{String,Any}("BackupName" => BackupName, "TableName" => TableName)
    return dynamodb(
        "CreateBackup", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function create_backup(
    BackupName,
    TableName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}("BackupName" => BackupName, "TableName" => TableName)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "CreateBackup", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
create_global_table(global_table_name, replication_group)
create_global_table(global_table_name, replication_group, params::Dict{String,<:Any})
Creates a global table from an existing table. A global table creates a replication
relationship between two or more DynamoDB tables with the same table name in the provided
Regions. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We
recommend using Version 2019.11.21 (Current) when creating new global tables, as it
provides greater flexibility, higher efficiency and consumes less write capacity than
2017.11.29 (Legacy). To determine which version you are using, see Determining the version.
To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21
(Current), see Updating global tables. If you want to add a new replica table to a
global table, each of the following conditions must be true: The table must have the same
primary key as all of the other replicas. The table must have the same name as all of the
other replicas. The table must have DynamoDB Streams enabled, with the stream containing
both the new and the old images of the item. None of the replica tables in the global
table can contain any data. If global secondary indexes are specified, then the
following conditions must also be met: The global secondary indexes must have the same
name. The global secondary indexes must have the same hash key and sort key (if
present). If local secondary indexes are specified, then the following conditions must
also be met: The local secondary indexes must have the same name. The local
secondary indexes must have the same hash key and sort key (if present). Write
capacity settings should be set consistently across your replica tables and secondary
indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity
settings for all of your global tables replicas and indexes. If you prefer to manage
write capacity settings manually, you should provision equal replicated write capacity
units to your replica tables. You should also provision equal replicated write capacity
units to matching secondary indexes across your global table.
# Arguments
- `global_table_name`: The global table name.
- `replication_group`: The Regions where the global table needs to be created.
"""
# Required-arguments form: issue a CreateGlobalTable call with the table name and
# its replication group.
function create_global_table(
    GlobalTableName, ReplicationGroup; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Build the minimal request payload from the required arguments.
    payload = Dict{String,Any}(
        "GlobalTableName" => GlobalTableName, "ReplicationGroup" => ReplicationGroup
    )
    return dynamodb(
        "CreateGlobalTable",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function create_global_table(
    GlobalTableName,
    ReplicationGroup,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}(
        "GlobalTableName" => GlobalTableName, "ReplicationGroup" => ReplicationGroup
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "CreateGlobalTable",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
create_table(attribute_definitions, key_schema, table_name)
create_table(attribute_definitions, key_schema, table_name, params::Dict{String,<:Any})
The CreateTable operation adds a new table to your account. In an Amazon Web Services
account, table names must be unique within each Region. That is, you can have two tables
with same name if you create the tables in different Regions. CreateTable is an
asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns
a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the
TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.
You can optionally define secondary indexes on the new table, as part of the CreateTable
operation. If you want to create multiple tables with secondary indexes on them, you must
create the tables sequentially. Only one table with secondary indexes can be in the
CREATING state at any given time. You can use the DescribeTable action to check the table
status.
# Arguments
- `attribute_definitions`: An array of attributes that describe the key schema for the
table and indexes.
- `key_schema`: Specifies the attributes that make up the primary key for a table or an
index. The attributes in KeySchema must also be defined in the AttributeDefinitions array.
For more information, see Data Model in the Amazon DynamoDB Developer Guide. Each
KeySchemaElement in the array is composed of: AttributeName - The name of this key
attribute. KeyType - The role that the key attribute will assume: HASH - partition
key RANGE - sort key The partition key of an item is also known as its hash
attribute. The term \"hash attribute\" derives from the DynamoDB usage of an internal hash
function to evenly distribute data items across partitions, based on their partition key
values. The sort key of an item is also known as its range attribute. The term \"range
attribute\" derives from the way DynamoDB stores items with the same partition key
physically close together, in sorted order by the sort key value. For a simple primary key
(partition key), you must provide exactly one element with a KeyType of HASH. For a
composite primary key (partition key and sort key), you must provide exactly two elements,
in this order: The first element must have a KeyType of HASH, and the second element must
have a KeyType of RANGE. For more information, see Working with Tables in the Amazon
DynamoDB Developer Guide.
- `table_name`: The name of the table to create.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"BillingMode"`: Controls how you are charged for read and write throughput and how you
manage capacity. This setting can be changed later. PROVISIONED - We recommend using
PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned
Mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads.
PAY_PER_REQUEST sets the billing mode to On-Demand Mode.
- `"DeletionProtectionEnabled"`: Indicates whether deletion protection is to be enabled
(true) or disabled (false) on the table.
- `"GlobalSecondaryIndexes"`: One or more global secondary indexes (the maximum is 20) to
be created on the table. Each global secondary index in the array includes the following:
IndexName - The name of the global secondary index. Must be unique only for this table.
KeySchema - Specifies the key schema for the global secondary index. Projection -
Specifies attributes that are copied (projected) from the table into the index. These are
in addition to the primary key attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of: ProjectionType - One of the
following: KEYS_ONLY - Only the index and primary keys are projected into the index.
INCLUDE - Only the specified table attributes are projected into the index. The list of
projected attributes is in NonKeyAttributes. ALL - All of the table attributes are
projected into the index. NonKeyAttributes - A list of one or more non-key attribute
names that are projected into the secondary index. The total count of attributes provided
in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If
you project the same attribute into two different indexes, this counts as two distinct
attributes when determining the total. ProvisionedThroughput - The provisioned
throughput settings for the global secondary index, consisting of read and write capacity
units.
- `"LocalSecondaryIndexes"`: One or more local secondary indexes (the maximum is 5) to be
created on the table. Each index is scoped to a given partition key value. There is a 10 GB
size limit per partition key value; otherwise, the size of a local secondary index is
unconstrained. Each local secondary index in the array includes the following: IndexName
- The name of the local secondary index. Must be unique only for this table. KeySchema
- Specifies the key schema for the local secondary index. The key schema must begin with
the same partition key as the table. Projection - Specifies attributes that are copied
(projected) from the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically projected. Each attribute
specification is composed of: ProjectionType - One of the following: KEYS_ONLY - Only
the index and primary keys are projected into the index. INCLUDE - Only the specified
table attributes are projected into the index. The list of projected attributes is in
NonKeyAttributes. ALL - All of the table attributes are projected into the index.
NonKeyAttributes - A list of one or more non-key attribute names that are projected into
the secondary index. The total count of attributes provided in NonKeyAttributes, summed
across all of the secondary indexes, must not exceed 100. If you project the same attribute
into two different indexes, this counts as two distinct attributes when determining the
total.
- `"ProvisionedThroughput"`: Represents the provisioned throughput settings for a specified
table or index. The settings can be modified using the UpdateTable operation. If you set
BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as
PAY_PER_REQUEST, you cannot specify this property. For current minimum and maximum
provisioned throughput values, see Service, Account, and Table Quotas in the Amazon
DynamoDB Developer Guide.
- `"SSESpecification"`: Represents the settings used to enable server-side encryption.
- `"StreamSpecification"`: The settings for DynamoDB Streams on the table. These settings
consist of: StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true)
or disabled (false). StreamViewType - When an item in the table is modified,
StreamViewType determines what information is written to the table's stream. Valid values
for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified item are
written to the stream. NEW_IMAGE - The entire item, as it appears after it was modified,
is written to the stream. OLD_IMAGE - The entire item, as it appeared before it was
modified, is written to the stream. NEW_AND_OLD_IMAGES - Both the new and the old item
images of the item are written to the stream.
- `"TableClass"`: The table class of the new table. Valid values are STANDARD and
STANDARD_INFREQUENT_ACCESS.
- `"Tags"`: A list of key-value pairs to label the table. For more information, see Tagging
for DynamoDB.
"""
# Required-arguments form: issue a CreateTable call with attribute definitions,
# key schema, and table name.
function create_table(
    AttributeDefinitions,
    KeySchema,
    TableName;
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Build the minimal request payload from the required arguments.
    payload = Dict{String,Any}(
        "AttributeDefinitions" => AttributeDefinitions,
        "KeySchema" => KeySchema,
        "TableName" => TableName,
    )
    return dynamodb(
        "CreateTable", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
# Extended form: caller-supplied optional `params` are merged over the required
# arguments (`_merge` resolves any key collisions) before the call is issued.
function create_table(
    AttributeDefinitions,
    KeySchema,
    TableName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    required = Dict{String,Any}(
        "AttributeDefinitions" => AttributeDefinitions,
        "KeySchema" => KeySchema,
        "TableName" => TableName,
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return dynamodb(
        "CreateTable", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
delete_backup(backup_arn)
delete_backup(backup_arn, params::Dict{String,<:Any})
Deletes an existing backup of a table. You can call DeleteBackup at a maximum rate of 10
times per second.
# Arguments
- `backup_arn`: The ARN associated with the backup.
"""
function delete_backup(BackupArn; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DeleteBackup",
Dict{String,Any}("BackupArn" => BackupArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_backup(
BackupArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DeleteBackup",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("BackupArn" => BackupArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_item(key, table_name)
delete_item(key, table_name, params::Dict{String,<:Any})
Deletes a single item in a table by primary key. You can perform a conditional delete
operation that deletes the item if it exists, or if it has an expected attribute value. In
addition to deleting an item, you can also return the item's attribute values in the same
operation, using the ReturnValues parameter. Unless you specify conditions, the DeleteItem
is an idempotent operation; running it multiple times on the same item or attribute does
not result in an error response. Conditional deletes are useful for deleting items only if
specific conditions are met. If those conditions are met, DynamoDB performs the delete.
Otherwise, the item is not deleted.
# Arguments
- `key`: A map of attribute names to AttributeValue objects, representing the primary key
of the item to delete. For the primary key, you must provide all of the key attributes. For
example, with a simple primary key, you only need to provide a value for the partition key.
For a composite primary key, you must provide values for both the partition key and the
sort key.
- `table_name`: The name of the table from which to delete the item.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ConditionExpression"`: A condition that must be satisfied in order for a conditional
DeleteItem to succeed. An expression can contain any of the following: Functions:
attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
These function names are case-sensitive. Comparison operators: = | <> | < | >
| <= | >= | BETWEEN | IN Logical operators: AND | OR | NOT For more
information about condition expressions, see Condition Expressions in the Amazon DynamoDB
Developer Guide.
- `"ConditionalOperator"`: This is a legacy parameter. Use ConditionExpression instead. For
more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide.
- `"Expected"`: This is a legacy parameter. Use ConditionExpression instead. For more
information, see Expected in the Amazon DynamoDB Developer Guide.
- `"ExpressionAttributeNames"`: One or more substitution tokens for attribute names in an
expression. The following are some use cases for using ExpressionAttributeNames: To
access an attribute whose name conflicts with a DynamoDB reserved word. To create a
placeholder for repeating occurrences of an attribute name in an expression. To prevent
special characters in an attribute name from being misinterpreted in an expression. Use
the # character in an expression to dereference an attribute name. For example, consider
the following attribute name: Percentile The name of this attribute conflicts with a
reserved word, so it cannot be used directly in an expression. (For the complete list of
reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around
this, you could specify the following for ExpressionAttributeNames:
{\"#P\":\"Percentile\"} You could then use this substitution in an expression, as in
this example: #P = :val Tokens that begin with the : character are expression
attribute values, which are placeholders for the actual value at runtime. For more
information on expression attribute names, see Specifying Item Attributes in the Amazon
DynamoDB Developer Guide.
- `"ExpressionAttributeValues"`: One or more values that can be substituted in an
expression. Use the : (colon) character in an expression to dereference an attribute value.
For example, suppose that you wanted to check whether the value of the ProductStatus
attribute was one of the following: Available | Backordered | Discontinued You would
first need to specify ExpressionAttributeValues as follows: {
\":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"},
\":disc\":{\"S\":\"Discontinued\"} } You could then use these values in an expression,
such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression
attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
- `"ReturnConsumedCapacity"`:
- `"ReturnItemCollectionMetrics"`: Determines whether item collection metrics are returned.
If set to SIZE, the response includes statistics about item collections, if any, that were
modified during the operation are returned in the response. If set to NONE (the default),
no statistics are returned.
- `"ReturnValues"`: Use ReturnValues if you want to get the item attributes as they
appeared before they were deleted. For DeleteItem, the valid values are: NONE - If
ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This
setting is the default for ReturnValues.) ALL_OLD - The content of the old item is
returned. There is no additional cost associated with requesting a return value aside
from the small network and processing overhead of receiving a larger response. No read
capacity units are consumed. The ReturnValues parameter is used by several DynamoDB
operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD.
"""
function delete_item(Key, TableName; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DeleteItem",
Dict{String,Any}("Key" => Key, "TableName" => TableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_item(
Key,
TableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DeleteItem",
Dict{String,Any}(
mergewith(
_merge, Dict{String,Any}("Key" => Key, "TableName" => TableName), params
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_table(table_name)
delete_table(table_name, params::Dict{String,<:Any})
The DeleteTable operation deletes a table and all of its items. After a DeleteTable
request, the specified table is in the DELETING state until DynamoDB completes the
deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING
or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table
does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the
DELETING state, no error is returned. This operation only applies to Version 2019.11.21
(Current) of global tables. DynamoDB might continue to accept data read and write
operations, such as GetItem and PutItem, on a table in the DELETING state until the table
deletion is complete. When you delete a table, any indexes on that table are also deleted.
If you have DynamoDB Streams enabled on the table, then the corresponding stream on that
table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.
Use the DescribeTable action to check the status of the table.
# Arguments
- `table_name`: The name of the table to delete.
"""
function delete_table(TableName; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DeleteTable",
Dict{String,Any}("TableName" => TableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_table(
TableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DeleteTable",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("TableName" => TableName), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_backup(backup_arn)
describe_backup(backup_arn, params::Dict{String,<:Any})
Describes an existing backup of a table. You can call DescribeBackup at a maximum rate of
10 times per second.
# Arguments
- `backup_arn`: The Amazon Resource Name (ARN) associated with the backup.
"""
function describe_backup(BackupArn; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DescribeBackup",
Dict{String,Any}("BackupArn" => BackupArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_backup(
BackupArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeBackup",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("BackupArn" => BackupArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_continuous_backups(table_name)
describe_continuous_backups(table_name, params::Dict{String,<:Any})
Checks the status of continuous backups and point in time recovery on the specified table.
Continuous backups are ENABLED on all tables at table creation. If point in time recovery
is enabled, PointInTimeRecoveryStatus will be set to ENABLED. After continuous backups and
point in time recovery are enabled, you can restore to any point in time within
EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is
typically 5 minutes before the current time. You can restore your table to any point in
time during the last 35 days. You can call DescribeContinuousBackups at a maximum rate of
10 times per second.
# Arguments
- `table_name`: Name of the table for which the customer wants to check the continuous
backups and point in time recovery settings.
"""
function describe_continuous_backups(
TableName; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeContinuousBackups",
Dict{String,Any}("TableName" => TableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_continuous_backups(
TableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeContinuousBackups",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("TableName" => TableName), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_contributor_insights(table_name)
describe_contributor_insights(table_name, params::Dict{String,<:Any})
Returns information about contributor insights for a given table or global secondary index.
# Arguments
- `table_name`: The name of the table to describe.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"IndexName"`: The name of the global secondary index to describe, if applicable.
"""
function describe_contributor_insights(
TableName; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeContributorInsights",
Dict{String,Any}("TableName" => TableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_contributor_insights(
TableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeContributorInsights",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("TableName" => TableName), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_endpoints()
describe_endpoints(params::Dict{String,<:Any})
Returns the regional endpoint information. This action must be included in your VPC
endpoint policies, or access to the DescribeEndpoints API will be denied. For more
information on policy permissions, please see Internetwork traffic privacy.
"""
function describe_endpoints(; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DescribeEndpoints"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
function describe_endpoints(
params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeEndpoints", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
"""
describe_export(export_arn)
describe_export(export_arn, params::Dict{String,<:Any})
Describes an existing table export.
# Arguments
- `export_arn`: The Amazon Resource Name (ARN) associated with the export.
"""
function describe_export(ExportArn; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DescribeExport",
Dict{String,Any}("ExportArn" => ExportArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_export(
ExportArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeExport",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("ExportArn" => ExportArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_global_table(global_table_name)
describe_global_table(global_table_name, params::Dict{String,<:Any})
Returns information about the specified global table. This operation only applies to
Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21
(Current) when creating new global tables, as it provides greater flexibility, higher
efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which
version you are using, see Determining the version. To update existing global tables from
version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.
# Arguments
- `global_table_name`: The name of the global table.
"""
function describe_global_table(
GlobalTableName; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeGlobalTable",
Dict{String,Any}("GlobalTableName" => GlobalTableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_global_table(
GlobalTableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeGlobalTable",
Dict{String,Any}(
mergewith(
_merge, Dict{String,Any}("GlobalTableName" => GlobalTableName), params
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_global_table_settings(global_table_name)
describe_global_table_settings(global_table_name, params::Dict{String,<:Any})
Describes Region-specific settings for a global table. This operation only applies to
Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21
(Current) when creating new global tables, as it provides greater flexibility, higher
efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which
version you are using, see Determining the version. To update existing global tables from
version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.
# Arguments
- `global_table_name`: The name of the global table to describe.
"""
function describe_global_table_settings(
GlobalTableName; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeGlobalTableSettings",
Dict{String,Any}("GlobalTableName" => GlobalTableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_global_table_settings(
GlobalTableName,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeGlobalTableSettings",
Dict{String,Any}(
mergewith(
_merge, Dict{String,Any}("GlobalTableName" => GlobalTableName), params
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_import(import_arn)
describe_import(import_arn, params::Dict{String,<:Any})
Represents the properties of the import.
# Arguments
- `import_arn`: The Amazon Resource Name (ARN) associated with the table you're importing
to.
"""
function describe_import(ImportArn; aws_config::AbstractAWSConfig=global_aws_config())
return dynamodb(
"DescribeImport",
Dict{String,Any}("ImportArn" => ImportArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_import(
ImportArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return dynamodb(
"DescribeImport",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("ImportArn" => ImportArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
describe_kinesis_streaming_destination(table_name)
describe_kinesis_streaming_destination(table_name, params::Dict{String,<:Any})
Returns information about the status of Kinesis streaming.
# Arguments
- `table_name`: The name of the table being described.
"""
function describe_kinesis_streaming_destination(
TableName; aws_config::AbstractAWSConfig=global_aws_config()
)
return dynamodb(
"DescribeKinesisStreamingDestination",
Dict{String,Any}("TableName" => TableName);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function describe_kinesis_streaming_destination(