This repository has been archived by the owner on Aug 29, 2018. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 516
/
mcollective_application_container_proxy.rb
executable file
·3817 lines (3511 loc) · 124 KB
/
mcollective_application_container_proxy.rb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
require 'mcollective'
require 'open-uri'
require 'timeout'
include MCollective::RPC
#
# The OpenShift module is a namespace for all OpenShift related objects and
# methods.
#
module OpenShift
# Implements the broker-node communications This class the state
# of a node and a set of RPC functions to the node. It also has a
# set of just plain functions which live here because they relate
# to broker/node communications.
#
class MCollectiveApplicationContainerProxy < OpenShift::ApplicationContainerProxy
# the "cartridge" for Node operation messages to "cartridge_do"
@@C_CONTROLLER = 'openshift-origin-node'
# A Node ID string
attr_accessor :id
# A District ID string
attr_accessor :district
# <<constructor>>
#
# Create an app descriptor/handle for remote controls
#
# INPUTS:
# * id: string - a unique app identifier
# * district: <type> - a classifier for app placement
#
def initialize(id, district=nil)
  # Node identity and (optional) district placement handle for this proxy.
  @id       = id
  @district = district
  # Debug printing starts out not disabled.
  @disable_print_debug = false
end
# <<factory method>>
#
# Find a node which fulfills app requirements. Implements the superclass
# find_all_available() method
#
# INPUTS:
# * node_profile: string identifier for a set of node characteristics
# * district_uuid: identifier for the district
# * least_preferred_servers: list of server identities that are least preferred. These could be the ones that won't allow the gear group to be highly available
# * gear_exists_in_district: true if the gear belongs to a node in the same district
# * required_uid: the uid that is required to be available in the destination district
#
# RETURNS:
# * a list of available nodes
#
# RAISES:
# * OpenShift::NodeUnavailableException
#
# NOTES:
# * a class method on Node?
# * Uses Rails.configuration.msg_broker
# * Uses District
# * Calls rpc_find_all_available
#
# VALIDATIONS:
# * If gear_exists_in_district is true, then required_uid cannot be set and has to be nil
# * If gear_exists_in_district is true, then district_uuid must be passed and cannot be nil
#
# Find nodes that satisfy the given placement options.
#
# First pass uses whatever discovery data is already cached; when that
# yields nothing, a forced rediscovery is attempted once before raising
# OpenShift::NodeUnavailableException (code 140).
#
# (Fix: removed the unused local `district`, which was assigned nil and
# never read.)
def self.find_all_available_impl(opts=nil)
  opts ||= {}
  server_infos = rpc_find_all_available(opts)
  if server_infos.blank?
    opts[:force_rediscovery] = true
    server_infos = rpc_find_all_available(opts)
  end
  raise OpenShift::NodeUnavailableException.new("No nodes available", 140) if server_infos.blank?
  server_infos
end
# <<factory method>>
#
# Find a single node. Implements superclass find_one() method.
#
# INPUTS:
# * node_profile: characteristics for node filtering
#
# RETURNS:
# * One server info
#
# NOTES:
# * Uses rpc_find_one() method
# Locate a single server matching the profile/platform, trying the fast
# single-node RPC first and falling back to the full availability search.
def self.find_one_impl(node_profile=nil, platform='linux')
  current_server = rpc_find_one(node_profile, platform)
  unless current_server
    server_infos = find_all_available_impl({:node_profile => node_profile, :platform => platform})
    Rails.logger.debug "DEBUG: find_one_impl: Returning #{server_infos[0][0]} from a list of #{server_infos.length} servers"
    # the server of the first server_info tuple
    return server_infos[0][0]
  end
  Rails.logger.debug "DEBUG: find_one_impl: current_server: #{current_server}"
  current_server
end
# <<orphan>>
# <<class method>>
#
# Return a list of blacklisted namespaces and app names.
# Implements superclass get_blacklisted() method.
#
# INPUTS:
# * none
#
# RETURNS:
# * empty list
#
# NOTES:
# * Is this really a function of the broker
#
def self.get_blacklisted_in_impl
  # This proxy maintains no blacklist; always report an empty one.
  blacklist = []
  blacklist
end
# <<class method>>
#
# <<query>>
#
# Query all nodes for all available cartridges
#
# INPUTS:
# * none
#
# RETURNS:
# * An array of OpenShift::Cartridge objects
#
# NOTES:
# * uses execute_direct and @@C_CONTROLLER
#
# Ask the node for its cartridge list (machine-readable, with descriptors)
# and materialize each YAML descriptor as an OpenShift::Cartridge.
def get_available_cartridges
  args = { '--porcelain' => true, '--with-descriptors' => true }
  raw_reply = execute_direct(@@C_CONTROLLER, 'cartridge-list', args, false)
  parsed = parse_result(raw_reply)
  cart_entries = JSON.parse(parsed.resultIO.string)
  cart_entries.map { |entry| OpenShift::Cartridge.new(YAML.load(entry)) }
end
# <<object method>>
#
# <<attribute getter>>
#
# Request the disk quotas from a Gear on a node
#
# RETURNS:
# * an array with following information:
#
# [Filesystem, blocks_used, blocks_soft_limit, blocks_hard_limit,
# inodes_used, inodes_soft_limit, inodes_hard_limit]
#
# RAISES:
# * OpenShift::NodeException
#
# NOTES
# * Uses execute_direct
# * A method on the gear object
#
# Fetch the gear's disk quota report from the node.
# Raises OpenShift::NodeException (143) when no usable reply arrives or
# when the node command exits non-zero.
def get_quota(gear)
  args = Hash.new
  args['--uuid'] = gear.uuid
  reply = execute_direct(@@C_CONTROLLER, 'get-quota', args, false)
  node_error = "Node execution failure (error getting result from node)."
  # Guard: an empty reply or a malformed result structure is a node failure.
  raise OpenShift::NodeException.new(node_error, 143) unless reply and reply.length > 0
  mcoll_result = reply[0]
  unless mcoll_result && (defined? mcoll_result.results) && !mcoll_result.results[:data].nil?
    raise OpenShift::NodeException.new(node_error, 143)
  end
  data = mcoll_result.results[:data]
  output = data[:output]
  raise OpenShift::NodeException.new("Failed to get quota for user: #{output}", 143) unless data[:exitcode] == 0
  output
end
# <<object method>>
#
# <<attribute setter>>
#
# Set blocks hard limit and inodes hard limit for uuid.
# Effects disk quotas on Gear on Node
#
# INPUT:
# * gear: A Gear object
# * storage_in_gb: integer
# * inodes: integer
#
# RAISES:
# * OpenShift::NodeException
#
# Apply block/inode hard limits to the gear's quota on the node.
# Raises OpenShift::NodeException (143) on a missing/malformed reply or a
# non-zero exit from the node command.
def set_quota(gear, storage_in_gb, inodes)
  args = Hash.new
  args['--uuid'] = gear.uuid
  # quota command acts on 1K blocks
  args['--blocks'] = Integer(storage_in_gb * 1024 * 1024)
  args['--inodes'] = inodes unless inodes.nil?
  reply = execute_direct(@@C_CONTROLLER, 'set-quota', args, false)
  node_error = "Node execution failure (error getting result from node)."
  raise OpenShift::NodeException.new(node_error, 143) unless reply and reply.length > 0
  mcoll_result = reply[0]
  unless mcoll_result && (defined? mcoll_result.results) && !mcoll_result.results[:data].nil?
    raise OpenShift::NodeException.new(node_error, 143)
  end
  data = mcoll_result.results[:data]
  output = data[:output]
  raise OpenShift::NodeException.new("Failed to set quota for user: #{output}", 143) unless data[:exitcode] == 0
end
# Reserve a UID within a district or service
#
# UIDs must be unique in a district to allow migration without requiring
# reassigning Username (Gear UUID) and Unix User UID on migrate
# Perhaps a query on the nodes for "next UID"?
#
# INPUTS:
# * district_uuid: String: District handle or identifier
# * preferred_uid: Integer
#
# RAISES:
# * OpenShift::OOException
#
# NOTES:
# * a method on District class of the node.
#
def reserve_uid(district_uuid=nil, preferred_uid=nil)
  reserved_uid = nil
  # No-op (returns nil) when districting is disabled in the broker config.
  if Rails.configuration.msg_broker[:districts][:enabled]
    # Resolve the district: the proxy's own @district handle wins, then the
    # caller-supplied uuid, then a Mongo lookup by this node's server name,
    # and finally the node's own district_uuid fact.
    if @district
      district_uuid = @district.uuid
    elsif !district_uuid
      if @id
        begin
          district = District.find_by({"servers.name" => @id})
          district_uuid = district.uuid
        rescue Mongoid::Errors::DocumentNotFound
          # This node is not registered in any district.
          district_uuid = 'NONE'
        end
      else
        district_uuid = get_district_uuid
      end
    end
    if district_uuid && district_uuid != 'NONE'
      reserved_uid = District::reserve_uid(district_uuid, preferred_uid)
      raise OpenShift::OOException.new("uid could not be reserved in target district '#{district_uuid}'. Please ensure the target district has available capacity or does not contain a conflicting uid.") unless reserved_uid
    end
  end
  # nil when districting is disabled or no district could be resolved.
  reserved_uid
end
# Release a UID reservation within a District
#
# UIDs must be unique in a district to allow migration without requiring
# reassigning Username (Gear UUID) and Unix User UID on migrate
# Perhaps a query on the nodes for "next UID"?
#
# INPUTS:
# * uid: Integer - the UID to unreserve within the district
# * district_uuid: String - district handle or identifier
#
# NOTES:
# * method on the District object.
#
def unreserve_uid(uid, district_uuid=nil)
  # No-op when districting is disabled in the broker config.
  if Rails.configuration.msg_broker[:districts][:enabled]
    # Resolve the district the same way reserve_uid does: the proxy's
    # @district handle, then the explicit argument, then a Mongo lookup by
    # this node's server name, then the node's district_uuid fact.
    if @district
      district_uuid = @district.uuid
    elsif !district_uuid
      if @id
        begin
          district = District.find_by({"servers.name" => @id})
          district_uuid = district.uuid
        rescue Mongoid::Errors::DocumentNotFound
          # This node is not registered in any district.
          district_uuid = 'NONE'
        end
      else
        district_uuid = get_district_uuid
      end
    end
    if district_uuid && district_uuid != 'NONE'
      #cleanup
      District::unreserve_uid(district_uuid, uid)
    end
  end
end
# Assemble the command-line argument hash common to every gear-level node
# operation: app identity, container identity, namespace, optional quota
# limits, optional app-key generation, the gear UID (when assigned), and
# the current request id for log correlation.
def build_base_gear_args(gear, quota_blocks=nil, quota_files=nil, sshkey_required=false)
  application = gear.application
  args = {
    '--with-app-uuid'       => application.uuid,
    '--with-app-name'       => application.name,
    '--with-expose-ports'   => application.scalable,
    '--with-container-uuid' => gear.uuid,
    '--with-container-name' => gear.name
  }
  # Optional knobs are only added when supplied, preserving key order.
  args['--with-quota-blocks']     = quota_blocks if quota_blocks
  args['--with-quota-files']      = quota_files if quota_files
  args['--with-generate-app-key'] = sshkey_required if sshkey_required
  args['--with-namespace']        = application.domain_namespace
  args['--with-uid']              = gear.uid if gear.uid
  args['--with-request-id']       = Thread.current[:user_action_log_uuid]
  args
end
# Merge the component/cartridge identification flags into existing_args
# (mutated in place and returned).
def build_base_component_args(component, existing_args={})
  cartridge = component.cartridge
  existing_args['--component-name']        = component.component_name
  existing_args['--cart-name']             = cartridge.send(:short_name)
  existing_args['--with-software-version'] = cartridge.version
  existing_args['--cartridge-vendor']      = cartridge.cartridge_vendor
  existing_args
end
#
# <<instance method>>
#
# Execute the 'app-create' script on a node.
#
# INPUTS:
# * gear: a Gear object
# * quota_blocks: Integer - max file space in blocks
# * quota_files: Integer - max files count
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * should raise an exception on fail to cause revert rather than in-line
# * causes oo-app-create to execute on a node
#
# Constructs a shell command line to be executed by the MCollective agent
# on the node.
#
def create(gear, quota_blocks=nil, quota_files=nil, sshkey_required=false, initial_deployment_dir_required=true)
  app = gear.application
  result = nil
  # Retry gear creation up to 10 times: a create can fail because the
  # reserved UID is already taken on the node, in which case a fresh UID
  # is reserved and the create is attempted again.
  (1..10).each do |i|
    args = build_base_gear_args(gear, quota_blocks, quota_files, sshkey_required)
    # set the secret token for new gear creations
    # log an error if the application does not have its secret_token set
    if app.secret_token.present?
      args['--with-secret-token'] = app.secret_token
    else
      Rails.logger.error "The application #{app.name} (#{app._id.to_s}) does not have its secret token set"
    end
    args['--with-initial-deployment-dir'] = initial_deployment_dir_required
    mcoll_reply = execute_direct(@@C_CONTROLLER, 'app-create', args)
    begin
      result = parse_result(mcoll_reply, gear)
    rescue OpenShift::OOException => ooex
      # destroy the gear in case of failures
      # the UID will be unreserved up as part of rollback
      destroy(gear, true)
      # raise the exception if this is the last retry
      raise ooex if i == 10
      result = ooex.resultIO
      # Exit code 129 means the uid/gid was already taken on the node:
      # reserve a new UID, persist it, and retry. Any other failure is fatal.
      if result != nil && result.exitcode == 129 && has_uid_or_gid?(gear.uid) # Code to indicate uid already taken
        gear.uid = reserve_uid
        app.save!
      else
        raise ooex
      end
    else
      # Creation succeeded; stop retrying.
      break
    end
  end
  result
end
#
# Remove a gear from a node
# Optionally release a reserved UID from the District.
#
# INPUTS:
# * gear: a Gear object
# * keep_uid: boolean
# * is_group_rollback: boolean - flag for optional archive on rollback
# * uid: Integer: reserved UID
# * skip_hooks: boolean
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
#
def destroy(gear, keep_uid=false, is_group_rollback=false, uid=nil, skip_hooks=false)
  args = build_base_gear_args(gear)
  args['--skip-hooks'] = true if skip_hooks
  args['--is-group-rollback'] = true if is_group_rollback
  begin
    result = execute_direct(@@C_CONTROLLER, 'app-destroy', args)
    result_io = parse_result(result, gear)
  rescue Exception=>e
    # NOTE(review): rescuing Exception (not StandardError) is normally an
    # anti-pattern; here it looks deliberate so that a failed destroy is
    # tolerated when the gear is already gone from the node — confirm
    # before narrowing.
    # `has_gear? != false` re-raises unless the node definitively reports
    # the gear absent (a nil/unknown answer still re-raises).
    raise e if has_gear?(gear.uuid)!=false
    result_io = ResultIO.new
  end
  # Release the reserved district UID unless the caller asked to keep it.
  uid = gear.uid unless uid
  if uid && !keep_uid
    unreserve_uid(uid)
  end
  return result_io
end
# Add an SSL certificate to a gear on the remote node and associate it with
# a server name.
# See node/bin/oo-ssl-cert-add
#
# INPUTS:
# * gear: a Gear object
# * priv_key: String - the private key value
# * server_alias: String - the name of the server which will offer this key
# * passphrase: String - the private key passphrase or '' if its unencrypted.
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * calls node script oo-ssl-cert-add
#
# Install an SSL certificate (plus private key and passphrase) on the gear
# for the given server alias.
def add_ssl_cert(gear, ssl_cert, priv_key, server_alias, passphrase='')
  args = build_base_gear_args(gear)
  args.merge!(
    '--with-ssl-cert'   => ssl_cert,
    '--with-priv-key'   => priv_key,
    '--with-alias-name' => server_alias,
    '--with-passphrase' => passphrase
  )
  reply = execute_direct(@@C_CONTROLLER, 'ssl-cert-add', args)
  parse_result(reply)
end
# remove an SSL certificate to a gear on the remote node.
# See node/bin/oo-ssl-cert-remove
#
# INPUTS:
# * gear: a Gear object
# * server_alias: String - the name of the server which will offer this key
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * calls node script oo-ssl-cert-remove
#
# Delete the SSL certificate bound to server_alias from the gear.
def remove_ssl_cert(gear, server_alias)
  args = build_base_gear_args(gear).merge('--with-alias-name' => server_alias)
  reply = execute_direct(@@C_CONTROLLER, 'ssl-cert-remove', args)
  parse_result(reply)
end
# fetches all SSL certificates from a gear on the remote node.
#
# INPUTS:
# * gear: a Gear object
#
# RETURNS: an array of arrays
# * each array consists of three elements
# - the SSL certificate
# - the private key
# - the alias
#
# Fetch every SSL cert on the gear; the node returns JSON which is parsed
# into an array of [certificate, private key, alias] triples.
def get_all_ssl_certs(gear)
  reply = execute_direct(@@C_CONTROLLER, 'ssl-certs', build_base_gear_args(gear))
  parsed = parse_result(reply)
  JSON.parse(parsed.resultIO.string)
end
#
# Add an environment variable on gear on a remote node.
# Calls oo-env-var-add on the remote node
#
# INPUTS:
# * gear: a Gear object
# * key: String - environment variable name
# * value: String - environment variable value
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * calls oo-env-var-add on the node
#
# Set an environment variable (key=value) on the gear via env-var-add.
def add_env_var(gear, key, value)
  args = build_base_gear_args(gear)
  args.merge!('--with-key' => key, '--with-value' => value)
  reply = execute_direct(@@C_CONTROLLER, 'env-var-add', args)
  parse_result(reply, gear)
end
#
# Remove an environment variable on gear on a remote node
#
# INPUTS:
# * gear: a Gear object
# * key: String - environment variable name
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * calls oo-env-var-remove on the node
#
# Unset the named environment variable on the gear via env-var-remove.
def remove_env_var(gear, key)
  args = build_base_gear_args(gear).merge('--with-key' => key)
  reply = execute_direct(@@C_CONTROLLER, 'env-var-remove', args)
  parse_result(reply, gear)
end
#
# Add a broker auth key. The broker auth key allows an application
# to request scaling and other actions from the broker.
#
# INPUTS:
# * gear: a Gear object
# * iv: String - SSL initialization vector
# * token: String - a broker auth key
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * calls oo-broker-auth-key-add
#
# Install broker auth material (iv + token) on the gear so the application
# can call back to the broker (e.g. for scaling).
def add_broker_auth_key(gear, iv, token)
  args = build_base_gear_args(gear)
  args.merge!('--with-iv' => iv, '--with-token' => token)
  reply = execute_direct(@@C_CONTROLLER, 'broker-auth-key-add', args)
  parse_result(reply, gear)
end
#
# Remove a broker auth key. The broker auth key allows an application
# to request scaling and other actions from the broker.
#
# INPUTS:
# * gear: a Gear object
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * calls oo-broker-auth-key-remove
#
# Remove the broker auth key material from the gear.
def remove_broker_auth_key(gear)
  reply = execute_direct(@@C_CONTROLLER, 'broker-auth-key-remove', build_base_gear_args(gear))
  parse_result(reply, gear)
end
#
# Get the operating state of a gear
#
# INPUTS:
# * gear: Gear Object
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses execute_direct
# * calls oo-app-state-show
# * Should be a method on Gear object
#
# Query the node for the gear's current operating state.
def show_state(gear)
  reply = execute_direct(@@C_CONTROLLER, 'app-state-show', build_base_gear_args(gear))
  parse_result(reply, gear)
end
# <<accessor>>
# Get the public hostname of a Node
#
# INPUTS:
# none
#
# RETURNS:
# * String: the public hostname of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_public_hostname
  # Live fact lookup on this node via direct MCollective RPC.
  fact_value = rpc_get_fact_direct('public_hostname')
  fact_value
end
# <<accessor>>
# Get the "capacity" of a node
#
# INPUTS:
# none
#
# RETURNS:
# * Float: the "capacity" of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_capacity
  # Capacity fact coerced to a Float (to_f yields 0.0 for nil/non-numeric).
  raw_fact = rpc_get_fact_direct('capacity')
  raw_fact.to_f
end
# <<accessor>>
# Get the "active capacity" of a node
#
# INPUTS:
# none
#
# RETURNS:
# * Float: the "active capacity" of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_active_capacity
  # Active-capacity fact coerced to a Float.
  raw_fact = rpc_get_fact_direct('active_capacity')
  raw_fact.to_f
end
# <<accessor>>
# Get the district UUID (membership handle) of a node
#
# INPUTS:
# none
#
# RETURNS:
# * String: the UUID of a node's district
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_district_uuid
  # The node's own notion of its district membership.
  fact_value = rpc_get_fact_direct('district_uuid')
  fact_value
end
# <<accessor>>
# Get the platform attribute of a node
#
# INPUTS:
# none
#
# RETURNS:
# * String: the platform of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_platform
  # Platform is reported via the node's 'kernel' fact.
  fact_value = rpc_get_fact_direct('kernel')
  fact_value
end
# <<accessor>>
# Get the IP address of a Node
# i.e. the IP that PUBLIC_NIC is using
#
# INPUTS:
# none
#
# RETURNS:
# * String: the IP address of a node's PUBLIC_NIC
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_ip_address
  # IP the node reports for its host (host_ip fact).
  fact_value = rpc_get_fact_direct('host_ip')
  fact_value
end
# <<accessor>>
# Get the public IP address of a Node
# as configured in PUBLIC_IP
#
# INPUTS:
# none
#
# RETURNS:
# * String: the public IP address of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_public_ip_address
  # Public-facing IP from the node's public_ip fact.
  fact_value = rpc_get_fact_direct('public_ip')
  fact_value
end
# <<accessor>>
# Get the "node profile" of a Node
#
# INPUTS:
# none
#
# RETURNS:
# * String: the "node profile" of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_node_profile
  # Gear-size/profile classifier for this node.
  fact_value = rpc_get_fact_direct('node_profile')
  fact_value
end
# <<accessor>>
# Get the quota blocks of a Node
#
# Is this disk available or the default quota limit?
# It's from Facter.
#
# INPUTS:
# none
#
# RETURNS:
# * Integer: the "quota blocks" of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_quota_blocks
  # Quota-blocks fact coerced to an Integer (to_i yields 0 for nil/garbage).
  raw_fact = rpc_get_fact_direct('quota_blocks')
  raw_fact.to_i
end
# <<accessor>>
# Get the quota files of a Node
#
# Is this disk available or the default quota limit?
# It's from Facter.
#
# INPUTS:
# none
#
# RETURNS:
# * Integer: the "quota files" of a node
#
# NOTES:
# * method on Node
# * calls rpc_get_fact_direct
#
def get_quota_files
  # Quota-files fact coerced to an Integer.
  raw_fact = rpc_get_fact_direct('quota_files')
  raw_fact.to_i
end
#
# Add a component to an existing gear on the node
#
# INPUTS:
# * gear: a Gear object
# * cart: string representing cartridge name
#
# RETURNS:
# * ResultIO
#
# RAISES:
# * Exception
#
# CATCHES:
# * Exception
#
# NOTES:
# * uses run_cartridge_command
# * runs "configure" on a "component"
#
# Configure a component's cartridge on the gear, optionally overlaying a
# template git repo, and append the cartridge's app-info output to the
# debug stream. Returns the ResultIO from the configure hook.
#
# (Fix: removed the dead `result_io = ResultIO.new` — the value was always
# overwritten by run_cartridge_command before first use.)
def add_component(gear, component, template_git_url=nil)
  args = build_base_gear_args(gear)
  args = build_base_component_args(component, args)
  if component.cartridge.singleton?
    # Singleton carts ship their manifest inline with the configure call.
    args['--with-cartridge-manifest'] = component.cartridge.manifest_text
    # NOTE(review): this repeats the key already set by
    # build_base_component_args with the same source value — presumably
    # defensive; confirm before removing.
    args['--with-software-version'] = component.cartridge.version
  end
  if template_git_url.present?
    args['--with-template-git-url'] = template_git_url
  end
  result_io = run_cartridge_command(component.cartridge_name, gear, "configure", args)
  component_details = result_io.appInfoIO.string.empty? ? '' : result_io.appInfoIO.string
  result_io.debugIO << "\n\n#{component.cartridge_name}: #{component_details}" unless component_details.blank?
  return result_io
end
#
# Post configuration for a cartridge on a gear.
#
# INPUTS:
# * gear: a Gear object
# * component: component_instance object
# * template_git_url: a url of a git repo containing a cart overlay
#
# RETURNS
# * ResultIO
#
# Run the cartridge's post-configure hook on the gear, optionally passing a
# template git overlay URL. Returns the hook's ResultIO with the cart's
# app-info appended to the debug stream.
#
# (Fix: removed the dead `result_io = ResultIO.new` — the value was always
# overwritten by run_cartridge_command before first use.)
def post_configure_component(gear, component, template_git_url=nil)
  cart = component.cartridge_name
  args = build_base_gear_args(gear)
  args = build_base_component_args(component, args)
  if template_git_url.present?
    args['--with-template-git-url'] = template_git_url
  end
  result_io = run_cartridge_command(cart, gear, "post-configure", args)
  component_details = result_io.appInfoIO.string.empty? ? '' : result_io.appInfoIO.string
  result_io.debugIO << "#{cart}: #{component_details}" unless component_details.blank?
  return result_io
end
#
# Deploy a gear.
#
# INPUTS:
# * gear: a Gear object
# * hot_deploy: indicates whether this is a hot deploy
# * force_clean_build: indicates whether this should be a clean build
# * ref: the ref to deploy
# * artifact_url: the url of the artifacts to deploy
#
# RETURNS
# * ResultIO
#
# Deploy the gear: optionally hot (no service restart flag passed through),
# optionally a clean build, at an optional ref or from a prebuilt artifact.
# Returns the ResultIO from the node's deploy command.
#
# (Fix: removed the dead `result_io = ResultIO.new` — the value was always
# overwritten by run_cartridge_command before first use.)
def deploy(gear, hot_deploy=false, force_clean_build=false, ref=nil, artifact_url=nil)
  args = build_base_gear_args(gear)
  args['--with-hot-deploy'] = hot_deploy
  args['--with-force-clean-build'] = force_clean_build
  args['--with-ref'] = ref if ref.present?
  args['--with-artifact-url'] = artifact_url if artifact_url.present?
  run_cartridge_command(@@C_CONTROLLER, gear, "deploy", args)
end
#
# Activate a deployment for a gear
#
# INPUTS:
# * gear: a Gear object
# * deployment_id: a deployment id
#
# RETURNS
# * ResultIO
#
# Activate a previously-prepared deployment (by id) on the gear.
# Returns the ResultIO from the node's activate command.
#
# (Fix: removed the dead `result_io = ResultIO.new` — the value was always
# overwritten by run_cartridge_command before first use.)
def activate(gear, deployment_id)
  args = build_base_gear_args(gear)
  args['--with-deployment-id'] = deployment_id
  run_cartridge_command(@@C_CONTROLLER, gear, "activate", args)
end
#
# Remove a component from a gear
#
# INPUTS:
# * gear: a Gear object
# * component: String: a component name
#
# RETURNS:
# * ResultIO
#
# Deconfigure a component's cartridge on the gear. If the deconfigure hook
# raises but the cartridge is no longer present on the gear, the error is
# treated as a no-op (the cartridge was already removed); otherwise the
# NodeException propagates.
#
# (Fix: corrected the garbled log message "not found on within" and renamed
# the local `resultIO` to snake_case.)
def remove_component(gear, component)
  app = gear.application
  args = build_base_gear_args(gear)
  args = build_base_component_args(component, args)
  cart = component.cartridge_name
  Rails.logger.debug "DEBUG: Deconfiguring cartridge '#{cart}' in application '#{app.name}' on node '#{@id}'"
  result_io = ResultIO.new
  begin
    result_io = run_cartridge_command(cart, gear, 'deconfigure', args)
  rescue OpenShift::NodeException
    if has_app_cartridge?(app.uuid, gear.uuid, component.cartridge.send(:short_name))
      raise
    else
      # Cartridge already gone from the gear; continue as if deconfigured.
      Rails.logger.debug "DEBUG: Cartridge '#{cart}' not found within application '#{app.name}/#{gear.uuid}'. Continuing with deconfigure."
    end
  end
  return result_io
end
#
# Start cartridge services within a gear
#
# INPUTS:
# * gear: a Gear object
# * cart: a Cartridge object
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses run_cartridge_command
# * uses start_component
#
# Run the component cartridge's start control on the gear.
def start(gear, component)
  cartridge_name = component.cartridge_name
  args = build_base_component_args(component, build_base_gear_args(gear))
  run_cartridge_command(cartridge_name, gear, "start", args)
end
# Build (but do not execute) a RemoteJob that starts the component on the
# gear — used for batched/parallel job execution.
#
# (Fix: use the shared @@C_CONTROLLER constant instead of repeating the
# literal 'openshift-origin-node'; same value, consistent with all
# execute_direct call sites.)
def get_start_job(gear, component)
  args = build_base_gear_args(gear)
  args = build_base_component_args(component, args)
  RemoteJob.new(@@C_CONTROLLER, 'start', args)
end
#
# Stop cartridge services within a gear
#
# INPUTS:
# * gear: a Gear object
# * cart: a Cartridge object
#
# RETURNS:
# * ResultIO
#
# NOTES:
# * uses run_cartridge_command
# * uses stop_component
# * uses start_component
#
# Run the component cartridge's stop control on the gear.
def stop(gear, component)
  cartridge_name = component.cartridge_name
  args = build_base_component_args(component, build_base_gear_args(gear))
  run_cartridge_command(cartridge_name, gear, "stop", args)
end
# Build (but do not execute) a RemoteJob that stops the component on the
# gear — used for batched/parallel job execution.
#
# (Fix: use the shared @@C_CONTROLLER constant instead of repeating the
# literal 'openshift-origin-node'; same value, consistent with all
# execute_direct call sites.)
def get_stop_job(gear, component)
  args = build_base_gear_args(gear)
  args = build_base_component_args(component, args)
  RemoteJob.new(@@C_CONTROLLER, 'stop', args)
end
#
# Force gear services to stop
#
# INPUTS:
# * gear: Gear object
#
# RETURNS: