-
Notifications
You must be signed in to change notification settings - Fork 73
/
client.cr
173 lines (153 loc) · 5.17 KB
/
client.cr
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
require "random/secure"
require "./types"
module Sidekiq
  # Client pushes jobs to Redis. Normally used via `Sidekiq::Job#perform_async`
  # but can be instantiated directly for bulk pushes or custom pools.
  class Client
    # Default client-side context: bundles a Redis connection pool and a
    # logger. Clients do not run jobs, so no error handlers are registered.
    class Context < Sidekiq::Context
      getter pool : Sidekiq::Pool
      getter logger : ::Log

      # Client-side contexts report no errors; always empty.
      def error_handlers : Array(Sidekiq::ExceptionHandler::Base)
        [] of Sidekiq::ExceptionHandler::Base
      end

      # Builds a context with the default Redis configuration and logger.
      def initialize
        @pool = RedisConfig.new.new_pool
        @logger = Sidekiq::Logger.build
      end

      # Builds a context from an explicit Redis configuration.
      def initialize(redis_cfg : Sidekiq::RedisConfig, logger : ::Log? = nil)
        @pool = redis_cfg.new_pool
        @logger = logger || Sidekiq::Logger.build
      end

      # Builds a context around an existing connection pool.
      def initialize(pool : Sidekiq::Pool, logger : ::Log? = nil)
        @pool = pool
        @logger = logger || Sidekiq::Logger.build
      end
    end

    # Shared middleware chain used as the template for every new Client.
    DEFAULT_MIDDLEWARE = Sidekiq::Middleware::Chain(Sidekiq::Middleware::ClientEntry).new

    @@default : Sidekiq::Context?

    # Sets the process-wide default context used by new Client instances.
    def self.default_context=(ctx)
      @@default = ctx
    end

    # Returns the process-wide default context. Raises (via `not_nil!`) if
    # `default_context=` has not been called yet.
    def self.default_context : Sidekiq::Context
      @@default.not_nil!
    end

    # The global middleware chain; copied into each new Client.
    def self.middleware
      DEFAULT_MIDDLEWARE
    end

    # #
    # Define client-side middleware:
    #
    #   client = Sidekiq::Client.new
    #   client.middleware do |chain|
    #     chain.use MyClientMiddleware
    #   end
    #
    def middleware
      yield @chain
      @chain
    end

    # This client's middleware chain (a copy of the global default).
    def middleware
      @chain
    end

    @chain : Sidekiq::Middleware::Chain(Sidekiq::Middleware::ClientEntry)
    @ctx : Sidekiq::Context
    @pool : Sidekiq::Pool

    # Sidekiq::Client normally uses the default Redis pool but you may
    # set a custom ConnectionPool if you want to shard your
    # Sidekiq jobs across several Redis instances (for scalability
    # reasons, e.g.)
    #
    #   c = Sidekiq::Client.new(Sidekiq::Pool.new)
    #
    # Generally this is only needed for very large Sidekiq installs processing
    # thousands of jobs per second. I don't recommend sharding unless you
    # cannot scale any other way (e.g. splitting your app into smaller apps).
    def initialize(pool = nil)
      raise "Sidekiq client has not been configured yet" unless @@default
      @ctx = @@default.not_nil!
      @pool = pool || @ctx.pool
      @chain = DEFAULT_MIDDLEWARE.copy
    end

    # #
    # The main method used to push a job to Redis. Accepts a number of options:
    #
    #   queue - the named queue to use, default 'default'
    #   class - the worker class to call, required
    #   args - an array of simple arguments to the perform method, must be JSON-serializable
    #   retry - whether to retry this job if it fails, default true or an integer number of retries
    #   backtrace - whether to save any error backtrace, default false
    #
    # All options must be strings, not symbols. NB: because we are serializing to JSON, all
    # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
    # space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
    #
    # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
    #
    # Example:
    #   push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
    #
    def push(job : Sidekiq::Job)
      # Run the job through the client middleware chain; a middleware that
      # does not yield stops the push and we return nil.
      result = middleware.invoke(job, @ctx) do
        true
      end
      if result
        raw_push([job])
        job.jid
      end
    end

    # #
    # Push a large number of jobs to Redis. In practice this method is only
    # useful if you are pushing thousands of jobs or more. This method
    # cuts out the redis network round trip latency.
    #
    # Takes the same arguments as #push except that allargs is expected to be
    # an Array of Arrays. All other keys are duplicated for each job. Each job
    # is run through the client middleware pipeline and each job gets its own Job ID
    # as normal.
    #
    # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
    # than the number given if the middleware stopped processing for one or more jobs.
    def push_bulk(job : Sidekiq::Job, allargs : Array(String))
      # Clone the template job per args entry, each with its own fresh jid;
      # middleware may veto individual jobs (compact_map drops the nils).
      payloads = allargs.compact_map do |args|
        copy = Sidekiq::Job.new
        copy.jid = Random::Secure.hex(12)
        copy.klass = job.klass
        copy.queue = job.queue
        copy.args = args
        copy.retry = job.retry
        result = middleware.invoke(copy, @ctx) do
          !!copy
        end
        result ? copy : nil
      end
      raw_push(payloads) if !payloads.empty?
      payloads.map(&.jid)
    end

    # Pushes all payloads to Redis inside a single MULTI transaction.
    #
    # BUGFIX: previously used `@ctx.pool`, which silently ignored a custom
    # pool passed to #initialize — defeating the documented sharding support.
    # Now uses this client's own `@pool` (identical to `@ctx.pool` when no
    # custom pool was supplied).
    def raw_push(payloads)
      @pool.redis do |conn|
        conn.multi do |multi|
          atomic_push(multi, payloads)
        end
      end
      true
    end

    # Queues the payloads on `conn`. If the first payload has an `at`
    # timestamp, all payloads are ZADDed to the "schedule" sorted set
    # scored by their epoch time; otherwise they are LPUSHed onto the
    # queue list and the queue name is registered in the "queues" set.
    def atomic_push(conn, payloads)
      if payloads.first.at
        all = [] of Redis::RedisValue
        payloads.each do |hash|
          # Clear `at` before serializing so the scheduled time lives only
          # in the sorted-set score, not in the job JSON.
          at, hash.at = hash.at, nil
          all << at.not_nil!.to_unix_f.to_s
          all << hash.to_json
        end
        conn.zadd("schedule", all)
      else
        q = payloads.first.queue
        now = Time.local
        to_push = payloads.map do |entry|
          entry.enqueued_at = now
          entry.to_json
        end
        conn.sadd("queues", q)
        conn.lpush("queue:#{q}", to_push)
      end
    end
  end
end