diff --git a/dmlc-core b/dmlc-core
index d0afb8e946dd..4e54bfaa50c0 160000
--- a/dmlc-core
+++ b/dmlc-core
@@ -1 +1 @@
-Subproject commit d0afb8e946dda3310e6ed062a59f97726bf4c565
+Subproject commit 4e54bfaa50c0a5fabe1969713a9702d3bd92070b
diff --git a/example/nce-loss/README.md b/example/nce-loss/README.md
new file mode 100644
index 000000000000..88e54910bc7c
--- /dev/null
+++ b/example/nce-loss/README.md
@@ -0,0 +1,34 @@
+# Examples of NCE Loss
+
+NCE (noise-contrastive estimation) loss is used to speed up multi-class classification when the number of classes is very large.
+
+## Toy example
+
+* toy_softmax.py: a multi-class example using softmax output
+* toy_nce.py: a multi-class example using NCE loss
+
+## Word2Vec
+
+* wordvec.py: a CBOW word2vec example using NCE loss
+
+You can run it by
+
+```
+./get_text8.sh
+python wordvec.py
+```
+
+## LSTM
+
+* lstm_word.py: an LSTM example using NCE loss
+
+You can run it by
+
+```
+./get_text8.sh
+python lstm_word.py
+```
+
+## References
+
+For more details, see [http://www.jianshu.com/p/e439b43ea464](http://www.jianshu.com/p/e439b43ea464) (in Chinese).
diff --git a/example/nce-loss/get_text8.sh b/example/nce-loss/get_text8.sh
new file mode 100755
index 000000000000..ccd4a08e69bb
--- /dev/null
+++ b/example/nce-loss/get_text8.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+mkdir -p ./data/
+cd ./data/
+wget http://mattmahoney.net/dc/text8.zip
+unzip text8.zip
diff --git a/example/nce-loss/lstm_word.py b/example/nce-loss/lstm_word.py
new file mode 100644
index 000000000000..6b4116c84ce8
--- /dev/null
+++ b/example/nce-loss/lstm_word.py
@@ -0,0 +1,222 @@
+# pylint:skip-file
+import sys, random, time, math
+sys.path.insert(0, "../../python")
+import mxnet as mx
+import numpy as np
+from collections import namedtuple
+from nce import *
+from operator import itemgetter
+from optparse import OptionParser
+
+LSTMState = namedtuple("LSTMState", ["c", "h"])
+LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
+                                     "h2h_weight", "h2h_bias"])
+LSTMModel = namedtuple("LSTMModel", ["rnn_exec", "symbol",
+                                     "init_states", "last_states",
+                                     "seq_data", "seq_labels", "seq_outputs",
+                                     "param_blocks"])
+
+def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0.):
+    """LSTM Cell symbol"""
+    if dropout > 0.:
+        indata = mx.sym.Dropout(data=indata, p=dropout)
+    i2h = mx.sym.FullyConnected(data=indata,
+                                weight=param.i2h_weight,
+                                bias=param.i2h_bias,
+                                num_hidden=num_hidden * 4,
+                                name="t%d_l%d_i2h" % (seqidx, layeridx))
+    h2h = mx.sym.FullyConnected(data=prev_state.h,
+                                weight=param.h2h_weight,
+                                bias=param.h2h_bias,
+                                num_hidden=num_hidden * 4,
+                                name="t%d_l%d_h2h" % (seqidx, layeridx))
+    gates = i2h + h2h
+    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
+                                      name="t%d_l%d_slice" % (seqidx, layeridx))
+    in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
+    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
+    forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
+    out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
+    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
+    next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
+    return LSTMState(c=next_c, h=next_h)
+
+
+def get_net(vocab_size, seq_len, num_label, num_lstm_layer, num_hidden):
+    param_cells = []
+    last_states = []
+    for i in range(num_lstm_layer):
+        param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight" % i),
+                                     i2h_bias=mx.sym.Variable("l%d_i2h_bias" % i),
+                                     h2h_weight=mx.sym.Variable("l%d_h2h_weight" % i),
+                                     h2h_bias=mx.sym.Variable("l%d_h2h_bias" % i)))
+        state = LSTMState(c=mx.sym.Variable("l%d_init_c" % i),
+                          h=mx.sym.Variable("l%d_init_h" % i))
+        last_states.append(state)
+
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+    label_weight = mx.sym.Variable('label_weight')
+    embed_weight = mx.sym.Variable('embed_weight')
+    label_embed_weight = mx.sym.Variable('label_embed_weight')
+    data_embed = mx.sym.Embedding(data = data, input_dim = vocab_size,
+                                  weight = embed_weight,
+                                  output_dim = 100, name = 'data_embed')
+    datavec = mx.sym.SliceChannel(data = data_embed,
+                                  num_outputs = seq_len,
+                                  squeeze_axis = True, name = 'data_slice')
+    labelvec = mx.sym.SliceChannel(data = label,
+                                   num_outputs = seq_len,
+                                   squeeze_axis = True, name = 'label_slice')
+    labelweightvec = mx.sym.SliceChannel(data = label_weight,
+                                         num_outputs = seq_len,
+                                         squeeze_axis = True, name = 'label_weight_slice')
+    probs = []
+    for seqidx in range(seq_len):
+        hidden = datavec[seqidx]
+
+        for i in range(num_lstm_layer):
+            next_state = lstm(num_hidden, indata = hidden,
+                              prev_state = last_states[i],
+                              param = param_cells[i],
+                              seqidx = seqidx, layeridx = i)
+            hidden = next_state.h
+            last_states[i] = next_state
+
+        probs.append(nce_loss(data = hidden,
+                              label = labelvec[seqidx],
+                              label_weight = labelweightvec[seqidx],
+                              embed_weight = label_embed_weight,
+                              vocab_size = vocab_size,
+                              num_hidden = 100,
+                              num_label = num_label))
+    return mx.sym.Group(probs)
+
+
+def load_data(name):
+    buf = open(name).read()
+    tks = buf.split(' ')
+    vocab = {}
+    freq = [0]
+    data = []
+    for tk in tks:
+        if len(tk) == 0:
+            continue
+        if tk not in vocab:
+            vocab[tk] = len(vocab) + 1
+            freq.append(0)
+        wid = vocab[tk]
+        data.append(wid)
+        freq[wid] += 1
+    negative = []
+    for i, v in enumerate(freq):
+        if i == 0 or v < 5:
+            continue
+        v = int(math.pow(v * 1.0, 0.75))
+        negative += [i for _ in range(v)]
+    return data, negative, vocab, freq
+
+class SimpleBatch(object):
+    def __init__(self, data_names, data, label_names, label):
+        self.data = data
+        self.label = label
+        self.data_names = data_names
+        self.label_names = label_names
+
+    @property
+    def provide_data(self):
+        return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
+
+    @property
+    def provide_label(self):
+        return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
+
+
+class DataIter(mx.io.DataIter):
+    def __init__(self, name, batch_size, seq_len, num_label, init_states):
+        super(DataIter, self).__init__()
+        self.batch_size = batch_size
+        self.data, self.negative, self.vocab, self.freq = load_data(name)
+        self.vocab_size = 1 + len(self.vocab)
+        print self.vocab_size
+        self.seq_len = seq_len
+        self.num_label = num_label
+        self.init_states = init_states
+        self.init_state_names = [x[0] for x in self.init_states]
+        self.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states]
+        self.provide_data = [('data', (batch_size, seq_len))] + init_states
+        self.provide_label = [('label', (self.batch_size, seq_len, num_label)),
+                              ('label_weight', (self.batch_size, seq_len, num_label))]
+
+    def sample_ne(self):
+        return self.negative[random.randint(0, len(self.negative) - 1)]
+
+    def __iter__(self):
+        print 'begin'
+        batch_data = []
+        batch_label = []
+        batch_label_weight = []
+        for i in range(0, len(self.data) - self.seq_len - 1, self.seq_len):
+            data = self.data[i: i+self.seq_len]
+            label = [[self.data[i+k+1]] \
+                     + [self.sample_ne() for _ in range(self.num_label-1)]\
+                     for k in range(self.seq_len)]
+            label_weight = [[1.0] \
+                            + [0.0 for _ in range(self.num_label-1)]\
+                            for k in range(self.seq_len)]
+
+            batch_data.append(data)
+            batch_label.append(label)
+            batch_label_weight.append(label_weight)
+            if len(batch_data) == self.batch_size:
+                data_all = [mx.nd.array(batch_data)] + self.init_state_arrays
+                label_all = [mx.nd.array(batch_label), mx.nd.array(batch_label_weight)]
+                data_names = ['data'] + self.init_state_names
+                label_names = ['label', 'label_weight']
+                batch_data = []
+                batch_label = []
+                batch_label_weight = []
+                yield SimpleBatch(data_names, data_all, label_names, label_all)
+
+    def reset(self):
+        pass
+
+if __name__ == '__main__':
+    parser = OptionParser()
+    parser.add_option("-g", "--gpu", action = "store_true", dest = "gpu", default = False,
+                      help = "use gpu")
+    batch_size = 1024
+    seq_len = 5
+    num_label = 6
+    num_lstm_layer = 2
+    num_hidden = 100
+
+    init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
+    init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
+    init_states = init_c + init_h
+
+    data_train = DataIter("./data/text8", batch_size, seq_len, num_label,
+                          init_states)
+
+    network = get_net(data_train.vocab_size, seq_len, num_label, num_lstm_layer, num_hidden)
+    options, args = parser.parse_args()
+    devs = mx.cpu()
+    if options.gpu:
+        devs = mx.gpu()
+    model = mx.model.FeedForward(ctx = devs,
+                                 symbol = network,
+                                 num_epoch = 20,
+                                 learning_rate = 0.3,
+                                 momentum = 0.9,
+                                 wd = 0.0000,
+                                 initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
+
+    import logging
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    metric = NceLSTMAuc()
+    model.fit(X = data_train,
+              eval_metric = metric,
+              batch_end_callback = mx.callback.Speedometer(batch_size, 50),)
+
diff --git a/example/nce-loss/nce.py b/example/nce-loss/nce.py
new file mode 100644
index 000000000000..5b278a1eac3e
--- /dev/null
+++ b/example/nce-loss/nce.py
@@ -0,0 +1,94 @@
+# pylint:skip-file
+import sys
+sys.path.insert(0, "../../python")
+import mxnet as mx
+import numpy as np
+from operator import itemgetter
+
+def nce_loss(data, label, label_weight, embed_weight, vocab_size, num_hidden, num_label):
+    # NCE replaces a vocab_size-way softmax with num_label binary
+    # classifiers: each input is scored against its true label and
+    # sampled negatives via dot products with their label embeddings.
+    label_embed = mx.sym.Embedding(data = label, input_dim = vocab_size,
+                                   weight = embed_weight,
+                                   output_dim = num_hidden, name = 'label_embed')
+    data = mx.sym.Reshape(data = data, shape = (-1, 1, num_hidden))
+    pred = mx.sym.broadcast_mul(data, label_embed)
+    pred = mx.sym.sum(data = pred, axis = 2)
+    return mx.sym.LogisticRegressionOutput(data = pred,
+                                           label = label_weight)
+
+
+class NceAccuracy(mx.metric.EvalMetric):
+    def __init__(self):
+        super(NceAccuracy, self).__init__('nce-accuracy')
+
+    def update(self, labels, preds):
+        label_weight = labels[1].asnumpy()
+        preds = preds[0].asnumpy()
+        for i in range(preds.shape[0]):
+            if np.argmax(label_weight[i]) == np.argmax(preds[i]):
+                self.sum_metric += 1
+            self.num_inst += 1
+
+class NceAuc(mx.metric.EvalMetric):
+    def __init__(self):
+        super(NceAuc, self).__init__('nce-auc')
+
+    def update(self, labels, preds):
+        label_weight = labels[1].asnumpy()
+        preds = preds[0].asnumpy()
+        tmp = []
+        for i in range(preds.shape[0]):
+            for j in range(preds.shape[1]):
+                tmp.append((label_weight[i][j], preds[i][j]))
+        tmp = sorted(tmp, key = itemgetter(1), reverse = True)
+        m = 0.0
+        n = 0.0
+        z = 0.0
+        k = 0
+        for a, b in tmp:
+            if a > 0.5:
+                m += 1.0
+                z += len(tmp) - k
+            else:
+                n += 1.0
+            k += 1
+        z -= m * (m + 1.0) / 2.0
+        z /= m
+        z /= n
+        self.sum_metric += z
+        self.num_inst += 1
+
+class NceLSTMAuc(mx.metric.EvalMetric):
+    def __init__(self):
+        super(NceLSTMAuc, self).__init__('nce-lstm-auc')
+
+    def update(self, labels, preds):
+        preds = np.array([x.asnumpy() for x in preds])
+        preds = preds.reshape((preds.shape[0] * preds.shape[1], preds.shape[2]))
+        label_weight = labels[1].asnumpy()
+        label_weight = label_weight.transpose((1, 0, 2))
+        label_weight = label_weight.reshape((preds.shape[0], preds.shape[1]))
+
+        tmp = []
+        for i in range(preds.shape[0]):
+            for j in range(preds.shape[1]):
+                tmp.append((label_weight[i][j], preds[i][j]))
+        tmp = sorted(tmp, key = itemgetter(1), reverse = True)
+        m = 0.0
+        n = 0.0
+        z = 0.0
+        k = 0
+        for a, b in tmp:
+            if a > 0.5:
+                m += 1.0
+                z += len(tmp) - k
+            else:
+                n += 1.0
+            k += 1
+        z -= m * (m + 1.0) / 2.0
+        z /= m
+        z /= n
+        self.sum_metric += z
+        self.num_inst += 1
diff --git a/example/nce-loss/toy_nce.py b/example/nce-loss/toy_nce.py
new file mode 100644
index 000000000000..89a5aaf9c35f
--- /dev/null
+++ b/example/nce-loss/toy_nce.py
@@ -0,0 +1,111 @@
+# pylint:skip-file
+import sys, random, time
+sys.path.insert(0, "../../python")
+import mxnet as mx
+import numpy as np
+from collections import namedtuple
+from nce import *
+
+def get_net(vocab_size, num_label):
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+    label_weight = mx.sym.Variable('label_weight')
+    embed_weight = mx.sym.Variable('embed_weight')
+    pred = mx.sym.FullyConnected(data = data, num_hidden = 100)
+    ret = nce_loss(data = pred,
+                   label = label,
+                   label_weight = label_weight,
+                   embed_weight = embed_weight,
+                   vocab_size = vocab_size,
+                   num_hidden = 100,
+                   num_label = num_label)
+    return ret
+
+class SimpleBatch(object):
+    def __init__(self, data_names, data, label_names, label):
+        self.data = data
+        self.label = label
+        self.data_names = data_names
+        self.label_names = label_names
+
+    @property
+    def provide_data(self):
+        return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
+
+    @property
+    def provide_label(self):
+        return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
+
+
+class DataIter(mx.io.DataIter):
+    def __init__(self, count, batch_size, vocab_size, num_label, feature_size):
+        super(DataIter, self).__init__()
+        self.batch_size = batch_size
+        self.count = count
+        self.vocab_size = vocab_size
+        self.num_label = num_label
+        self.feature_size = feature_size
+        self.provide_data = [('data', (batch_size, feature_size))]
+        self.provide_label = [('label', (self.batch_size, num_label)),
+                              ('label_weight', (self.batch_size, num_label))]
+
+    def mock_sample(self):
+        ret = np.zeros(self.feature_size)
+        rn = set()
+        while len(rn) < 3:
+            rn.add(random.randint(0, self.feature_size - 1))
+        s = 0
+        for k in rn:
+            ret[k] = 1.0
+            s *= self.feature_size
+            s += k
+        la = [s % self.vocab_size] +\
+            [random.randint(0, self.vocab_size - 1) for _ in range(self.num_label - 1)]
+        return ret, la
+
+    def __iter__(self):
+        for _ in range(self.count / self.batch_size):
+            data = []
+            label = []
+            label_weight = []
+            for i in range(self.batch_size):
+                d, l = self.mock_sample()
+                data.append(d)
+                label.append(l)
+                label_weight.append([1.0] + [0.0 for _ in range(self.num_label - 1)])
+            data_all = [mx.nd.array(data)]
+            label_all = [mx.nd.array(label), mx.nd.array(label_weight)]
+            data_names = ['data']
+            label_names = ['label', 'label_weight']
+            yield SimpleBatch(data_names, data_all, label_names, label_all)
+
+    def reset(self):
+        pass
+
+if __name__ == '__main__':
+    batch_size = 128
+    vocab_size = 10000
+    feature_size = 100
+    num_label = 6
+
+    data_train = DataIter(100000, batch_size, vocab_size, num_label, feature_size)
+    data_test = DataIter(1000, batch_size, vocab_size, num_label, feature_size)
+
+    network = get_net(vocab_size, num_label)
+    devs = [mx.cpu()]
+    model = mx.model.FeedForward(ctx = devs,
+                                 symbol = network,
+                                 num_epoch = 20,
+                                 learning_rate = 0.03,
+                                 momentum = 0.9,
+                                 wd = 0.00001,
+                                 initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
+    import logging
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    metric = NceAccuracy()
+    model.fit(X = data_train, eval_data = data_test,
+              eval_metric = metric,
+              batch_end_callback = mx.callback.Speedometer(batch_size, 50),)
+
diff --git a/example/nce-loss/toy_softmax.py b/example/nce-loss/toy_softmax.py
new file mode 100644
index 000000000000..83d0a1e6a43c
--- /dev/null
+++ b/example/nce-loss/toy_softmax.py
@@ -0,0 +1,98 @@
+# pylint:skip-file
+import sys, random, time
+sys.path.insert(0, "../../python")
+import mxnet as mx
+import numpy as np
+from collections import namedtuple
+
+ToyModel = namedtuple("ToyModel", ["ex", "symbol", "param_blocks"])
+
+def get_net(vocab_size):
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+    pred = mx.sym.FullyConnected(data = data, num_hidden = 100)
+    pred = mx.sym.FullyConnected(data = pred, num_hidden = vocab_size)
+    sm = mx.sym.SoftmaxOutput(data = pred, label = label)
+    return sm
+
+class SimpleBatch(object):
+    def __init__(self, data_names, data, label_names, label):
+        self.data = data
+        self.label = label
+        self.data_names = data_names
+        self.label_names = label_names
+
+    @property
+    def provide_data(self):
+        return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
+
+    @property
+    def provide_label(self):
+        return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
+
+
+class DataIter(mx.io.DataIter):
+    def __init__(self, count, batch_size, vocab_size, num_label, feature_size):
+        super(DataIter, self).__init__()
+        self.batch_size = batch_size
+        self.count = count
+        self.vocab_size = vocab_size
+        self.num_label = num_label
+        self.feature_size = feature_size
+        self.provide_data = [('data', (batch_size, feature_size))]
+        self.provide_label = [('label', (self.batch_size,))]
+
+    def mock_sample(self):
+        ret = np.zeros(self.feature_size)
+        rn = set()
+        while len(rn) < 3:
+            rn.add(random.randint(0, self.feature_size - 1))
+        s = 0
+        for k in rn:
+            ret[k] = 1.0
+            s *= self.feature_size
+            s += k
+        return ret, s % self.vocab_size
+
+    def __iter__(self):
+        for _ in range(self.count / self.batch_size):
+            data = []
+            label = []
+            for i in range(self.batch_size):
+                d, l = self.mock_sample()
+                data.append(d)
+                label.append(l)
+            data_all = [mx.nd.array(data)]
+            label_all = [mx.nd.array(label)]
+            data_names = ['data']
+            label_names = ['label']
+            yield SimpleBatch(data_names, data_all, label_names, label_all)
+
+    def reset(self):
+        pass
+
+if __name__ == '__main__':
+    batch_size = 128
+    vocab_size = 10000
+    feature_size = 100
+    num_label = 6
+
+    data_train = DataIter(100000, batch_size, vocab_size, num_label, feature_size)
+    data_test = DataIter(1000, batch_size, vocab_size, num_label, feature_size)
+
+    network = get_net(vocab_size)
+    devs = mx.cpu()
+    model = mx.model.FeedForward(ctx = devs,
+                                 symbol = network,
+                                 num_epoch = 20,
+                                 learning_rate = 0.03,
+                                 momentum = 0.9,
+                                 wd = 0.0000,
+                                 initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
+    import logging
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    model.fit(X = data_train, eval_data = data_test,
+              batch_end_callback = mx.callback.Speedometer(batch_size, 50),)
+
diff --git a/example/nce-loss/wordvec.py b/example/nce-loss/wordvec.py
new file mode 100644
index 000000000000..02e986e76813
--- /dev/null
+++ b/example/nce-loss/wordvec.py
@@ -0,0 +1,149 @@
+# pylint:skip-file
+import sys, random, time, math
+sys.path.insert(0, "../../python")
+import mxnet as mx
+import numpy as np
+from collections import namedtuple
+from nce import *
+from operator import itemgetter
+from optparse import OptionParser
+
+def get_net(vocab_size, num_input, num_label):
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+    label_weight = mx.sym.Variable('label_weight')
+    embed_weight = mx.sym.Variable('embed_weight')
+    data_embed = mx.sym.Embedding(data = data, input_dim = vocab_size,
+                                  weight = embed_weight,
+                                  output_dim = 100, name = 'data_embed')
+    datavec = mx.sym.SliceChannel(data = data_embed,
+                                  num_outputs = num_input,
+                                  squeeze_axis = 1, name = 'data_slice')
+    pred = datavec[0]
+    for i in range(1, num_input):
+        pred = pred + datavec[i]
+    return nce_loss(data = pred,
+                    label = label,
+                    label_weight = label_weight,
+                    embed_weight = embed_weight,
+                    vocab_size = vocab_size,
+                    num_hidden = 100,
+                    num_label = num_label)
+
+def load_data(name):
+    buf = open(name).read()
+    tks = buf.split(' ')
+    vocab = {}
+    freq = [0]
+    data = []
+    for tk in tks:
+        if len(tk) == 0:
+            continue
+        if tk not in vocab:
+            vocab[tk] = len(vocab) + 1
+            freq.append(0)
+        wid = vocab[tk]
+        data.append(wid)
+        freq[wid] += 1
+    negative = []
+    for i, v in enumerate(freq):
+        if i == 0 or v < 5:
+            continue
+        v = int(math.pow(v * 1.0, 0.75))
+        negative += [i for _ in range(v)]
+    return data, negative, vocab, freq
+
+class SimpleBatch(object):
+    def __init__(self, data_names, data, label_names, label):
+        self.data = data
+        self.label = label
+        self.data_names = data_names
+        self.label_names = label_names
+
+    @property
+    def provide_data(self):
+        return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
+
+    @property
+    def provide_label(self):
+        return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
+
+
+class DataIter(mx.io.DataIter):
+    def __init__(self, name, batch_size, num_label):
+        super(DataIter, self).__init__()
+        self.batch_size = batch_size
+        self.data, self.negative, self.vocab, self.freq = load_data(name)
+        self.vocab_size = 1 + len(self.vocab)
+        print self.vocab_size
+        self.num_label = num_label
+        self.provide_data = [('data', (batch_size, num_label - 1))]
+        self.provide_label = [('label', (self.batch_size, num_label)),
+                              ('label_weight', (self.batch_size, num_label))]
+
+    def sample_ne(self):
+        return self.negative[random.randint(0, len(self.negative) - 1)]
+
+    def __iter__(self):
+        print 'begin'
+        batch_data = []
+        batch_label = []
+        batch_label_weight = []
+        start = random.randint(0, self.num_label - 1)
+        for i in range(start, len(self.data) - self.num_label - start, self.num_label):
+            context = self.data[i: i + self.num_label / 2] \
+                      + self.data[i + 1 + self.num_label / 2: i + self.num_label]
+            target_word = self.data[i + self.num_label / 2]
+            if self.freq[target_word] < 5:
+                continue
+            target = [target_word] \
+                     + [self.sample_ne() for _ in range(self.num_label - 1)]
+            target_weight = [1.0] + [0.0 for _ in range(self.num_label - 1)]
+            batch_data.append(context)
+            batch_label.append(target)
+            batch_label_weight.append(target_weight)
+            if len(batch_data) == self.batch_size:
+                data_all = [mx.nd.array(batch_data)]
+                label_all = [mx.nd.array(batch_label), mx.nd.array(batch_label_weight)]
+                data_names = ['data']
+                label_names = ['label', 'label_weight']
+                batch_data = []
+                batch_label = []
+                batch_label_weight = []
+                yield SimpleBatch(data_names, data_all, label_names, label_all)
+
+    def reset(self):
+        pass
+
+if __name__ == '__main__':
+    parser = OptionParser()
+    parser.add_option("-g", "--gpu", action = "store_true", dest = "gpu", default = False,
+                      help = "use gpu")
+    batch_size = 256
+    num_label = 5
+
+    data_train = DataIter("./data/text8", batch_size, num_label)
+
+    network = get_net(data_train.vocab_size, num_label - 1, num_label)
+
+    options, args = parser.parse_args()
+    devs = mx.cpu()
+    if options.gpu:
+        devs = mx.gpu()
+    model = mx.model.FeedForward(ctx = devs,
+                                 symbol = network,
+                                 num_epoch = 20,
+                                 learning_rate = 0.3,
+                                 momentum = 0.9,
+                                 wd = 0.0000,
+                                 initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
+
+    import logging
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    metric = NceAuc()
+    model.fit(X = data_train,
+              eval_metric = metric,
+              batch_end_callback = mx.callback.Speedometer(batch_size, 50),)
+
diff --git a/ps-lite b/ps-lite
index 35ddccd4cd03..36b015ffd51c 160000
--- a/ps-lite
+++ b/ps-lite
@@ -1 +1 @@
-Subproject commit 35ddccd4cd0302f78ed2a05f1258860d4666e43c
+Subproject commit 36b015ffd51c0f7062bba845f01164c0433dc6b3
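
The core trick in `nce.py` is to replace a `vocab_size`-way softmax with `num_label` binary logistic regressions per example: each input is scored against its true label and a handful of sampled negatives, where each score is a dot product with a label embedding. Below is a minimal NumPy sketch of that forward pass on dense arrays; it is not part of the patch, and `nce_forward` plus all the toy shapes are illustrative only.

```python
import numpy as np

def nce_forward(hidden, label_embed, label_weight):
    # hidden:       (batch, num_hidden)            network outputs
    # label_embed:  (batch, num_label, num_hidden) embeddings of
    #               [true label, sampled negatives] per example
    # label_weight: (batch, num_label)             1.0 for the true label, else 0.0
    scores = np.einsum('bh,blh->bl', hidden, label_embed)  # per-label dot products
    probs = 1.0 / (1.0 + np.exp(-scores))                  # sigmoid, as in LogisticRegressionOutput
    eps = 1e-12                                            # guard against log(0)
    loss = -(label_weight * np.log(probs + eps) +
             (1.0 - label_weight) * np.log(1.0 - probs + eps)).mean()
    return probs, loss

rng = np.random.RandomState(0)
hidden = rng.randn(4, 100)           # batch=4, num_hidden=100
label_embed = rng.randn(4, 6, 100)   # num_label=6: 1 positive + 5 negatives
label_weight = np.zeros((4, 6))
label_weight[:, 0] = 1.0             # slot 0 holds the true label
probs, loss = nce_forward(hidden, label_embed, label_weight)
print(probs.shape)                   # (4, 6)
print(loss)
```

Because only `num_label` embedding rows are touched per example instead of all `vocab_size` rows, the per-step cost drops from O(vocab_size) to O(num_label), which is the speedup the README refers to.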
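The negative sampler in `load_data` (shared by `lstm_word.py` and `wordvec.py`) is a static table in which each word id is repeated `freq ** 0.75` times, the usual word2vec smoothing of the unigram distribution; `sample_ne` then just indexes the table uniformly. A standalone sketch with a toy frequency array (the helper name `build_neg_table` is ours, not the patch's):

```python
import math
import random

def build_neg_table(freq, min_count=5):
    # Replicate each word id int(freq**0.75) times, as load_data() does;
    # rare words (freq < min_count) and the reserved id 0 are never sampled.
    table = []
    for wid, f in enumerate(freq):
        if wid == 0 or f < min_count:
            continue
        table += [wid] * int(math.pow(f, 0.75))
    return table

freq = [0, 100, 40, 3, 9]   # id 0 is reserved; id 3 is too rare to sample
table = build_neg_table(freq)
samples = [table[random.randint(0, len(table) - 1)] for _ in range(8)]
print(samples)              # draws from {1, 2, 4}, biased toward frequent ids
```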
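Finally, in `wordvec.py`, `DataIter.__iter__` forms a CBOW pair from each window of `num_label` word ids: the middle id is the target and the `num_label / 2` ids on either side are the context, with the center word skipped. A worked example of that slicing, assuming `num_label = 5` as in the script:

```python
# Toy word ids at positions i..i+4 of the corpus.
data = [10, 11, 12, 13, 14]
num_label = 5
half = num_label // 2                      # 2 (integer division, as in the Python 2 source)
context = data[:half] + data[half + 1:]    # two words on each side of the center
target = data[half]                        # the center word
print(context)                             # [10, 11, 13, 14]
print(target)                              # 12
```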