From 8fe9e1ca15a3d38933208b6227443113aebc59dd Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Tue, 2 Aug 2016 12:10:48 +0200
Subject: [PATCH 001/128] Implemented step logistic regression.
---
.../widgets/utils/logistic_regression.py | 73 +++++++++++++++++++
1 file changed, 73 insertions(+)
create mode 100644 orangecontrib/educational/widgets/utils/logistic_regression.py
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
new file mode 100644
index 00000000..75a49e27
--- /dev/null
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -0,0 +1,73 @@
+import numpy as np
+
+from Orange.classification import Learner, Model
+
+
+class LogisticRegression:
+
+ x = None
+ y = None
+ theta = None
+ domain = None
+
+ def __init__(self, alpha, theta=None, data=None):
+ self.alpha = alpha
+ self.set_data(data)
+ self.set_theta(theta)
+
+ def set_data(self, data):
+ if data is not None:
+ self.x = data.X
+ self.y = data.Y
+ self.domain = data.domain
+
+ def set_theta(self, theta):
+ self.theta = theta
+
+ @property
+ def model(self):
+ return LogisticRegressionModel(self.theta, self.domain)
+
+ def step(self):
+ grad = self.dj(self.theta)
+ self.theta -= self.alpha * grad
+
+ def j(self, theta):
+ """
+ Cost function for logistic regression
+ """
+ yh = self.g(self.x.dot(theta))
+ return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh)))
+
+ def dj(self, theta):
+ """
+ Gradient of the cost function with L2 regularization
+ """
+ return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
+
+ @staticmethod
+ def g(z):
+ """
+ sigmoid function
+
+ Parameters
+ ----------
+ z : array_like
+ values to evaluate with function
+ """
+
+ # limit values in z to avoid log with 0 produced by values almost 0
+ z_mod = np.minimum(z, 100 * np.ones(len(z)))
+ z_mod = np.maximum(z_mod, -100 * np.ones(len(z)))
+
+ return 1.0 / (1 + np.exp(- z_mod))
+
+
+class LogisticRegressionModel(Model):
+
+ def __init__(self, theta, domain):
+ super().__init__(domain)
+ self.theta = theta
+
+ def predict_storage(self, data):
+ return LogisticRegression.g(data.X.dot(self.theta))
From ecc4aa4e9b7be3024e95720cb449ca4bb375994a Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Tue, 2 Aug 2016 15:19:35 +0200
Subject: [PATCH 002/128] Partly implemented gradient descent
---
.../educational/widgets/owgradientdescent.py | 383 ++++++++++++++++++
.../widgets/utils/logistic_regression.py | 22 +-
2 files changed, 401 insertions(+), 4 deletions(-)
create mode 100644 orangecontrib/educational/widgets/owgradientdescent.py
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
new file mode 100644
index 00000000..f00da059
--- /dev/null
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -0,0 +1,383 @@
+from math import isnan
+from os import path
+
+import numpy as np
+from Orange.widgets.utils import itemmodels
+from PyQt4.QtCore import pyqtSlot, Qt
+from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
+
+from Orange.classification import Model
+from Orange.data import Table, ContinuousVariable, Domain, DiscreteVariable
+from Orange.widgets import gui
+from Orange.widgets import highcharts
+from Orange.widgets import settings
+from Orange.widgets.widget import OWWidget, Msg
+from scipy.ndimage import gaussian_filter
+
+from orangecontrib.educational.widgets.utils.logistic_regression \
+ import LogisticRegression
+
+
+class Scatterplot(highcharts.Highchart):
+ """
+ Scatterplot extends Highchart and just defines some sane defaults:
+ * enables scroll-wheel zooming,
+ * set callback functions for click (in empty chart), drag and drop
+ * enables moving of centroids points
+ * include drag_drop_js script by highchart
+ """
+
+ js_click_function = """/**/(function(event) {
+ window.pybridge.chart_clicked(event.xAxis[0].value, event.yAxis[0].value);
+ })
+ """
+
+ # to make unit tesest
+ count_replots = 0
+
+ def __init__(self, click_callback, **kwargs):
+
+ # read javascript for drag and drop
+ with open(path.join(path.dirname(__file__), 'resources', 'highcharts-contour.js'), 'r') as f:
+ contours_js = f.read()
+
+ super().__init__(enable_zoom=True,
+ bridge=self,
+ enable_select='',
+ chart_events_click=self.js_click_function,
+ plotOptions_series_states_hover_enabled=False,
+ plotOptions_series_cursor="move",
+ javascript=contours_js,
+ **kwargs)
+
+ self.click_callback = click_callback
+
+ def chart(self, *args, **kwargs):
+ self.count_replots += 1
+ super(Scatterplot, self).chart(*args, **kwargs)
+
+ @pyqtSlot(float, float)
+ def chart_clicked(self, x, y):
+ self.click_callback(x, y)
+
+
+class OWGradientDescent(OWWidget):
+
+ name = "Gradient Descent"
+ description = "Widget demonstrates shows the procedure of gradient descent."
+ icon = "icons/InteractiveKMeans.svg"
+ want_main_area = True
+
+ inputs = [("Data", Table, "set_data")]
+ outputs = [("Model", Model),
+ ("Coefficients", Table)]
+
+ # selected attributes in chart
+ attr_x = settings.Setting('')
+ attr_y = settings.Setting('')
+ target_class = settings.Setting('')
+
+ # models
+ x_var_model = None
+ y_var_model = None
+
+ # function used in gradient descent
+ default_learner = LogisticRegression
+ learner = None
+ cost_grid = None
+ grid_size = 20
+
+ # data
+ data = None
+ selected_data = None
+
+ class Warning(OWWidget.Warning):
+ to_few_features = Msg("Too few Continuous feature. Min 2 required")
+ no_class = Msg("No class provided or only one class variable")
+
+ def __init__(self):
+ super().__init__()
+
+ # var models
+ self.x_var_model = itemmodels.VariableListModel()
+ self.y_var_model = itemmodels.VariableListModel()
+
+ # options box
+ policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
+
+ self.options_box = gui.widgetBox(self.controlArea)
+ opts = dict(
+ widget=self.options_box, master=self, orientation=Qt.Horizontal,
+ callback=self.restart, sendSelectedValue=True
+ )
+ self.cbx = gui.comboBox(value='attr_x', label='X:', **opts)
+ self.cbx.setSizePolicy(policy)
+ self.cby = gui.comboBox(value='attr_y', label='Y:', **opts)
+ self.cby.setSizePolicy(policy)
+ self.target_class_combobox = gui.comboBox(
+ value='target_class', label='Target class: ', **opts)
+ self.target_class_combobox.setSizePolicy(policy)
+
+ self.cbx.setModel(self.x_var_model)
+ self.cby.setModel(self.y_var_model)
+
+ # graph in mainArea
+ self.scatter = Scatterplot(click_callback=self.graph_clicked,
+ xAxis_gridLineWidth=0,
+ yAxis_gridLineWidth=0,
+ title_text='',
+ tooltip_shared=False,
+ debug=True)
+
+ gui.rubber(self.controlArea)
+
+ # TODO: set false when end of development
+ # Just render an empty chart so it shows a nice 'No data to display'
+ self.scatter.chart()
+ self.mainArea.layout().addWidget(self.scatter)
+
+ # set random learner
+
+ def set_data(self, data):
+ """
+ Function receives data from input and init part of widget if data
+ satisfy. Otherwise set empty plot and notice
+ user about that
+
+ Parameters
+ ----------
+ data : Table
+ Input data
+ """
+
+ def reset_combos():
+ self.x_var_model[:] = []
+ self.y_var_model[:] = []
+ self.target_class_combobox.clear()
+
+ def init_combos():
+ """
+ function initialize the combos with attributes
+ """
+ reset_combos()
+
+ c_vars = [var for var in data.domain.variables if var.is_continuous]
+
+ self.x_var_model[:] = c_vars
+ self.y_var_model[:] = c_vars
+
+ for i, var in enumerate(data.domain.class_var.values):
+ pix_map = QPixmap(60, 60)
+ color = tuple(data.domain.class_var.colors[i].tolist())
+ pix_map.fill(QColor(*color))
+ self.target_class_combobox.addItem(QIcon(pix_map), var)
+
+ self.Warning.clear()
+
+ # clear variables
+ self.xv = None
+ self.yv = None
+ self.cost_grid = None
+
+ if data is None or len(data) == 0:
+ self.data = None
+ reset_combos()
+ self.set_empty_plot()
+ elif sum(True for var in data.domain.attributes
+ if isinstance(var, ContinuousVariable)) < 2:
+ self.data = None
+ reset_combos()
+ self.Warning.to_few_features()
+ self.set_empty_plot()
+ elif (data.domain.class_var is None or
+ len(data.domain.class_var.values) < 2):
+ self.data = None
+ reset_combos()
+ self.Warning.no_class()
+ self.set_empty_plot()
+ else:
+ self.data = data
+ init_combos()
+ self.attr_x = self.cbx.itemText(0)
+ self.attr_y = self.cbx.itemText(1)
+ self.target_class = self.target_class_combobox.itemText(0)
+ self.restart()
+
+ def restart(self):
+ self.selected_data = self.select_data()
+ self.learner = self.default_learner(data=self.selected_data)
+ self.replot()
+
+ def replot(self):
+ """
+ This function performs complete replot of the graph
+ """
+ if self.data is None:
+ return
+
+ attr_x = self.data.domain[self.attr_x]
+ attr_y = self.data.domain[self.attr_y]
+
+ optimal_theta = self.learner.optimized()
+ min_x = optimal_theta[0] - 3
+ max_x = optimal_theta[0] + 3
+ min_y = optimal_theta[1] - 3
+ max_y = optimal_theta[1] + 3
+
+ options = dict(series=[])
+
+ # gradient and contour
+ options['series'] += self.plot_gradient_and_contour(
+ min_x, max_x, min_y, max_y)
+
+ data = options['series'][0]['data']
+ data = [d[2] for d in data]
+ min_value = np.min(data)
+ max_value = np.max(data)
+
+ # highcharts parameters
+ kwargs = dict(
+ xAxis_title_text=attr_x.name,
+ yAxis_title_text=attr_y.name,
+ xAxis_min=min_x,
+ xAxis_max=max_x,
+ yAxis_min=min_y,
+ yAxis_max=max_y,
+ colorAxis=dict(
+ stops=[
+ [min_value, "#ffffff"],
+ [max_value, "#ff0000"]],
+ tickInterval=1, max=max_value, min=min_value),
+ plotOptions_contour_colsize=(max_y - min_y) / 1000,
+ plotOptions_contour_rowsize=(max_x - min_x) / 1000,
+ tooltip_headerFormat="",
+            tooltip_pointFormat="%s: {point.x:.2f}<br/>"
+ "%s: {point.y:.2f}" %
+ (self.attr_x, self.attr_y))
+
+ self.scatter.chart(options, **kwargs)
+ # hack to destroy the legend for coloraxis
+
+ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
+ """
+ Function constructs series for gradient and contour
+ Parameters
+ ----------
+ x_from : float
+ Min grid x value
+ x_to : float
+ Max grid x value
+ y_from : float
+ Min grid y value
+ y_to : float
+ Max grid y value
+ Returns
+ -------
+ list
+ List containing series with background gradient and contour
+ """
+
+ # grid for gradient
+ x = np.linspace(x_from, x_to, self.grid_size)
+ y = np.linspace(y_from, y_to, self.grid_size)
+ self.xv, self.yv = np.meshgrid(x, y)
+ thetas = np.column_stack((self.xv.flatten(), self.yv.flatten()))
+
+ cost_values = np.vstack([self.learner.j(theta) for theta in thetas])
+
+ # results
+ self.cost_grid = cost_values.reshape(self.xv.shape)
+
+ blurred = self.blur_grid(self.cost_grid)
+
+ return self.plot_gradient(self.xv, self.yv, blurred)
+
+ def plot_gradient(self, x, y, grid):
+ """
+ Function constructs background gradient
+ """
+ return [dict(data=[[x[j, k], y[j, k], grid[j, k]] for j in range(len(x))
+ for k in range(y.shape[1])],
+ grid_width=self.grid_size,
+ type="contour")]
+
+ def select_data(self):
+ """
+ Function takes two selected columns from data table and merge them
+ in new Orange.data.Table
+ Returns
+ -------
+ Table
+ Table with selected columns
+ """
+ attr_x = self.data.domain[self.attr_x]
+ attr_y = self.data.domain[self.attr_y]
+ cols = []
+ for attr in (attr_x, attr_y):
+ subset = self.data[:, attr]
+ cols.append(subset.X)
+ x = np.column_stack(cols)
+ domain = Domain(
+ [attr_x, attr_y],
+ [DiscreteVariable(name=self.data.domain.class_var.name,
+ values=[self.target_class, 'Others'])],
+ [self.data.domain.class_var])
+ y = [(0 if d.get_class().value == self.target_class else 1)
+ for d in self.data]
+
+ return Table(domain, x, y, self.data.Y[:, None])
+
+ # def plot_contour(self):
+ # """
+ # Function constructs contour lines
+ # """
+ # self.scatter.remove_contours()
+ # if self.contours_enabled:
+ # contour = Contour(
+ # self.xv, self.yv, self.blur_grid(self.probabilities_grid))
+ # contour_lines = contour.contours(
+ # np.hstack(
+ # (np.arange(0.5, 0, - self.contour_step)[::-1],
+ # np.arange(0.5 + self.contour_step, 1, self.contour_step))))
+ # # we want to have contour for 0.5
+ #
+ # series = []
+ # count = 0
+ # for key, value in contour_lines.items():
+ # for line in value:
+ # if len(line) > self.degree:
+ # # if less than degree interpolation fails
+ # tck, u = splprep(
+ # [list(x) for x in zip(*reversed(line))],
+ # s=0.001, k=self.degree,
+ # per=(len(line)
+ # if np.allclose(line[0], line[-1])
+ # else 0))
+ # new_int = np.arange(0, 1.01, 0.01)
+ # interpol_line = np.array(splev(new_int, tck)).T.tolist()
+ # else:
+ # interpol_line = line
+ #
+ # series.append(dict(data=self.labeled(interpol_line, count),
+ # color=self.contour_color,
+ # type="spline",
+ # lineWidth=0.5,
+ # showInLegend=False,
+ # marker=dict(enabled=False),
+ # name="%g" % round(key, 2),
+ # enableMouseTracking=False
+ # ))
+ # count += 1
+ # self.scatter.add_series(series)
+ # self.scatter.redraw_series()
+
+ @staticmethod
+ def blur_grid(grid):
+ filtered = gaussian_filter(grid, sigma=1)
+ filtered[(grid > 0.45) & (grid < 0.55)] = grid[(grid > 0.45) &
+ (grid < 0.55)]
+ return filtered
+
+ def graph_clicked(self, x, y):
+ pass
+
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 75a49e27..ad24474f 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -1,6 +1,7 @@
import numpy as np
-from Orange.classification import Learner, Model
+from Orange.classification import Model
+from scipy.optimize import fmin_l_bfgs_b
class LogisticRegression:
@@ -10,8 +11,8 @@ class LogisticRegression:
theta = None
domain = None
- def __init__(self, alpha, theta=None, data=None):
- self.alpha = alpha
+ def __init__(self, alpha=0.1, theta=None, data=None):
+ self.set_alpha(alpha)
self.set_data(data)
self.set_theta(theta)
@@ -24,6 +25,9 @@ def set_data(self, data):
def set_theta(self, theta):
self.theta = theta
+ def set_alpha(self, alpha):
+ self.alpha = alpha
+
@property
def model(self):
return LogisticRegressionModel(self.theta, self.domain)
@@ -36,8 +40,9 @@ def j(self, theta):
"""
Cost function for logistic regression
"""
+ # TODO: modify for more thetas
yh = self.g(self.x.dot(theta))
- return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh)))
+ return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh))) / len(yh)
def dj(self, theta):
"""
@@ -45,6 +50,15 @@ def dj(self, theta):
"""
return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
+ def optimized(self):
+ """
+ Function performs model training
+ """
+ res = fmin_l_bfgs_b(self.j,
+ np.zeros(self.x.shape[1]),
+ self.dj)
+ return res[0]
+
@staticmethod
def g(z):
"""
From a73ac6793420de5ed9fa5ce3aab3b1b29d73b327 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Tue, 2 Aug 2016 15:20:50 +0200
Subject: [PATCH 003/128] Added highcharts-contour
---
.../widgets/resources/highcharts-contour.js | 538 ++++++++++++++++++
1 file changed, 538 insertions(+)
create mode 100644 orangecontrib/educational/widgets/resources/highcharts-contour.js
diff --git a/orangecontrib/educational/widgets/resources/highcharts-contour.js b/orangecontrib/educational/widgets/resources/highcharts-contour.js
new file mode 100644
index 00000000..e1fdd64e
--- /dev/null
+++ b/orangecontrib/educational/widgets/resources/highcharts-contour.js
@@ -0,0 +1,538 @@
+/**
+* Highcharts plugin for contour curves
+*
+* Author: Paulo Costa
+*/
+
+(function (Highcharts) {
+
+"use strict";
+
+
+var defaultOptions = Highcharts.getOptions(),
+ each = Highcharts.each,
+ extendClass = Highcharts.extendClass,
+ merge = Highcharts.merge,
+ seriesTypes = Highcharts.seriesTypes,
+ wrap = Highcharts.wrap,
+ perspective = Highcharts.perspective,
+ eps = 0.0001,
+ SVG_NS = "http://www.w3.org/2000/svg",
+ XLINK_NS = "http://www.w3.org/1999/xlink",
+ gradient_id = 0;
+
+/**
+* Extend the default options with map options
+*/
+
+
+defaultOptions.plotOptions.contour = merge(defaultOptions.plotOptions.heatmap, {
+ marker: defaultOptions.plotOptions.scatter.marker,
+ turboThreshold:0
+});
+
+/**
+* Normalize a value into 0-1 range
+*/
+Highcharts.ColorAxis.prototype.toRelativePosition = function(value) {
+ if (this.isLog) {
+ value = this.val2lin(value);
+ }
+ return (value - this.min) / ((this.max - this.min) || 1);
+};
+
+wrap(Highcharts.ColorAxis.prototype, 'setAxisSize', function (proceed) {
+ if (this.legendSymbol) {
+ proceed.call(this);
+ } else {
+ this.len = 100;
+ this.pos = 0;
+ }
+});
+
+Highcharts.Axis.prototype.drawCrosshair = function() {};
+
+// The Heatmap series type
+seriesTypes.contour = extendClass(seriesTypes.heatmap, {
+ type: 'contour',
+ hasPointSpecificOptions: true,
+ getSymbol: seriesTypes.scatter.prototype.getSymbol,
+ drawPoints: Highcharts.Series.prototype.drawPoints,
+
+ init: function (chart) {
+ this.is3d = chart.is3d && chart.is3d();
+ seriesTypes.scatter.prototype.init.apply(this, arguments);
+ },
+
+ bindAxes: function () {
+ if (this.is3d) {
+ this.axisTypes = ['xAxis', 'yAxis', 'zAxis', 'colorAxis'];
+ this.parallelArrays = ['x', 'y', 'z', 'value'];
+ } else {
+ this.axisTypes = ['xAxis', 'yAxis', 'colorAxis'];
+ this.parallelArrays = ['x', 'y', 'value'];
+ }
+ seriesTypes.scatter.prototype.bindAxes.apply(this, arguments);
+ },
+
+ //FIXME: Once https://github.com/highcharts/highcharts/pull/5497 has landed, this whole method can go away
+ translate: function () {
+ if (!this.is3d) {
+ seriesTypes.scatter.prototype.translate.apply(this, arguments);
+ return;
+ }
+ this.chart.options.chart.options3d.enabled = false;
+ seriesTypes.scatter.prototype.translate.apply(this, arguments);
+ this.chart.options.chart.options3d.enabled = true;
+
+ var series = this,
+ chart = series.chart,
+ zAxis = series.zAxis;
+
+ Highcharts.each(series.data, function (point) {
+ var p3d = {
+ x: point.plotX,
+ y: point.plotY,
+ z: zAxis.translate(zAxis.isLog && zAxis.val2lin ? zAxis.val2lin(point.z) : point.z)
+ };
+ point.plotXold = p3d.x;
+ point.plotYold = p3d.y;
+ point.plotZold = p3d.z;
+
+ p3d = perspective([p3d], chart, true)[0];
+ point.plotX = p3d.x;
+ point.plotY = p3d.y;
+ point.plotZ = p3d.z;
+ });
+ },
+
+ drawTriangle: function (triangle_data, points, edgeCount, show_edges, contours) {
+ var fill;
+ var chart = this.chart;
+ var renderer = this.chart.renderer;
+ var a = points[triangle_data.a];
+ var b = points[triangle_data.b];
+ var c = points[triangle_data.c];
+ var abc = [a,b,c];
+
+ //Normalized values of the vertexes
+ var values = [
+ this.colorAxis.toRelativePosition(a.value),
+ this.colorAxis.toRelativePosition(b.value),
+ this.colorAxis.toRelativePosition(c.value)
+ ];
+
+ //All vertexes have the same value/color
+ if (Math.abs(values[0] - values[1]) < eps && Math.abs(values[0] - values[2]) < eps) {
+ fill = this.colorAxis.toColor((a.value + b.value + c.value) / 3);
+ //Use a linear gradient to interpolate values/colors
+ } else {
+ //Find function where "Value = A*X + B*Y + C" at the 3 vertexes
+ var m = new Matrix([
+ [a.plotX, a.plotY, 1, values[0]],
+ [b.plotX, b.plotY, 1, values[1]],
+ [c.plotX, c.plotY, 1, values[2]]]);
+ m.toReducedRowEchelonForm();
+ var A = m.mtx[0][3];
+ var B = m.mtx[1][3];
+ var C = m.mtx[2][3];
+
+ //For convenience, we place our gradient control points at (k*A, k*B)
+ //We can find the value of K as:
+ // Value = A*X + B*Y + C =
+ // Value = A*(A*k) + B*(B*k) + C
+ // Value = A²*k + B²*k + C
+ // Value = k*(A² + B²) + C
+ // k = (Value - C) / (A² + B²)
+ var k0 = (0-C) / (A*A + B*B);
+ var k1 = (1-C) / (A*A + B*B);
+ var x1 = k0*A;
+ var y1 = k0*B;
+ var x2 = k1*A;
+ var y2 = k1*B;
+
+ // Assign a linear gradient that interpolates all 3 vertexes
+ if (renderer.isSVG) {
+ //SVGRenderer implementation of gradient is slow and leaks memory -- Lets do it ourselves
+ var gradient = triangle_data.gradient;
+ if (!gradient) {
+ var gradient = triangle_data.gradient = document.createElementNS(SVG_NS, "linearGradient");
+ gradient.setAttribute("id", "contour-gradient-id-" + (gradient_id++));
+ renderer.defs.element.appendChild(gradient);
+ }
+ gradient.setAttributeNS(XLINK_NS, "xlink:href", this.base_gradient_id);
+ gradient.setAttribute("x1", x1);
+ gradient.setAttribute("y1", y1);
+ gradient.setAttribute("x2", x2);
+ gradient.setAttribute("y2", y2);
+ fill = 'url(' + renderer.url + '#' + gradient.getAttribute('id') + ')';
+ } else {
+ fill = {
+ linearGradient: {
+ x1: x1,
+ y1: y1,
+ x2: x2,
+ y2: y2,
+ spreadMethod: 'pad',
+ gradientUnits:'userSpaceOnUse'
+ },
+ stops: this.colorAxis.stops
+ };
+ }
+ }
+
+
+ var path = [
+ 'M',
+ a.plotX + ',' + a.plotY,
+ 'L',
+ b.plotX + ',' + b.plotY,
+ 'L',
+ c.plotX + ',' + c.plotY,
+ 'Z'
+ ];
+
+ if (triangle_data.shape) {
+ triangle_data.shape.attr({
+ d: path,
+ fill: fill,
+ });
+ } else {
+ triangle_data.shape = renderer.path(path)
+ .attr({
+ 'shape-rendering': 'crispEdges',
+ fill: fill
+ })
+ }
+ triangle_data.shape.add(this.surface_group);
+
+
+
+ // Draw edges around the triangle and/or on contour curves
+
+ var edge_path = [];
+ if (show_edges) {
+ var processEdge = function(a,b) {
+ if (!edgeCount[b + '-' + a]) {
+ if (edgeCount[a + '-' + b]-- == 1) {
+ edge_path.push(
+ 'M',
+ points[a].plotX + ',' + points[a].plotY,
+ 'L',
+ points[b].plotX + ',' + points[b].plotY);
+ }
+ }
+ }
+ processEdge(triangle_data.a,triangle_data.b);
+ processEdge(triangle_data.b,triangle_data.c);
+ processEdge(triangle_data.c,triangle_data.a);
+ }
+
+ for (var contour_index=0; contour_index= Math.min(v1, v2) && tickValue < Math.max(v1, v2)) {
+ var q = (tickValue-v1)/(v2-v1);
+ contourVertexes.push([
+ q*(x2-x1) + x1,
+ q*(y2-y1) + y1
+ ]);
+ }
+ }
+ if (contourVertexes.length == 2) {
+ edge_path.push(
+ 'M',
+ contourVertexes[0][0] + ',' + contourVertexes[0][1],
+ 'L',
+ contourVertexes[1][0] + ',' + contourVertexes[1][1]);
+ }
+ }
+ }
+
+ if (edge_path.length) {
+ if (triangle_data.edge) {
+ triangle_data.edge.attr({
+ d: edge_path,
+ });
+ } else {
+ triangle_data.edge = renderer.path(edge_path)
+ .attr({
+ 'stroke-linecap': 'round',
+ 'stroke': 'black',
+ 'stroke-width': 1,
+ })
+ }
+ triangle_data.edge.add(this.surface_group);
+ } else if (triangle_data.edge) {
+ triangle_data.edge.destroy();
+ delete triangle_data.edge;
+ }
+ },
+ drawGraph: function () {
+ var series = this,
+ i,j,
+ points = series.points,
+ options = this.options,
+ renderer = series.chart.renderer;
+
+ if (!series.surface_group) {
+ series.surface_group = renderer.g().add(series.group);
+ series.triangles = [];
+ }
+
+ //When creating a SVG, we create a "base" gradient with the right colors,
+ //And extend it on every triangle to define the orientation.
+ if (series.chart.renderer.isSVG && !this.base_gradient_id) {
+ var fake_rect = series.chart.renderer.rect(0,0,1,1).attr({
+ fill: {
+ linearGradient: {
+ x1: 0,
+ y1: 0,
+ x2: 1,
+ y2: 0,
+ spreadMethod: 'pad',
+ gradientUnits:'userSpaceOnUse'
+ },
+ stops: this.colorAxis.stops
+ }
+ });
+ this.base_gradient_id = /(#.*)[)]/.exec(fake_rect.attr('fill'))[1];
+ }
+
+ var group = series.surface_group;
+ var triangle_count = 0;
+
+ var egde_count = {};
+ var validatePoint = function(p) {
+ return p && (typeof p.x === "number") && (typeof p.y === "number") && (typeof p.z === "number" || !series.is3d) && (typeof p.value === "number");
+ };
+ var appendEdge = function(a,b) {
+ egde_count[a+'-'+b] = (egde_count[a+'-'+b] || 0) + 1;
+ };
+ var appendTriangle = function(a,b,c) {
+ if (validatePoint(points[a]) && validatePoint(points[b]) && validatePoint(points[c])) {
+ var triangle_data = series.triangles[triangle_count];
+ if (!triangle_data) {
+ triangle_data = series.triangles[triangle_count] = {};
+ }
+ triangle_count++;
+
+ //Make sure the shape is counter-clockwise
+ if (shapeArea([points[a], points[b], points[c]], 'plotX', 'plotY') > 0) {
+ var tmp = a;
+ a = b;
+ b = tmp;
+ }
+ triangle_data.a = a;
+ triangle_data.b = b;
+ triangle_data.c = c;
+
+ appendEdge(a,b);
+ appendEdge(b,c);
+ appendEdge(c,a);
+
+ triangle_data.z_order = [(points[a].plotZ + points[b].plotZ + points[c].plotZ)/3];
+ }
+ };
+
+
+ var triangles = [];
+ if (options.triangles) {
+ for (i=0; i bestRowVal) {
+ bestRow = row;
+ bestRowVal = Math.abs(this.mtx[row][col]);
+ }
+ }
+
+ //All zeros in this column :(
+ if (bestRow == null) {
+ for (var row=0; row
Date: Wed, 3 Aug 2016 11:57:43 +0200
Subject: [PATCH 004/128] Basic implementation of the gradient descent.
---
.../educational/widgets/owgradientdescent.py | 208 +++++++++-----
.../educational/widgets/utils/contour.py | 265 ++++++++++++++++++
.../widgets/utils/logistic_regression.py | 7 +-
3 files changed, 407 insertions(+), 73 deletions(-)
create mode 100644 orangecontrib/educational/widgets/utils/contour.py
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index f00da059..7fd139c4 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -5,6 +5,8 @@
from Orange.widgets.utils import itemmodels
from PyQt4.QtCore import pyqtSlot, Qt
from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
+from scipy.interpolate import splev, splprep
+from scipy.ndimage import gaussian_filter
from Orange.classification import Model
from Orange.data import Table, ContinuousVariable, Domain, DiscreteVariable
@@ -12,10 +14,11 @@
from Orange.widgets import highcharts
from Orange.widgets import settings
from Orange.widgets.widget import OWWidget, Msg
-from scipy.ndimage import gaussian_filter
+from Orange.preprocess.preprocess import Normalize
from orangecontrib.educational.widgets.utils.logistic_regression \
import LogisticRegression
+from orangecontrib.educational.widgets.utils.contour import Contour
class Scatterplot(highcharts.Highchart):
@@ -28,9 +31,9 @@ class Scatterplot(highcharts.Highchart):
"""
js_click_function = """/**/(function(event) {
- window.pybridge.chart_clicked(event.xAxis[0].value, event.yAxis[0].value);
- })
- """
+ window.pybridge.chart_clicked(event.xAxis[0].value, event.yAxis[0].value);
+ })
+ """
# to make unit tesest
count_replots = 0
@@ -46,7 +49,6 @@ def __init__(self, click_callback, **kwargs):
enable_select='',
chart_events_click=self.js_click_function,
plotOptions_series_states_hover_enabled=False,
- plotOptions_series_cursor="move",
javascript=contours_js,
**kwargs)
@@ -60,6 +62,24 @@ def chart(self, *args, **kwargs):
def chart_clicked(self, x, y):
self.click_callback(x, y)
+ def remove_series(self, id):
+ self.evalJS("""
+ series = chart.get('{id}');
+ if (series != null)
+ series.remove(true);
+ """.format(id=id))
+
+ def add_series(self, series):
+ for i, s in enumerate(series):
+ self.exposeObject('series%d' % i, series[i])
+ self.evalJS("chart.addSeries(series%d, true);" % i)
+
+ def add_point_to_series(self, id, x, y):
+ self.evalJS("""
+ series = chart.get('{id}');
+ series.addPoint([{x}, {y}]);
+ """.format(id=id, x=x, y=y))
+
class OWGradientDescent(OWWidget):
@@ -76,6 +96,7 @@ class OWGradientDescent(OWWidget):
attr_x = settings.Setting('')
attr_y = settings.Setting('')
target_class = settings.Setting('')
+ alpha = settings.Setting(0.1)
# models
x_var_model = None
@@ -85,7 +106,8 @@ class OWGradientDescent(OWWidget):
default_learner = LogisticRegression
learner = None
cost_grid = None
- grid_size = 20
+ grid_size = 15
+ contour_color = "#aaaaaa"
# data
data = None
@@ -121,8 +143,26 @@ def __init__(self):
self.cbx.setModel(self.x_var_model)
self.cby.setModel(self.y_var_model)
+ self.properties_box = gui.widgetBox(self.controlArea)
+ self.alpha_spin = gui.spin(widget=self.properties_box,
+ master=self,
+ callback=self.change_alpha,
+ value="alpha",
+ label="Alpha: ",
+ minv=0.01,
+ maxv=1,
+ step=0.01,
+ spinType=float)
+
+ self.comand_box = gui.widgetBox(self.controlArea)
+
+ self.step_buttton = gui.button(widget=self.comand_box,
+ master=self,
+ callback=self.step,
+ label="Step")
+
# graph in mainArea
- self.scatter = Scatterplot(click_callback=self.graph_clicked,
+ self.scatter = Scatterplot(click_callback=self.set_theta,
xAxis_gridLineWidth=0,
yAxis_gridLineWidth=0,
title_text='',
@@ -203,11 +243,31 @@ def init_combos():
self.target_class = self.target_class_combobox.itemText(0)
self.restart()
+ def set_empty_plot(self):
+ self.scatter.clear()
+
def restart(self):
self.selected_data = self.select_data()
- self.learner = self.default_learner(data=self.selected_data)
+ self.learner = self.default_learner(data=Normalize(self.selected_data))
self.replot()
+ def change_alpha(self):
+ if self.learner is not None:
+ self.learner.set_alpha(self.alpha)
+
+ def step(self):
+ if self.data is None:
+ return
+ if self.learner.theta is None:
+ self.set_theta(np.random.uniform(self.min_x, self.max_x),
+ np.random.uniform(self.min_y, self.max_y))
+ self.learner.step()
+ theta = self.learner.theta
+ self.plot_point(theta[0], theta[1])
+
+ def plot_point(self, x, y):
+ self.scatter.add_point_to_series("path", x, y)
+
def replot(self):
"""
This function performs complete replot of the graph
@@ -219,44 +279,48 @@ def replot(self):
attr_y = self.data.domain[self.attr_y]
optimal_theta = self.learner.optimized()
- min_x = optimal_theta[0] - 3
- max_x = optimal_theta[0] + 3
- min_y = optimal_theta[1] - 3
- max_y = optimal_theta[1] + 3
+ self.min_x = optimal_theta[0] - 5
+ self.max_x = optimal_theta[0] + 5
+ self.min_y = optimal_theta[1] - 5
+ self.max_y = optimal_theta[1] + 5
options = dict(series=[])
# gradient and contour
options['series'] += self.plot_gradient_and_contour(
- min_x, max_x, min_y, max_y)
+ self.min_x, self.max_x, self.min_y, self.max_y)
- data = options['series'][0]['data']
- data = [d[2] for d in data]
- min_value = np.min(data)
- max_value = np.max(data)
+
+ min_value = np.min(self.cost_grid)
+ max_value = np.max(self.cost_grid)
# highcharts parameters
kwargs = dict(
xAxis_title_text=attr_x.name,
yAxis_title_text=attr_y.name,
- xAxis_min=min_x,
- xAxis_max=max_x,
- yAxis_min=min_y,
- yAxis_max=max_y,
+ xAxis_min=self.min_x,
+ xAxis_max=self.max_x,
+ yAxis_min=self.min_y,
+ yAxis_max=self.max_y,
+ xAxis_startOnTick=False,
+ xAxis_endOnTick=False,
+ yAxis_startOnTick=False,
+ yAxis_endOnTick=False,
colorAxis=dict(
stops=[
[min_value, "#ffffff"],
[max_value, "#ff0000"]],
tickInterval=1, max=max_value, min=min_value),
- plotOptions_contour_colsize=(max_y - min_y) / 1000,
- plotOptions_contour_rowsize=(max_x - min_x) / 1000,
+ plotOptions_contour_colsize=(self.max_y - self.min_y) / 10000,
+ plotOptions_contour_rowsize=(self.max_x - self.min_x) / 10000,
+ tooltip_enabled=False,
tooltip_headerFormat="",
        tooltip_pointFormat="%s: {point.x:.2f}<br/>"
"%s: {point.y:.2f}" %
(self.attr_x, self.attr_y))
self.scatter.chart(options, **kwargs)
- # hack to destroy the legend for coloraxis
+
def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
"""
@@ -290,7 +354,8 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
blurred = self.blur_grid(self.cost_grid)
- return self.plot_gradient(self.xv, self.yv, blurred)
+ # return self.plot_gradient(self.xv, self.yv, blurred) + \
+ return self.plot_contour()
def plot_gradient(self, x, y, grid):
"""
@@ -327,57 +392,56 @@ def select_data(self):
return Table(domain, x, y, self.data.Y[:, None])
- # def plot_contour(self):
- # """
- # Function constructs contour lines
- # """
- # self.scatter.remove_contours()
- # if self.contours_enabled:
- # contour = Contour(
- # self.xv, self.yv, self.blur_grid(self.probabilities_grid))
- # contour_lines = contour.contours(
- # np.hstack(
- # (np.arange(0.5, 0, - self.contour_step)[::-1],
- # np.arange(0.5 + self.contour_step, 1, self.contour_step))))
- # # we want to have contour for 0.5
- #
- # series = []
- # count = 0
- # for key, value in contour_lines.items():
- # for line in value:
- # if len(line) > self.degree:
- # # if less than degree interpolation fails
- # tck, u = splprep(
- # [list(x) for x in zip(*reversed(line))],
- # s=0.001, k=self.degree,
- # per=(len(line)
- # if np.allclose(line[0], line[-1])
- # else 0))
- # new_int = np.arange(0, 1.01, 0.01)
- # interpol_line = np.array(splev(new_int, tck)).T.tolist()
- # else:
- # interpol_line = line
- #
- # series.append(dict(data=self.labeled(interpol_line, count),
- # color=self.contour_color,
- # type="spline",
- # lineWidth=0.5,
- # showInLegend=False,
- # marker=dict(enabled=False),
- # name="%g" % round(key, 2),
- # enableMouseTracking=False
- # ))
- # count += 1
- # self.scatter.add_series(series)
- # self.scatter.redraw_series()
+ def plot_contour(self):
+ """
+ Function constructs contour lines
+ """
+
+ contour = Contour(
+ self.xv, self.yv, self.blur_grid(self.cost_grid))
+ contour_lines = contour.contours(
+ np.linspace(np.min(self.cost_grid), np.max(self.cost_grid), 10))
+
+ series = []
+ count = 0
+ for key, value in contour_lines.items():
+ for line in value:
+ # if len(line) > 3:
+ # # if less than degree interpolation fails
+ # tck, u = splprep(
+ # [list(x) for x in zip(*reversed(line))],
+ # s=0.001, k=3,
+ # per=(len(line)
+ # if np.allclose(line[0], line[-1])
+ # else 0))
+ # new_int = np.arange(0, 1.01, 0.01)
+ # interpol_line = np.array(splev(new_int, tck)).T.tolist()
+ # else:
+ interpol_line = line
+
+ series.append(dict(data=interpol_line,
+ color=self.contour_color,
+ type="spline",
+ lineWidth=0.5,
+ showInLegend=False,
+ marker=dict(enabled=False),
+ name="%g" % round(key, 2),
+ enableMouseTracking=False
+ ))
+ count += 1
+ return series
@staticmethod
def blur_grid(grid):
filtered = gaussian_filter(grid, sigma=1)
- filtered[(grid > 0.45) & (grid < 0.55)] = grid[(grid > 0.45) &
- (grid < 0.55)]
return filtered
- def graph_clicked(self, x, y):
- pass
+ def set_theta(self, x, y):
+ if self.learner is not None:
+ self.learner.set_theta([x, y])
+ self.scatter.remove_series("path")
+ self.scatter.add_series([
+ dict(id="path", data=[[x, y]], showInLegend=False,
+ type="scatter", lineWidth=1,
+ marker=dict(enabled=True, radius=2))],)
diff --git a/orangecontrib/educational/widgets/utils/contour.py b/orangecontrib/educational/widgets/utils/contour.py
new file mode 100644
index 00000000..009dd6de
--- /dev/null
+++ b/orangecontrib/educational/widgets/utils/contour.py
@@ -0,0 +1,265 @@
+import numpy as np
+
+
+class Contour:
+
+ # look corners table from
+ # https://en.wikipedia.org/wiki/Marching_squares#Isoline
+ # corners table is coded as move in clockwise direction
+ moves = {
+ 1: {"to": [1, 0], "from": [0, -1]}, # D
+ 2: {"to": [0, 1], "from": [1, 0]}, # R
+ 3: {"to": [0, 1], "from": [0, -1]}, # R
+ 4: {"to": [-1, 0], "from": [0, 1]}, # U
+ 6: {"to": [-1, 0], "from": [1, 0]}, # U
+ 7: {"to": [-1, 0], "from": [0, -1]}, # U
+ 8: {"to": [0, -1], "from": [-1, 0]}, # L
+ 9: {"to": [1, 0], "from": [-1, 0]}, # D
+ 11: {"to": [0, 1], "from": [-1, 0]}, # R
+ 12: {"to": [0, -1], "from": [0, 1]}, # L
+ 13: {"to": [1, 0], "from": [0, 1]}, # D
+ 14: {"to": [0, -1], "from": [1, 0]} # L
+ }
+
+ moves_up = [4, 6, 7]
+ moves_down = [1, 9, 13]
+ moves_left = [8, 12, 14]
+ moves_right = [2, 3, 11]
+
+ from_up = [8, 9, 11]
+ from_down = [2, 6, 14]
+ from_left = [1, 3, 7]
+ from_right = [4, 12, 13]
+
+ def __init__(self, x, y, z):
+ self.x = np.array(x)
+ self.y = np.array(y)
+ self.z = np.array(z)
+ self.visited_points = None
+
+ def contours(self, thresholds):
+ contours = {}
+ for t in thresholds:
+ points = self.find_contours(t)
+ if len(points) > 0:
+ contours[t] = points
+ return contours
+
+ def find_contours(self, threshold):
+ contours = []
+ bitmap = (self.z > threshold).astype(int)
+ self.visited_points = np.zeros(self.z.shape)
+ # check if a contour starts on an edge (such contours have to touch the edge)
+ for i in range(bitmap.shape[0] - 1):
+ # left
+ sq_idx = self.corner_idx(bitmap[i:i+2, 0:2])
+ upper = (False if sq_idx != 5 else True)
+ if sq_idx in [1, 3, 5, 7] and not self.visited(i, 0, upper):
+ contour = self.find_contour_path(bitmap, i, 0, threshold)
+ contours.append(contour)
+ # right
+ sq_idx = self.corner_idx(
+ bitmap[i:i+2, bitmap.shape[1]-2:bitmap.shape[1]])
+ if sq_idx in [4, 5, 12, 13] and \
+ not self.visited(i, bitmap.shape[1]-2, False):
+ contour = self.find_contour_path(
+ bitmap, i, bitmap.shape[1]-2, threshold)
+ contours.append(contour)
+
+ for j in range(bitmap.shape[1] - 1):
+ # top
+ sq_idx = self.corner_idx(bitmap[0:2, j:j+2])
+ upper = (False if sq_idx != 10 else True)
+ if sq_idx in [8, 9, 10, 11] and not self.visited(0, j, upper):
+ contours.append(self.find_contour_path(bitmap, 0, j, threshold))
+ # bottom
+ sq_idx = self.corner_idx(
+ bitmap[bitmap.shape[0]-2:bitmap.shape[0], j:j+2])
+ if sq_idx in [2, 6, 10, 14] and \
+ not self.visited(bitmap.shape[0]-2, j, False):
+ contour = self.find_contour_path(
+ bitmap, bitmap.shape[0]-2, j, threshold)
+ contours.append(contour)
+
+ nonzero_lines = np.nonzero(
+ bitmap.shape[1] - np.sum(bitmap[1:-1, :], axis=1))[0] + 1
+ # 1:-1 to avoid double check edge
+
+ for i in nonzero_lines:
+ for j in range(1, bitmap.shape[1] - 2):
+ sq_idx = self.corner_idx(bitmap[i:i+2, j:j+2])
+ if sq_idx not in [0, 15] and not self.visited(i, j, False):
+ path = self.find_contour_path(bitmap, i, j, threshold)
+ contours.append(path)
+ return contours
+
+ def find_contour_path(self, bitmap, start_i, start_j, threshold):
+ i, j = start_i, start_j
+ path = [self.to_real_coordinate(
+ self.start_point(
+ bitmap[i:i+2, j:j+2], np.array([i, j]), threshold))]
+
+ previous_position = None
+ step = 0
+ while 0 <= i < bitmap.shape[0] - 1 \
+ and 0 <= j < bitmap.shape[1] - 1:
+ square = bitmap[i:i+2, j:j+2]
+ upper = (True if (self.corner_idx(square) in [5, 10] and
+ (previous_position is None or
+ previous_position[0] < i or
+ previous_position[1] < j)) else False)
+
+ if self.visited(i, j, upper):
+ # i == start_i and j == start_j and step > 0 and
+ break # cycle
+
+ new_p = self.new_point(
+ square, previous_position, np.array([i, j]), threshold)
+ path.append(self.to_real_coordinate(new_p))
+
+ self.mark_visited(i, j, upper)
+ previous_position_tmp = [i, j]
+
+ i, j = self.new_position(
+ square, previous_position, np.array([i, j]))
+ previous_position = previous_position_tmp
+ step += 1
+ return path
+
+ def to_real_coordinate(self, point):
+ """
+ Parameters
+ ----------
+ point : list
+ List that contains point (x, y) in grid coordinate system
+
+ Returns
+ -------
+ list
+ """
+ x_idx = int(point[1])
+ y_idx = int(point[0])
+ return [self.x[y_idx, x_idx] +
+ ((point[1] % 1) * (self.x[y_idx, x_idx + 1] -
+ self.x[y_idx, x_idx])
+ if x_idx + 1 < self.x.shape[1] else 0),
+ self.y[y_idx, x_idx] +
+ ((point[0] % 1) * (self.y[y_idx + 1, x_idx] -
+ self.y[y_idx, x_idx])
+ if y_idx + 1 < self.x.shape[0] else 0)]
+
+ def new_point(self, sq, previous, position, threshold):
+ con_idx = self.corner_idx(sq)
+ if con_idx == 5:
+ goes_top = ((previous is None and
+ position[1] != self.z.shape[1] - 2) or
+ (previous is not None and
+ (previous[1] + 1 == position[1])))
+ heat_from = self.z[position[0] +
+ (0 if goes_top else 1), position[1]]
+ heat_to = self.z[position[0] +
+ (0 if goes_top else 1), position[1] + 1]
+ pos = position + np.array(
+ [(0 if goes_top else 1),
+ self.triangulate(threshold, heat_from, heat_to)])
+ elif con_idx == 10:
+ goes_right = ((previous is None and
+ position[0] != self.z.shape[0] - 2) or
+ (previous is not None and
+ (previous[0] + 1 == position[0])))
+ heat_from = self.z[position[0],
+ position[1] + (1 if goes_right else 0)]
+ heat_to = self.z[position[0] + 1,
+ position[1] + (1 if goes_right else 0)]
+ pos = position + np.array(
+ [self.triangulate(threshold, heat_from, heat_to),
+ (1 if goes_right else 0)])
+ else:
+ move_dimension = 0 if self.moves[con_idx]['to'][0] == 0 else 1
+ pos = (position + np.array(
+ self.moves[con_idx]['to']).clip(min=0)).astype(float)
+ heat_from = self.z[
+ (position[0] + 1 if con_idx in self.moves_down
+ else position[0]),
+ (position[1] + 1 if con_idx in self.moves_right
+ else position[1])]
+ heat_to = self.z[
+ (position[0] if con_idx in self.moves_up else position[0] + 1),
+ (position[1] if con_idx in self.moves_left
+ else position[1] + 1)]
+ pos[move_dimension] += self.triangulate(
+ threshold, heat_from, heat_to)
+
+ return pos.tolist()
+
+ @staticmethod
+ def triangulate(threshold, heat_from, heat_to):
+ return ((threshold - heat_from) / (heat_to - heat_from)) \
+ if heat_from < heat_to else \
+ (1 - (threshold - heat_to) / (heat_from - heat_to))
+
+ def start_point(self, sq, position, threshold):
+ con_idx = self.corner_idx(sq)
+ if con_idx == 5:
+ from_left = position[1] != self.z.shape[1] - 2
+ heat_from = self.z[position[0],
+ position[1] + (0 if from_left else 1)]
+ heat_to = self.z[position[0] + 1,
+ position[1] + (0 if from_left else 1)]
+ pos = position + np.array([self.triangulate(
+ threshold, heat_from, heat_to),
+ 0 if from_left else 1]) # left edge 0 every time, same right
+ elif con_idx == 10:
+ from_top = position[0] != self.z.shape[0] - 2
+ heat_from = self.z[position[0] +
+ (0 if from_top else 1), position[1]]
+ heat_to = self.z[position[0] +
+ (0 if from_top else 1), position[1] + 1]
+ pos = position + np.array(
+ [0 if from_top else 1,
+ self.triangulate(threshold, heat_from, heat_to)])
+ # on top edge 1 every time, same bottom
+ else:
+ move_dimension = 0 if self.moves[con_idx]['from'][0] == 0 else 1
+ pos = (position + np.array(
+ self.moves[con_idx]['from']).clip(min=0)).astype(float)
+ heat_from = self.z[
+ (position[0] + 1 if con_idx in self.from_down else position[0]),
+ (position[1] + 1 if con_idx in self.from_right
+ else position[1])]
+ heat_to = self.z[
+ (position[0] if con_idx in self.from_up else position[0] + 1),
+ (position[1] if con_idx in self.from_left else position[1] + 1)]
+ pos[move_dimension] += self.triangulate(
+ threshold, heat_from, heat_to)
+
+ return pos.tolist()
+
+ @classmethod
+ def new_position(cls, sq, previous, position):
+ con_idx = cls.corner_idx(sq)
+ if con_idx == 5:
+ pos = (position +
+ np.array([(-1 if (previous is None or
+ previous[1] + 1 == position[1])
+ else 1), 0]))
+ elif con_idx == 10:
+ pos = (position +
+ np.array([0, (1 if (previous is None or
+ previous[0] + 1 == position[0])
+ else -1)]))
+ else:
+ pos = position + cls.moves[con_idx]['to']
+ return pos.tolist()
+
+ @staticmethod
+ def corner_idx(sq):
+ return np.sum(np.array([[8, 4], [1, 2]]) * sq)
+
+ def visited(self, i, j, upper=True):
+ visited = self.visited_points[i, j]
+ return (visited in [1, 3] and upper) or (visited >= 2 and not upper)
+
+ def mark_visited(self, i, j, upper=True):
+ if not self.visited(i, j, upper):
+ self.visited_points[i, j] += (1 if upper else 2)
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index ad24474f..ae6669a5 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -23,7 +23,12 @@ def set_data(self, data):
self.domain = data.domain
def set_theta(self, theta):
- self.theta = theta
+ if isinstance(theta, (np.ndarray, np.generic)):
+ self.theta = theta
+ elif isinstance(theta, list):
+ self.theta = np.array(theta)
+ else:
+ self.theta = None
def set_alpha(self, alpha):
self.alpha = alpha
From 6cf86802815dd318bf571a58912dfd9b0f1ab4ea Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 13:02:57 +0200
Subject: [PATCH 005/128] Implemented step back.
---
.../educational/widgets/owgradientdescent.py | 18 ++++++++++++++-
.../widgets/utils/logistic_regression.py | 23 ++++++++++++++++++-
2 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 7fd139c4..737c3bca 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -69,6 +69,13 @@ def remove_series(self, id):
series.remove(true);
""".format(id=id))
+ def remove_last_point(self, id):
+ self.evalJS("""
+ series = chart.get('{id}');
+ if (series != null)
+ series.removePoint(series.data.length - 1, true);
+ """.format(id=id))
+
def add_series(self, series):
for i, s in enumerate(series):
self.exposeObject('series%d' % i, series[i])
@@ -156,10 +163,14 @@ def __init__(self):
self.comand_box = gui.widgetBox(self.controlArea)
- self.step_buttton = gui.button(widget=self.comand_box,
+ self.step_button = gui.button(widget=self.comand_box,
master=self,
callback=self.step,
label="Step")
+ self.step_back_button = gui.button(widget=self.comand_box,
+ master=self,
+ callback=self.step_back,
+ label="Step")
# graph in mainArea
self.scatter = Scatterplot(click_callback=self.set_theta,
@@ -265,6 +276,11 @@ def step(self):
theta = self.learner.theta
self.plot_point(theta[0], theta[1])
+ def step_back(self):
+ if self.learner.step_no > 0:
+ self.learner.step_back()
+ self.scatter.remove_last_point("path")
+
def plot_point(self, x, y):
self.scatter.add_point_to_series("path", x, y)
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index ae6669a5..34434b56 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -10,8 +10,10 @@ class LogisticRegression:
y = None
theta = None
domain = None
+ step_no = 0
def __init__(self, alpha=0.1, theta=None, data=None):
+ self.history = []
self.set_alpha(alpha)
self.set_data(data)
self.set_theta(theta)
@@ -29,6 +31,8 @@ def set_theta(self, theta):
self.theta = np.array(theta)
else:
self.theta = None
+ self.history = self.set_list(self.history, 0, np.copy(self.theta))
+ self.step_no = 0
def set_alpha(self, alpha):
self.alpha = alpha
@@ -38,8 +42,15 @@ def model(self):
return LogisticRegressionModel(self.theta, self.domain)
def step(self):
+ self.step_no += 1
grad = self.dj(self.theta)
self.theta -= self.alpha * grad
+ self.history = self.set_list(self.history, self.step_no, np.copy(self.theta))
+
+ def step_back(self):
+ if self.step_no > 0:
+ self.step_no -= 1
+ self.theta = np.copy(self.history[self.step_no])
def j(self, theta):
"""
@@ -47,7 +58,8 @@ def j(self, theta):
"""
# TODO: modify for more thetas
yh = self.g(self.x.dot(theta))
- return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh))) / len(yh)
+ # return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh))) / len(yh)
+ return -sum(self.y * np.log(yh) + (1 - self.y) * np.log(1 - yh)) / len(yh)
def dj(self, theta):
"""
@@ -81,6 +93,15 @@ def g(z):
return 1.0 / (1 + np.exp(- z_mod))
+ @staticmethod
+ def set_list(l, i, v):
+ try:
+ l[i] = v
+ except IndexError:
+ for _ in range(i-len(l)):
+ l.append(None)
+ l.append(v)
+ return l
class LogisticRegressionModel(Model):
From 42588134164a2ab4e35a55313cf8d1126e1cdc39 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 13:48:12 +0200
Subject: [PATCH 006/128] Auto step.
---
.../educational/widgets/owgradientdescent.py | 116 +++++++++++++++---
.../widgets/utils/logistic_regression.py | 6 +
2 files changed, 106 insertions(+), 16 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 737c3bca..e58df245 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -1,9 +1,9 @@
-from math import isnan
from os import path
+import time
import numpy as np
from Orange.widgets.utils import itemmodels
-from PyQt4.QtCore import pyqtSlot, Qt
+from PyQt4.QtCore import pyqtSlot, Qt, QThread, SIGNAL
from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
from scipy.interpolate import splev, splprep
from scipy.ndimage import gaussian_filter
@@ -88,6 +88,33 @@ def add_point_to_series(self, id, x, y):
""".format(id=id, x=x, y=y))
+class Autoplay(QThread):
+ """
+ Class used for a separate thread that runs "Autoplay" for gradient descent
+ Parameters
+ ----------
+ ow_gradient_descent : OWGradientDescent
+ Instance of the OWGradientDescent widget
+ """
+
+ def __init__(self, ow_gradient_descent):
+ QThread.__init__(self)
+ self.ow_gradient_descent = ow_gradient_descent
+
+ def __del__(self):
+ self.wait()
+
+ def run(self):
+ """
+ Step through the algorithm until it converges or the user interrupts
+ """
+ while (not self.ow_gradient_descent.learner.converged and
+ self.ow_gradient_descent.auto_play_enabled):
+ self.emit(SIGNAL('step()'))
+ time.sleep(2 - self.ow_gradient_descent.auto_play_speed)
+ self.emit(SIGNAL('stop_auto_play()'))
+
+
class OWGradientDescent(OWWidget):
name = "Gradient Descent"
@@ -104,6 +131,7 @@ class OWGradientDescent(OWWidget):
attr_y = settings.Setting('')
target_class = settings.Setting('')
alpha = settings.Setting(0.1)
+ auto_play_speed = settings.Setting(1)
# models
x_var_model = None
@@ -120,6 +148,10 @@ class OWGradientDescent(OWWidget):
data = None
selected_data = None
+ # autoplay
+ auto_play_enabled = False
+ autoplay_button_text = ["Run", "Stop"]
+
class Warning(OWWidget.Warning):
to_few_features = Msg("Too few Continuous feature. Min 2 required")
no_class = Msg("No class provided or only one class variable")
@@ -134,7 +166,7 @@ def __init__(self):
# options box
policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
- self.options_box = gui.widgetBox(self.controlArea)
+ self.options_box = gui.widgetBox(self.controlArea, "Data")
opts = dict(
widget=self.options_box, master=self, orientation=Qt.Horizontal,
callback=self.restart, sendSelectedValue=True
@@ -150,7 +182,7 @@ def __init__(self):
self.cbx.setModel(self.x_var_model)
self.cby.setModel(self.y_var_model)
- self.properties_box = gui.widgetBox(self.controlArea)
+ self.properties_box = gui.widgetBox(self.controlArea, "Properties")
self.alpha_spin = gui.spin(widget=self.properties_box,
master=self,
callback=self.change_alpha,
@@ -160,17 +192,35 @@ def __init__(self):
maxv=1,
step=0.01,
spinType=float)
+ self.restart_button = gui.button(widget=self.properties_box,
+ master=self,
+ callback=self.restart,
+ label="Restart")
- self.comand_box = gui.widgetBox(self.controlArea)
+ self.step_box = gui.widgetBox(self.controlArea, "Manually step through")
- self.step_button = gui.button(widget=self.comand_box,
+ self.step_button = gui.button(widget=self.step_box,
master=self,
callback=self.step,
label="Step")
- self.step_back_button = gui.button(widget=self.comand_box,
+ self.step_back_button = gui.button(widget=self.step_box,
master=self,
callback=self.step_back,
- label="Step")
+ label="Step back")
+
+ self.run_box = gui.widgetBox(self.controlArea, "Run")
+ self.auto_play_button = gui.button(
+ self.run_box, self, self.autoplay_button_text[0],
+ callback=self.auto_play)
+ self.auto_play_speed_spinner = gui.hSlider(self.run_box,
+ self,
+ 'auto_play_speed',
+ minValue=0,
+ maxValue=1.91,
+ step=0.1,
+ intOnly=False,
+ createLabel=False,
+ label='Speed:')
# graph in mainArea
self.scatter = Scatterplot(click_callback=self.set_theta,
@@ -259,7 +309,8 @@ def set_empty_plot(self):
def restart(self):
self.selected_data = self.select_data()
- self.learner = self.default_learner(data=Normalize(self.selected_data))
+ self.learner = self.default_learner(data=Normalize(self.selected_data),
+ alpha=self.alpha)
self.replot()
def change_alpha(self):
@@ -312,8 +363,8 @@ def replot(self):
# highcharts parameters
kwargs = dict(
- xAxis_title_text=attr_x.name,
- yAxis_title_text=attr_y.name,
+ xAxis_title_text="theta 0",
+ yAxis_title_text="theta 1",
xAxis_min=self.min_x,
xAxis_max=self.max_x,
yAxis_min=self.min_y,
@@ -322,11 +373,11 @@ def replot(self):
xAxis_endOnTick=False,
yAxis_startOnTick=False,
yAxis_endOnTick=False,
- colorAxis=dict(
- stops=[
- [min_value, "#ffffff"],
- [max_value, "#ff0000"]],
- tickInterval=1, max=max_value, min=min_value),
+ # colorAxis=dict(
+ # stops=[
+ # [min_value, "#ffffff"],
+ # [max_value, "#ff0000"]],
+ # tickInterval=1, max=max_value, min=min_value),
plotOptions_contour_colsize=(self.max_y - self.min_y) / 10000,
plotOptions_contour_rowsize=(self.max_x - self.min_x) / 10000,
tooltip_enabled=False,
@@ -461,3 +512,36 @@ def set_theta(self, x, y):
type="scatter", lineWidth=1,
marker=dict(enabled=True, radius=2))],)
+ def auto_play(self):
+ """
+ Function called when autoplay button pressed
+ """
+ self.auto_play_enabled = not self.auto_play_enabled
+ self.auto_play_button.setText(
+ self.autoplay_button_text[self.auto_play_enabled])
+ if self.auto_play_enabled:
+ self.disable_controls(self.auto_play_enabled)
+ self.autoPlayThread = Autoplay(self)
+ self.connect(self.autoPlayThread, SIGNAL("step()"), self.step)
+ self.connect(
+ self.autoPlayThread, SIGNAL("stop_auto_play()"),
+ self.stop_auto_play)
+ self.autoPlayThread.start()
+ else:
+ self.stop_auto_play()
+
+ def stop_auto_play(self):
+ """
+ Called when stop autoplay button pressed or in the end of autoplay
+ """
+ self.auto_play_enabled = False
+ self.disable_controls(self.auto_play_enabled)
+ self.auto_play_button.setText(
+ self.autoplay_button_text[self.auto_play_enabled])
+
+ def disable_controls(self, disabled):
+ self.step_box.setDisabled(disabled)
+ self.options_box.setDisabled(disabled)
+ self.properties_box.setDisabled(disabled)
+
+
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 34434b56..0662b3a8 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -41,6 +41,12 @@ def set_alpha(self, alpha):
def model(self):
return LogisticRegressionModel(self.theta, self.domain)
+ @property
+ def converged(self):
+ if self.step_no == 0:
+ return False
+ return np.sum(np.abs(self.theta - self.history[self.step_no - 1])) < 1e-2
+
def step(self):
self.step_no += 1
grad = self.dj(self.theta)
From 6cb21b792130a5267f2fb619a7b69569fa846f2d Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 15:49:02 +0200
Subject: [PATCH 007/128] Stochastic option in gradient descent
---
.../educational/widgets/owgradientdescent.py | 15 +++++-
.../widgets/utils/logistic_regression.py | 50 +++++++++++++++----
2 files changed, 53 insertions(+), 12 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index e58df245..226676b9 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -132,6 +132,7 @@ class OWGradientDescent(OWWidget):
target_class = settings.Setting('')
alpha = settings.Setting(0.1)
auto_play_speed = settings.Setting(1)
+ stochastic = settings.Setting(False)
# models
x_var_model = None
@@ -187,16 +188,22 @@ def __init__(self):
master=self,
callback=self.change_alpha,
value="alpha",
- label="Alpha: ",
+ label="Learning rate: ",
minv=0.01,
maxv=1,
step=0.01,
spinType=float)
+ self.stochastic_checkbox = gui.checkBox(widget=self.properties_box,
+ master=self,
+ callback=self.change_stochastic,
+ value="stochastic",
+ label="Stochastic: ")
self.restart_button = gui.button(widget=self.properties_box,
master=self,
callback=self.restart,
label="Restart")
+
self.step_box = gui.widgetBox(self.controlArea, "Manually step through")
self.step_button = gui.button(widget=self.step_box,
@@ -310,13 +317,17 @@ def set_empty_plot(self):
def restart(self):
self.selected_data = self.select_data()
self.learner = self.default_learner(data=Normalize(self.selected_data),
- alpha=self.alpha)
+ alpha=self.alpha, stochastic=self.stochastic)
self.replot()
def change_alpha(self):
if self.learner is not None:
self.learner.set_alpha(self.alpha)
+ def change_stochastic(self):
+ if self.learner is not None:
+ self.learner.stochastic = self.stochastic
+
def step(self):
if self.data is None:
return
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 0662b3a8..7fdce26f 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -11,12 +11,15 @@ class LogisticRegression:
theta = None
domain = None
step_no = 0
+ stochastic_i = 0
+ stochastic_num_steps = 30 # number of samples used in each stochastic step
- def __init__(self, alpha=0.1, theta=None, data=None):
+ def __init__(self, alpha=0.1, theta=None, data=None, stochastic=False):
self.history = []
self.set_alpha(alpha)
self.set_data(data)
self.set_theta(theta)
+ self.stochastic = stochastic
def set_data(self, data):
if data is not None:
@@ -31,7 +34,7 @@ def set_theta(self, theta):
self.theta = np.array(theta)
else:
self.theta = None
- self.history = self.set_list(self.history, 0, np.copy(self.theta))
+ self.history = self.set_list(self.history, 0, (np.copy(self.theta), 0))
self.step_no = 0
def set_alpha(self, alpha):
@@ -45,18 +48,39 @@ def model(self):
def converged(self):
if self.step_no == 0:
return False
- return np.sum(np.abs(self.theta - self.history[self.step_no - 1])) < 1e-2
+ return np.sum(np.abs(self.theta - self.history[self.step_no - 1][0])) < (1e-2 if not self.stochastic else 1e-5)
def step(self):
self.step_no += 1
- grad = self.dj(self.theta)
+ grad = self.dj(self.theta, self.stochastic)
self.theta -= self.alpha * grad
- self.history = self.set_list(self.history, self.step_no, np.copy(self.theta))
+
+ self.stochastic_i += self.stochastic_num_steps
+
+ seed = None # seed that will be stored to revert the shuffle
+ if self.stochastic_i >= len(self.x):
+ self.stochastic_i = 0
+ seed = np.random.randint(100) # random seed
+ np.random.seed(seed) # set seed of permutation used to shuffle
+ indices = np.random.permutation(len(self.x))
+ self.x = self.x[indices] # permutation
+ self.y = self.y[indices]
+
+ self.history = self.set_list(self.history, self.step_no, (np.copy(self.theta), self.stochastic_i, seed))
def step_back(self):
if self.step_no > 0:
self.step_no -= 1
- self.theta = np.copy(self.history[self.step_no])
+ self.theta = np.copy(self.history[self.step_no][0])
+ self.stochastic_i = self.history[self.step_no][1]
+ seed = self.history[self.step_no + 1][2]
+ if seed is not None: # it means data had been permuted on this pos
+ np.random.seed(seed) # use same seed to revert
+ indices = np.random.permutation(len(self.x))
+ indices_reverse = np.argsort(indices)
+ # indices of sorted indices gives us reversing shuffle list
+ self.x = self.x[indices_reverse]
+ self.y = self.y[indices_reverse]
def j(self, theta):
"""
@@ -67,11 +91,17 @@ def j(self, theta):
# return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh))) / len(yh)
return -sum(self.y * np.log(yh) + (1 - self.y) * np.log(1 - yh)) / len(yh)
- def dj(self, theta):
+ def dj(self, theta, stochastic=False):
"""
Gradient of the cost function with L2 regularization
"""
- return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
+ if stochastic:
+ ns = self.stochastic_num_steps
+ x = self.x[self.stochastic_i : self.stochastic_i + ns]
+ y = self.y[self.stochastic_i : self.stochastic_i + ns]
+ return x.T.dot(self.g(x.dot(theta)) - y)
+ else:
+ return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
def optimized(self):
"""
@@ -94,8 +124,8 @@ def g(z):
"""
# limit values in z to avoid log with 0 produced by values almost 0
- z_mod = np.minimum(z, 100 * np.ones(len(z)))
- z_mod = np.maximum(z_mod, -100 * np.ones(len(z)))
+ z_mod = np.minimum(z, 100)
+ z_mod = np.maximum(z_mod, -100)
return 1.0 / (1 + np.exp(- z_mod))
From a3892519a236a358f763c6ba33ec950266b2da0a Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 16:22:29 +0200
Subject: [PATCH 008/128] Cost function (j) modified to work with matrix of
thetas
---
.../educational/widgets/owgradientdescent.py | 3 ++-
.../widgets/utils/logistic_regression.py | 16 ++++++++++------
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 226676b9..4a282389 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -425,7 +425,8 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
self.xv, self.yv = np.meshgrid(x, y)
thetas = np.column_stack((self.xv.flatten(), self.yv.flatten()))
- cost_values = np.vstack([self.learner.j(theta) for theta in thetas])
+ # cost_values = np.vstack([self.learner.j(theta) for theta in thetas])
+ cost_values = self.learner.j(thetas)
# results
self.cost_grid = cost_values.reshape(self.xv.shape)
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 7fdce26f..415ba208 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -9,6 +9,7 @@ class LogisticRegression:
x = None
y = None
theta = None
+ alpha = None
domain = None
step_no = 0
stochastic_i = 0
@@ -48,7 +49,8 @@ def model(self):
def converged(self):
if self.step_no == 0:
return False
- return np.sum(np.abs(self.theta - self.history[self.step_no - 1][0])) < (1e-2 if not self.stochastic else 1e-5)
+ return (np.sum(np.abs(self.theta - self.history[self.step_no - 1][0])) <
+ (1e-2 if not self.stochastic else 1e-5))
def step(self):
self.step_no += 1
@@ -66,7 +68,9 @@ def step(self):
self.x = self.x[indices] # permutation
self.y = self.y[indices]
- self.history = self.set_list(self.history, self.step_no, (np.copy(self.theta), self.stochastic_i, seed))
+ self.history = self.set_list(
+ self.history, self.step_no,
+ (np.copy(self.theta), self.stochastic_i, seed))
def step_back(self):
if self.step_no > 0:
@@ -86,10 +90,10 @@ def j(self, theta):
"""
Cost function for logistic regression
"""
- # TODO: modify for more thetas
- yh = self.g(self.x.dot(theta))
- # return -sum(np.log(self.y * yh + (1 - self.y) * (1 - yh))) / len(yh)
- return -sum(self.y * np.log(yh) + (1 - self.y) * np.log(1 - yh)) / len(yh)
+ yh = self.g(self.x.dot(theta.T)).T
+ y = self.y
+ return -np.sum(
+ (self.y * np.log(yh) + (1 - y) * np.log(1 - yh)).T, axis=0) / len(y)
def dj(self, theta, stochastic=False):
"""
From b42b0329412505b9dbc81a602a9a89e9d47f4a67 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 16:50:37 +0200
Subject: [PATCH 009/128] Code refactor for logistic_regression
---
.../widgets/utils/logistic_regression.py | 77 +++++++++++++++++--
1 file changed, 70 insertions(+), 7 deletions(-)
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 415ba208..aad7609c 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -1,10 +1,23 @@
import numpy as np
+from scipy.optimize import fmin_l_bfgs_b
from Orange.classification import Model
-from scipy.optimize import fmin_l_bfgs_b
class LogisticRegression:
+ """
+ Logistic regression algorithm with custom cost and gradient function,
+ which allow to perform algorithm step by step
+
+ Parameters
+ ----------
+ alpha : float
+ Learning rate
+ theta : array_like(float, ndim=1)
+ Logistic function parameters
+ data : Orange.data.Table
+ Data
+ """
x = None
y = None
@@ -23,12 +36,18 @@ def __init__(self, alpha=0.1, theta=None, data=None, stochastic=False):
self.stochastic = stochastic
def set_data(self, data):
+ """
+ Function sets the data.
+ """
if data is not None:
self.x = data.X
self.y = data.Y
self.domain = data.domain
def set_theta(self, theta):
+ """
+ Function sets theta. Can be called from constructor or outside.
+ """
if isinstance(theta, (np.ndarray, np.generic)):
self.theta = theta
elif isinstance(theta, list):
@@ -39,35 +58,54 @@ def set_theta(self, theta):
self.step_no = 0
def set_alpha(self, alpha):
+ """
+ Function sets alpha and can be called from constructor or from outside.
+ """
self.alpha = alpha
@property
def model(self):
+ """
+ Function returns model based on current parameters.
+ """
return LogisticRegressionModel(self.theta, self.domain)
@property
def converged(self):
+ """
+ Function returns True if gradient descent already converged.
+ """
if self.step_no == 0:
return False
return (np.sum(np.abs(self.theta - self.history[self.step_no - 1][0])) <
(1e-2 if not self.stochastic else 1e-5))
def step(self):
+ """
+ Function performs one step of the gradient descent
+ """
self.step_no += 1
+
+ # calculates gradient and modify theta
grad = self.dj(self.theta, self.stochastic)
self.theta -= self.alpha * grad
+ # increase index used by stochastic gradient descent
self.stochastic_i += self.stochastic_num_steps
seed = None # seed that will be stored to revert the shuffle
+ # if we came around all data set index to zero and permute data
if self.stochastic_i >= len(self.x):
self.stochastic_i = 0
+
+ # shuffle data
seed = np.random.randint(100) # random seed
np.random.seed(seed) # set seed of permutation used to shuffle
indices = np.random.permutation(len(self.x))
self.x = self.x[indices] # permutation
self.y = self.y[indices]
+ # save history for step back
self.history = self.set_list(
self.history, self.step_no,
(np.copy(self.theta), self.stochastic_i, seed))
@@ -75,8 +113,14 @@ def step(self):
def step_back(self):
if self.step_no > 0:
self.step_no -= 1
+
+ # modify theta
self.theta = np.copy(self.history[self.step_no][0])
+
+ # modify index for stochastic gradient descent
self.stochastic_i = self.history[self.step_no][1]
+
+ # if necessary restore data shuffle
seed = self.history[self.step_no + 1][2]
if seed is not None: # it means data had been permuted on this pos
np.random.seed(seed) # use same seed to revert
@@ -97,19 +141,19 @@ def j(self, theta):
def dj(self, theta, stochastic=False):
"""
- Gradient of the cost function with L2 regularization
+ Gradient of the cost function for logistic regression
"""
if stochastic:
ns = self.stochastic_num_steps
- x = self.x[self.stochastic_i : self.stochastic_i + ns]
- y = self.y[self.stochastic_i : self.stochastic_i + ns]
+ x = self.x[self.stochastic_i: self.stochastic_i + ns]
+ y = self.y[self.stochastic_i: self.stochastic_i + ns]
return x.T.dot(self.g(x.dot(theta)) - y)
else:
return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
def optimized(self):
"""
- Function performs model training
+ Function performs whole model training. Not step by step.
"""
res = fmin_l_bfgs_b(self.j,
np.zeros(self.x.shape[1]),
@@ -119,11 +163,11 @@ def optimized(self):
@staticmethod
def g(z):
"""
- sigmoid function
+ Sigmoid function
Parameters
----------
- z : array_like
+ z : array_like(float)
values to evaluate with function
"""
@@ -135,6 +179,24 @@ def g(z):
@staticmethod
def set_list(l, i, v):
+ """
+ Function sets the i-th value in list to v. If index i does not exist in l,
+ it is initialized; otherwise the existing value is modified.
+
+ Parameters
+ ----------
+ l : list
+ List
+ i : int
+ Index of position in list
+ v : any
+ Value to insert in list
+
+ Returns
+ -------
+ list
+ List with inserted value v on position i
+ """
try:
l[i] = v
except IndexError:
@@ -143,6 +205,7 @@ def set_list(l, i, v):
l.append(v)
return l
+
class LogisticRegressionModel(Model):
def __init__(self, theta, domain):
From ea60cfdb60310e6b0bfd5b3ae5a88a0e09e0034c Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Wed, 3 Aug 2016 17:28:34 +0200
Subject: [PATCH 010/128] Code refactor in OWGradientDescent.
---
.../educational/widgets/owgradientdescent.py | 252 ++++++++++--------
1 file changed, 139 insertions(+), 113 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 4a282389..c0a23c7e 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -2,12 +2,11 @@
import time
import numpy as np
-from Orange.widgets.utils import itemmodels
+from scipy.ndimage import gaussian_filter
from PyQt4.QtCore import pyqtSlot, Qt, QThread, SIGNAL
from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
-from scipy.interpolate import splev, splprep
-from scipy.ndimage import gaussian_filter
+from Orange.widgets.utils import itemmodels
from Orange.classification import Model
from Orange.data import Table, ContinuousVariable, Domain, DiscreteVariable
from Orange.widgets import gui
@@ -27,11 +26,11 @@ class Scatterplot(highcharts.Highchart):
* enables scroll-wheel zooming,
* set callback functions for click (in empty chart), drag and drop
* enables moving of centroids points
- * include drag_drop_js script by highchart
+ * include drag_drop_js script by highcharts
"""
- js_click_function = """/**/(function(event) {
- window.pybridge.chart_clicked(event.xAxis[0].value, event.yAxis[0].value);
+ js_click_function = """/**/(function(e) {
+ window.pybridge.chart_clicked(e.xAxis[0].value, e.yAxis[0].value);
})
"""
@@ -41,7 +40,9 @@ class Scatterplot(highcharts.Highchart):
def __init__(self, click_callback, **kwargs):
# read javascript for drag and drop
- with open(path.join(path.dirname(__file__), 'resources', 'highcharts-contour.js'), 'r') as f:
+ with open(
+ path.join(path.dirname(__file__), 'resources',
+ 'highcharts-contour.js'), 'r') as f:
contours_js = f.read()
super().__init__(enable_zoom=True,
@@ -60,41 +61,57 @@ def chart(self, *args, **kwargs):
@pyqtSlot(float, float)
def chart_clicked(self, x, y):
+ """
+ Function is called from javascript when click event happens
+ """
self.click_callback(x, y)
- def remove_series(self, id):
+ def remove_series(self, idx):
+ """
+ Function removes the series with id idx
+ """
self.evalJS("""
series = chart.get('{id}');
if (series != null)
series.remove(true);
- """.format(id=id))
+ """.format(id=idx))
- def remove_last_point(self, id):
+ def remove_last_point(self, idx):
+ """
+ Function removes the last point from the series with id idx
+ """
self.evalJS("""
series = chart.get('{id}');
if (series != null)
series.removePoint(series.data.length - 1, true);
- """.format(id=id))
+ """.format(id=idx))
def add_series(self, series):
+ """
+ Function adds series to the chart
+ """
for i, s in enumerate(series):
self.exposeObject('series%d' % i, series[i])
self.evalJS("chart.addSeries(series%d, true);" % i)
- def add_point_to_series(self, id, x, y):
+ def add_point_to_series(self, idx, x, y):
+ """
+ Function adds a point to the series with id idx
+ """
self.evalJS("""
series = chart.get('{id}');
series.addPoint([{x}, {y}]);
- """.format(id=id, x=x, y=y))
+ """.format(id=idx, x=x, y=y))
class Autoplay(QThread):
"""
- Class used for separated thread when using "Autoplay" for k-means
+ Class used for separated thread when using "Autoplay" for gradient descent
+
Parameters
----------
- owkmeans : OWKmeans
- Instance of OWKmeans class
+ ow_gradient_descent : OWGradientDescent
+ Instance of OWGradientDescent class
"""
def __init__(self, ow_gradient_descent):
@@ -109,16 +126,19 @@ def run(self):
Stepping through the algorithm until converge or user interrupts
"""
while (not self.ow_gradient_descent.learner.converged and
- self.ow_gradient_descent.auto_play_enabled):
+ self.ow_gradient_descent.auto_play_enabled):
self.emit(SIGNAL('step()'))
time.sleep(2 - self.ow_gradient_descent.auto_play_speed)
self.emit(SIGNAL('stop_auto_play()'))
class OWGradientDescent(OWWidget):
+ """
+ Gradient descent widget algorithm
+ """
name = "Gradient Descent"
- description = "Widget demonstrates shows the procedure of gradient descent."
+ description = "Widget shows the procedure of gradient descent."
icon = "icons/InteractiveKMeans.svg"
want_main_area = True
@@ -144,6 +164,10 @@ class OWGradientDescent(OWWidget):
cost_grid = None
grid_size = 15
contour_color = "#aaaaaa"
+ min_x = None
+ max_x = None
+ min_y = None
+ max_y = None
# data
data = None
@@ -151,9 +175,13 @@ class OWGradientDescent(OWWidget):
# autoplay
auto_play_enabled = False
- autoplay_button_text = ["Run", "Stop"]
+ auto_play_button_text = ["Run", "Stop"]
+ auto_play_thread = None
class Warning(OWWidget.Warning):
+ """
+ Class used fro widget warnings.
+ """
to_few_features = Msg("Too few Continuous feature. Min 2 required")
no_class = Msg("No class provided or only one class variable")
@@ -183,69 +211,52 @@ def __init__(self):
self.cbx.setModel(self.x_var_model)
self.cby.setModel(self.y_var_model)
+ # properties box
self.properties_box = gui.widgetBox(self.controlArea, "Properties")
- self.alpha_spin = gui.spin(widget=self.properties_box,
- master=self,
- callback=self.change_alpha,
- value="alpha",
- label="Learning rate: ",
- minv=0.01,
- maxv=1,
- step=0.01,
- spinType=float)
- self.stochastic_checkbox = gui.checkBox(widget=self.properties_box,
- master=self,
- callback=self.change_stochastic,
- value="stochastic",
- label="Stochastic: ")
- self.restart_button = gui.button(widget=self.properties_box,
- master=self,
- callback=self.restart,
- label="Restart")
-
-
+ self.alpha_spin = gui.spin(
+ widget=self.properties_box, master=self, callback=self.change_alpha,
+ value="alpha", label="Learning rate: ",
+ minv=0.01, maxv=1, step=0.01, spinType=float)
+ self.stochastic_checkbox = gui.checkBox(
+ widget=self.properties_box, master=self,
+ callback=self.change_stochastic, value="stochastic",
+ label="Stochastic: ")
+ self.restart_button = gui.button(
+ widget=self.properties_box, master=self,
+ callback=self.restart, label="Restart")
+
+ # step box
self.step_box = gui.widgetBox(self.controlArea, "Manually step through")
+ self.step_button = gui.button(
+ widget=self.step_box, master=self, callback=self.step, label="Step")
+ self.step_back_button = gui.button(
+ widget=self.step_box, master=self, callback=self.step_back,
+ label="Step back")
- self.step_button = gui.button(widget=self.step_box,
- master=self,
- callback=self.step,
- label="Step")
- self.step_back_button = gui.button(widget=self.step_box,
- master=self,
- callback=self.step_back,
- label="Step back")
-
+ # run box
self.run_box = gui.widgetBox(self.controlArea, "Run")
self.auto_play_button = gui.button(
- self.run_box, self, self.autoplay_button_text[0],
- callback=self.auto_play)
- self.auto_play_speed_spinner = gui.hSlider(self.run_box,
- self,
- 'auto_play_speed',
- minValue=0,
- maxValue=1.91,
- step=0.1,
- intOnly=False,
- createLabel=False,
- label='Speed:')
+ widget=self.run_box, master=self,
+ label=self.auto_play_button_text[0], callback=self.auto_play)
+ self.auto_play_speed_spinner = gui.hSlider(
+ widget=self.run_box, master=self, value='auto_play_speed',
+ minValue=0, maxValue=1.91, step=0.1,
+ intOnly=False, createLabel=False, label='Speed:')
# graph in mainArea
- self.scatter = Scatterplot(click_callback=self.set_theta,
+ self.scatter = Scatterplot(click_callback=self.change_theta,
xAxis_gridLineWidth=0,
yAxis_gridLineWidth=0,
title_text='',
tooltip_shared=False,
debug=True)
-
+ # TODO: set false when end of development
gui.rubber(self.controlArea)
- # TODO: set false when end of development
# Just render an empty chart so it shows a nice 'No data to display'
self.scatter.chart()
self.mainArea.layout().addWidget(self.scatter)
- # set random learner
-
def set_data(self, data):
"""
Function receives data from input and init part of widget if data
@@ -283,22 +294,21 @@ def init_combos():
self.Warning.clear()
# clear variables
- self.xv = None
- self.yv = None
self.cost_grid = None
+ dd = data.domain
+
if data is None or len(data) == 0:
self.data = None
reset_combos()
self.set_empty_plot()
- elif sum(True for var in data.domain.attributes
+ elif sum(True for var in dd.attributes
if isinstance(var, ContinuousVariable)) < 2:
self.data = None
reset_combos()
self.Warning.to_few_features()
self.set_empty_plot()
- elif (data.domain.class_var is None or
- len(data.domain.class_var.values) < 2):
+ elif dd.class_var is None or len(dd.class_var.values) < 2:
self.data = None
reset_combos()
self.Warning.no_class()
@@ -312,38 +322,72 @@ def init_combos():
self.restart()
def set_empty_plot(self):
+ """
+ Function renders an empty plot
+ """
self.scatter.clear()
def restart(self):
+ """
+ Function restarts the algorithm
+ """
self.selected_data = self.select_data()
- self.learner = self.default_learner(data=Normalize(self.selected_data),
- alpha=self.alpha, stochastic=self.stochastic)
+ self.learner = self.default_learner(
+ data=Normalize(self.selected_data),
+ alpha=self.alpha, stochastic=self.stochastic)
self.replot()
def change_alpha(self):
+ """
+ Function changes the alpha parameter of the algorithm
+ """
if self.learner is not None:
self.learner.set_alpha(self.alpha)
def change_stochastic(self):
+ """
+ Function switches between the stochastic and the standard algorithm
+ """
if self.learner is not None:
self.learner.stochastic = self.stochastic
+ def change_theta(self, x, y):
+ """
+ Function sets new theta
+ """
+ if self.learner is not None:
+ self.learner.set_theta([x, y])
+ self.scatter.remove_series("path")
+ self.scatter.add_series([
+ dict(id="path", data=[[x, y]], showInLegend=False,
+ type="scatter", lineWidth=1,
+ marker=dict(enabled=True, radius=2))],)
+
def step(self):
+ """
+ Function performs one step of the algorithm
+ """
if self.data is None:
return
if self.learner.theta is None:
- self.set_theta(np.random.uniform(self.min_x, self.max_x),
- np.random.uniform(self.min_y, self.max_y))
+ self.change_theta(np.random.uniform(self.min_x, self.max_x),
+ np.random.uniform(self.min_y, self.max_y))
self.learner.step()
theta = self.learner.theta
self.plot_point(theta[0], theta[1])
def step_back(self):
+ """
+ Function performs step back
+ """
if self.learner.step_no > 0:
self.learner.step_back()
self.scatter.remove_last_point("path")
def plot_point(self, x, y):
+ """
+ Function adds a point to the path
+ """
self.scatter.add_point_to_series("path", x, y)
def replot(self):
@@ -353,9 +397,6 @@ def replot(self):
if self.data is None:
return
- attr_x = self.data.domain[self.attr_x]
- attr_y = self.data.domain[self.attr_y]
-
optimal_theta = self.learner.optimized()
self.min_x = optimal_theta[0] - 5
self.max_x = optimal_theta[0] + 5
@@ -368,7 +409,6 @@ def replot(self):
options['series'] += self.plot_gradient_and_contour(
self.min_x, self.max_x, self.min_y, self.max_y)
-
min_value = np.min(self.cost_grid)
max_value = np.max(self.cost_grid)
@@ -399,10 +439,10 @@ def replot(self):
self.scatter.chart(options, **kwargs)
-
def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
"""
Function constructs series for gradient and contour
+
Parameters
----------
x_from : float
@@ -413,6 +453,7 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
Min grid y value
y_to : float
Max grid y value
+
Returns
-------
list
@@ -422,19 +463,19 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# grid for gradient
x = np.linspace(x_from, x_to, self.grid_size)
y = np.linspace(y_from, y_to, self.grid_size)
- self.xv, self.yv = np.meshgrid(x, y)
- thetas = np.column_stack((self.xv.flatten(), self.yv.flatten()))
+ xv, yv = np.meshgrid(x, y)
+ thetas = np.column_stack((xv.flatten(), yv.flatten()))
# cost_values = np.vstack([self.learner.j(theta) for theta in thetas])
cost_values = self.learner.j(thetas)
# results
- self.cost_grid = cost_values.reshape(self.xv.shape)
+ self.cost_grid = cost_values.reshape(xv.shape)
blurred = self.blur_grid(self.cost_grid)
# return self.plot_gradient(self.xv, self.yv, blurred) + \
- return self.plot_contour()
+ return self.plot_contour(xv, yv, blurred)
def plot_gradient(self, x, y, grid):
"""
@@ -449,6 +490,7 @@ def select_data(self):
"""
Function takes two selected columns from data table and merge them
in new Orange.data.Table
+
Returns
-------
Table
@@ -471,31 +513,20 @@ def select_data(self):
return Table(domain, x, y, self.data.Y[:, None])
- def plot_contour(self):
+ def plot_contour(self, xv, yv, cost_grid):
"""
Function constructs contour lines
"""
contour = Contour(
- self.xv, self.yv, self.blur_grid(self.cost_grid))
+ xv, yv, cost_grid)
contour_lines = contour.contours(
- np.linspace(np.min(self.cost_grid), np.max(self.cost_grid), 10))
+ np.linspace(np.min(cost_grid), np.max(cost_grid), 10))
series = []
count = 0
for key, value in contour_lines.items():
for line in value:
- # if len(line) > 3:
- # # if less than degree interpolation fails
- # tck, u = splprep(
- # [list(x) for x in zip(*reversed(line))],
- # s=0.001, k=3,
- # per=(len(line)
- # if np.allclose(line[0], line[-1])
- # else 0))
- # new_int = np.arange(0, 1.01, 0.01)
- # interpol_line = np.array(splev(new_int, tck)).T.tolist()
- # else:
interpol_line = line
series.append(dict(data=interpol_line,
@@ -512,33 +543,27 @@ def plot_contour(self):
@staticmethod
def blur_grid(grid):
+ """
+ Function blurs the grid to make crossings smoother
+ """
filtered = gaussian_filter(grid, sigma=1)
return filtered
- def set_theta(self, x, y):
- if self.learner is not None:
- self.learner.set_theta([x, y])
- self.scatter.remove_series("path")
- self.scatter.add_series([
- dict(id="path", data=[[x, y]], showInLegend=False,
- type="scatter", lineWidth=1,
- marker=dict(enabled=True, radius=2))],)
-
def auto_play(self):
"""
Function called when autoplay button pressed
"""
self.auto_play_enabled = not self.auto_play_enabled
self.auto_play_button.setText(
- self.autoplay_button_text[self.auto_play_enabled])
+ self.auto_play_button_text[self.auto_play_enabled])
if self.auto_play_enabled:
self.disable_controls(self.auto_play_enabled)
- self.autoPlayThread = Autoplay(self)
- self.connect(self.autoPlayThread, SIGNAL("step()"), self.step)
+ self.auto_play_thread = Autoplay(self)
+ self.connect(self.auto_play_thread, SIGNAL("step()"), self.step)
self.connect(
- self.autoPlayThread, SIGNAL("stop_auto_play()"),
+ self.auto_play_thread, SIGNAL("stop_auto_play()"),
self.stop_auto_play)
- self.autoPlayThread.start()
+ self.auto_play_thread.start()
else:
self.stop_auto_play()
@@ -549,11 +574,12 @@ def stop_auto_play(self):
self.auto_play_enabled = False
self.disable_controls(self.auto_play_enabled)
self.auto_play_button.setText(
- self.autoplay_button_text[self.auto_play_enabled])
+ self.auto_play_button_text[self.auto_play_enabled])
def disable_controls(self, disabled):
+ """
+ Function disables or enables all controls except those in the run part
+ """
self.step_box.setDisabled(disabled)
self.options_box.setDisabled(disabled)
self.properties_box.setDisabled(disabled)
-
-
From f24f60d5c54bb15356494d447fedbf49941ab8b6 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 08:44:39 +0200
Subject: [PATCH 011/128] Icon for gradient descent.
---
.../widgets/icons/GradientDescent.svg | 152 ++++++++++++++++++
.../educational/widgets/owgradientdescent.py | 2 +-
2 files changed, 153 insertions(+), 1 deletion(-)
create mode 100644 orangecontrib/educational/widgets/icons/GradientDescent.svg
diff --git a/orangecontrib/educational/widgets/icons/GradientDescent.svg b/orangecontrib/educational/widgets/icons/GradientDescent.svg
new file mode 100644
index 00000000..10a2ff9a
--- /dev/null
+++ b/orangecontrib/educational/widgets/icons/GradientDescent.svg
@@ -0,0 +1,152 @@
+
+
+
+
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index c0a23c7e..4276c278 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -139,7 +139,7 @@ class OWGradientDescent(OWWidget):
name = "Gradient Descent"
description = "Widget shows the procedure of gradient descent."
- icon = "icons/InteractiveKMeans.svg"
+ icon = "icons/GradientDescent.svg"
want_main_area = True
inputs = [("Data", Table, "set_data")]
From ae985d2b759aa9343dd9da18e8fae0732f1a39f2 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 08:49:42 +0200
Subject: [PATCH 012/128] Icon updated
---
.../widgets/icons/GradientDescent.svg | 96 ++++++++-----------
1 file changed, 39 insertions(+), 57 deletions(-)
diff --git a/orangecontrib/educational/widgets/icons/GradientDescent.svg b/orangecontrib/educational/widgets/icons/GradientDescent.svg
index 10a2ff9a..83948785 100644
--- a/orangecontrib/educational/widgets/icons/GradientDescent.svg
+++ b/orangecontrib/educational/widgets/icons/GradientDescent.svg
@@ -55,98 +55,80 @@
id="layer1"
transform="translate(0,-1004.3622)">
-
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1"
+ cx="41.132149"
+ cy="1010.5355"
+ r="0.55258733" />
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1-6"
+ cx="35.544743"
+ cy="1019.5954"
+ r="0.55258733" />
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1-0"
+ cx="31.504133"
+ cy="1022.8152"
+ r="0.55258733" />
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1-62"
+ cx="27.905464"
+ cy="1023.7622"
+ r="0.55258733" />
-
-
+ r="0.55258733" />
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1-8"
+ cx="21.339472"
+ cy="1023.6992"
+ r="0.55258733" />
+ style="fill:#000000;fill-opacity:1;stroke:#000000;stroke-width:1.23305607;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719-1-7"
+ cx="18.372149"
+ cy="1023.0677"
+ r="0.55258733" />
From 5bf3b3ac6f8bcc8e2b64ebad71b902f37097b1ba Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 09:35:20 +0200
Subject: [PATCH 013/128] Implemented widget output
---
.../educational/widgets/owgradientdescent.py | 50 ++++++++++++++++---
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 4276c278..dec24d3c 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -8,7 +8,8 @@
from Orange.widgets.utils import itemmodels
from Orange.classification import Model
-from Orange.data import Table, ContinuousVariable, Domain, DiscreteVariable
+from Orange.data import Table, ContinuousVariable, Domain, DiscreteVariable, \
+ StringVariable
from Orange.widgets import gui
from Orange.widgets import highcharts
from Orange.widgets import settings
@@ -143,8 +144,9 @@ class OWGradientDescent(OWWidget):
want_main_area = True
inputs = [("Data", Table, "set_data")]
- outputs = [("Model", Model),
- ("Coefficients", Table)]
+ outputs = [("Classifier", Model),
+ ("Coefficients", Table),
+ ("Data", Table)]
# selected attributes in chart
attr_x = settings.Setting('')
@@ -295,20 +297,22 @@ def init_combos():
# clear variables
self.cost_grid = None
+ self.learner = None
- dd = data.domain
+ d = data
+ self.send_output()
if data is None or len(data) == 0:
self.data = None
reset_combos()
self.set_empty_plot()
- elif sum(True for var in dd.attributes
+ elif sum(True for var in d.domain.attributes
if isinstance(var, ContinuousVariable)) < 2:
self.data = None
reset_combos()
self.Warning.to_few_features()
self.set_empty_plot()
- elif dd.class_var is None or len(dd.class_var.values) < 2:
+ elif d.domain.class_var is None or len(d.domain.class_var.values) < 2:
self.data = None
reset_combos()
self.Warning.no_class()
@@ -336,6 +340,7 @@ def restart(self):
data=Normalize(self.selected_data),
alpha=self.alpha, stochastic=self.stochastic)
self.replot()
+ self.send_output()
def change_alpha(self):
"""
@@ -362,6 +367,7 @@ def change_theta(self, x, y):
dict(id="path", data=[[x, y]], showInLegend=False,
type="scatter", lineWidth=1,
marker=dict(enabled=True, radius=2))],)
+ self.send_output()
def step(self):
"""
@@ -375,6 +381,7 @@ def step(self):
self.learner.step()
theta = self.learner.theta
self.plot_point(theta[0], theta[1])
+ self.send_output()
def step_back(self):
"""
@@ -383,6 +390,7 @@ def step_back(self):
if self.learner.step_no > 0:
self.learner.step_back()
self.scatter.remove_last_point("path")
+ self.send_output()
def plot_point(self, x, y):
"""
@@ -583,3 +591,33 @@ def disable_controls(self, disabled):
self.step_box.setDisabled(disabled)
self.options_box.setDisabled(disabled)
self.properties_box.setDisabled(disabled)
+
+ def send_output(self):
+ self.send_model()
+ self.send_coefficients()
+ self.send_data()
+
+ def send_model(self):
+ if self.learner is not None and self.learner.theta is not None:
+ self.send("Classifier", self.learner.model)
+ else:
+ self.send("Classifier", None)
+
+ def send_coefficients(self):
+ if self.learner is not None and self.learner.theta is not None:
+ domain = Domain(
+ [ContinuousVariable("coef", number_of_decimals=7)],
+ metas=[StringVariable("name")])
+ names = ["theta 0", "theta 1"]
+
+ coefficients_table = Table(
+ domain, list(zip(list(self.learner.theta), names)))
+ self.send("Coefficients", coefficients_table)
+ else:
+ self.send("Coefficients", None)
+
+ def send_data(self):
+ if self.selected_data is not None:
+ self.send("Data", self.selected_data)
+ else:
+ self.send("Data", None)
\ No newline at end of file
From f3914dd50f1e586ab97ecf18cd51d0a34a6eb412 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 09:36:53 +0200
Subject: [PATCH 014/128] [FIX] Predictor to return correct predictions.
---
.../educational/widgets/utils/logistic_regression.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index aad7609c..4291dbbc 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -211,6 +211,12 @@ class LogisticRegressionModel(Model):
def __init__(self, theta, domain):
super().__init__(domain)
self.theta = theta
+ self.name = "Logistic Regression"
+ print("a")
def predict_storage(self, data):
- return LogisticRegression.g(data.X.dot(self.theta))
+ probabilities = LogisticRegression.g(data.X.dot(self.theta))
+ values = np.around(probabilities)
+ probabilities0 = 1 - probabilities
+ probabilities = np.column_stack((probabilities0, probabilities))
+ return values, probabilities
From 3c52867ff3d89841b46c5a01e46d0d0dacfe1166 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 10:03:34 +0200
Subject: [PATCH 015/128] Data normalization moved to select_data
---
orangecontrib/educational/widgets/owgradientdescent.py | 4 ++--
.../educational/widgets/utils/logistic_regression.py | 1 -
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index dec24d3c..81d0dcb7 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -337,7 +337,7 @@ def restart(self):
"""
self.selected_data = self.select_data()
self.learner = self.default_learner(
- data=Normalize(self.selected_data),
+ data=self.selected_data,
alpha=self.alpha, stochastic=self.stochastic)
self.replot()
self.send_output()
@@ -519,7 +519,7 @@ def select_data(self):
y = [(0 if d.get_class().value == self.target_class else 1)
for d in self.data]
- return Table(domain, x, y, self.data.Y[:, None])
+ return Normalize(Table(domain, x, y, self.data.Y[:, None]))
def plot_contour(self, xv, yv, cost_grid):
"""
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 4291dbbc..e890ff80 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -212,7 +212,6 @@ def __init__(self, theta, domain):
super().__init__(domain)
self.theta = theta
self.name = "Logistic Regression"
- print("a")
def predict_storage(self, data):
probabilities = LogisticRegression.g(data.X.dot(self.theta))
From 092e8663bba383835b48c5c900390bc7554bc123 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 12:37:54 +0200
Subject: [PATCH 016/128] Introduced very small regularization rate to avoid
 high numbers when data are well separated between classes, Alpha field max
set to 10
---
.../educational/widgets/owgradientdescent.py | 16 +++-------------
.../widgets/utils/logistic_regression.py | 13 ++++++++-----
2 files changed, 11 insertions(+), 18 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 81d0dcb7..bdfe4a0d 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -218,7 +218,7 @@ def __init__(self):
self.alpha_spin = gui.spin(
widget=self.properties_box, master=self, callback=self.change_alpha,
value="alpha", label="Learning rate: ",
- minv=0.01, maxv=1, step=0.01, spinType=float)
+ minv=0.01, maxv=10, step=0.01, spinType=float)
self.stochastic_checkbox = gui.checkBox(
widget=self.properties_box, master=self,
callback=self.change_stochastic, value="stochastic",
@@ -480,10 +480,8 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# results
self.cost_grid = cost_values.reshape(xv.shape)
- blurred = self.blur_grid(self.cost_grid)
-
# return self.plot_gradient(self.xv, self.yv, blurred) + \
- return self.plot_contour(xv, yv, blurred)
+ return self.plot_contour(xv, yv, self.cost_grid)
def plot_gradient(self, x, y, grid):
"""
@@ -529,7 +527,7 @@ def plot_contour(self, xv, yv, cost_grid):
contour = Contour(
xv, yv, cost_grid)
contour_lines = contour.contours(
- np.linspace(np.min(cost_grid), np.max(cost_grid), 10))
+ np.linspace(np.min(cost_grid), np.max(cost_grid), 20))
series = []
count = 0
@@ -549,14 +547,6 @@ def plot_contour(self, xv, yv, cost_grid):
count += 1
return series
- @staticmethod
- def blur_grid(grid):
- """
- Function blur the grid, to make crossings smoother
- """
- filtered = gaussian_filter(grid, sigma=1)
- return filtered
-
def auto_play(self):
"""
Function called when autoplay button pressed
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index e890ff80..fbb9804a 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -27,6 +27,8 @@ class LogisticRegression:
step_no = 0
stochastic_i = 0
stochastic_num_steps = 30 # number of steps in one step
+ regularization_rate = 0.001
+ # very small regularization rate to avoid big parameters
def __init__(self, alpha=0.1, theta=None, data=None, stochastic=False):
self.history = []
@@ -136,8 +138,8 @@ def j(self, theta):
"""
yh = self.g(self.x.dot(theta.T)).T
y = self.y
- return -np.sum(
- (self.y * np.log(yh) + (1 - y) * np.log(1 - yh)).T, axis=0) / len(y)
+ return (-np.sum((y * np.log(yh) + (1 - y) * np.log(1 - yh)).T, axis=0) +
+ self.regularization_rate * np.sum(np.square(theta.T), axis=0))
def dj(self, theta, stochastic=False):
"""
@@ -149,7 +151,8 @@ def dj(self, theta, stochastic=False):
y = self.y[self.stochastic_i: self.stochastic_i + ns]
return x.T.dot(self.g(x.dot(theta)) - y)
else:
- return (self.g(self.x.dot(theta)) - self.y).dot(self.x)
+ return ((self.g(self.x.dot(theta)) - self.y).dot(self.x) +
+ self.regularization_rate * theta)
def optimized(self):
"""
@@ -172,8 +175,8 @@ def g(z):
"""
# limit values in z to avoid log with 0 produced by values almost 0
- z_mod = np.minimum(z, 100)
- z_mod = np.maximum(z_mod, -100)
+ z_mod = np.minimum(z, 20)
+ z_mod = np.maximum(z_mod, -20)
return 1.0 / (1 + np.exp(- z_mod))
From d0a11368490a5139195fa359ed25f7d9b4de6094 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 14:25:37 +0200
Subject: [PATCH 017/128] Solved bug with zoom
---
orangecontrib/educational/widgets/owgradientdescent.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index bdfe4a0d..aefd1b0e 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -51,6 +51,7 @@ def __init__(self, click_callback, **kwargs):
enable_select='',
chart_events_click=self.js_click_function,
plotOptions_series_states_hover_enabled=False,
+ chart_panning=False,
javascript=contours_js,
**kwargs)
@@ -437,8 +438,8 @@ def replot(self):
# [min_value, "#ffffff"],
# [max_value, "#ff0000"]],
# tickInterval=1, max=max_value, min=min_value),
- plotOptions_contour_colsize=(self.max_y - self.min_y) / 10000,
- plotOptions_contour_rowsize=(self.max_x - self.min_x) / 10000,
+ # plotOptions_contour_colsize=(self.max_y - self.min_y) / 10000,
+ # plotOptions_contour_rowsize=(self.max_x - self.min_x) / 10000,
tooltip_enabled=False,
tooltip_headerFormat="",
tooltip_pointFormat="%s: {point.x:.2f}
"
@@ -480,7 +481,7 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# results
self.cost_grid = cost_values.reshape(xv.shape)
- # return self.plot_gradient(self.xv, self.yv, blurred) + \
+ # return self.plot_gradient(xv, yv, self.cost_grid) + \
return self.plot_contour(xv, yv, self.cost_grid)
def plot_gradient(self, x, y, grid):
From b9707c215364a0e742163f7bcb04a71bb7e5f1c0 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 14:30:52 +0200
Subject: [PATCH 018/128] Code refactor.
---
.../educational/widgets/owgradientdescent.py | 20 +++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index aefd1b0e..82983af0 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -407,10 +407,10 @@ def replot(self):
return
optimal_theta = self.learner.optimized()
- self.min_x = optimal_theta[0] - 5
- self.max_x = optimal_theta[0] + 5
- self.min_y = optimal_theta[1] - 5
- self.max_y = optimal_theta[1] + 5
+ self.min_x = optimal_theta[0] - 10
+ self.max_x = optimal_theta[0] + 10
+ self.min_y = optimal_theta[1] - 10
+ self.max_y = optimal_theta[1] + 10
options = dict(series=[])
@@ -584,17 +584,26 @@ def disable_controls(self, disabled):
self.properties_box.setDisabled(disabled)
def send_output(self):
+ """
+ Function sends output
+ """
self.send_model()
self.send_coefficients()
self.send_data()
def send_model(self):
+ """
+ Function sends model on output.
+ """
if self.learner is not None and self.learner.theta is not None:
self.send("Classifier", self.learner.model)
else:
self.send("Classifier", None)
def send_coefficients(self):
+ """
+ Function sends logistic regression coefficients on output.
+ """
if self.learner is not None and self.learner.theta is not None:
domain = Domain(
[ContinuousVariable("coef", number_of_decimals=7)],
@@ -608,6 +617,9 @@ def send_coefficients(self):
self.send("Coefficients", None)
def send_data(self):
+ """
+ Function sends data on output.
+ """
if self.selected_data is not None:
self.send("Data", self.selected_data)
else:
From 76172bcefadabb0d5ab401f92f3aec7c6bce06c5 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 14:30:52 +0200
Subject: [PATCH 019/128] Code refactor.
---
.../educational/widgets/owgradientdescent.py | 31 ++++++++++---------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index aefd1b0e..42fbe07d 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -407,10 +407,10 @@ def replot(self):
return
optimal_theta = self.learner.optimized()
- self.min_x = optimal_theta[0] - 5
- self.max_x = optimal_theta[0] + 5
- self.min_y = optimal_theta[1] - 5
- self.max_y = optimal_theta[1] + 5
+ self.min_x = optimal_theta[0] - 10
+ self.max_x = optimal_theta[0] + 10
+ self.min_y = optimal_theta[1] - 10
+ self.max_y = optimal_theta[1] + 10
options = dict(series=[])
@@ -418,9 +418,6 @@ def replot(self):
options['series'] += self.plot_gradient_and_contour(
self.min_x, self.max_x, self.min_y, self.max_y)
- min_value = np.min(self.cost_grid)
- max_value = np.max(self.cost_grid)
-
# highcharts parameters
kwargs = dict(
xAxis_title_text="theta 0",
@@ -433,13 +430,6 @@ def replot(self):
xAxis_endOnTick=False,
yAxis_startOnTick=False,
yAxis_endOnTick=False,
- # colorAxis=dict(
- # stops=[
- # [min_value, "#ffffff"],
- # [max_value, "#ff0000"]],
- # tickInterval=1, max=max_value, min=min_value),
- # plotOptions_contour_colsize=(self.max_y - self.min_y) / 10000,
- # plotOptions_contour_rowsize=(self.max_x - self.min_x) / 10000,
tooltip_enabled=False,
tooltip_headerFormat="",
tooltip_pointFormat="%s: {point.x:.2f}
"
@@ -481,7 +471,6 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# results
self.cost_grid = cost_values.reshape(xv.shape)
- # return self.plot_gradient(xv, yv, self.cost_grid) + \
return self.plot_contour(xv, yv, self.cost_grid)
def plot_gradient(self, x, y, grid):
@@ -584,17 +573,26 @@ def disable_controls(self, disabled):
self.properties_box.setDisabled(disabled)
def send_output(self):
+ """
+ Function sends output
+ """
self.send_model()
self.send_coefficients()
self.send_data()
def send_model(self):
+ """
+ Function sends model on output.
+ """
if self.learner is not None and self.learner.theta is not None:
self.send("Classifier", self.learner.model)
else:
self.send("Classifier", None)
def send_coefficients(self):
+ """
+ Function sends logistic regression coefficients on output.
+ """
if self.learner is not None and self.learner.theta is not None:
domain = Domain(
[ContinuousVariable("coef", number_of_decimals=7)],
@@ -608,6 +606,9 @@ def send_coefficients(self):
self.send("Coefficients", None)
def send_data(self):
+ """
+ Function sends data on output.
+ """
if self.selected_data is not None:
self.send("Data", self.selected_data)
else:
From 684e0cc3fc210156655d318a2f5f0bd62c99874e Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 4 Aug 2016 15:55:26 +0200
Subject: [PATCH 020/128] Part one of unit test for logistic regression.
---
.../widgets/utils/logistic_regression.py | 23 +-
.../utils/tests/test_logistic_regression.py | 265 ++++++++++++++++++
2 files changed, 273 insertions(+), 15 deletions(-)
create mode 100644 orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index fbb9804a..13e835cf 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -45,6 +45,10 @@ def set_data(self, data):
self.x = data.X
self.y = data.Y
self.domain = data.domain
+ else:
+ self.x = None
+ self.y = None
+ self.domain = None
def set_theta(self, theta):
"""
@@ -70,7 +74,10 @@ def model(self):
"""
Function returns model based on current parameters.
"""
- return LogisticRegressionModel(self.theta, self.domain)
+ if self.theta is None or self.domain is None:
+ return None
+ else:
+ return LogisticRegressionModel(self.theta, self.domain)
@property
def converged(self):
@@ -185,20 +192,6 @@ def set_list(l, i, v):
"""
Function sets i-th value in list to v. If i does not exist in l
it is initialized else value is modified
-
- Parameters
- ----------
- l : list
- List
- i : int
- Index of position in list
- v : any
- Value to insert in list
-
- Returns
- -------
- list
- List with inserted value v on position i
"""
try:
l[i] = v
diff --git a/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
new file mode 100644
index 00000000..eacad98f
--- /dev/null
+++ b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
@@ -0,0 +1,265 @@
+import unittest
+from Orange.data import Table, Domain
+from orangecontrib.educational.widgets.utils.logistic_regression import \
+ LogisticRegression
+from numpy.testing import *
+import numpy as np
+
+class TestKmeans(unittest.TestCase):
+
+ def setUp(self):
+ self.iris = Table('iris')
+ # new_domain = Domain(self.data.domain.attributes[:2])
+ # self.data = Table(new_domain, self.data)
+ self.logistic_regression = LogisticRegression()
+
+ def test_set_data(self):
+ """
+ Test set data
+ """
+
+ # check if None on beginning
+ self.assertIsNone(self.logistic_regression.x, None)
+ self.assertIsNone(self.logistic_regression.y, None)
+ self.assertIsNone(self.logistic_regression.domain, None)
+
+ # check if correct data are provided
+ self.logistic_regression.set_data(self.iris)
+
+ assert_array_equal(self.logistic_regression.x, self.iris.X)
+ assert_array_equal(self.logistic_regression.y, self.iris.Y)
+ self.assertEqual(self.logistic_regression.domain, self.iris.domain)
+
+ # check data remove
+ self.logistic_regression.set_data(None)
+
+ self.assertIsNone(self.logistic_regression.x, None)
+ self.assertIsNone(self.logistic_regression.y, None)
+ self.assertIsNone(self.logistic_regression.domain, None)
+
+ def test_set_theta(self):
+ """
+ Check set theta
+ """
+
+ lr = self.logistic_regression
+
+ # theta must be none on beginning
+ self.assertIsNone(lr.theta, None)
+
+ # check if theta set correctly
+ # theta from np array
+ lr.set_theta(np.array([1, 2]))
+ assert_array_equal(lr.theta, np.array([1, 2]))
+ # history of 0 have to be equal theta
+ assert_array_equal(lr.history[0][0], np.array([1, 2]))
+ # step no have to reset to 0
+ self.assertEqual(lr.step_no, 0)
+
+ # theta from list
+ lr.set_theta([2, 3])
+ assert_array_equal(lr.theta, np.array([2, 3]))
+ assert_array_equal(lr.history[0][0], np.array([2, 3]))
+ self.assertEqual(lr.step_no, 0)
+
+ # theta None
+ lr.set_theta(None)
+ self.assertIsNone(lr.theta)
+
+ # theta anything else
+ lr.set_theta("abc")
+ self.assertIsNone(lr.theta)
+
+ def test_set_alpha(self):
+ """
+ Check if alpha set correctly
+ """
+ lr = self.logistic_regression
+
+ # check alpha 0.1 in the beginning
+ self.assertEqual(lr.alpha, 0.1)
+
+ # check if alpha set correctly
+ lr.set_alpha(0.2)
+ self.assertEqual(lr.alpha, 0.2)
+
+ # check if alpha removed correctly
+ lr.set_alpha(None)
+ self.assertIsNone(lr.alpha)
+
+ def test_model(self):
+ """
+ Test if model is correct
+ """
+ lr = self.logistic_regression
+
+ # test if model None when no data
+ lr.set_theta([1, 2])
+ self.assertIsNone(lr.model)
+
+ # test if model None when no theta
+ lr.set_theta(None)
+ lr.set_data(self.iris)
+ self.assertIsNone(lr.model)
+
+ # test if model None when no theta and no Data
+ lr.set_data(None)
+ self.assertIsNone(lr.model)
+
+ # test when model is not none
+ lr.set_data(self.iris)
+ lr.set_theta([1, 1, 1, 1])
+ model = lr.model
+
+ # test parameters are ok
+ self.assertIsNotNone(model)
+ assert_array_equal(model.theta, np.array([1, 1, 1, 1]))
+ self.assertEqual(model.name, "Logistic Regression")
+
+ # test class returns correct predictions
+ values, probabilities = model(self.iris, ret=2)
+ self.assertEqual(len(values), len(self.iris))
+ self.assertEqual(len(probabilities), len(self.iris))
+ # values have to be 0 if prob <0.5 else 1
+ assert_array_equal(values, np.around(probabilities)[:,1])
+
+ def test_converged(self):
+ """
+ Test convergence flag of the algorithm
+ """
+ lr = self.logistic_regression
+ lr.set_data(self.iris)
+ lr.set_theta([1., 1., 1., 1.])
+ lr.set_alpha(1)
+ # experiments showed that convergence is faster with this alpha
+
+ # it can not converge in the first step
+ self.assertFalse(lr.converged)
+
+ # it converges when the distance between the current and previous theta is < 1e-2
+ converge = False
+ while not converge:
+ lr.step()
+ converge = np.sum(
+ np.abs(lr.theta - lr.history[lr.step_no - 1][0])) < 1e-2
+ self.assertEqual(lr.converged, converge)
+
+ def test_step(self):
+ """
+ Test step method
+ """
+ lr = self.logistic_regression
+
+ lr.set_theta([1., 1., 1., 1.])
+ lr.set_data(self.iris)
+
+ # check beginning
+ self.assertEqual(lr.step_no, 0)
+
+ # perform step
+ lr.step()
+
+ # check if parameters are fine
+ self.assertEqual(len(lr.theta), 4)
+ assert_array_equal(lr.history[1][0], lr.theta)
+
+ # perform step
+ lr.step()
+
+ # check if parameters are fine
+ self.assertEqual(len(lr.theta), 4)
+ assert_array_equal(lr.history[2][0], lr.theta)
+
+ # check for stochastic
+ lr.stochastic = True
+
+ # perform step
+ lr.step()
+ self.assertEqual(len(lr.theta), 4)
+ assert_array_equal(lr.history[3][0], lr.theta)
+
+ # check if stochastic_i indices are ok
+ self.assertEqual(lr.history[3][1], lr.stochastic_i)
+
+ # reset algorithm
+ lr.set_data(self.iris)
+
+ # wait for shuffle and check if fine
+ shuffle = False
+ while not shuffle:
+ lr.step()
+ shuffle = lr.history[lr.step_no][2] is not None
+ if shuffle:
+ self.assertEqual(len(lr.x), len(self.iris))
+ self.assertEqual(len(lr.y), len(self.iris))
+
+ def test_step_back(self):
+ """
+ Test step back function
+ """
+ lr = self.logistic_regression
+ theta = [1., 1., 1., 1.]
+
+ lr.set_data(self.iris)
+ lr.set_theta(theta)
+
+ # check no step back when no step done before
+ lr.step_back()
+ assert_array_equal(lr.theta, theta)
+ self.assertEqual(lr.step_no, 0)
+
+ # perform step and step back
+ lr.step()
+ lr.step_back()
+ assert_array_equal(lr.theta, theta)
+ self.assertEqual(lr.step_no, 0)
+
+ lr.step()
+ theta1 = np.copy(lr.theta)
+ lr.step()
+ lr.step_back()
+
+ assert_array_equal(lr.theta, theta1)
+ self.assertEqual(lr.step_no, 1)
+
+ lr.step_back()
+
+ assert_array_equal(lr.theta, theta)
+ self.assertEqual(lr.step_no, 0)
+
+ # test for stochastic
+ lr.stochastic = True
+
+ lr.step()
+ lr.step_back()
+ self.assertEqual(lr.stochastic_i, 0)
+ self.assertEqual(lr.step_no, 0)
+
+ lr.step()
+ theta1 = np.copy(lr.theta)
+ lr.step()
+ lr.step_back()
+
+ self.assertEqual(lr.stochastic_i, lr.stochastic_num_steps)
+ self.assertEqual(lr.step_no, 1)
+
+ lr.step_back()
+
+ self.assertEqual(lr.stochastic_i, 0)
+ self.assertEqual(lr.step_no, 0)
+
+ # wait for shuffle and check if fine
+ shuffle = False
+ before = np.copy(lr.x)
+ while not shuffle:
+ lr.step()
+ shuffle = lr.history[lr.step_no][2] is not None
+
+ lr.step_back()
+ assert_array_equal(lr.x, before)
+
+
+
+
+
+
From a8ca9d6f76bd3a9cedd4db0d6c42f4d9e1b13449 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Fri, 5 Aug 2016 10:18:03 +0200
Subject: [PATCH 021/128] Complete unittest for logistic regression
---
.../utils/tests/test_logistic_regression.py | 96 ++++++++++++++++++-
1 file changed, 93 insertions(+), 3 deletions(-)
diff --git a/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
index eacad98f..99c15fbd 100644
--- a/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
@@ -1,10 +1,11 @@
import unittest
-from Orange.data import Table, Domain
+from Orange.data import Table
from orangecontrib.educational.widgets.utils.logistic_regression import \
LogisticRegression
from numpy.testing import *
import numpy as np
+
class TestKmeans(unittest.TestCase):
def setUp(self):
@@ -121,7 +122,7 @@ def test_model(self):
self.assertEqual(len(values), len(self.iris))
self.assertEqual(len(probabilities), len(self.iris))
# values have to be 0 if prob <0.5 else 1
- assert_array_equal(values, np.around(probabilities)[:,1])
+ assert_array_equal(values, np.around(probabilities)[:, 1])
def test_converged(self):
"""
@@ -236,7 +237,6 @@ def test_step_back(self):
self.assertEqual(lr.step_no, 0)
lr.step()
- theta1 = np.copy(lr.theta)
lr.step()
lr.step_back()
@@ -258,8 +258,98 @@ def test_step_back(self):
lr.step_back()
assert_array_equal(lr.x, before)
+ def test_j(self):
+ """
+ Test cost function j
+ """
+ lr = self.logistic_regression
+
+ lr.set_data(self.iris)
+
+ # test with one theta and with list of thetas
+ self.assertEqual(type(lr.j(np.array([1., 1., 1., 1.]))), np.float64)
+ self.assertEqual(
+ len(lr.j(np.array([[1., 1., 1., 1.], [2, 2, 2, 2]]))), 2)
+
+ def test_dj(self):
+ """
+ Test gradient function
+ """
+ lr = self.logistic_regression
+ lr.set_data(self.iris)
+ # check length with stochastic and usual
+ self.assertEqual(len(lr.dj(np.array([1, 1, 1, 1]))), 4)
+ lr.stochastic = True
+ self.assertEqual(len(lr.dj(np.array([1, 1, 1, 1]))), 4)
+ def test_optimized(self):
+ """
+ Test if optimized works well
+ """
+ lr = self.logistic_regression
+ lr.set_data(self.iris)
+ op_theta = lr.optimized()
+ self.assertEqual(len(op_theta), 4)
+
+ # check if really minimal, function is monotonic so everywhere around
+ # j should be higher
+ self.assertLessEqual(
+ lr.j(op_theta), lr.j(op_theta + np.array([1, 0, 0, 0])))
+ self.assertLessEqual(
+ lr.j(op_theta), lr.j(op_theta + np.array([0, 1, 0, 0])))
+ self.assertLessEqual(
+ lr.j(op_theta), lr.j(op_theta + np.array([0, 0, 1, 0])))
+ self.assertLessEqual(
+ lr.j(op_theta), lr.j(op_theta + np.array([0, 0, 0, 1])))
+
+ def test_g(self):
+ """
+ Test sigmoid function
+ """
+ lr = self.logistic_regression
+ # test length
+ self.assertEqual(type(lr.g(1)), np.float64)
+ self.assertEqual(len(lr.g(np.array([1, 1]))), 2)
+ self.assertEqual(len(lr.g(np.array([1, 1, 1]))), 3)
+ self.assertEqual(len(lr.g(np.array([1, 1, 1, 1]))), 4)
+
+ # test correctness, function between 0 and 1
+ self.assertGreaterEqual(lr.g(-10000), 0)
+ self.assertGreaterEqual(lr.g(-1000), 0)
+ self.assertGreaterEqual(lr.g(-10), 0)
+ self.assertGreaterEqual(lr.g(-1), 0)
+ self.assertGreaterEqual(lr.g(0), 0)
+ self.assertGreaterEqual(lr.g(1), 0)
+ self.assertGreaterEqual(lr.g(10), 0)
+ self.assertGreaterEqual(lr.g(1000), 0)
+ self.assertGreaterEqual(lr.g(10000), 0)
+
+ self.assertLessEqual(lr.g(-10000), 1)
+ self.assertLessEqual(lr.g(-1000), 1)
+ self.assertLessEqual(lr.g(-10), 1)
+ self.assertLessEqual(lr.g(-1), 1)
+ self.assertLessEqual(lr.g(0), 1)
+ self.assertLessEqual(lr.g(1), 1)
+ self.assertLessEqual(lr.g(10), 1)
+ self.assertLessEqual(lr.g(1000), 1)
+ self.assertLessEqual(lr.g(10000), 1)
+
+ def test_set_list(self):
+ """
+ Test set list
+ """
+ lr = self.logistic_regression
+ # test adding Nones if list too short
+ self.assertEqual(lr.set_list([], 2, 1), [None, None, 1])
+ # test adding a single None when the list is one element short
+ self.assertEqual(lr.set_list([2], 2, 1), [2, None, 1])
+ # adding to end
+ self.assertEqual(lr.set_list([2, 1], 2, 1), [2, 1, 1])
+ # changing the element in the last place
+ self.assertEqual(lr.set_list([2, 1], 1, 3), [2, 3])
+ # changing the element in the middle place
+ self.assertEqual(lr.set_list([2, 1, 3], 1, 3), [2, 3, 3])
From 008098904f467bd0d1fabc349d22f2b00f931929 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Mon, 8 Aug 2016 10:15:50 +0200
Subject: [PATCH 022/128] Added field for step size for stochastic GDS
---
.../educational/widgets/owgradientdescent.py | 12 +++++++++++-
.../educational/widgets/utils/logistic_regression.py | 10 ++++++----
.../widgets/utils/tests/test_logistic_regression.py | 2 +-
3 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 42fbe07d..0e4d7a63 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -154,6 +154,7 @@ class OWGradientDescent(OWWidget):
attr_y = settings.Setting('')
target_class = settings.Setting('')
alpha = settings.Setting(0.1)
+ step_size = settings.Setting(30) # step size for stochastic gds
auto_play_speed = settings.Setting(1)
stochastic = settings.Setting(False)
@@ -224,6 +225,10 @@ def __init__(self):
widget=self.properties_box, master=self,
callback=self.change_stochastic, value="stochastic",
label="Stochastic: ")
+ self.step_size_spin = gui.spin(
+ widget=self.properties_box, master=self, callback=self.change_step,
+ value="step_size", label="Step size: ",
+ minv=1, maxv=100, step=1)
self.restart_button = gui.button(
widget=self.properties_box, master=self,
callback=self.restart, label="Restart")
@@ -339,7 +344,8 @@ def restart(self):
self.selected_data = self.select_data()
self.learner = self.default_learner(
data=self.selected_data,
- alpha=self.alpha, stochastic=self.stochastic)
+ alpha=self.alpha, stochastic=self.stochastic,
+ step_size=self.step_size)
self.replot()
self.send_output()
@@ -357,6 +363,10 @@ def change_stochastic(self):
if self.learner is not None:
self.learner.stochastic = self.stochastic
+ def change_step(self):
+ if self.learner is not None:
+ self.learner.stochastic_step_size = self.step_size
+
def change_theta(self, x, y):
"""
Function set new theta
diff --git a/orangecontrib/educational/widgets/utils/logistic_regression.py b/orangecontrib/educational/widgets/utils/logistic_regression.py
index 13e835cf..87a275e9 100644
--- a/orangecontrib/educational/widgets/utils/logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/logistic_regression.py
@@ -26,16 +26,18 @@ class LogisticRegression:
domain = None
step_no = 0
stochastic_i = 0
- stochastic_num_steps = 30 # number of steps in one step
+ stochastic_step_size = 30 # number of steps in one step
regularization_rate = 0.001
# very small regularization rate to avoid big parameters
- def __init__(self, alpha=0.1, theta=None, data=None, stochastic=False):
+ def __init__(self, alpha=0.1, theta=None, data=None, stochastic=False,
+ step_size=30):
self.history = []
self.set_alpha(alpha)
self.set_data(data)
self.set_theta(theta)
self.stochastic = stochastic
+ self.stochastic_step_size = step_size
def set_data(self, data):
"""
@@ -100,7 +102,7 @@ def step(self):
self.theta -= self.alpha * grad
# increase index used by stochastic gradient descent
- self.stochastic_i += self.stochastic_num_steps
+ self.stochastic_i += self.stochastic_step_size
seed = None # seed that will be stored to revert the shuffle
# if we came around all data set index to zero and permute data
@@ -153,7 +155,7 @@ def dj(self, theta, stochastic=False):
Gradient of the cost function for logistic regression
"""
if stochastic:
- ns = self.stochastic_num_steps
+ ns = self.stochastic_step_size
x = self.x[self.stochastic_i: self.stochastic_i + ns]
y = self.y[self.stochastic_i: self.stochastic_i + ns]
return x.T.dot(self.g(x.dot(theta)) - y)
diff --git a/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
index 99c15fbd..54e49dbc 100644
--- a/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
+++ b/orangecontrib/educational/widgets/utils/tests/test_logistic_regression.py
@@ -240,7 +240,7 @@ def test_step_back(self):
lr.step()
lr.step_back()
- self.assertEqual(lr.stochastic_i, lr.stochastic_num_steps)
+ self.assertEqual(lr.stochastic_i, lr.stochastic_step_size)
self.assertEqual(lr.step_no, 1)
lr.step_back()
From ae2597a37a55d7a568d07fd22d07158743f76c61 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Mon, 8 Aug 2016 12:53:27 +0200
Subject: [PATCH 023/128] Unit tests for OWGradientDescent, some bug fixes and
 small code upgrades
---
.../educational/widgets/owgradientdescent.py | 92 +--
.../widgets/tests/test_owgradientdescent.py | 525 ++++++++++++++++++
2 files changed, 575 insertions(+), 42 deletions(-)
create mode 100644 orangecontrib/educational/widgets/tests/test_owgradientdescent.py
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 0e4d7a63..de0275c1 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -304,6 +304,7 @@ def init_combos():
# clear variables
self.cost_grid = None
self.learner = None
+ self.selected_data = None
d = data
self.send_output()
@@ -314,6 +315,7 @@ def init_combos():
self.set_empty_plot()
elif sum(True for var in d.domain.attributes
if isinstance(var, ContinuousVariable)) < 2:
+ # fewer than two continuous variables
self.data = None
reset_combos()
self.Warning.to_few_features()
@@ -398,6 +400,8 @@ def step_back(self):
"""
Function performs step back
"""
+ if self.data is None:
+ return
if self.learner.step_no > 0:
self.learner.step_back()
self.scatter.remove_last_point("path")
@@ -492,33 +496,6 @@ def plot_gradient(self, x, y, grid):
grid_width=self.grid_size,
type="contour")]
- def select_data(self):
- """
- Function takes two selected columns from data table and merge them
- in new Orange.data.Table
-
- Returns
- -------
- Table
- Table with selected columns
- """
- attr_x = self.data.domain[self.attr_x]
- attr_y = self.data.domain[self.attr_y]
- cols = []
- for attr in (attr_x, attr_y):
- subset = self.data[:, attr]
- cols.append(subset.X)
- x = np.column_stack(cols)
- domain = Domain(
- [attr_x, attr_y],
- [DiscreteVariable(name=self.data.domain.class_var.name,
- values=[self.target_class, 'Others'])],
- [self.data.domain.class_var])
- y = [(0 if d.get_class().value == self.target_class else 1)
- for d in self.data]
-
- return Normalize(Table(domain, x, y, self.data.Y[:, None]))
-
def plot_contour(self, xv, yv, cost_grid):
"""
Function constructs contour lines
@@ -547,23 +524,54 @@ def plot_contour(self, xv, yv, cost_grid):
count += 1
return series
+ def select_data(self):
+ """
+ Function takes two selected columns from data table and merge them
+ in new Orange.data.Table
+
+ Returns
+ -------
+ Table
+ Table with selected columns
+ """
+ if self.data is None:
+ return
+
+ attr_x = self.data.domain[self.attr_x]
+ attr_y = self.data.domain[self.attr_y]
+ cols = []
+ for attr in (attr_x, attr_y):
+ subset = self.data[:, attr]
+ cols.append(subset.X)
+ x = np.column_stack(cols)
+ domain = Domain(
+ [attr_x, attr_y],
+ [DiscreteVariable(name=self.data.domain.class_var.name,
+ values=[self.target_class, 'Others'])],
+ [self.data.domain.class_var])
+ y = [(0 if d.get_class().value == self.target_class else 1)
+ for d in self.data]
+
+ return Normalize(Table(domain, x, y, self.data.Y[:, None]))
+
def auto_play(self):
"""
Function called when autoplay button pressed
"""
- self.auto_play_enabled = not self.auto_play_enabled
- self.auto_play_button.setText(
- self.auto_play_button_text[self.auto_play_enabled])
- if self.auto_play_enabled:
- self.disable_controls(self.auto_play_enabled)
- self.auto_play_thread = Autoplay(self)
- self.connect(self.auto_play_thread, SIGNAL("step()"), self.step)
- self.connect(
- self.auto_play_thread, SIGNAL("stop_auto_play()"),
- self.stop_auto_play)
- self.auto_play_thread.start()
- else:
- self.stop_auto_play()
+ if self.data is not None:
+ self.auto_play_enabled = not self.auto_play_enabled
+ self.auto_play_button.setText(
+ self.auto_play_button_text[self.auto_play_enabled])
+ if self.auto_play_enabled:
+ self.disable_controls(self.auto_play_enabled)
+ self.auto_play_thread = Autoplay(self)
+ self.connect(self.auto_play_thread, SIGNAL("step()"), self.step)
+ self.connect(
+ self.auto_play_thread, SIGNAL("stop_auto_play()"),
+ self.stop_auto_play)
+ self.auto_play_thread.start()
+ else:
+ self.stop_auto_play()
def stop_auto_play(self):
"""
@@ -605,8 +613,8 @@ def send_coefficients(self):
"""
if self.learner is not None and self.learner.theta is not None:
domain = Domain(
- [ContinuousVariable("coef", number_of_decimals=7)],
- metas=[StringVariable("name")])
+ [ContinuousVariable("Coefficients", number_of_decimals=7)],
+ metas=[StringVariable("Name")])
names = ["theta 0", "theta 1"]
coefficients_table = Table(
diff --git a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
new file mode 100644
index 00000000..23863c31
--- /dev/null
+++ b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
@@ -0,0 +1,525 @@
+from numpy.testing import *
+import numpy as np
+
+from Orange.data import Table, Domain, ContinuousVariable, DiscreteVariable
+from Orange.widgets.tests.base import WidgetTest
+
+from orangecontrib.educational.widgets.owgradientdescent import \
+ OWGradientDescent
+
+
+class TestOWGradientDescent(WidgetTest):
+
+ def setUp(self):
+ self.widget = self.create_widget(OWGradientDescent)
+ self.iris = Table('iris')
+
+ def test_set_data(self):
+ """
+ Test set data
+ """
+ w = self.widget
+
+ # test on init
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+
+ # call with none data
+ self.send_signal("Data", None)
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+
+ # call with no class variable
+ table_no_class = Table(
+ Domain([ContinuousVariable("x"), ContinuousVariable("y")]),
+ [[1, 2], [2, 3]])
+ self.send_signal("Data", table_no_class)
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+ self.assertTrue(w.Warning.no_class.is_shown())
+
+ # with only one class value
+ table_one_class = Table(
+ Domain([ContinuousVariable("x"), ContinuousVariable("y")],
+ DiscreteVariable("a", values=["k"])),
+ [[1, 2], [2, 3]], [0, 0])
+ self.send_signal("Data", table_one_class)
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+ self.assertTrue(w.Warning.no_class.is_shown())
+
+ # not enough continuous variables
+ table_no_enough_cont = Table(
+ Domain(
+ [ContinuousVariable("x"),
+ DiscreteVariable("y", values=["a", "b"])],
+ ContinuousVariable("a")),
+ [[1, 0], [2, 1]], [0, 0])
+ self.send_signal("Data", table_no_enough_cont)
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+ self.assertTrue(w.Warning.to_few_features.is_shown())
+
+ # init with ok data
+ num_continuous_attributes = sum(
+ True for var in self.iris.domain.attributes
+ if isinstance(var, ContinuousVariable))
+
+ self.send_signal("Data", self.iris)
+ self.assertEqual(w.cbx.count(), num_continuous_attributes)
+ self.assertEqual(w.cby.count(), num_continuous_attributes)
+ self.assertEqual(
+ w.target_class_combobox.count(),
+ len(self.iris.domain.class_var.values))
+ self.assertEqual(w.cbx.currentText(), self.iris.domain[0].name)
+ self.assertEqual(w.cby.currentText(), self.iris.domain[1].name)
+ self.assertEqual(
+ w.target_class_combobox.currentText(),
+ self.iris.domain.class_var.values[0])
+
+ self.assertEqual(w.attr_x, self.iris.domain[0].name)
+ self.assertEqual(w.attr_y, self.iris.domain[1].name)
+ self.assertEqual(w.target_class, self.iris.domain.class_var.values[0])
+
+ # change showed attributes
+ w.attr_x = self.iris.domain[1].name
+ w.attr_y = self.iris.domain[2].name
+ w.target_class = self.iris.domain.class_var.values[1]
+
+ self.assertEqual(w.cbx.currentText(), self.iris.domain[1].name)
+ self.assertEqual(w.cby.currentText(), self.iris.domain[2].name)
+ self.assertEqual(
+ w.target_class_combobox.currentText(),
+ self.iris.domain.class_var.values[1])
+
+ self.assertEqual(w.attr_x, self.iris.domain[1].name)
+ self.assertEqual(w.attr_y, self.iris.domain[2].name)
+ self.assertEqual(w.target_class, self.iris.domain.class_var.values[1])
+
+ # remove data
+ self.send_signal("Data", None)
+ self.assertIsNone(w.data)
+ self.assertEqual(w.cbx.count(), 0)
+ self.assertEqual(w.cby.count(), 0)
+ self.assertEqual(w.target_class_combobox.count(), 0)
+ self.assertIsNone(w.learner)
+ self.assertIsNone(w.cost_grid)
+
+ def test_restart(self):
+ """
+ Test if restart works fine
+ """
+ w = self.widget
+
+ # check if init is as expected
+ self.assertIsNone(w.selected_data)
+ self.assertIsNone(w.learner)
+
+ # with data
+ self.send_signal("Data", self.iris)
+ self.assertEqual(len(w.selected_data), len(self.iris))
+ assert_array_equal(w.learner.x, w.selected_data.X)
+ assert_array_equal(w.learner.y, w.selected_data.Y)
+ assert_array_equal(w.learner.domain, w.selected_data.domain)
+ self.assertEqual(w.learner.alpha, w.alpha)
+ self.assertEqual(w.learner.stochastic, False)
+ self.assertEqual(w.learner.stochastic_step_size, w.step_size)
+
+ # again no data
+ self.send_signal("Data", None)
+ self.assertIsNone(w.selected_data)
+ self.assertIsNone(w.learner)
+
+ def test_change_alpha(self):
+ """
+ Function check if alpha is changing correctly
+ """
+ w = self.widget
+
+ # to define learner
+ self.send_signal("Data", self.iris)
+
+ # check init alpha
+ self.assertEqual(w.learner.alpha, 0.1)
+
+ # change alpha
+ w.alpha_spin.setValue(1)
+ self.assertEqual(w.learner.alpha, 1)
+ w.alpha_spin.setValue(0.3)
+ self.assertEqual(w.learner.alpha, 0.3)
+
+ # just check if nothing happens when no learner
+ self.send_signal("Data", None)
+ self.assertIsNone(w.learner)
+ w.alpha_spin.setValue(5)
+
+ def test_change_stochastic(self):
+ """
+ Test changing stochastic
+ """
+ w = self.widget
+
+ # define learner
+ self.send_signal("Data", self.iris)
+
+ # check init
+ self.assertFalse(w.learner.stochastic)
+
+ # change stochastic
+ w.stochastic_checkbox.click()
+ self.assertTrue(w.learner.stochastic)
+ w.stochastic_checkbox.click()
+ self.assertFalse(w.learner.stochastic)
+
+ # just check if nothing happens when no learner
+ self.send_signal("Data", None)
+ self.assertIsNone(w.learner)
+ w.stochastic_checkbox.click()
+
+ def change_step(self):
+ """
+ Function check if change step works correctly
+ """
+ w = self.widget
+
+ # to define learner
+ self.send_signal("Data", self.iris)
+
+ # check init alpha
+ self.assertEqual(w.learner.stochastic_step_size, 30)
+
+ # change alpha
+ w.step_size_spin.setValue(50)
+ self.assertEqual(w.learner.stochastic_step_size, 50)
+ w.step_size_spin.setValue(40)
+ self.assertEqual(w.learner.stochastic_step_size, 40)
+
+ # just check if nothing happens when no learner
+ self.send_signal("Data", None)
+ self.assertIsNone(w.learner)
+ w.step_size_spin.setValue(40)
+
+ def test_change_theta(self):
+ """
+ Test setting theta
+ """
+ w = self.widget
+
+ # to define learner
+ self.send_signal("Data", self.iris)
+
+ # check init alpha
+ self.assertIsNone(w.learner.theta)
+
+ # change alpha
+ w.change_theta(1, 1)
+ assert_array_equal(w.learner.theta, [1, 1])
+ w.change_theta(1, 2)
+ assert_array_equal(w.learner.theta, [1, 2])
+
+ # just check if nothing happens when no learner
+ self.send_signal("Data", None)
+ self.assertIsNone(w.learner)
+ w.change_theta(1, 1)
+
+ def test_step(self):
+ """
+ Test step
+ """
+ w = self.widget
+
+ # test that the function does not crash when there is no data and no learner
+ w.step()
+
+ self.send_signal("Data", self.iris)
+
+ # test theta set when none
+ self.assertIsNone(w.learner.theta)
+ w.step()
+ self.assertIsNotNone(w.learner.theta)
+
+ # check theta is changing when step
+ old_theta = np.copy(w.learner.theta)
+ w.step()
+ self.assertNotEqual(sum(old_theta - w.learner.theta), 0)
+
+ def test_step_back(self):
+ """
+ Test stepping back
+ """
+ w = self.widget
+
+ # test that the function does not crash when there is no data and no learner
+ w.step_back()
+
+ self.send_signal("Data", self.iris)
+
+ # test step back not performed when step_no == 0
+ old_theta = np.copy(w.learner.theta)
+ w.step_back()
+ assert_array_equal(w.learner.theta, old_theta)
+
+ # test same theta when step performed
+ w.change_theta(1.0, 1.0)
+ theta = np.copy(w.learner.theta)
+ w.step()
+ w.step_back()
+ assert_array_equal(theta, w.learner.theta)
+
+ w.change_theta(1.0, 1.0)
+ theta1 = np.copy(w.learner.theta)
+ w.step()
+ theta2 = np.copy(w.learner.theta)
+ w.step()
+ theta3 = np.copy(w.learner.theta)
+ w.step()
+ w.step_back()
+ assert_array_equal(theta3, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta2, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta1, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta1, w.learner.theta)
+
+ # test for stochastic
+ w.stochastic_checkbox.click()
+
+ w.change_theta(1.0, 1.0)
+ theta = np.copy(w.learner.theta)
+ w.step()
+ w.step_back()
+ assert_array_equal(theta, w.learner.theta)
+
+ w.change_theta(1.0, 1.0)
+ theta1 = np.copy(w.learner.theta)
+ w.step()
+ theta2 = np.copy(w.learner.theta)
+ w.step()
+ theta3 = np.copy(w.learner.theta)
+ w.step()
+ w.step_back()
+ assert_array_equal(theta3, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta2, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta1, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta1, w.learner.theta)
+
+ # test mix stochastic and normal
+ # now it is stochastic
+
+ w.change_theta(1.0, 1.0)
+ theta1 = np.copy(w.learner.theta)
+ w.step()
+ theta2 = np.copy(w.learner.theta)
+ w.step()
+ w.stochastic_checkbox.click()
+ theta3 = np.copy(w.learner.theta)
+ w.step()
+ w.step_back()
+ assert_array_equal(theta3, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta2, w.learner.theta)
+ w.step_back()
+ w.stochastic_checkbox.click()
+ assert_array_equal(theta1, w.learner.theta)
+ w.step_back()
+ assert_array_equal(theta1, w.learner.theta)
+
+ def test_replot(self):
+ """
+ Test replot function and all functions connected with it
+ """
+ w = self.widget
+ # nothing happens when no data
+ w.replot()
+
+ self.assertIsNone(w.cost_grid)
+ self.assertEqual(w.scatter.count_replots, 1)
+
+ self.send_signal("Data", self.iris)
+ self.assertTupleEqual(w.cost_grid.shape, (w.grid_size, w.grid_size))
+ self.assertEqual(w.scatter.count_replots, 2)
+
+ # when step no new re-plots
+ w.step()
+ self.assertEqual(w.scatter.count_replots, 2)
+
+ # triggered new re-plot
+ self.send_signal("Data", self.iris)
+ self.assertTupleEqual(w.cost_grid.shape, (w.grid_size, w.grid_size))
+ self.assertEqual(w.scatter.count_replots, 3)
+
+ def test_select_data(self):
+ """
+ Test select data function
+ """
+ w = self.widget
+
+ # test for none data
+ self.send_signal("Data", None)
+
+ self.assertIsNone(w.select_data()) # result is none
+
+ # test on iris
+ self.send_signal("Data", self.iris)
+ self.assertEqual(len(w.select_data()), len(self.iris))
+ self.assertEqual(len(w.select_data().domain.attributes), 2)
+ self.assertEqual(len(w.select_data().domain.class_var.values), 2)
+ self.assertEqual(w.select_data().domain.class_var.values[1], 'Others')
+ self.assertEqual(w.select_data().domain.attributes[0].name, w.attr_x)
+ self.assertEqual(w.select_data().domain.attributes[1].name, w.attr_y)
+ self.assertEqual(
+ w.select_data().domain.class_var.values[0], w.target_class)
+
+ def test_autoplay(self):
+ """
+ Test autoplay functionalities
+ """
+ w = self.widget
+
+ # test that it does not crash when data is None
+ w.auto_play()
+
+ # set data
+ self.send_signal("Data", self.iris)
+
+ # check init
+ self.assertFalse(w.auto_play_enabled)
+ self.assertEqual(w.auto_play_button.text(), w.auto_play_button_text[0])
+ self.assertTrue((w.step_box.isEnabled()))
+ self.assertTrue((w.options_box.isEnabled()))
+ self.assertTrue((w.properties_box.isEnabled()))
+
+ # auto play on
+ w.auto_play()
+ self.assertTrue(w.auto_play_enabled)
+ self.assertEqual(w.auto_play_button.text(), w.auto_play_button_text[1])
+ self.assertFalse((w.step_box.isEnabled()))
+ self.assertFalse((w.options_box.isEnabled()))
+ self.assertFalse((w.properties_box.isEnabled()))
+
+ # stop auto play
+ w.auto_play()
+ self.assertFalse(w.auto_play_enabled)
+ self.assertEqual(w.auto_play_button.text(), w.auto_play_button_text[0])
+ self.assertTrue((w.step_box.isEnabled()))
+ self.assertTrue((w.options_box.isEnabled()))
+ self.assertTrue((w.properties_box.isEnabled()))
+
+ def test_disable_controls(self):
+ """
+ Test disabling controls
+ """
+ w = self.widget
+
+ # check init
+ self.assertTrue((w.step_box.isEnabled()))
+ self.assertTrue((w.options_box.isEnabled()))
+ self.assertTrue((w.properties_box.isEnabled()))
+
+ # disable
+ w.disable_controls(True)
+ self.assertFalse((w.step_box.isEnabled()))
+ self.assertFalse((w.options_box.isEnabled()))
+ self.assertFalse((w.properties_box.isEnabled()))
+
+ w.disable_controls(True)
+ self.assertFalse((w.step_box.isEnabled()))
+ self.assertFalse((w.options_box.isEnabled()))
+ self.assertFalse((w.properties_box.isEnabled()))
+
+ # enable
+ w.disable_controls(False)
+ self.assertTrue((w.step_box.isEnabled()))
+ self.assertTrue((w.options_box.isEnabled()))
+ self.assertTrue((w.properties_box.isEnabled()))
+
+ w.disable_controls(False)
+ self.assertTrue((w.step_box.isEnabled()))
+ self.assertTrue((w.options_box.isEnabled()))
+ self.assertTrue((w.properties_box.isEnabled()))
+
+ def test_send_model(self):
+ """
+ Test sending model
+ """
+ w = self.widget
+
+ # when no learner
+ self.assertIsNone(self.get_output("Classifier"))
+
+ # when learner but no theta
+ self.send_signal("Data", self.iris)
+ self.assertIsNone(self.get_output("Classifier"))
+
+ # when everything fine
+ w.change_theta(1., 1.)
+ assert_array_equal(self.get_output("Classifier").theta, [1., 1.])
+
+ # when data deleted
+ self.send_signal("Data", None)
+ self.assertIsNone(self.get_output("Classifier"))
+
+ def test_send_coefficients(self):
+ w = self.widget
+
+ # when no learner
+ self.assertIsNone(self.get_output("Coefficients"))
+
+ # when learner but no theta
+ self.send_signal("Data", self.iris)
+ self.assertIsNone(self.get_output("Coefficients"))
+
+ # when everything fine
+ w.change_theta(1., 1.)
+ coef_out = self.get_output("Coefficients")
+ self.assertEqual(len(coef_out), 2)
+ self.assertEqual(len(coef_out.domain.attributes), 1)
+ self.assertEqual(coef_out.domain.attributes[0].name, "Coefficients")
+ self.assertEqual(len(coef_out.domain.metas), 1)
+ self.assertEqual(coef_out.domain.metas[0].name, "Name")
+
+ # when data deleted
+ self.send_signal("Data", None)
+ self.assertIsNone(self.get_output("Coefficients"))
+
+ def test_send_data(self):
+ """
+ Test sending selected data to output
+ """
+ w = self.widget
+
+ # when no data
+ self.assertIsNone(self.get_output("Data"))
+
+ # when everything fine
+ self.send_signal("Data", self.iris)
+ w.change_theta(1., 1.)
+ assert_array_equal(self.get_output("Data"), w.selected_data)
+
+ # when data deleted
+ self.send_signal("Data", None)
+ self.assertIsNone(self.get_output("Data"))
From 911e9a07b4e2c82cddd743a6df8a60425eb3c875 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Mon, 8 Aug 2016 13:17:44 +0200
Subject: [PATCH 024/128] Added unit test for contour
---
.../widgets/utils/tests/test_contours.py | 452 ++++++++++++++++++
1 file changed, 452 insertions(+)
create mode 100644 orangecontrib/educational/widgets/utils/tests/test_contours.py
diff --git a/orangecontrib/educational/widgets/utils/tests/test_contours.py b/orangecontrib/educational/widgets/utils/tests/test_contours.py
new file mode 100644
index 00000000..bd57074f
--- /dev/null
+++ b/orangecontrib/educational/widgets/utils/tests/test_contours.py
@@ -0,0 +1,452 @@
+import unittest
+
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from orangecontrib.educational.widgets.utils.contour import Contour
+
+
+class TestContours(unittest.TestCase):
+
+ def setUp(self):
+ x = np.linspace(0, 10, 11)
+ y = np.linspace(0, 10, 11)
+ self.xv, self.yv = np.meshgrid(x, y)
+ self.z_vertical_asc = self.yv
+ self.z_vertical_desc = np.max(self.yv) - self.yv
+ self.z_horizontal_asc = self.xv
+ self.z_horizontal_desc = np.max(self.xv) - self.xv
+
+ # lt = left top, rt = right top, lb = left bottom, rb = right bottom
+ self.z_rt_lb_desc = self.xv + (np.max(self.yv) - self.yv)
+ self.z_rt_lb_asc = (np.max(self.xv) - self.xv) + self.yv
+ self.z_lt_rb_asc = self.xv + self.yv
+ self.z_lt_rb_desc = (np.max(self.xv) - self.xv) + \
+ (np.max(self.yv) - self.yv)
+
+ # test for testing cycles and 5s and 10s
+ self.cycle1 = np.array([[0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ self.cycle2 = np.array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]])
+ x = np.linspace(0, 4, 5)
+ y = np.linspace(0, 4, 5)
+ self.xv_cycle, self.yv_cycle = np.meshgrid(x, y)
+
+ def test_contours(self):
+ """
+ Test if right amount of values
+ """
+ c = Contour(self.xv, self.yv, self.z_vertical_asc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_vertical_desc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_horizontal_asc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_horizontal_desc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_lt_rb_asc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_lt_rb_desc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_rt_lb_asc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ c = Contour(self.xv, self.yv, self.z_rt_lb_desc)
+ c_lines = c.contours([1, 2, 3])
+
+ # all lines exist in this particular data
+ self.assertIn(1, c_lines.keys())
+ self.assertIn(2, c_lines.keys())
+ self.assertIn(3, c_lines.keys())
+
+ # in this particular data, no line is split into multiple pieces
+ self.assertEqual(len(c_lines[1]), 1)
+ self.assertEqual(len(c_lines[2]), 1)
+ self.assertEqual(len(c_lines[3]), 1)
+
+ # test in cycle set
+ c = Contour(self.xv_cycle, self.yv_cycle, self.cycle1)
+ c_lines = c.contours([0.5])
+
+ self.assertIn(0.5, c_lines.keys())
+ self.assertEqual(len(c_lines[0.5]), 1)
+
+ # test starting with square 5; previously only square 10 was checked
+ c = Contour(self.xv_cycle, self.yv_cycle, self.cycle2)
+ c_lines = c.contours([0.5])
+
+ self.assertIn(0.5, c_lines.keys())
+ self.assertEqual(len(c_lines[0.5]), 1)
+
+ # test no contours, then no key in dict
+ c = Contour(self.xv_cycle, self.yv_cycle, self.cycle2)
+ c_lines = c.contours([1.5])
+
+ self.assertNotIn(1.5, c_lines.keys())
+
+ def test_find_contours(self):
+ """
+ Test if right contours found for threshold
+ """
+ # check all horizontal edges
+ c = Contour(self.xv, self.yv, self.z_horizontal_asc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([1, i], points[0])
+
+ points = c.find_contours(5)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([5, i], points[0])
+
+ c = Contour(self.xv, self.yv, self.z_horizontal_desc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([9, i], points[0])
+
+ points = c.find_contours(5)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([5, i], points[0])
+
+ # check all vertical edges
+ c = Contour(self.xv, self.yv, self.z_vertical_asc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 1], points[0])
+
+ points = c.find_contours(5)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 5], points[0])
+
+ c = Contour(self.xv, self.yv, self.z_vertical_desc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 9], points[0])
+
+ points = c.find_contours(5)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 5], points[0])
+
+ # check all top-left bottom-right edges
+ c = Contour(self.xv, self.yv, self.z_lt_rb_asc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ self.assertIn([0, 1], points[0])
+ self.assertIn([1, 0], points[0])
+
+ points = c.find_contours(10)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 10-i], points[0])
+
+ c = Contour(self.xv, self.yv, self.z_lt_rb_desc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ self.assertIn([10, 9], points[0])
+ self.assertIn([9, 10], points[0])
+
+ points = c.find_contours(10)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([i, 10-i], points[0])
+
+ # check all top-right bottom-left edges
+ c = Contour(self.xv, self.yv, self.z_rt_lb_asc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ self.assertIn([9, 0], points[0])
+ self.assertIn([10, 1], points[0])
+
+ points = c.find_contours(10)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([10-i, 10-i], points[0])
+
+ c = Contour(self.xv, self.yv, self.z_rt_lb_desc)
+
+ points = c.find_contours(1)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ self.assertIn([0, 9], points[0])
+ self.assertIn([1, 10], points[0])
+
+ points = c.find_contours(10)
+ self.assertEqual(len(points), 1) # only one line in particular example
+ for i in range(11):
+ self.assertIn([10-i, 10-i], points[0])
+
+ c = Contour(self.xv_cycle, self.yv_cycle, self.cycle1)
+
+ points = c.find_contours(0.5)
+ self.assertEqual(len(points[0]), 13)
+ self.assertIn([1, 0.5], points[0])
+ self.assertIn([1.5, 1], points[0])
+ self.assertIn([2, 1.5], points[0])
+ self.assertIn([2.5, 2], points[0])
+ self.assertIn([2, 2.5], points[0])
+ self.assertIn([1.5, 3], points[0])
+ self.assertIn([1, 3.5], points[0])
+ self.assertIn([0.5, 3], points[0])
+ self.assertIn([1, 2.5], points[0])
+ self.assertIn([1.5, 2], points[0])
+ self.assertIn([1, 1.5], points[0])
+ self.assertIn([0.5, 1], points[0])
+
+ c = Contour(self.xv_cycle, self.yv_cycle, self.cycle2)
+
+ points = c.find_contours(0.5)
+ self.assertEqual(len(points[0]), 13)
+ self.assertIn([2, 0.5], points[0])
+ self.assertIn([2.5, 1], points[0])
+ self.assertIn([2, 1.5], points[0])
+ self.assertIn([1.5, 2], points[0])
+ self.assertIn([2, 2.5], points[0])
+ self.assertIn([2.5, 3], points[0])
+ self.assertIn([2, 3.5], points[0])
+ self.assertIn([1.5, 3], points[0])
+ self.assertIn([1, 2.5], points[0])
+ self.assertIn([0.5, 2], points[0])
+ self.assertIn([1, 1.5], points[0])
+ self.assertIn([1.5, 1], points[0])
+
+ def test_to_real_coordinate(self):
+ c = Contour(self.xv, self.yv, self.z_horizontal_asc)
+
+ # integers same because of grid with integers
+ self.assertEqual(c.to_real_coordinate([1, 1]), [1, 1])
+
+ # real coordinates must have x in first place (grid indices are row-first)
+ self.assertEqual(c.to_real_coordinate([1, 2]), [2, 1])
+
+ # middle values
+ self.assertEqual(c.to_real_coordinate([1, 1.5]), [1.5, 1])
+ self.assertEqual(c.to_real_coordinate([1.5, 1.5]), [1.5, 1.5])
+ self.assertEqual(c.to_real_coordinate([1.5, 1]), [1, 1.5])
+ self.assertEqual(c.to_real_coordinate([5, 5.5]), [5.5, 5])
+ self.assertEqual(c.to_real_coordinate([5.5, 5.5]), [5.5, 5.5])
+ self.assertEqual(c.to_real_coordinate([5.5, 5]), [5, 5.5])
+
+ # meshgrid no integers
+ xv, yv = np.meshgrid(np.linspace(0, 5, 11), np.linspace(0, 5, 11))
+ c = Contour(xv, yv, self.z_horizontal_asc)
+
+ self.assertEqual(c.to_real_coordinate([1, 1]), [0.5, 0.5])
+ self.assertEqual(c.to_real_coordinate([1, 1.5]), [0.75, 0.5])
+ self.assertEqual(c.to_real_coordinate([1.5, 1.5]), [0.75, 0.75])
+ self.assertEqual(c.to_real_coordinate([1.5, 1]), [0.5, 0.75])
+ self.assertEqual(c.to_real_coordinate([5, 5.5]), [2.75, 2.5])
+ self.assertEqual(c.to_real_coordinate([5.5, 5.5]), [2.75, 2.75])
+ self.assertEqual(c.to_real_coordinate([5.5, 5]), [2.5, 2.75])
+
+ def test_triangulate(self):
+ self.assertEqual(Contour.triangulate(0, 0, 1), 0)
+ self.assertEqual(Contour.triangulate(1, 0, 1), 1)
+ self.assertEqual(Contour.triangulate(0.5, 0, 1), 0.5)
+ self.assertEqual(Contour.triangulate(0.3, 0, 1), 0.3)
+
+ self.assertEqual(Contour.triangulate(0, 1, 0), 1)
+ self.assertEqual(Contour.triangulate(1, 1, 0), 0)
+ self.assertEqual(Contour.triangulate(0.5, 1, 0), 0.5)
+ self.assertEqual(Contour.triangulate(0.3, 1, 0), 0.7)
+
+ def test_new_position(self):
+ # when sq not equal 5 or 10 previous position does not matter
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 0], [1, 0]]), None, np.array([1, 1])), [2, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 0], [0, 1]]), None, np.array([1, 1])), [1, 2])
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 0], [1, 1]]), None, np.array([1, 1])), [1, 2])
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [0, 0]]), None, np.array([1, 1])), [0, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [0, 1]]), None, np.array([1, 1])), [0, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [1, 1]]), None, np.array([1, 1])), [0, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [0, 0]]), None, np.array([1, 1])), [1, 0])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [1, 0]]), None, np.array([1, 1])), [2, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [1, 1]]), None, np.array([1, 1])), [1, 2])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 1], [0, 0]]), None, np.array([1, 1])), [1, 0])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 1], [1, 0]]), None, np.array([1, 1])), [2, 1])
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 1], [0, 1]]), None, np.array([1, 1])), [1, 0])
+
+ # sq = 5
+ # start on edge
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [1, 0]]), None, np.array([1, 1])), [0, 1])
+ # previous from left
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [1, 0]]), np.array([1, 0]),
+ np.array([1, 1])), [0, 1])
+ # previous from right
+ assert_array_equal(Contour.new_position(
+ np.array([[0, 1], [1, 0]]), np.array([1, 2]),
+ np.array([1, 1])), [2, 1])
+
+ # sq = 10
+ # start on edge
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [0, 1]]), None, np.array([1, 1])), [1, 2])
+ # previous from top
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [0, 1]]), np.array([0, 1]),
+ np.array([1, 1])), [1, 2])
+ # previous from bottom
+ assert_array_equal(Contour.new_position(
+ np.array([[1, 0], [0, 1]]), np.array([2, 1]),
+ np.array([1, 1])), [1, 0])
+
+ def test_corner_idx(self):
+ self.assertEqual(Contour.corner_idx([[0, 0], [0, 0]]), 0)
+ self.assertEqual(Contour.corner_idx([[0, 0], [1, 0]]), 1)
+ self.assertEqual(Contour.corner_idx([[0, 0], [0, 1]]), 2)
+ self.assertEqual(Contour.corner_idx([[0, 0], [1, 1]]), 3)
+ self.assertEqual(Contour.corner_idx([[0, 1], [0, 0]]), 4)
+ self.assertEqual(Contour.corner_idx([[0, 1], [1, 0]]), 5)
+ self.assertEqual(Contour.corner_idx([[0, 1], [0, 1]]), 6)
+ self.assertEqual(Contour.corner_idx([[0, 1], [1, 1]]), 7)
+ self.assertEqual(Contour.corner_idx([[1, 0], [0, 0]]), 8)
+ self.assertEqual(Contour.corner_idx([[1, 0], [1, 0]]), 9)
+ self.assertEqual(Contour.corner_idx([[1, 0], [0, 1]]), 10)
+ self.assertEqual(Contour.corner_idx([[1, 0], [1, 1]]), 11)
+ self.assertEqual(Contour.corner_idx([[1, 1], [0, 0]]), 12)
+ self.assertEqual(Contour.corner_idx([[1, 1], [1, 0]]), 13)
+ self.assertEqual(Contour.corner_idx([[1, 1], [0, 1]]), 14)
+ self.assertEqual(Contour.corner_idx([[1, 1], [1, 1]]), 15)
+
+ def test_visited(self):
+ c = Contour(self.xv, self.yv, self.z_rt_lb_desc)
+ c.visited_points = np.zeros(self.xv.shape)
+
+ self.assertFalse(c.visited(0, 0, True))
+ self.assertFalse(c.visited(0, 0, False))
+
+ # check if upper
+ c.mark_visited(0, 0, True)
+ self.assertTrue(c.visited(0, 0, True))
+ self.assertFalse(c.visited(0, 0, False))
+
+ # check if lower
+ c.mark_visited(1, 1, False)
+ self.assertFalse(c.visited(1, 1, True))
+ self.assertTrue(c.visited(1, 1, False))
+
+ # check if ok when mark again
+ c.mark_visited(1, 1, False)
+ self.assertFalse(c.visited(1, 1, True))
+ self.assertTrue(c.visited(1, 1, False))
+
+ c.mark_visited(0, 0, True)
+ self.assertTrue(c.visited(0, 0, True))
+ self.assertFalse(c.visited(0, 0, False))
+
+ # check both lower-first and upper-first marking orders
+ c.mark_visited(1, 1, True)
+ self.assertTrue(c.visited(1, 1, True))
+ self.assertTrue(c.visited(1, 1, False))
+
+ c.mark_visited(0, 0, False)
+ self.assertTrue(c.visited(0, 0, True))
+ self.assertTrue(c.visited(0, 0, False))
From 9717692bf903f97a7983b213d785b62992a21a7f Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Mon, 8 Aug 2016 13:23:50 +0200
Subject: [PATCH 025/128] Updated unit test to reach higher coverage
---
orangecontrib/educational/widgets/owgradientdescent.py | 9 ---------
.../educational/widgets/tests/test_owgradientdescent.py | 4 ++--
2 files changed, 2 insertions(+), 11 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index de0275c1..6710a68f 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -487,15 +487,6 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
return self.plot_contour(xv, yv, self.cost_grid)
- def plot_gradient(self, x, y, grid):
- """
- Function constructs background gradient
- """
- return [dict(data=[[x[j, k], y[j, k], grid[j, k]] for j in range(len(x))
- for k in range(y.shape[1])],
- grid_width=self.grid_size,
- type="contour")]
-
def plot_contour(self, xv, yv, cost_grid):
"""
Function constructs contour lines
diff --git a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
index 23863c31..cff31e9d 100644
--- a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
+++ b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
@@ -196,7 +196,7 @@ def test_change_stochastic(self):
self.assertIsNone(w.learner)
w.stochastic_checkbox.click()
- def change_step(self):
+ def test_change_step(self):
"""
Function check if change step works correctly
"""
@@ -234,7 +234,7 @@ def test_change_theta(self):
# change alpha
w.change_theta(1, 1)
assert_array_equal(w.learner.theta, [1, 1])
- w.change_theta(1, 2)
+ w.scatter.chart_clicked(1, 2)
assert_array_equal(w.learner.theta, [1, 2])
# just check if nothing happens when no learner
From 0becef7c94c6d11df495fc35dfd2a9b0933f9da3 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Mon, 8 Aug 2016 13:33:37 +0200
Subject: [PATCH 026/128] Updated unit test to reach higher coverage
---
.../educational/widgets/tests/test_owgradientdescent.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
index cff31e9d..bf393400 100644
--- a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
+++ b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
@@ -217,7 +217,7 @@ def test_change_step(self):
# just check if nothing happens when no learner
self.send_signal("Data", None)
self.assertIsNone(w.learner)
- w.step_size_spin.setValue(40)
+ w.step_size_spin.setValue(30)
def test_change_theta(self):
"""
From 815d9b3608a1b69ff8790deeab13bb912d0bc2c0 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Tue, 9 Aug 2016 10:27:50 +0200
Subject: [PATCH 027/128] Added gradient for function values.
---
.../educational/widgets/owgradientdescent.py | 35 ++++++++++++++++---
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 6710a68f..db72f889 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -257,13 +257,16 @@ def __init__(self):
yAxis_gridLineWidth=0,
title_text='',
tooltip_shared=False,
- debug=True)
+ debug=True,
+ legend_symbolWidth=0,
+ legend_symbolHeight=0)
# TODO: set false when end of development
gui.rubber(self.controlArea)
# Just render an empty chart so it shows a nice 'No data to display'
self.scatter.chart()
self.mainArea.layout().addWidget(self.scatter)
+ # to remove the legend
def set_data(self, data):
"""
@@ -378,8 +381,10 @@ def change_theta(self, x, y):
self.scatter.remove_series("path")
self.scatter.add_series([
dict(id="path", data=[[x, y]], showInLegend=False,
- type="scatter", lineWidth=1,
- marker=dict(enabled=True, radius=2))],)
+ type="scatter", lineWidth=1, enableMouseTracking=False,
+ color="#ff0000",
+ marker=dict(
+ enabled=True, radius=2))],)
self.send_output()
def step(self):
@@ -432,6 +437,9 @@ def replot(self):
options['series'] += self.plot_gradient_and_contour(
self.min_x, self.max_x, self.min_y, self.max_y)
+ min_value = np.min(self.cost_grid)
+ max_value = np.max(self.cost_grid)
+
# highcharts parameters
kwargs = dict(
xAxis_title_text="theta 0",
@@ -444,13 +452,20 @@ def replot(self):
xAxis_endOnTick=False,
yAxis_startOnTick=False,
yAxis_endOnTick=False,
- tooltip_enabled=False,
+ # tooltip_enabled=False,
+ colorAxis=dict(
+ minColor="#ffffff", maxColor="#00BFFF",
+ endOnTick=False, startOnTick=False),
+ plotOptions_contour_colsize=(self.max_y - self.min_y) / 1000,
+ plotOptions_contour_rowsize=(self.max_x - self.min_x) / 1000,
tooltip_headerFormat="",
tooltip_pointFormat="%s: {point.x:.2f}
"
"%s: {point.y:.2f}" %
(self.attr_x, self.attr_y))
self.scatter.chart(options, **kwargs)
+ # to remove the colorAxis legend
+ self.scatter.evalJS("chart.colorAxis[0].axisParent.destroy();")
def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
"""
@@ -485,7 +500,17 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# results
self.cost_grid = cost_values.reshape(xv.shape)
- return self.plot_contour(xv, yv, self.cost_grid)
+ return self.plot_gradient(xv, yv, self.cost_grid) + \
+ self.plot_contour(xv, yv, self.cost_grid)
+
+ def plot_gradient(self, x, y, grid):
+ """
+ Function constructs background gradient
+ """
+ return [dict(data=[[x[j, k], y[j, k], grid[j, k]] for j in range(len(x))
+ for k in range(y.shape[1])],
+ grid_width=self.grid_size,
+ type="contour")]
def plot_contour(self, xv, yv, cost_grid):
"""
From 01a6acf2c350a4d2d53d85f1a0245b4139a1966b Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 10:38:27 +0200
Subject: [PATCH 028/128] Modified control areas. Additional separators and
 removed spaces between label and spin.
---
orangecontrib/educational/widgets/owgradientdescent.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index db72f889..5c173564 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -2,7 +2,6 @@
import time
import numpy as np
-from scipy.ndimage import gaussian_filter
from PyQt4.QtCore import pyqtSlot, Qt, QThread, SIGNAL
from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
@@ -215,6 +214,8 @@ def __init__(self):
self.cbx.setModel(self.x_var_model)
self.cby.setModel(self.y_var_model)
+ gui.separator(self.controlArea, 20, 20)
+
# properties box
self.properties_box = gui.widgetBox(self.controlArea, "Properties")
self.alpha_spin = gui.spin(
@@ -233,6 +234,11 @@ def __init__(self):
widget=self.properties_box, master=self,
callback=self.restart, label="Restart")
+ self.alpha_spin.setSizePolicy(policy)
+ self.step_size_spin.setSizePolicy(policy)
+
+ gui.separator(self.controlArea, 20, 20)
+
# step box
self.step_box = gui.widgetBox(self.controlArea, "Manually step through")
self.step_button = gui.button(
@@ -241,6 +247,8 @@ def __init__(self):
widget=self.step_box, master=self, callback=self.step_back,
label="Step back")
+ gui.separator(self.controlArea, 20, 20)
+
# run box
self.run_box = gui.widgetBox(self.controlArea, "Run")
self.auto_play_button = gui.button(
From f2aefd5ba3d8e6083c2ad6c38de0c5b579b39a02 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 11:01:48 +0200
Subject: [PATCH 029/128] Added special mark for the last point
---
orangecontrib/educational/widgets/owgradientdescent.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 5c173564..1eaaf0b6 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -387,12 +387,16 @@ def change_theta(self, x, y):
if self.learner is not None:
self.learner.set_theta([x, y])
self.scatter.remove_series("path")
+ self.scatter.remove_series("last_point")
self.scatter.add_series([
+ dict(id="last_point", data=[[x, y]], showInLegend=False,
+ type="scatter", enableMouseTracking=False,
+ color="#ffcc00", marker=dict(radius=4)),
dict(id="path", data=[[x, y]], showInLegend=False,
type="scatter", lineWidth=1, enableMouseTracking=False,
color="#ff0000",
marker=dict(
- enabled=True, radius=2))],)
+ enabled=True, radius=2))])
self.send_output()
def step(self):
@@ -425,6 +429,8 @@ def plot_point(self, x, y):
Function add point to the path
"""
self.scatter.add_point_to_series("path", x, y)
+ self.scatter.remove_last_point("last_point")
+ self.scatter.add_point_to_series("last_point", x, y)
def replot(self):
"""
From b6da19f0b5b5a588b3185cc1f656ac7efbc9b74f Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 11:09:39 +0200
Subject: [PATCH 030/128] SIGNAL changed with pyqtSignal
---
.../educational/widgets/owgradientdescent.py | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 1eaaf0b6..c65df7cb 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -2,7 +2,7 @@
import time
import numpy as np
-from PyQt4.QtCore import pyqtSlot, Qt, QThread, SIGNAL
+from PyQt4.QtCore import pyqtSlot, Qt, QThread, pyqtSignal
from PyQt4.QtGui import QSizePolicy, QPixmap, QColor, QIcon
from Orange.widgets.utils import itemmodels
@@ -128,9 +128,9 @@ def run(self):
"""
while (not self.ow_gradient_descent.learner.converged and
self.ow_gradient_descent.auto_play_enabled):
- self.emit(SIGNAL('step()'))
+ self.ow_gradient_descent.step_trigger.emit()
time.sleep(2 - self.ow_gradient_descent.auto_play_speed)
- self.emit(SIGNAL('stop_auto_play()'))
+ self.ow_gradient_descent.stop_auto_play_trigger.emit()
class OWGradientDescent(OWWidget):
@@ -181,6 +181,10 @@ class OWGradientDescent(OWWidget):
auto_play_button_text = ["Run", "Stop"]
auto_play_thread = None
+ # signals
+ step_trigger = pyqtSignal()
+ stop_auto_play_trigger = pyqtSignal()
+
class Warning(OWWidget.Warning):
"""
Class used fro widget warnings.
@@ -595,10 +599,8 @@ def auto_play(self):
if self.auto_play_enabled:
self.disable_controls(self.auto_play_enabled)
self.auto_play_thread = Autoplay(self)
- self.connect(self.auto_play_thread, SIGNAL("step()"), self.step)
- self.connect(
- self.auto_play_thread, SIGNAL("stop_auto_play()"),
- self.stop_auto_play)
+ self.step_trigger.connect(self.step)
+ self.stop_auto_play_trigger.connect(self.stop_auto_play)
self.auto_play_thread.start()
else:
self.stop_auto_play()
From ce086b55a01d08acc7637f14a55a614445b8cff3 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 11:32:44 +0200
Subject: [PATCH 031/128] Modified icon for gradient descent.
---
.../widgets/icons/GradientDescent.svg | 87 ++++++++-----------
1 file changed, 37 insertions(+), 50 deletions(-)
diff --git a/orangecontrib/educational/widgets/icons/GradientDescent.svg b/orangecontrib/educational/widgets/icons/GradientDescent.svg
index 83948785..cc773e74 100644
--- a/orangecontrib/educational/widgets/icons/GradientDescent.svg
+++ b/orangecontrib/educational/widgets/icons/GradientDescent.svg
@@ -26,8 +26,8 @@
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="15.839192"
- inkscape:cx="29.162429"
- inkscape:cy="26.524336"
+ inkscape:cx="18.492693"
+ inkscape:cy="31.575099"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
@@ -45,7 +45,7 @@
image/svg+xml
-
+
@@ -55,80 +55,67 @@
id="layer1"
transform="translate(0,-1004.3622)">
-
+ cx="24"
+ cy="1028.3622"
+ rx="13.710499"
+ ry="12.214576" />
-
+ cx="24"
+ cy="1028.3622"
+ rx="7.7435975"
+ ry="6.3237586" />
From 6a9f577fdd8bfc5c79d3ea0147f00cfd2892ca6f Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 11:50:36 +0200
Subject: [PATCH 032/128] Code clean
---
.../educational/widgets/owgradientdescent.py | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index c65df7cb..90772162 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -278,7 +278,6 @@ def __init__(self):
# Just render an empty chart so it shows a nice 'No data to display'
self.scatter.chart()
self.mainArea.layout().addWidget(self.scatter)
- # to remove the legend
def set_data(self, data):
"""
@@ -368,7 +367,7 @@ def restart(self):
def change_alpha(self):
"""
- Function changes alpha parameter of the alogrithm
+ Function changes alpha parameter of the algorithm
"""
if self.learner is not None:
self.learner.set_alpha(self.alpha)
@@ -455,9 +454,6 @@ def replot(self):
options['series'] += self.plot_gradient_and_contour(
self.min_x, self.max_x, self.min_y, self.max_y)
- min_value = np.min(self.cost_grid)
- max_value = np.max(self.cost_grid)
-
# highcharts parameters
kwargs = dict(
xAxis_title_text="theta 0",
@@ -470,7 +466,6 @@ def replot(self):
xAxis_endOnTick=False,
yAxis_startOnTick=False,
yAxis_endOnTick=False,
- # tooltip_enabled=False,
colorAxis=dict(
minColor="#ffffff", maxColor="#00BFFF",
endOnTick=False, startOnTick=False),
@@ -518,8 +513,8 @@ def plot_gradient_and_contour(self, x_from, x_to, y_from, y_to):
# results
self.cost_grid = cost_values.reshape(xv.shape)
- return self.plot_gradient(xv, yv, self.cost_grid) + \
- self.plot_contour(xv, yv, self.cost_grid)
+ return (self.plot_gradient(xv, yv, self.cost_grid) +
+ self.plot_contour(xv, yv, self.cost_grid))
def plot_gradient(self, x, y, grid):
"""
@@ -534,9 +529,7 @@ def plot_contour(self, xv, yv, cost_grid):
"""
Function constructs contour lines
"""
-
- contour = Contour(
- xv, yv, cost_grid)
+ contour = Contour(xv, yv, cost_grid)
contour_lines = contour.contours(
np.linspace(np.min(cost_grid), np.max(cost_grid), 20))
@@ -662,4 +655,4 @@ def send_data(self):
if self.selected_data is not None:
self.send("Data", self.selected_data)
else:
- self.send("Data", None)
\ No newline at end of file
+ self.send("Data", None)
From 51972c3a2f0fc661da7b0a3cb16fb7ce15a6b521 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 14:23:14 +0200
Subject: [PATCH 033/128] Max number of steps limited to 500
---
orangecontrib/educational/widgets/owgradientdescent.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 90772162..ca532088 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -127,7 +127,8 @@ def run(self):
Stepping through the algorithm until converge or user interrupts
"""
while (not self.ow_gradient_descent.learner.converged and
- self.ow_gradient_descent.auto_play_enabled):
+ self.ow_gradient_descent.auto_play_enabled and
+ self.ow_gradient_descent.learner.step_no <= 500):
self.ow_gradient_descent.step_trigger.emit()
time.sleep(2 - self.ow_gradient_descent.auto_play_speed)
self.ow_gradient_descent.stop_auto_play_trigger.emit()
@@ -408,6 +409,8 @@ def step(self):
"""
if self.data is None:
return
+ if self.learner.step_no > 500: # limit step no to avoid freezes
+ return
if self.learner.theta is None:
self.change_theta(np.random.uniform(self.min_x, self.max_x),
np.random.uniform(self.min_y, self.max_y))
From 75478504b165e90faa10b80edda578024793bce6 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 14:34:20 +0200
Subject: [PATCH 034/128] No others when only two classes.
---
orangecontrib/educational/widgets/owgradientdescent.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index ca532088..5a71c05d 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -574,9 +574,12 @@ def select_data(self):
subset = self.data[:, attr]
cols.append(subset.X)
x = np.column_stack(cols)
+ if len(self.data.domain.class_var.values) == 2:
+ return self.data
+
domain = Domain(
[attr_x, attr_y],
- [DiscreteVariable(name=self.data.domain.class_var.name,
+ [DiscreteVariable(name=self.data.domain.class_var.name + "-bin",
values=[self.target_class, 'Others'])],
[self.data.domain.class_var])
y = [(0 if d.get_class().value == self.target_class else 1)
From c6400e922df7510166bd371595b027d0e66aee32 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 14:44:56 +0200
Subject: [PATCH 035/128] Keep theta on restart and set theta on setup.
---
orangecontrib/educational/widgets/owgradientdescent.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 5a71c05d..9b0375c6 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -359,11 +359,18 @@ def restart(self):
Function restarts the algorithm
"""
self.selected_data = self.select_data()
+ theta = self.learner.history[0][0] if self.learner is not None else None
self.learner = self.default_learner(
data=self.selected_data,
alpha=self.alpha, stochastic=self.stochastic,
+ theta=theta,
step_size=self.step_size)
self.replot()
+ if theta is None: # no previous theta exist
+ self.change_theta(np.random.uniform(self.min_x, self.max_x),
+ np.random.uniform(self.min_y, self.max_y))
+ else: # theta already exist
+ self.change_theta(theta[0], theta[1])
self.send_output()
def change_alpha(self):
From f7da2643df778fa986388cd06890d49e8f306026 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 14:57:49 +0200
Subject: [PATCH 036/128] Test for owgradientdescent fixed.
---
.../widgets/tests/test_owgradientdescent.py | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
index bf393400..dc9b35bd 100644
--- a/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
+++ b/orangecontrib/educational/widgets/tests/test_owgradientdescent.py
@@ -228,10 +228,10 @@ def test_change_theta(self):
# to define learner
self.send_signal("Data", self.iris)
- # check init alpha
- self.assertIsNone(w.learner.theta)
+ # check init theta
+ self.assertIsNotNone(w.learner.theta)
- # change alpha
+ # change theta
w.change_theta(1, 1)
assert_array_equal(w.learner.theta, [1, 1])
w.scatter.chart_clicked(1, 2)
@@ -253,8 +253,7 @@ def test_step(self):
self.send_signal("Data", self.iris)
- # test theta set when none
- self.assertIsNone(w.learner.theta)
+ # test theta set after step if not set yet
w.step()
self.assertIsNotNone(w.learner.theta)
@@ -471,9 +470,9 @@ def test_send_model(self):
# when no learner
self.assertIsNone(self.get_output("Classifier"))
- # when learner but no theta
+ # when learner theta set automatically
self.send_signal("Data", self.iris)
- self.assertIsNone(self.get_output("Classifier"))
+ self.assertIsNotNone(self.get_output("Classifier"))
# when everything fine
w.change_theta(1., 1.)
@@ -491,7 +490,7 @@ def test_send_coefficients(self):
# when learner but no theta
self.send_signal("Data", self.iris)
- self.assertIsNone(self.get_output("Coefficients"))
+ self.assertIsNotNone(self.get_output("Coefficients"))
# when everything fine
w.change_theta(1., 1.)
From 58ca46da19afab22bcc2ef9d94f7fd230464658e Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 16:12:31 +0200
Subject: [PATCH 037/128] Updated description of the widget.
---
orangecontrib/educational/widgets/owgradientdescent.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/orangecontrib/educational/widgets/owgradientdescent.py b/orangecontrib/educational/widgets/owgradientdescent.py
index 9b0375c6..8aaa7afd 100644
--- a/orangecontrib/educational/widgets/owgradientdescent.py
+++ b/orangecontrib/educational/widgets/owgradientdescent.py
@@ -140,7 +140,8 @@ class OWGradientDescent(OWWidget):
"""
name = "Gradient Descent"
- description = "Widget shows the procedure of gradient descent."
+ description = "Widget shows the procedure of gradient descent " \
+ "on logistic regression."
icon = "icons/GradientDescent.svg"
want_main_area = True
From e88834f70d109ab6e3448dbda9da669dcd7f68b9 Mon Sep 17 00:00:00 2001
From: PrimozGodec
Date: Thu, 11 Aug 2016 17:16:10 +0200
Subject: [PATCH 038/128] Documentation for gradient descent.
---
doc/index.rst | 1 +
doc/widgets/gradientdescent.rst | 92 ++++++++++++++++++
doc/widgets/images/gradient-descent-flow.png | Bin 0 -> 43736 bytes
doc/widgets/images/gradient-descent.png | Bin 0 -> 153862 bytes
doc/widgets/images/gradient-descent1.png | Bin 0 -> 170624 bytes
doc/widgets/images/gradient-descent2.png | Bin 0 -> 174226 bytes
doc/widgets/images/gradient-descent3.png | Bin 0 -> 173190 bytes
doc/widgets/images/gradient-descent4.png | Bin 0 -> 222606 bytes
.../educational/widgets/owgradientdescent.py | 5 +
9 files changed, 98 insertions(+)
create mode 100644 doc/widgets/gradientdescent.rst
create mode 100644 doc/widgets/images/gradient-descent-flow.png
create mode 100644 doc/widgets/images/gradient-descent.png
create mode 100644 doc/widgets/images/gradient-descent1.png
create mode 100644 doc/widgets/images/gradient-descent2.png
create mode 100644 doc/widgets/images/gradient-descent3.png
create mode 100644 doc/widgets/images/gradient-descent4.png
diff --git a/doc/index.rst b/doc/index.rst
index cefae851..1df4dfc2 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -13,6 +13,7 @@ Widgets
widgets/kmeans
widgets/univariatepolynomialregression
+ widgets/gradientdescent
Indices and tables
==================
diff --git a/doc/widgets/gradientdescent.rst b/doc/widgets/gradientdescent.rst
new file mode 100644
index 00000000..6e469a4e
--- /dev/null
+++ b/doc/widgets/gradientdescent.rst
@@ -0,0 +1,92 @@
+Gradient Descent
+================
+
+.. figure:: icons/gradient_descent.png
+
+Educational widget that shows the gradient descent algorithm on logistic regression.
+
+Signals
+-------
+
+**Inputs**:
+
+- **Data**
+
+Input data set.
+
+**Outputs**:
+
+- **Data**
+
+Data with columns selected in widget.
+
+- **Classifier**
+
+Model produced on the current step of the algorithm.
+
+- **Coefficients**
+
+Logistic regression coefficient on the current step of the algorithm.
+
+Description
+-----------
+
+This widget shows the steps of `gradient descent `__ for logistic regression,
+one step at a time. Gradient descent is demonstrated on two attributes that are selected by the user.
+
+.. figure:: images/gradient-descent.png
+
+1. Select two attributes (**x** and **y**) on which the logistic regression algorithm is performed.
+ Select the **target class**. It is the class that is classified against all other classes.
+
+2. **Learning rate** is the step size in gradient descent.
+
+ With the **stochastic** checkbox you can select whether gradient descent is
+ `stochastic `__ or not.
+ If stochastic is checked, you can set the **step size**, which is the number of steps of stochastic gradient descent
+ performed in one step.
+
+ **Restart**: start the algorithm from the beginning
+
+3. **Step**: perform one step of the algorithm
+
+ **Step back**: make a step back in the algorithm
+
+4. **Run**: automatically perform steps until the algorithm converges
+
+ **Speed**: set the speed of automatic stepping
+
+5. **Save Image** saves the image to the computer in a .svg or .png
+ format.
+
+ **Report** includes widget parameters and visualization in the report.
+
+Example
+-------
+
+In Orange we connected the *File* widget with the *Iris* data set to the *Gradient Descent* widget. We connected the
+outputs of the widget to the *Predictions* widget to see how the data are classified, and to the *Data Table* widget
+where we inspect the coefficients of logistic regression.
+
+.. figure:: images/gradient-descent-flow.png
+
+We opened the *Gradient Descent* widget and set *X* to *sepal width* and *Y* to *sepal length*. The target class is set
+to *Iris-virginica*. We set the *learning rate* to 0.02. With a click in the graph we set the initial coefficients (red dot).
+
+.. figure:: images/gradient-descent1.png
+
+We perform a step of the algorithm by pressing the **Step** button. When we get bored with clicking, we can finish
+stepping by pressing the **Run** button.
+
+.. figure:: images/gradient-descent2.png
+
+If we want to go back in the algorithm, we can do it by pressing the **Step back** button. This will also change the
+model. The current model uses the positions of the last coefficients (red-yellow dot).
+
+.. figure:: images/gradient-descent3.png
+
+In the end we want to see predictions for the input data, so we open the *Predictions* widget. Predictions are listed
+in the left column. We can compare these predictions to the real classes.
+
+.. figure:: images/gradient-descent4.png
+
diff --git a/doc/widgets/images/gradient-descent-flow.png b/doc/widgets/images/gradient-descent-flow.png
new file mode 100644
index 0000000000000000000000000000000000000000..59ec28bd069e23466aff1da707fe99072cd4877f
GIT binary patch
literal 43736
zcmcF~1y@yF+cnZicZa0(p&JCGOX-F~cXvv7XprtM>28n`knZk~PNnnPct7t?_#BKO
za&YXm*VS{*E9{e^6zW@|w@^?}s4~(Ym7$=Z(V(E-@FKzkKjCoj#sU6`j)fc&1qgl2}N{>Y5?)V9dLVfu`44D*~y8Z)J
zk2se0J~wm11eO@$J5Ck(Fc|L9{^p^$S(>sNx7OOGwsCTIewjuaxkla88^(dm%9AyT
zIY0MZ9*#IL0em0@BJh<@adt$C0V|3DyV>Ry(H+Kd@O6;VBGM5^I_g)-47v78r8FRpM$P
z!6PBb#EI(>=lVjodU<`u`HEO<%<8X_PxsHJ4xh9Qw7W}DX`o2dc@qcY*#o-O;a4Xn
zIY||eVd1Mvw=TRK?^N$`;IdcWsw0Cmhe3uLdll$8x!E1UOm?i`G|043zgdG3S31la
z$_1i#_PAu)O9CaU=9v=
z!d26vkcwfx&;@I2`IxIr3RkwM=MDEe$bz7Lr)i!5(jrM2&6qp+v
zmuSc%Ng-&_O6HZl)NfXf#{our@bDqsmaYG1LxjZsIX7Y5XNBreOl9mXO@{>%YOS6L
zR8GC#DoYPv2C}Lc;h1DaEiqL0K&BAr-iZlN5*~ta*)sf>O5+1p%nD*&TXh?CWc3Kb
zcq6e`Q)(^fUPI?AycIH5C|ZeJLuEYLc=qV^u~Tdgskea95Tg8Il3)D(>b>XuDJ69g
zbXOM}5)7YxIB;^Y>Eyld;BDc$^qHzR)`XByvR!()#lLhAK<&;%C6+LE9o-KgL+Joko>O4r`GCgmVHtZRN&F0v9#;8u+(
zNW_>TPfKfB0N=_mV%BrrODTi-8fL*kVwP7)pB!_kWk}rj-z}6UeXy8tQGGGAu;4%u
zoQ&-i>zUgOz_hEIJ;~L>)j@y{+sp;$1a^O;zR#zcX!GhmL0hqek;e+B-mcb46=ZY1
z>HH@2@n48;e%ZUa(`?D*y>tfqafj7R?zaY19^7(?kX6YVbf$Bow8w5zLB5{o86J0o
zkM0ptH{bPO<0|Ta1f{!W@~GvdWply}4eP(+WqcX3SrS28d>GLG7c6#~D!S40#n2eI
zn37;WH^a-|-0gXouixxqlF{^fiES(ih=|D8~2&)wq)Z%p(#U`eGl;8P5o<|=}1qWOROxo~CQFATr<=@Aik^uUwD?|LR
zfK9I^Pp&qj!wK+kAb8w%Rn3kC4~ZM5^>A@Ioi7u<`!V4;%SV0NSGo6GE_&z{F_UN4
zTl1Vyhv~}`At;M08vkV^Gz((cGBJdi`kx-kTgD;s=Qt?~gTF7PNKX;n5Nww7KuMF>
z#vPG3pCEeFVU-SaPh_mZ+2IDr5CjU!5yHc~Pfvbn%K|qOO}t+J9;M{iv|1A#rU^eT17>rlp0~*f``XB)|b`8S*vYfx7FP
zsTUJ1i9T!6219xPO^a#KZ~21!;^beTJz;e8|J@nvD(L)y>oG06Y7P$-85L?4RY86{
z8zVL6hC|{o04WT$Ep-@^>MC^#7Eqtm67%vXx3;!YgnjvukdStEca?Q@D5^V
zac^0zcXOFun^n-!;5pF~%7x34q?Ey;mFPB6KEI!_h0vw}wTvFMemoq(~thGHRAzP!K#fB%3igsoC2bVrze7OKj6-
zTQwR^M4|^)#z7&3XBSt|K@jp~iQ6_tT9=ys9I*kEeus&P&qDmQ-f-y5ESviSYkM{4
zZh{w|A0#XQC+2LdpPpD!G?pV4s(iN1GG5c9+L(O#6
znCH1Tm@fgf`08*L2M;eUDM?aM
z5iK(_laq&MTKTi6BzUvhHoE>PS0@*R1d8BF~bjl?_xJ6nv;*X<-Qi)NY`NUXRw#
z{AXZTl+KW`v#gDzc{=BDdlG4F)>(reqh*wn&MlT(LLnJdx=a4>TU)fr4X3n$U`;Wn5f1;o#taV*?)ebACOhZ*zf`+Q!}=p3Hri9cW7C
zQ6mjY3OP+;I!gHupNQ@MJMaFC)wA1`!lP1ZPm6n|v{7U~bEoyrD5^dsvf@@xewpX)
z7~S2vjFOYpW0nG1T9L~7l&pd{M?=(TZX&}Q9Eo@IERcj-O=YYtzyb@u{T@z*HTKNb_sfVdP2j)^UBI1eq6g5
zL105-wFxDc@T-{eA%`T;<2S~;SyYatcg;v;z1(TarmxthWfxK0CPAw0}oYDTWG%by-wKtP^+
zn4cF}7-d_K1;d(0a-LiBr#z-38}SX$Dqt*sU725)po)}7>n#Z+)O
z)lk3Y1{fj64+gU?EN$FazsLJ8kxhPTE2%pA7%fM|xAO6La^87%S>t@myJ19#6`O-S
zS~_f~7_W}WOISn1iQc)VpNF^J-Bm?hMX4|89?Xnf^BL2mi2Ah>xqQ5BwsY>9pp*?6
z<=?@C5l7F6J))g<(;;&ExEy8oWoSqiI1?y+wCzg2buoDNDvy|79RDyn95KJJ1G}s*
zu&>=7ZS&tP9alyAg}*$%H;`}3l0)u=MiA|lTOEjT8rXW7vTNG3YI68F8uO>^YY9{P
zfnbvE4TzlV0aQ#&zd)PW{UYSu-+UT3aFkzO6kVIv3g$xoya@)vne833XO-%McTk;#
zm|-*wBAI>=Gg@n<@vkWsxR6!C>wTw|{nJxPRCstxd>u^`t+$Qb7>A`_KG;g>x=`cQ
zd1j-I<8%XU4cMP%QF`dIdgsSLD6de&%=Va>S+DWRxd|)2&;>W0x%NJ*Un;TPBQxus
zaBtzFxx-(2^7Es&YBmFDWOEf?==Ou6jKVB%!p|Bcj_Af$rDWKUSA2@GksRL1&+bJq#r)
zk0I$pgU}a~ag*zr)#cLRlics^;iGB|w}E=l>JeYt%U%A^nH|@Kg+Ss$VDUP+H6!^h
z%U3}tk%ncxhKXI*_UMQaQ4(zF?k&P#3eFU!B-_B=u4^~1Wl*3VW^bF0m)_G1Pt&H$
zg0zzCLgCZ0-hWOsAYY@B>aWUzl$h-
zVpA^Q-$r`4e+m}2q)F5vnh1&~?;H5Y=4nKWJ+1sH;i=fQo4)|$K(PXwb}O1x&@~j-
z+Fd!~!v)fFk(21NJ23{T7Rx-ZbZY?9aJNJye2{{%wZIb@I`9Q##0A^7hnM?>6Om51
z*Le-!i;mI2BNF{k_42f#{jnI~uAa+d&c@^W-Nu{B`wK^sh7F%npE9f%FnCMJ&P?=a
zTCnk5@~|Br_K`r7!*$HqUge8~(}nTPpX~%a5%h9<%k5rVM$fycw_g+@~IGutK`u$Dc4
zxQ&IN<6WWBzfJsIQO0qN_HT%-ZW;trr_4Lvh?6`15`yDpR@QTgVe;wUm>L(H_dbO)DmN3g|2<1?C7Kqk?-HlgTzU7N
zTa4v;xgwqAbP+1?6)4RXz0iV)RzflfCfm|*VCEh8mkCxc8_);;pc^sCbmc%6X^$HC
z;h80}=E30KdCx%BX@@!BSDzNjf&aI6Q06^#?Dw|s_g*yDTFx?QouTG0had8085s?Vwk$EPlmVqFeFX{ItyQTNT>YatrFub=b}X~4_axF}e_LIl*Q5?k
zhZOpu&{Est2}LXEMhjeu_}a)m1ahT2+Y81GpZY;&>+Cjn{!X>*bkC<=^-A;4YfQVl
zS>@bLYIKW`dnK(5m^qro1sbh3n8B}tzOPJmwZ=KMdco8Y^4q8v?I}>&i}hE(i$Vxo
zg6vM%_I?4H#>uk_<&Y|LXgQl6O
zC4FLH;jdbGF(-EL=H~5_-;%cW{?P&Wd-R-oD?fi|co?BG{?|9s`sC_bukUs<{Gm!p
zQJuf^aNWF4GVq^yyki5bPbmGaJP`0juI&{E>n9BUyyaQHgnH4vMcG?-VUerU5#3}q
zuelzSf^2H|lj0Y$ekDk{Y*ZqlQ_Q~3DAT9ffeL5C*4
zk>JnW$$O3J=lLSQve``K1a$B&l8S&71WH@7z%)ypZV+2VHU
zOVxMi*Ki8+mPh+}H5EZcb8btmtrz!_S*+bwY}mWi+x5#x&I^tUwkZm<^5OEHwn9-A
zC%swZDdCHOMb__YBNq!WPUkGt)^)4=x6H}5v
zhGr(sv{*V%f-UQlw=scbd+l8%Ft7;~o2P3On7cov(#!b#xjLLMM_ozgwIM^O8w~EX
zDJotMBHCTBb08qXgXm}bO-j^ra{n1M&;A;YCO(L(CUxB9nkG8kLA@;tZzqRqOD|Eg`cZSp1
z)&IT3jW~b0RIC&c4!Fi;J>MWkt86<`joYr1agTobGO@b1wH!sGI?waQkUKRcNg%dl
z5nBxT!-+!aVR2<);!;0eBWL5r)g45Lj1*JR$%jeif{ps;$IX?fhIeyG33aU&96-Bx
z3)Bi@0{FDbZ51_LJJ!rfza`4vZzhDPAKrg!Y$a=EwsqBk)+QplJ&j;}zg`~(S<>cT
zS~_oDC4dBs+R^cICnZFpI29|JUdKvdB`0t)q3r9dlF`fW!-}cvYOqc!A}F@uMl42B
zGD>5W)3YG^?zSTl@}Bh0cP*rUjCgfT-f3Tbl)_B+Q?KyrY{B8q7&Wbnl_$Z5$`q$N
zjmOaLLdgOw1QHaiQNd)AG$c}8={5#H8*Rb3P>KiIg^{#HyD{C@hHk`H8
z`M=((*AW&k$}3eN8<^Zy<3VCu-y^P(5s_@EDDz$%*SWV&?MjORvx`x|1Cx+1?+9&q
zcNc|RKxKIDQSkfPb|te2lK;@W$iy|N*?IE6cV;|;Az42i;U)v>Kze=C_k=gA*EDo1
zbzgW`{R1qM@oRS*?8Aglk{`EVzs49?Z|ut{QF!twEHR0y=TK7*&xP*N9%}`CC;i
zT|3HN5?)*G5i^zV4Pav}wf&d9gkavdEhD!Ffo0OXzSsVD#>Fdq{dd?ywn-{o_M+wL
z39bHdNhh=`gc%&u~+Mh?RNus-g-J*OpioyY%_X8XbgUhbePOH
z+7ZD2Zn*Mks|kMI;2fq$avaG_QnFFq;q@CMJrmb+X2fl{n&IRO!Apsw4y!Nlpe>*U
zTG3EnRz+$qE_dj3hSEuwOh^*JAD6xFG5n5~{HgEU4Ke3^&O)QAWiYi;!4FTP+
z!11^t!qXT`wpp7TzQ?*5ZZ5JL*kT^AlcRL>p
zzu1cRc9|Ly>_ywKavqmobx$se2hWC&S|K%Z{_1WahSnJwlP)YPjsdW2UttFLf7O_P
zv@dXG=?nXwI*UPyR5OYpEd$NlmQLarwB34g$KkuPi!%Hq$4wYIOpyaYG9RWe4gI@s
zJlxcvqh$Zh$UA!D``+cFL-Sb(77$r)s&$x0(&ChWnm|ULR7?!7L!`5yt0t!hxTJ|_
z^R%d<8BsjD@=ZDyv)PWhin>k+FpIU#_oXSAj%1}F#6{9;9_1{f7`dQ!l;JC>-l825
zHU8+@*Cv6lbax{}M4hW7%fp3h8x5Z|>ZfMxewQ3{M!Z{nHp}uo6Z0R`mtVlu(Tj@T
zA-&YW>O3rueU&XTKz^ntd(4{sTZxVa42|NBsif;9VB6^tAP^n8^qY%Db#{Dd^tTOD
znb^FmiI$nw80_1>wKuoYNHOyRyQSLPYBLjO7f=E3qEky`fToUuPuMMcoCXwEo5nBv
z;zo+G&dNlS|#-mqQGnhDTt!d2#OmU`+?`S>MmWn}+5MsK%&h;`T8!df=nOcZx}l
zOD7Aqcq6pVhEsGhJ=<5C7k8m=(U;GDSpS>QBozvGXUkfSuXv+J-MDAM?&aA-v!SLW@I$8i4xXVi7TQ1buGxV<^XbSlv
z43rbhU1K^#{Znde4jmn7@KWaJY_z7*79colVrpGGbW<)@0OB#S;Vkhk5wNQN&xw$g
z=*RWF=&K~xf6Ad}OWsSb-g}{&u5-nKIAVUWjkL`yP4;OVgQ+-8zCzpoxVB(6HiruDga|v9-|hHh+avX>XWS0n?d40FVpygo4g|NJ1sh+$h6e{!+B^B-^vkH8HC+EZqF?rGx2%nI1ATS=yy-!S+7BO@&}B6Tn3I7j
z_uWq13BlPhYB*l{vna4V&na8hRGnKN_v5*1AF*vzw!47o51mQm(T@CsGSGcx
zS7;O8dtN690x<~G_f~Mukyp-<*wwb0TDv}Oa_N+<@~6ye@Ed5iH3c@qbbbWAfnz7^
zgL#LvQOydo^4+e4DVSk1^zttU7xxRQpGg<8sAJ<(8x3$J&=xw4B7wRsTs+-khBUmY
z!sm7Rky7fOkzUGae|DZ@yQfd^YShOro1AZ#@)wY!e`xD`)UYSV;APAbF@8H4>%A+#
zUR-KZ`W=7)-p25N;9Q!gyDwK{0F36OY1sO#^f*$rU6|aUY8e*etx+5`TsVa(Arg{S
z92@;xym-QhQllF$N7w5OWs`PpMoEXH{+fE;4-S?#O(6~n19oi2?)=p$qbUk+!}DMx
zC}5nqQ0f^L%khjTdk;?BoTR;<%_6uDdmDv_BTPl6;5iCk?7pMz#X?P-@1J`qgj`Imkx4ku5C+T8{
zJuNRMU4cMc{W~WLj112=HIeWl@GR#sBtT6al&%20gPF>Pb}S5|K0HJR4ILE$>)%KY
zii~7_mnxl~Zr6~^%-FQiVKRp}&Vea#g(0ca`2B#Nc(_zkM;Br+{mnGhvIU!dWVqji
z=p~U+>MaVuWsUqArxl_HuQWAv%F2WiUp#lg${7*_z%(<-y_Yqlw~MfooWC+;4#q>l
z@p~*KA3Z{yQAEjbmQ&tnsC^}b7i|DSSfyI+9R?0G_}v7J;|4tB%|FBi1td7*`;3K-
z?)3Bc|Fi%iPq>@K`s4XGwmW0R#tz43jDEgy#0C|M((c*FUjQ~~7gzBe`v@PJfKZ0g
zm5QoU;vp97b{O30HvRG~|}wZzXFd*oTmf
zpyVoseZn9>K?-t34hj$r3;<^%I9U8g6+pq*Zzn%K#TMU=Jc?K9TrxRtje10Me)(Rl
z_bh(k`Ia3wT{$%stopC%%0-*$jFL|lkGoYfDMMfi<
zU(X1#$6*h+>a3fKqsR7h^8^-vH3F6qv@{81U%5~;FUY$1p1YF6@KJor%{a2m0t^-U
zsy-{@Jidy)4*Hi~JL9#>Kp}SRyvCYTHDbB_LnM^nDDM2c1r*6L2lNP6Z>$ZL7dQVr
z{8^27y&g0>D=z=Gd)nt>`OTF!MUtBdLjri3#0DQ7)j%aw`7KT^b7&5_JwxuOv+RYr
zXcl!lrreSSQbhR`P7aZfcBF%cKBr-Xwlg`<0^w`syjkep*_#eZe2fGsZyE900GE(m*)lLYei)TrQf>{^Wy8)!Wsjv&
zOLrWbn-N$|_3FtE7?b!^H{CQPN7z1ANY(hV;*ibkBwd|SJw#D;3
ztXv8D@1eH;=$7|_q$a7E$~PYgD`q8gkpWJo;Nx)SR(3WaYB@A&DA5IN6<@#zZ-|
z>K!&PZFK@ENWQqJWtTYUG$dyb0|=GCC0tR{WdTs87+CHxeo
z(l~$LTM?j3m~ag$pP?QT+_c!mSH#Q}T_}Dn=$la6xjP5h&TB#j(~Cdi_Q-wWnudj-
zZi&=CpJvD|{1I{Nt6J}N88a4i&F%I+Q#Ufn>K-k2IbYct-zOM;qaS9ZEKAC|`o!t}
zd;ng%HDb2wjEdB9<~#d@AYsom@EFm?TXW4Scx6g#=Xx_G?&^#Ea<|X%L(3piNlhj+
zYB#|cZTr@eUyEAI0D(Q1sAlDP9miT3G
zcoez$rp3F(Ltn6xOYYXinU$a0iM2Um{l$f7)A)9H?}j*`^z*Ikb02;K{3bUtvY(p(
z>!TUb+3Ev-QTF;EZisn2VOh;1kGaF5!SKat&w$m@@ovZXkFDt{jp^+}x)JbRvpD+Z
zj4>J&8@Ct!+GmVwl+WA1!QP0T=?|~t`->f_b()-xqwN8s-B!b?ixIjA(OGG6vxpab
zOqS#O`4=Z)4Z*)}jCT(wbiGXonf7l{x5~X2Lpr
zt`rzUNL)GLYPe^Hl692ez@{853d=@$Ux(>=>Pn>I9IVY+BL7=vuFPlKcDQ=-_Py3U>vpH(bZ?JHyAO@
zxX?q5o^k|N(-<5GkRVi2+p>gV&!Ai-xRVnvp0EY;m9|35$iuH;7
zHz-XZ%MCjB6bu==VU-lUQ-F8u$tT5O^3>0i}NHnRB$qBhr|H#^UV)
z&B~-JDeH#|C?xC+Klkcu5naF>~T9
zn=^cl^2eGztE}~o_Z+Ow57d`ACXYH#xUerqB!=5@eK
z{|q|q<%I()eL8LbJ0@^*r3gqeY_g*duXEBz-pj&Tj@fHn>WUQWdkRa*$~C{BatvgF
zzLwJysy_b)&+TQ7Jemw6BDjbs0Mv67k8pSYjPqS*$mzzEO7!+^yrMDq6fdjoNJ8Bb
zY_ZD4X)~E}(VNfP-xS!XK|aQ+WVc(zdINg7jOSI=9?EPI>C+$?3UkKl=d7>mC<`%V
zoUhL1te$ZVp6>qaYzFd{C38EPtDp?5_Q$dX2T~k2U9Ls7I}K}*`1c@XxQsHY-kTW3
z{K%5aOFrL7M=i*5<)CX48r~Y`r!eByc(NkvkU(9gS4?mWzM9U%KaU^w`!*zG
z6N7c?cy66Q?9^m>NDPrJ%8Z(>a}r^8SS=(S60M$dpa|n$k+q9|Idhd
z`99&HabV)PH5!FqyLaJJs5k*YzFqm@y-4h6ydzme1d`7r^oTuvLsESrr<+mLM;^Wh
zE-R$!>`YLv*-Q5yLr3y}vA6Dnga2~D>Lc*Ly-Vhb?QVDTWJ0;CJIGq3--WGIWprxm
z{JrA4iLE1_p8}O#KbyS4%^N*>_itISjOqy#zq7@U=Kc33J6l_}3S)R#=7I$wKaf^I
z?@P+TuAeO}N{&{_FFzm$Db#}y5ojmX-T_B4k!}hun7&(0XDR_1pD`HqJyiX@M@R=L
zq>I~m343-_IlfF0C_WEblB6b!dU=sFW;W!?Dzek2-+4^d`4md!-_!#mAHan%@!lh1
zMXZ~Yc)lrCjG%cpJaVs*WiF!yOSkHTO;@A^!~v1^((O*!Y{KkJ?g2j%+h-CHf4q1)
zv*-sRyugUWrtChon8zRu179wOyBlToreKv%ys^JlZ(FafW14>-`o}jLQW`uSLgU?#
zjfJUDN!eA-FW;W;mfb1+ks%>l?jfE(-MK?}f3eI6yqDaQ~Y_MqZUyFT_ShAgd1
z4%+AA7~L`-*$(=!-Z4I`r}#F{KlU^vu1B0eicDYE;0*=wg$Yrct1j;!x&L+5gNK_|
z6ZWv@gA)-+ibtad^lwO!in?T^b*qt)!BjUSr(PEsZ2r*z8oge={t-yuP-y9ieABcM
ziHtYAj3s+(z1jX5kw)Jy=gK##ZUno7Ea3!QyoOH6k9RC~^?hJ0k=P=wTro@|$3?#U
z>r?y;ckGZB<9fqHIQ|YEts710EPgtry}q@O|H293Bji*t7uTY(mK&Pnj0$cMF>yOc
z$8knYE~b7Xd?=zn5^w6M`)9dXk6&PhGs-ocQPV&>py=RN?a!HvFq~cU2s}u~$3}h!
z$X76QPZ7(;h-a1Kz`djoo$H@l*B6Sjc1~fRnn>ekACcnsjGhOvoqsm_@uahQwdSA0
zor|O>hVb-%N2iyjHM~MIUxc?3jY}m(5(zxFIl`)_-uRm;ghqffroG4JLxD~v{
zhOToiKqT~vZS1beP6&>a6MB-;4pXZE-(@id{5tue>T-7SYYzOCdm
zC(zLGZO+)d6M6S&Xo;f+JyCQG8ZH
zS1NL}+1QK^Cebw#OoOe^@Z1%a5F0n>PhUsad=f*3Pp>Np@#;vNH-6o_Gb^%}f#DT*
zNb1e0&d+2${z}ww=d-d*636A~_Qzkp3#z=ciEh;G&(?lM+_XdX3PjF(hRjrZ40-&{O12PwDepWT8~&06}V#2zkM*cRqLkJ_wXaF?6{$^OTLZ5g9VSXt*H8)
z6Va&n4fTxVn#=Nc;0|lmx}f@ED{dCknVnVZ7(H{vu)n0$*tP<&)cjHS_gDGn%7LNZ
zCeBnm7PJ!Yy*A6!&3N-uUZT9fk%r*aI5}vLuhF@ra!vSkm=%{JhQb6Z?vB1ESbydeh*(p7
zQs$?>hLN@df;ld&@w-$GR=(&$_bc?}xZ-2qAN3i(^pI=A*m`64*1x>ypc)GdCwP>J
zY$9(H*gagRZk1!WZ%Je{@3;vw{dJ({FG4DI-aYm>R%Qwz%j)TjSlJH-=WIJ)Z8z;Y
z?f;JOSTZKNlFky6dF|$}Nd_ZlJHk6x_Dt@#>SQ=zAQ1}wFQiQ!bG?zq&KCY#v_w<}
zyVt#q>&$F;rHAGUGK3PQYWt7?na?S)*`xItVZG+YoP|z@r08**jE3q(#`2?8rH6ZM
zTkPD4=yv%IPG~C5v^kNELzdunO==}?(=6;qnUOT{d7^CNL-1X);|3xR|{33&NE0^{A
z8vlXs&x$Pbm4)RXE&;^jUSqRszmtgu#o691%n9y%Dv7?9Ldh!kTwJu
zcScgV35!>`x2%~4>ry!&F+5Y;v_-`rcZ6k(Ycl7Lf?+Z;R4-%F>`o4vk;kF$C^xC?
zeEwKL4}hbdqZJ>)idGsr2bBjOXz-9X&c5dliyTLRzDng@a!=-q79gQ$5N1DDMGzv>
zMm8ISbKqIqlV&ndYrgTPa}}b^>lu}13B$BDds?k0Y{_sQ;LzQ%GwGc&_t;PxkGoun
zl3yO^w4x+J4g?85D)?N6L^NObS;a*QCVu63^zoUu_o!j`T4ygD&|gfx7okWh&JpXk
zBZd}Lx}E8LX8Lr2?y&sRRPcPueFm8AF4Lqu@9dSF9z4H(jHzA8Y3Yb}Pxc+$Td(Xr
zxxxaVzd@<-y_6P?fs-$5ON)JNFT^X7E+)Km#k52gL*xLM&SpAhn&D}}Mo*Pp3
zG+f>de#xCWBYTFno>B4t3L@DDhFky015PHYcX_4wQ*c;(5}p&1tH
zWEi(lNIy1^Zs7>f7$o~Hl^($ZjCki8(;Dof^e}V|TP^%?GLq~pTSa&H&SxCf9vAcT
zwH8j;XHUC$KZH)-_`UQx%-Mx@wOuypu3uVbJ@;PxjQW#MRJA?|NYVk`8p^OiGew9d
z2AET~xi10T?VpqW(FApSB)i^`cW|CNN+TcedKuB>ljO(RPq6sOpE!~`W&%h=GF2)&
zV~w|PHnDc)n;
z;7&pv(>>z>!;$AHjZm%X;<2dr?ypdB9OsY<;q00D>h4?aB=x+5fZQ0k_}cd)7L3Jq$b8SK?Ls4nl+OrJwa9U(N{IuV8w*
zQezUjV12wFGO6@AH^{h5{@Z*!WV!M*lCMhQ#O=P~`RBVBK;Z%{)`jh)*-F-jC{Z(6
zO6Ot)TFMbVyX>MLg;7=Wc$2fs2p<6O`Y&=Z;EZf`%YZaG@s+)ggh3|!%WmnrE6y<9
z2?2jycOpQFWk_7-!glRN&3GB(61+S`*L2yn_q^Ykw9f^^k%`3N2ge6+6twJto*Zwb
zqhm;$lqQc3_W(q2h7)Dv-okjS-lvvBdBB!9&>;sK(1!&(QegWoNIY(oEPGs5d=rLr+22by#JP(#1Tt)7ylDhF|Zo<-yq7%=3wgsDf7P)rWff9A|A_-DQ`-{ty&p>%BFT4
z=5Tj7wkRV*!V#`y0Y*k!AtFklo*NfUY#Z~18UQnfJEHH6v+%#=_j)O1e!kXy3KIEOOaM$`=~C$(G(hH)7E|W?fx2ZNb5w2qC6ccuU%>gs!?%1{SJ6>IVdK~!
z?pJn-=;n^@@wR5d0ubXoN8FPuYe*kdJqSX^-cvBM`K>g*Lqwo@#b_3&TpGxExHEPq
z8`y7EMTmzP7B$ts9->DY{Q?+`>lq))ftvH0_3hsPpADGyjUik5D||GFT_=VOixgPx
z`-;g_E8THLDd5hQuj>W?3i@wYew2N3uhopZut00ND$C-CL)=
zb6HUWn9o;K092i8$)8?kt)-((Mx>pJf!zoE&w|KpX{0Zut&-|??~AHLsz<;8MgG?e
z2LqcPqVe{Bc!YkTB@vBU1Mo~ye-P;BM@f*iEE(b8c3Y{5wG{q(IeED;B9ISV?oHTL
z;`&_$0?&OZU-cH=U)QTXE35V1k0$2A0ea`26h6KXq>{mH?s}}Q-)?nQ+hz>#k|rq%
zBO~Lv6asSP`t*ox|5fPs*@wjir4ZD@L|}7*Vxs30FJF3jLV+VHO6mGI080Y=uGlnZ
z!A5ALIBf(!yeI)Ha63I{1dwjSyXu-zm!YYN7um=o(en>`PTubss`3g-x$Eb|ZB(fN
zSnXSE2B>m9v69%Pvh;V`*3>6n8M*Mi2}`(-62ZVd0$K#XgBx>+B`HArfF=N#V_NNU
zj`$&L1mx7z)JP^;l1Vf;T9UE2fR-$-;Ot~5EOWJ^oVBc=wvkLIr4>+%r^aS0cun2T
zfuI2TwgxDTtG+Byc~rfr0=Bg&F+c}`fk_e^q^_sJSh}vHemIuii=d6?q9--`_`Njy
zISEgYcs^f4jk2l9==4At5K{$?`FpQAWYzddS4&RmJksDZj#J$f5S(CLjdGHS#p&TDVjh*{
z1r%6XhqTz_QGkHLF?}q`1G-uhfP3fNe=Q|n;h_wwzZzOYz&Qu}`q?pA%&yHMnqnT4
z+LS0TCWK5$-5XyxKmDw}N{3}Zfk*Imr@JhwC$YyS&-x*bvWuI{;Lgp&JnT7?*
zXaVz;MLZ4;CEx&)2S6*
zPMq_Lf-ztN%=%;8LrDn`AImc$7HR!ctm=~s
zOK7lY;m9#*N?T+WTkC4TCe%Lv|h;awyCWQArP*>
zwhpz7iFtvOv?vR(Xw_`5gb_>%c&04=*hFTASw8$jOy|6d;0+R~MEs;zFx5Le1{;*hWQx
zp~{`0R$J0e~Af+>#0fjCBpDr8iLy!{I-Ocj&5!@8xrE;w^4^a6V}
zjW&efvuEPP2~jpwbTpV;QT4!Fa2bCH7&6b3LmZF0JAR7*g|ZL}nQ
z1q=8TkbQ8}RJU&spvA3>1nY`-F!4!Sq#6N`XCJ_6NpN1dN1rP%3WqNOcl);KcU-_3
zuO}LJY$=8BC@?VOi@G-q+2MM`zR!x1*|>fSd?RL5-PS`ewPc85YfG?nm=my0{uV7@
znO_JQ56k^e3GYdxZI8H$wZIwc3nailO=_HeW%~*c?0;ULk_4cKSHXT?5s*iE1_$Mh
zl_lSzrQ61w1kD#iv?kaQ-GmrpAUr0&hi}U}*8pbRw{dRGTp%J2Yl3
zAN8B!9VS|v?E>d7nTs0v@yuVXqqrY(G})>6-b5@4R<7#+#3m~939tqMoRo;a4Ivc8
zJuWfsXK=lVQU3L@)(=4Y0Fbzpm;|t!UjQkdLzj+8_gK&5^+tKo~Z;x(8?vYFfW10KJQwwO=5ahRCQb
z>FdtM#FS#^0LSq~^S*Sga@;R@1xf~=@p%kjF%Qs2DJwV7aI%ZT!iz4vyqbGL*Nz8J
zyXI!^;VY7=yI2sw0}`|R^2loU7g9tRvuQ&MP->S6*_tg_`GflYgzKEkb#+No48-q-
zds+eg+ZUX+D0o>gaJ$a3sw1
zH*u0GHIcfC!mSvZI6+koZ{w#_+-@@gJwi-bcH@}cQd?`#ywI(x>%!k-b>u-L$AX^&
zacKmd^OEmfZdx=uTLS+qwSKBGl@%ian6&|4K3ibs%SJ@&d1EVXiQ8HU^9|sDM+2m$
zU4Y!QS!fRP^_$sW!<(NUEC`4waj+HqM96s>YJl@kCO{E>=vWr-SneHQwv`Vj)y
z9T0qmMMP6@GsG=?m)-(&5*Fa?>-|`u@_p#c{-UzfEzfWhshy>A^NxspU1M(8JZoD@b2G1gX-PgCFN!j}
z84WFnl_o49)PdS`;6)W>R7qTk++dEUx~Ktl6?9wPkoiX#AHRqe(~=vLsMT^m=nA9z
zsG9Xf>I_o~JUH@!8?Ld?hHJbRDr8kMcsgLPz>q5)$(a|?c>&3J31%McfsB}ePg_4*
zRz{5((3KsyBUsUom|=Tr9mo%6QPA391;eDgdQVw-5sQm}KOm_da9Qm-Qn~P{4t7{Y
zjLsq8f1LW6bV`OJQM5EW-l^+)2`etGs7hUIszl8_LipYZ<&Py%#gpBAPy;ktU6@A*
zOhg_9J*{$nKCb|KhOw85XI**VP?rATDp^)}$@WmBs*!vVm>D%UmLNR3y!6tyI`~|U
zRq8X2i3}wbA7@#5*}N;MhBY!+!WuzJ`u)&`AZmCIak9R=
zggv>O5Q9)R$U#h!Xuzz1Ds_}X=9V*RHfpro(ypkon1?g*3Q8+ZItJ0)OqsGSE*>=e
z_5u@ASJ;>+TXpu_Qcq~LG~3*oxS5*+x7g@Af~EKWX#t3YnH%zgi;B^f&q;lmD>SE-
zV{zfi`bK*4_b#rM6lR(lA>pJMs_t*U$3&Gv0ft==Y~Q?8c`;+jV`?*_3o)Aebg=xmd<6ZolMfr
z?Rs%_L^%6S*nqx*qO&=nKS_Q5IVBXtu8e>|j$5f#HZ#E+&D-b~z{zWEyPVZ
zVhmDK!%b(Jt}$wt@gUm
zXtGH}{ym&Pz0aRi6XTN9Wwo{Md^M1Y3#mES*`YKnGS
z<5@mYb$9uuvEJ2xNVLSH4*X(%PE8mxd?D3=i+prMVea%uX_|I8Kfl)MR6jM4I=eTH
zW1RJt+A)`p>)2YqnnC}R{HpR<2mOQE>LfOr0<-NgQ(4aU@;En@Vrzd@kM}|F)ZcKh
zXUlR@Advoxue2nb_cVHB_fVg~S&Ffxc}ArbeQh;kpNWz6IJC6cn{ecqbK=st&-P>LGi__WUUK#6)g(G@V>VRMaSA^w+x=ICe|jZFK9}
zT;Irw`Jkp|BGI4Gfh&?*)>^2OD7OdI(OQTa!ST{Nn=geyj?tBXK%P5ow->Bchlq@}
zdpMFyjo`Y6Z*j~SzrG+H^jA?}W;y+G!ceTeV^R)_4o#Yx?fn<@rrlSIjDPLR$V!V;
ztlF#p$&2^JNwr$(CCz;r`?POwm
zVkZ-8Vq;=^V%z3K-+A7(zPs-Kd%8|{)vmpF^))roTfhV%6$On9{!vuP(b`NtF>c)1
zsHvXXQHmu;Z%b2AS=!#b`IrIhO%Xi~+FMqMurRA0cs#s7QI6e$xoa!yYo{9Mf3H3k
z(Ss`KVzf*yaFCXFl$vXnsOl(M*9`iEyN5%4J4P7Y@hdkh9!r1sFE6Kt#G%?H#2Z)p
zpCS3jDyp{a1H+|4+4&j6M2eFZcZ`?_a;B+Q+ZmYYsoB|rwG17hB0^D2IA*Qt8-US+
z)6X85;Ov6avtpV83~VxbHab4dCums=*qi&3)y$uj&BgnPR8D72S{1dn1w}3K^W!=R
zGDzJ@(tc_Xw6Ls$QsLLE3Toow`*lJ8?IyxYsubI9O|7Ccm>~@OvyO8yn5#X}f?3qTLu5hU$M4#Gh?)+2n2$%EMK&xLdd(=^)OV_Qq6gJW#;BEQd8N#`tPGIS(Tx2;wYdYS^4;qJsVo?=2{3Gh`F(3
zF)|3Sw4
z{%<8FQ)zlOIZAOmG>F|E>mF2bIA?2Rm2}B}Hv~yK5yxSaEgL)H<8GdyL<_1*Ffpae
z4~Ni4(D$z+WXtwzxwnUZj59~wag+j0QB1$)f@ECT9FT;a%a
zLk{WntCty*7JLd8(wlIWw(OaDtDKv2rxax3;*m)3q;vv5Z+1kcDhmn==cs|B;XGMT
zPFDlK>DK0j`7P1T3Ut*n+?3VWzcvFVji%1{51lL8I=iF<%A!r{gt%m#CoWhh5DFec
z&Fkx#Uxh=wWyDq-WUyfD;fRM5m#@Em{hHNL5j7Vl->%Ug`G(Iuw=ow)0`vZKu>a!A
zKPV#Qg|mHdX{6~2UtC0%BoPE0B&x70r!DZ0>4s>7
zH;MGxGF$bF
z?neUg7j>4zEK%nqYRBogxxc02>B|=hG<^5>zj=J>ADMtJ7EJ)MlB$EydW@W49$?+P
z_C^E6$VHm--?`FJSC>YDq|WugH3WyijRR@YsUEG2rhoyqctd?<*2Nm%@UX=FtDq++
zuvJt*UOLpXCo4Q9^bnk+OjgV?HL3>q&<+G@H`nZCZ%`$rYv7)Hc)86q1tT1&BNjuo
zTCf57ZHkmuR$+AV_c^>LPf7nFpPh>S?(VI=&Gh;N2K)c0qL
zV4>p%q~vv>*Wsf7+nSX>%a_k(qNB)YxuP>}Sll{3=)V|b$;7{>==J~E9x0PH00mV+
z`3(tW%8@Mqbx@nU1Z%Ka`c|~Wl`I}|yM&L6fhXqR4^BTpF9Q!NMIEijY6s(JX@A&^
zn2m#vt_bW{)Q*1(9!o$
zFfud2(Z*^VN$PrnYB-w_5@Jc_m($x#rEmR3Np0z?v{Y42UtNOxZX%tg6%_w$pUVGk
zFO4URQ-tY@0!I@~Qo&4RjElEQWC_V*=|-y7u*}Ec4k|7R4I_{4_QzY}drIyV`!C`!
zg+Ro`!^i9_#65%{mA(baqRGm~T3W4vfPEn8GlD;Nz|>q4(nRmUQDQimX=5a-8tJCV
zGq@qo!Ub2dG0ooGH!$VLOa@r&-Vd3AaqZM^!W4P^rq~E84yRb@}
zAVf!J;zE~93A}A!h~cra?2~U}K{4Y4vXyXAAuK
zv%yW1mM;EgQd`M3WXi(SSr!+hhcD_DG=OA1*Iz~Ot(?xw5ZT;^;_t(30Y=^b=fmg>$@t`55>^ZN?$UowR~v94n~-?WKitlms9UDt**s
zIcc*_UAkF%CZLiYYQ;Y>{3udW`o&%fTk>=oGl^q7oHcadk_`@`3vvB&(`QQ0@B)~b
zj1?s*i^<4kk>N?#mXV1qub_d-YwpX>kB7girY;^a1=V+QLNl%S%~h?nX~NG?O`&)Q
z)tHr@0fIRPOv>YF?5{ASsuqN(abdR$1HjXCERl-~Rt5bp$;ke<%SFm7$Cuv4xa_xs
zipn8g{;^meD4UQEB#fR*M?^H3rtdTL01oolMbO$22~m-{Wm9}ssB7WRV}f7iiNCJg
zRZPAY5VsRjMe`|CS5{CCPK^Ex^`hv`gNH6-XWfB?!C{E
zv*K^Y|2^~tbe7ZXiX$H*R$A@4LW!%$qzqwSM&aqnuaB5S9-KY00aOT3plzwH8nJ0K
zp5^anfCQd8E(wen*^45?>)9i6u)A{5cp#9f`RIlZTEBc3=cMnMs;5wHArLdkKh1D1gkMjFGx`M+c;xHX>|D)a@ImGd8K6
zUdAe278$nDB6)$AXrJ1cgx#73xh#8NHd6EZ#@cXu1+4_cOJO<%P?FfWsPHvC&fL)@#F}RkgPY!uk_E5~g6;mrMWIhyHhYSGbF1G8HHeF41u4
z3eFz*2kNhGTRZg@)wPZBvuL@NMeep_QK}a&S4lgHPY|x`;#K>+a}9hrlI?pKiih?gbHE0b{t##JK#*;n!mjD+J
z{DIFwCUWFjP-rt^UmmKa{6?Tqgetet9GzwsxHW>~e}?R5LobeuG40bh?dE$l4=
zss3QF|Bw?9x7h-b_l-(dq6r#8$?AXZoUhGTV&PKN)Qh!Qei
zu4Rhb;uu@+5@*AdwvBHf3S^LR(B~u^Jn3X6E1K`jk+nAqbl8snH8}(%q!$cy%Ic(L
zOyI}|9415%?!MIC`DNIq!-z?<_s>~}Yp3e<+)TbrR=fwGmqSBGl~a(w5uL_Z8$p6&
z%--7Vffg>d*pba)S;-Hd>aDNzheHm0CuKxk_Oiz)(Dkj&Y0-SMy6WQOE
zL@^{qc7_CDH@9VmINMbDf^nt
zWJd(SCs&$7(pnv{w{~r3?dg__jn&mEzbB*x3H@=3k%OaId3#aJ-xVIKYHA;cn3>sx
zOe`u6hFpm6uKZ0=D_ZzC9$$zIxWCBjA{HVRte~m>Fb=;>`0KlnmKPLi)}g5Qet9ua
zl{bM48UPl@bflKeEqSn7)-;kAPASp<&*mA*$@N4#wc@=@;x8IHi(^1|uV~3G
zJbte#U$R|d>&YF&;(Fo65vd9{Fx)@ZP3T#dMwFDWIJ8-^)_s)J3;na4{K{UfSQJvb`oJrf)>w-tTf>+
zHn*VNOe9Q8Z&ZEvu}Hf>+ID-9IDrhZQlVIrhd^+c88H@AR!@*nBHB%B$Q}}H>3k<#
zQ_59*X|l$N3pe;A!*}&;lL%=|Pf7|=MgHe{_x$*0dpROV_O`rIPSqA47~%b?x!evR
zX>Kn_-`EfZY-U=nEPyiUiVgSAe8ENT-?e@l7p$y$FK#GI?lW
zb}%66t6~-{+FK`X5Yj>(qm_wMI;1jUa^KFSqJBR`YH#Y)CAuj02wPH}p?H5_zkOu$
zdEo?6TUxqbxNEMxL?4c$;1xP@3h>ja)&Tg_6?GLwNqfTRy%%j1$)L~^nR&GRs2I5G
z)VVulAoX=AYJJJF&plh5N&DnT|gr{ZoX0nPM*HJyv9;flCzVp^7e@>ISXZ?@7Ig?l~x6^
zB2~FPCFLcYHL2jalgvxFl`CLp9)X)TO%QxqnY#)duK!aTZKJvM3bgN5Vil%
z^2V|S-f_&@BgB6dXNHKY)OKdx%oX3t$F|3xnDjg?Zi2uSzlx
zs~3l66%ahM@;v=fUg2nVd&kM7>I|_ZPu{sx&gXN>?AT
zv!r{`8@sDE`1rg4B&EW_NV3ZmOBAnR?U&cL!sSz+SB{lSHsLxw>A1#>REp>vR)4Zx
zQqhRZ#x=vrbNsPwbtz|W%7%+bR@j?IV<$N@@RU6yzSEyJw8ziOd3!_GF{t2bc=%m7
zfeYk0>hBo$Hq-lK5!2=(Z$3gkN02RwSu-)3WxLJS3jh!!FyYvp-LBVYu$$|AUrU_4
z=rtT{i~RVy<#Nw@br^$Fuz)`W-^*mPc`-gYNiwyxh77FTzE~qlNm4p0*qRQjtSq8|
zv#5$|8JANK{`j%oq8J&ymq|13pJn3NU%2cRR!mzY^LC@+mCu*`Bg3i-!AC-6O@jx%Wujku{cJBQ+iT2bb?_XG?sv51GGWsNMy$=}BaI_^w<9uv9-cvi9-w)^
zE?9*DB1I-Sv{hv=G2_Qq^YWa5uJn2(rc5^Xo~mIZ{w~NS@am37iTeLwUFnh@>ocM^Zd)o%mB!MozTnFiY2f7=Z0hD95agjq(FPDl5N#*AAI0
zxaLNxxLV|H{_QSJI&`RB>mS9cVpdv4u;CWdocLVZnaf!A8B@kY6kAsldQH@pIis4l
zXJAa!`iU~LAwAtseJqXTzdZcP%)ApE%2a4oM*D{RSsiIKKB>^(G?)TFR|rL(2VJ$S
zz~0Pz1I7K4gxUg&Ex$EKzvN6vWasm7=MAT!)7ZGjwew9)O;k!*j7*GbBhZgO&o}<4!+uNa*nR)w$m304olvZ&s+PBtn
zWo^B!>W)dM!;;3J+|AB6_59jE{2&9NblVBra>#ldz!b^_is}#4YLJkqbJk4H!~v+s
zQ;D$lFS*=0W%pj8xzS%7XzcD-`zFszAl4E~==g{zr5dvqN1yZ)aif
zCE*+V1B%LowX%3*+|x+UE9|%l(@qfd%wWbFv4Pi_PY)`eayXn3^YJ=0)?F{7B*pd@
zQGz0Jsp}iIfX6V-#uL4BH1V=7SiWAWH-V0sch0Sg?F;)P97
zBX6rOQ5h&yciu}m4A3+cRL;(Bh`H+Wdk>WWw_u_Y?DXba*GM7+h(f&Lqk@XoSe};2
zvnmKip;`}Qc${JM^IstF-J69&;MQAoj}+9Cj-=pVM=HH#
zad05v*jE+_7d9n+94>4;kJJ5PKf3)Rhz^SdYPVSP#k_4FwA(mivLRFn!?gZ8EI8RE
z80?+L_vj~i9<|7RT|~vgu=5Q
z^noaigKs;^H%B16m?$_W-oseB8A#)q=I19Qo;oWibQZDdFYG1bPJUb!$op5GV;snj
zY3hJ)DHK31{*1P~<#2Y8y`5&8LKBjVo}d@V$*}>*KStsnL;z#}WjS#Nj(&1d
z)*`*O=63z}rV}ZLbvG_YMxGd%35&emqkOkX{{(1i=-Gxn*dqqK$@@BeiJYTN0MPYa
zkZXP0UlA?jJjT!S1$;Vh6+e(jLzB`42c+u^Wl!Q67Ol%tIFH_+jUg
zvaDu#Dk1*8y6lC{P3Eyijm{cXbMc=y-oM-h%CDX_NnJ48NF!}C!0udo<}3YN6n*Oa
zGI=e~eeI1Booz_d#I|7H4PS`*D-jYtELx~d@DH}7p|0&?m>E{Uo~^~O$HMn6Rp6m7
zH5Pe!uJZ<9+OBBKK3W92;-sdtIWODBmkZ{;R@GIe{#^*@tn-7wAj6u7SwNw_;r3V1
z!9{`{eohPA3T^d__m|BG%y4IC-^gW7V|s2(hlzvdcq+pY
zx-*^(1UK^mcRc%wHu5|`FJdhL$F-#j`|br1n}yV!gL=5;IH^~EIH#)aaLDf7Ab$%;
z7Q;DyAf@rRwNX%Urt!a4Xr}WybEC-FSnwVeIl1mXi0QRE0vhRS+znAAP(d+cC}UNx
zyF+jb!R)mi*#bYTt~%Bdn6Cn|s$-#F_FD739hGQefKlJ=b3%9S#U=O2uQ&5|S9AFB
zU7up0-hyV$)e0Ncm@GVTJ)l2m!S|Xb-&@OM*iaLqvZ_&0FavX0kjWG;eD-QZUiwL}lVayu3c`GVp=rLJmBjm!iH5
zb^3K&1x&})Sg2}4q>Ox{5UV&WJKieYy?}gIl)ow{tpCPwJZcJKdrLjpecDU0$(gVF
zEpo()$?}rh@&3-Y3Z}~rbmmhK{cAe+{qqs6vPfyCD`oy>J!DEI&HVHJ%|&O84~L6I
zyOGQ4!PZRMSXUG^``&|Wf~|;#&CiGpXeyt{gEAHN!xJ*T`j4w??o3A8J#Say&{fg}
zMuo8`Z!gweWhy`8khkB0E?SJj^ZZ!TbJ#LOrkJV6?x6NtttA-5{bRU$2mNhvhx8S9RGxYw76t9=B@~!
zYcF*Cff=5KOm*B-xz8L+4g}~|=F=hTt*qhFi
zB1!5qrcN|tEGhE87S;p6oH>3A2#cT5IA}WC(kxF}R6_Jlm-YfKrx{5y&37I30bs+9
zwGc|y>fpRdNpHL|-#kqX+^T1<^e7r{sJGruuHW-6ytB4${v%pfjJK(9d7KgOzGnC9
zvSWvI$KqhG88*aK_HItV#V@*2O;+q82OvNIWv-tpR|d@idj=KAIJj!4$R`!
zY^O=TY{QA41wH}ZSL!Xmt{pNM&u8B3-{9t&pD)=}=eNkVh?;#l
zFL>o4upL#jd-Wqq=xtB>eSfvTd;X0i4{WRZlgWiSAlti4x`A5rH{gi@tT9+B7(flj
z02;)2t7T4(c&ae-)c+1p*$|k>zaJ38NQ9wtv)FYc%4;q4Cqd9J)qFWLzLaOD%r{5E
zQ1eggA@8OtWZ!zd1#yMIGPrJE%$Zql4n5l_l#1;4!H&AHh8i99L`}90vwyb&|B*HL7)HId0RSx?Vm^`
zk7xDq?s?DdB6q%VgS4m$zxwvpaE_;Zhv%ylyV+gXhS#Png*PF1drYcNK+l^}i@_t+
zGs3pKXv#iF)zonKg?F!)DKk#qx
z4B~^PJ}JVtox##S_j=-n-DBP9D&gKf_soc=gm9qO4jY0o{{G64=;J>%yYh3zSP%|K
zE9;HlANEw*EVcsC>ZXKNEz7(~BmuHKL3=F`2A#d`*51{~C&kOfdE5RPLeT@_!TUc_
zM}ZffT=@K>_fT
zs*6bD+4D>e7n+JF=jxTF)Y3GrwCYU>KUs(rDWstpOXfpUPzdtfiUC;YKBe{r@4L@G
zTQEP?>Qg;Gn}bPPyJyNt$W4@S>vYv(A>Fm8p`HCE*bot+*S@XQ#|p)=YR(+r_pZ))
zUutt=q-&~Z=>Vp21!8tE6&|Q7m$%@WpVx+8xVQbs-tY4i1g1U^?6p3TDCVFst&2wT
zFr?>m>o2C`tC5EOQul{-w=X*cTlrs5!iQWY>{2Xy7wsGV+Ek!L3aU$G41SONEgW7+
zd>xCo*Tu|6l^lo@&8Cm!ba&Ihp+mmRRKKFYz
zakQngeio?iC7qDpr+&b6ZP-b@p3}X(9)|!(voj;{WMBw!<9QM|w96Udv-9RpC$}e`
zB_^Fw-I!JtdI|Q=J^uEOcQ-$4>aX6qyK(HQmA_Zt)~apWT&ERG5DBY&&6s)>yHURK
zg$@&fk@~6h2L?RzBCY3&ySj@}&(U3PeGVv(koZ3%S|bxlrZag{?IFX=4zZLq^e2{k
zKgTj+-2jw8`f!Tp%=fKQNf_p@#Pr+Do%FcgRxVzw`P;cbD_@B8m%nhP6!`GKM=1cG
zQ8^~oSQCD1$@h-@OD|Por*CWlWeHd@0{eJNR=i#jshu%#H|4|5&5Mj66}d5e;Y@Je
zzVMQ^s+=Fq`nH7bJ|?)q@}1)NLsJ{qn-gB>J=Bv}82tR(b$yePl)cW>#FtL&XLaD+
z)!U3y;WJ#CnznCOWms;$z?kpfl(yrVZ{AXJqZX3
z3%!w5W$}-BKbxn8>5uhG#(3Rc8&~@ryTUQBHTN>^48@IY+}!ngzwM@+zFUwQV(MDW
z_{5a;YVhpeBEK9=t>YTpKz;Kwh{Y7KH`z@0M(Dlg6!N+Jl9~G8CO)S7Wq8lCjEG=-
zHV}w7HycI+2Vi!-9O?mafTja#=V5PP^{@Jp<^)UUXsAAeS8(Q!yuPo^05IbvKY&rf
zMsw(iJKgOG_2T~haOm51Tl(g8Is(SWVJI87j|24TrfTYkGl;rCvKy2PXXl~3weqy?
zOH4X?h&lzei
z^Qrgt&NRKj6vy%0Wk~+$jv^4sB>l9|nS^G6gOLIQbIZ`>t5;(<42&~vf7)Y}c!f^k
zJJ(@?o7qngLoXonY%%@rC!6CNwHlg5I&!PlspO`Z%rY=|HIO}n74OE@XVkt}^{&9#qfM5YlO-JFb$H!*_edl<>dVz|?
zUV%y~BoDBJ^t|~);=9}>9rgsacC6fceSVDa(ry5w=U*ny4{;mLVmM@UHMZDnf!;ip
z`fR8R)UEfkGtE}{UBkE0Tl3}MX&=B7iaLuHrjhP<97QP7Z)C~T|%r)7v2IW}xI
zLZD_+zGFzTb1W&Xk1lyO9Qw-15^K($i#wGNMgC60s!uJ#b7M{XSI)MIuujBru?0^D
z@9QASmWzj(2eVV#+^BOjk@xw&H9^>7i__6KK7SM!TYvm@-+nQxD+-C7k5Lh-h&orQ
zNpB)S@e;$|i9S7W?;_n~2+s{KzDt4+%oI!y+r2llflurgAmV~yIopt!9-ubW!}}LeTho;N6~ZzkEMJ16(={*J
zFjTvvq~J-C&kqQKH+BCJ-QAsp1$IkD3=t&GxhQJK>uZ@x5i!Z&imXx!CMGzTht2ZmWXmio(BHD2
zlxk*rnnVt0Z}wcLo`S^h-e?$sQ+|cEv$i#Nx{WlZjOgEpzi*e-Z*~Er6!WGy-Y3
z;CkR&5l%mB0)vDeIe7(Rwa=Xpm;MWLUfYH9mDdA*p6(4?U5rnYi#FZ}{hFGj@Ssg&
zquC`DhPt_o92)E#s&uD3bU10)eI4Pq0qE!3!I6nkrizkUvV=oo;femk+!N2|&D$|XTE{6-lI)JI4hef<9+Oym09+MQr0llk<1t4IY*2Z
zw^_BO*ky;#T+fzgQvG_9!p#QXg%&!``K>u6UtMy{^{<`ovMmzfN3dxEqdpYw5NiW^
z?FWA*M8cY%b#IBjj^26hHH3EF9|lC9&rp>LWI~`pUQs`of?=tplHabGSJW2d{6Cqy
zKF+u=5<_r2hLtOYzd}>Z?cElS?-$;6+ny)BU%Z`l+6*kV&lG>MQAkKTeO8|$I4fNr
zVy50b0)yl_>I-GZF`wYP#!E);5V)?okocMxWJYVe2n_m@(R`@H-Bddq@rE2R;=*P_
z!!=Fr88C=&C&%i{l47MBL-e3uyEQ=SzPrb(7L`TF_8?6V5DZ(+zdrfEs1wz5c_qoy
z@)AskMMdC0{O-Wl8MbWjnxW0CKQZ-;al1i}>~xq7_1(f?kTT>;I{WP&l9;WwCm-Ix
zH%hwJFIB@7VSly=v7mq$kD-q2*4l9`vwuNa5T
z8VUX==Kx_y;o$uZnAi*pyiGvHM;(Z8J=);UKqU(P2&dQP4g_5ebQHUZv)l{wbP`QQ
z?x}xvQhgLN=(uIWE5R8y^y5xPe&{!B&+L>pJZSD@J~X5XOfJHMVn-3r
z@R$*_3%mfGelHKZ!9{kG|M}Ed>Fnn^h$d1|DfdiY(e2U=fAt0bz^ZpvCp|r_U%i?f
zVo
z;waX>vESJ|1R_;ADXaY@#lgVTP6EaYBvn)}Ak{C^kip`;%~dc^B|&8|k|f4h>llGC
z)HP=*fWVJQuaQzwvrxhr`mrKpF|Yvepy+XoH4NKrsX#sl*(>S?7_pTebm>dXu$F4>
zw);%GaDzZ*R|_f^1`3S7vpmX3?M^2R3^dWfiY=y%tE|~NiMn)&42UPGNU>UT@E#P$
zL*!LPs_t59;9O(at2ogZI1%aJKzzE1x+zGbP4XE~0lHn!AnZ>8H51#ra!+${^qAez
z`nKk*bqfoJJ;;6pGVhxH{45U>;Oor^HsLzr>*svrBQy<1GI4V07u6QCf5d;eXg_|<
zRBiv5?q#J?K$hd@U%sSpnuO_Z#zl$?=qJTQiY@|rc&uzhRbO_OeWmwm^%r>yf(4Xd
zSFWP-y>p$)P5EY9-4ey8qD_<#CJso)Z4!VdNv0u<_U-X%gpbzlM|xpq@HF_hdId6vjFh>+jVF(GFT-pWp}FzTS{q8gAkLp%s`nJ=&Z3YX7)HoYer
zCciXl%f`LaNut5ZzGcgpS>62UnkpU+&+%(HxRj_{V0{Y(M!{dM-(>&u_WX=jVh57H
zijF$Gh?O#ksx&e+2hC02w@Sd;klQ}_Q68RR_!SxtqmkF!jXgClHAYa6`xt)jeFjY%rjmK!0WP***0^UBgT_W>7
zkzxZP2NSN&z$NeQ*
z1UspSm>WeSRUB_(tB)E@ZMn%K7EYJeaH>0RXkqK9Mt-kJLLeg1
zA2CcP`iBFORfod=d!#QGu90
z7!bz?0HS6I>A&n{JXBhvW#1x{E6xISt{KY*zRVPY3@hekB&7NVv
zW%gPEyn_t%+5c-}dH)mcqSYx86EbC0%-0)>^@)u84~aW`SZ#|gX+3d`JWX4^>Y{g-
zMXa8T@2dqM9XIS{BJlu5=Uwm={dK^q_4L{=r1MjK`3|tl(NvD#JYcXbs3v%%1MX1|
zaXmAhfi*vt
zK<6nzkknuZF76YcpX7OHDx-%?N}AcyaaI*MykH>9v~Exkd>i=MqJp}$@gZ^}nhTH5
zhepGiG>(kxwN!lklE|TvDP}YE=j`emcpZ-m-yV=y0X`LJu7CzG5gT31iUNQKc-Ow7
zz!3wu!&Pd9{~YH^jbYo%b_cD0@mbxokWX^t2J*XZOpwU0GbVnb%;Z~@IPelPsNe+3
zv2{9^EUv%{Li;pVRlKeCk>B4Ms)#&j^ORRcPg#_e6eiQS<{gt<8N072u5UJll!L2W
z+xcc~t!%dc+U)W8#T#)HE@#KpIg(BzONK6G*4E+*?`h0N>?_riTY||n$HKvw37e@C
zOpO@#DJCI<0??jeD&kON(PWgVfB%c19W%q~jOdWk-+5-OO;ph$;ylV-`&5R!FR=B9
zsoly#p@Q@5q7XpmUbTGM)ORCC!kguXQh{cHPj938MN3
zZzbAle>ty*SvpBM6KO0s=$T~QZ9W*GHFeI6By}lz
z`INrbtSUnr|L*g~)|3UepxerQNhxtq>p6ILCm4x=YJe8=Y2
z2YK|Q_X{G|A;zVqWia@M_w8IZPIA@{Ux+Yy7Me~}q)LS7&E`5e1ric#0l?gsK!E($
z9r3-*4iN;ZJ6>K%7&iCn#K5|t`Zt>`J|yKKOmJW5Bq@TdEMKMne{1tnQ#X%bcmDa;
z;y5Y-wF@OtxV%i#5Xa
z)6XtgLhhur24Dpy?CW#hr_UKS*Is3T9EahnL6{o$#japds?5TZtrQ+|SyQoOzvpBs{N1<$u0mB7fa$gLHMr3bxXT`jU9gx_rb=6vRA8Ljyuzl|W%NX(r`3aI*}37+shZ6r7{PBbf5H
zF}cq1Sb{{r&w&ytTkR+O)-~SRTkCNjbJJw;;G^GarkY$(v~+0Hyz0X%ha{)uR6{&9
zJ2_v8*Yn}e>#jKU^7fAfsH%XVILm`{d&|b=`c7k^h<6UR`R@G+WY`Ip!;DqtpB8Dt
z&ir+bTyvRUl;5_U`4f76N0uNKno+)CD*^I=70)BT*Z{yi
zFC?UV!6!G?fJuj>vmm3tL2kU#7MI@*?5s4JK|T~I`wwJ#iA%fQP;0B6?9-&T
zUJ~Aq5{052bll!@F}%xxo7b54CO5=MB2^Td*04Xj(5FWiCvNc%UX#v6o4r3=;fl_E
z(IEb;szEf{ZmxnpN5k1XbA(q`j>J70*Y%64tJ^iib~~2=gS5VQr<>-HZs@3QvQAFd
zI%tmm?}j#Kc-}i-OyX#iWy&X{IBMLx1$0VPA;H~+D7qpv$-1lzz?iF>77bu21iXdb
zih{tL;@jpjc;T;xEcI+x7Wb?qiX_0k)w$OTOfSR-KJXFlha*yPlBT6^lHBR}DL=K^?$MJq?hq7SMx9NIF78W=-)m
z9n3`CKaTlEZpnzA=m;>>hb$U*Eo;DX7lL>4q0IPt{?^k42I`YxQ*FDmg
zl|8FKxV5(24~8_0>rH=_g4kuIYxcwRlAQT{A6iQI2CVfP5^rs#ZHGiicB-f|
z9O$>CoeT|S^}oE`1#L^}%dU%8C=*N*;^`%S+P5fCB&kxRE&)u`4SCTU$SbD=={q5i_fGbgZHai6-UqqL3RvjW)2~fa-&0Ql!;u&ktnq1FSSbz#TiX3t3+;O;kINIf9bkC2g5m=QW!xJ4$2kYA@<(?w
z7%$DFX$Khv4TuT~Fh&kc6Psy^CL`rSIoes9ug8-`kJACIunG!JPVyk2!F|;7l>uWf
zyF=?%|4EjB&E~g{D;MHa4Fh0wdEfg^6_3*~a|%yu20{I;UiDK_h|pLSW;Uqi2K>s)
zO^}>9Q{nRb`L2gE8G;j^_mrwXA*E8Q8+i4HA}X5_!FvQF7?6@4u0B68%~tDs0lKz9
zy>#o>{9=VseaPP{FTxO{SU#9rEZ9fXNZ3Wx6H4F@cav$zWQ$B`qa%|e$)aV7LTM^x
ziXx*UX@0Djw>V`H|LgQxSye_nU-_<
z7jy8<*JZBvPXbLqO>M38$<563Uyf~=W72M+tTV#m$3b|3yxGTj|B;x8B4Z#X(+{gH
zBXb91JSMjTX<$O)-A4|A21B}nazVgySKnpbxHMP0`drgm*>RmvSzCLRnKaz;>lC$j
zhWD_x*5J$|F8;c;=ed=JtJ+`)QI|_ul~R2RLR2efY1rp;J(u}#bc|iV{e&i@c;D|H
zTG(V^&B4Qd(<>?)DMtO-|EF-px^%yq)v_pcN84+&<9>W=Z^){D;PATe>~v?(jSC*r
zw%~jNDi-1~bi~--g#{7YxWeXiH6yP|
zV;>hNdXpqMD$OP@ExkY+=1$VRX{ag;qTMJTP|#bjjd03hEOZ1q3LF>-iNU--?#I$&
zOKOyv8F`>xlwsIp1P$#1*iaF?3*4itpQ3iX{FW3Z_122m>pn~0a~1UVbb%Yj1Q1GA
zIsy=FHQjFx{*V*!`^S@0&p6%b5pz3ij;6U+vBpYL;Wh@HUj>x3Zr^)+?gV|iE9%
z>Y3e)@_#|8>AfMTyViR35ZB_k5Z;PZ@-I}zfNiliT}R?wKN1Oy
z?>@GZnvZU4SWV)YNE-%S8i}xqQ9$K+(bL1j?_7uI!K(g&!aFk4Y}=cvm(oHV6+(*^(F%?d=qHUD5zie;Pm
zv4%!7m*G3S8Ycer8#s|)v0~}9?)&(RW&3eFl7^3kg%$?^zbg|8A_(vLQ#x^UFsbAE
zR+=Xd+t2;EEdq4yq0$!;M+fit^C~7UF;XoHZf!fc*sLrE+wiMF49_JZ>SLO`%=0~H
zRo&O=dX1-?sr&Iq6|QCpXMOkymk%#^^rmI@F^}Axw`pVq)EF(bx9I`7{p8aORCCqu
z)He0R-kgD5CIQ2fU?YqKcjie<3D<$~vfK6`C&aj>Gd&?2zsoRJbY#z>!15jsY)%qPNcuh
zZf^A}^0z2!s`}qDvj8I8^G#>3>Xd>BWgfn)BV$2NsNqxU2jbL91H+u2)B9
zAkp>|H&XNbg5&*}qVrjD8nzuSZPPGYMOB4~50fHSwz%!oDLlBH&ibV$kAZgWz)&}R
zoF-$#uc9!xut+AicQ{qF5oM37hW#{`WB;yayX`Jh>2JCFCl#*Pg7$+AR@HSSC57=%
zrBXWh=TQR`5?#q>lZ&_CwtPw;=$ePvTzOSm;5KnTG-xI@UGK?Zks
zcO4uC3GN4XC%6O%g9Z&SxI=&tAh^2+cL?zAob&Cyb$`H}nyQ)FA9nBE{XDDJv%0z$
zpLh65J<(jP3%LMKWh*RQw*A3>6rys(m@@jp*rX`^c;SV-*+VdOdJ_P2g09rPy@OC6
z^N_ZtBeesX<~msHdX|J`W)S^waBp`gEGnOjZ9Lp@UIM5K)t9f}-N2Y!8QK&P{wIF*qXE^z8XU`topd5cM?$xaW3?
zOT@=V{-w2jBVcyLX~(v_a6?i)&iy+lA2MRam_hHYK8Os${kG5bD!`GeKm<@M*r
z(k<4epd%!~mIN4wAm_Q6`WbryR^V*Vo;^EzGcEfMgJU_nWxMel+#g1YR0MlFXc^}k
zptKbN3u(TQY#HL4@8YahlZ@;5svE_jN(9-n9`naM%8t|+cD$>jE@v@9VWvxp!~uK>KDz&JKz})r6U-rt(s?3fD(%h*)6(*
z{R0pdUG^NEiVGU8r1}IvtYrAa;WSR#LCO(iVNysEw}r-K`k396&=>2t#<0^6wR0%4|D2ci&|l*v~h(<5euMCiUo*
z*<|D@xNc3&uZ_nU@FEUZ{MmaG5j8LP>;aZhaY*#!IeE>8k(B6#+KQ=Nwuz0Ij2;t_
z+&oZB$NtM*be_7YVCt)C*BH-pMz>$I6Q$!ixfTP4Seacl$ekhkjUB=jR)V^L#x}28
zo1M-ra&4)zs+SgFt}g9X_`4{2U0HUfSq)?3FN2@V@{BZDshEUiHV3lVG3qVA@rRs3
zo3DW&EVHxap-1f*9^}GkysEUE%yPuc2gahPJvLV5d>o=Ou4yuImF)}k)Ym8!&;aTg5Z`)$tz%dXM0m1~*yjPS(C)2S$qJbn&b{bW>bTl7Kh
zRFHkTj7(ZTACh=sB6NLJuGJ@NZh8{i@2BL&%8bn2_#N;0_yoGsimfS0JZ}>TW|S`e
z2-nM93mdnb>z#pUPjW&cOnEaB2CE}xVqrsOY#9#%}g=E=&aY
z+U0nYT=J|>bA6hL!x}moEF<{ARE8S5X+!aGhjVRJ*%FM(u4{90(gTclPkAM!6=hbt
zu&c_$1H(2n%i_-fknsgv`#Ia#kCAjfhaRRUd4L5oiagO*Cff!_@|fVSgPe)iU7s6I
z%F*wZZ;mZy2MBvur6lM^UdH*}wyvkUx-kvTc;Jn2I4f3HOF<-9LVAs*hLX6(jUCSf)rIItIRb~>I39j8F
zBUCRG0fsgWvVC>%09P!h=3QmSh1yI>!GXR@^yjNB;g0KZTESUgltmBLM=71mnnmY(
zagn>WS(ozKs~bxB+E!9gG{?N?Nrs8OmHD0dxMKB?
z$P*1&gplDhM#VM#E$x;U{!8{xX3i#$DY4f05hxB0CS3UXJ~wXezZRytTC~WG<*n8c
zHJQ>C2V->^v^RZnx-~Mwx(2CG)x**l#N$dX2~=xiqQYwy*WaMayr;nsk_6RDf-PLV
zQNm?HR%hnx#9xL3#5a!-W?SQJ#7jEs;d0<7@+$s{lb@qM2*Swc^#{K~K!1LwKCKZJ
zZb=G(J4*1UK%Cvk5PI479PGQVT1^(>6;@olHXQPl&cu9hi;xJYiR7Ex^zq3edg?mV
zGTxK((1*`sQ*K4ov;5wRG#6k^tKeuD;8Y^9)5r*CMnfrn&oi^0{D~IBGO@HAl4_-Y
zStBCGMn{~pf#p#^EO}1z4kQAPXQRmJmkRBSE7dNq&>*%vdsFiB2KfwwoJFax@O}GH
z3=lXMy{geyM&y+z2d2&zu@dbieEw!sb3ii=`iGXbpuuO_HzWA|ArjrSjW86@B$d-5
zd;t&3AI~^_gttjhrxHo;n4if$z-RlDj?e*>~DID_^|$fAeho-G$x%Z0#0u
zPT{1OM{hMpM#i=8cq$dba0~6-#QX8H*cNIB1h8K1G+p{sMGu!^xjiW{iGftBFcB{L
z)34A9K`rFGMNq^-ZKY4u^$%iafVzR?QrX<-W
z@mJ3L^$vev84U;*4Y6>K{|A_(kX#^2X15h%OwbcFw^~<4TA`>5xHXE-9QGokAmDS;bq?x1hw7BG?@r#
ztrziq#BDYES@c280x-b4UPtT0j5A);86_kuOgAFGgG2{ErNhuj+M2-fEh7Ly#0d$
zK~~4Rv2|lgN}7y}H?9gbWH?nQ-VIDd9P5Z0E46uWqE;8P1z59okr7cG5tS_yL7y|Y
zD#lXVDz||C`BOXko0x?GOcYLoy&n|k#;7bxUSp5LN%9dqowBLt)BWaXHD5Sq#oy&C
z?-xsZ-|`%gE=1{S59udvf(1KHn%C~G^BRawOxS?2L1HT2v#~8KDvnBDstE~7
zQd2e}Ui%UD!fNLi8NW+3FpZiqNx4FVa-V#&5AAoC`&f`M6vqe_5do?vhW-z)gpKrW
z+Z~LBbDQNA5}wERb?tSCPSh!%!hkwH4;05~$j@Qs$`IdZNpZ5Tp}mCwPysS0DtNDV
zyDyR>!V4n%3%@KhjeD|6L`)vGuWh^z6&1BpIA61g4NAV@60`!%p*Ru=GV@5+9OMze8!*u(+m|9KZ=b{{T7
z{5IbLK*a7Oj=X?l;_!k}m13Ln>f$n1DTqG{Mj8GN{c$05aDDttqx&Lrp3h;C^)(Hd
zLhDqfYk*&fR?Ziq-ygj{k-r-5dsAHe6%sD-HPcMcC=3%#$h+QDNh`Q^$hI&Eb>jQB
zi&GWSn6CxQEHQR)yrQb)L{RGNpX+6m`YGt=HJ-LapEV*^a>H+jzlin=tfsH%Hfz1N
ziyp$I!Op)ePXEjtHsY48iG&02ATRb#%0MH;Y@$mOVm*jpz1qA)cU#cqp
zPyh(f56krO5I6#j9