diff --git a/csrank/tuning.py b/csrank/tuning.py index 3e269d67..a4fca2a4 100644 --- a/csrank/tuning.py +++ b/csrank/tuning.py @@ -179,13 +179,13 @@ def splitter_dict(itr_dict): )) if "use_early_stopping" in self._ranker_params: self._ranker_class._use_early_stopping = self._ranker_params["use_early_stopping"] + param_ranges = self._ranker_class.set_tunable_parameter_ranges(parameters_ranges) if (optimizer is not None): opt = optimizer self.logger.debug('Setting the provided optimizer') self.log_best_params(opt) else: - param_ranges = self._ranker_class.set_tunable_parameter_ranges(parameters_ranges) transformed = [] for param in param_ranges: transformed.append(check_dimension(param)) diff --git a/experiments/experiment_cv.py b/experiments/experiment_cv.py index 0b9e6ee1..109c87d3 100644 --- a/experiments/experiment_cv.py +++ b/experiments/experiment_cv.py @@ -40,7 +40,7 @@ get_loss_for_array) from experiments.util import get_ranker_and_dataset_functions, get_ranker_parameters, ERROR_OUTPUT_STRING, \ lp_metric_dict, get_duration_microsecond, get_applicable_ranker_dataset, get_dataset_str, \ - log_test_train_data + log_test_train_data, get_optimizer DIR_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) @@ -97,32 +97,8 @@ optimizer_path = os.path.join(DIR_PATH, OPTIMIZER_FOLDER, (FILE_FORMAT).format(dataset_str, ranker_name, cluster_index)) create_dir_recursively(optimizer_path, True) - logger.info('Retrieving model stored at: {}'.format(optimizer_path)) - try: - optimizer = load(optimizer_path) - logger.info('Loading model stored at: {}'.format(optimizer_path)) - - except KeyError: - logger.error('Cannot open the file {}'.format(optimizer_path)) - optimizer = None - - except ValueError: - logger.error('Cannot open the file {}'.format(optimizer_path)) - optimizer = None - except FileNotFoundError: - logger.error('No such file or directory: {}'.format(optimizer_path)) - optimizer = None - if optimizer is not None: - finished_iterations = np.array(optimizer.yi).shape[0] - if finished_iterations == 0: - optimizer = None - logger.info('Optimizer did not finish any iterations so setting optimizer to null') - else: - n_iter = n_iter - finished_iterations - if n_iter < 0: - n_iter = 0 - logger.info( - 'Iterations already done: {} and running iterations {}'.format(finished_iterations, n_iter)) + + optimizer, n_iter = get_optimizer(logger, optimizer_path, n_iter) optimizer_fit_params = {'n_iter': n_iter, 'cv_iter': inner_cv, 'optimizer': optimizer, "parameters_ranges": parameter_ranges, 'acq_func': 'EIps'} diff --git a/experiments/experiment_script.py b/experiments/experiment_script.py index e0e9140b..71c3d687 100644 --- a/experiments/experiment_script.py +++ b/experiments/experiment_script.py @@ -28,14 +28,14 @@ import pandas as pd from docopt import docopt from sklearn.model_selection import ShuffleSplit -from skopt import load from csrank.tuning import ParameterOptimizer from csrank.util import create_dir_recursively, configure_logging_numpy_keras, \ duration_tillnow, microsec_to_time, get_mean_loss_for_dictionary, \ get_loss_for_array from experiments.util import get_ranker_and_dataset_functions, get_ranker_parameters, ERROR_OUTPUT_STRING, \ - lp_metric_dict, get_duration_microsecond, get_applicable_ranker_dataset, get_dataset_str, log_test_train_data + lp_metric_dict, get_duration_microsecond, get_applicable_ranker_dataset, get_dataset_str, log_test_train_data, \ + get_optimizer DIR_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) 
@@ -96,31 +96,7 @@ create_dir_recursively(optimizer_path, True) create_dir_recursively(pred_file, is_file_path=True) - logger.info('Retrieving model stored at: {}'.format(optimizer_path)) - try: - optimizer = load(optimizer_path) - logger.info('Loading model stored at: {}'.format(optimizer_path)) - - except KeyError: - logger.error('Cannot open the file {}'.format(optimizer_path)) - optimizer = None - - except ValueError: - logger.error('Cannot open the file {}'.format(optimizer_path)) - optimizer = None - except FileNotFoundError: - logger.error('No such file or directory: {}'.format(optimizer_path)) - optimizer = None - if optimizer is not None: - finished_iterations = np.array(optimizer.yi).shape[0] - if finished_iterations == 0: - optimizer = None - logger.info('Optimizer did not finish any iterations so setting optimizer to null') - else: - n_iter = n_iter - finished_iterations - if n_iter < 0: - n_iter = 0 - logger.info('Iterations already done: {} and running iterations {}'.format(finished_iterations, n_iter)) + optimizer, n_iter = get_optimizer(logger, optimizer_path, n_iter) if not (n_iter == 0 and os.path.isfile(pred_file)): optimizer_fit_params = {'n_iter': n_iter, 'cv_iter': cv, 'optimizer': optimizer, "parameters_ranges": parameter_ranges, 'acq_func': 'EIps'} diff --git a/experiments/util.py b/experiments/util.py index 73316b8f..e52aca7f 100644 --- a/experiments/util.py +++ b/experiments/util.py @@ -1,9 +1,11 @@ import re from collections import OrderedDict +import numpy as np from keras.losses import categorical_crossentropy from keras.metrics import categorical_accuracy from keras.optimizers import SGD +from skopt import load from csrank.callbacks import DebugOutput, LRScheduler from csrank.constants import OBJECT_RANKING, LABEL_RANKING, DYAD_RANKING, DISCRETE_CHOICE, BATCH_SIZE, LEARNING_RATE, \ @@ -185,3 +187,32 @@ def log_test_train_data(X_train, X_test, logger): n_instances, n_objects, n_features = X_train.shape logger.info("Train Set instances {} objects {} features {}".format(n_instances, n_objects, n_features)) return n_features, n_objects + + +def get_optimizer(logger, optimizer_path, n_iter): + logger.info('Retrieving model stored at: {}'.format(optimizer_path)) + try: + optimizer = load(optimizer_path) + logger.info('Loading model stored at: {}'.format(optimizer_path)) + + except KeyError: + logger.error('Cannot open the file {}'.format(optimizer_path)) + optimizer = None + + except ValueError: + logger.error('Cannot open the file {}'.format(optimizer_path)) + optimizer = None + except FileNotFoundError: + logger.error('No such file or directory: {}'.format(optimizer_path)) + optimizer = None + if optimizer is not None: + finished_iterations = np.array(optimizer.yi).shape[0] + if finished_iterations == 0: + optimizer = None + logger.info('Optimizer did not finish any iterations so setting optimizer to null') + else: + n_iter = n_iter - finished_iterations + if n_iter < 0: + n_iter = 0 + logger.info('Iterations already done: {} and running iterations {}'.format(finished_iterations, n_iter)) + return optimizer, n_iter \ No newline at end of file diff --git a/notebooks/GeneralizationOfExperiments.ipynb b/notebooks/GeneralizationOfExperiments.ipynb index 6057f1e1..1b8ccdaa 100644 --- a/notebooks/GeneralizationOfExperiments.ipynb +++ b/notebooks/GeneralizationOfExperiments.ipynb @@ -3,17 +3,15 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - 
"/home/prithag/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", - " from ._conv import register_converters as _register_converters\n", - "Using TensorFlow backend.\n", - "/home/prithag/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n", - " return f(*args, **kwds)\n" + "Using TensorFlow backend.\n" ] } ], @@ -29,7 +27,8 @@ "from csrank.callbacks import DebugOutput\n", "from csrank.metrics import zero_one_rank_loss_for_scores\n", "from csrank.util import rename_file_if_exist, configure_logging_numpy_keras, get_tensor_value\n", - "from csrank.dataset_reader import SyntheticDatasetGenerator" + "from csrank.dataset_reader import SyntheticDatasetGenerator\n", + "from keras.callbacks import History" ] }, { @@ -48,7 +47,10 @@ "outputs": [], "source": [ "MODEL = \"aModel\"\n", - "ERROR_OUTPUT_STRING = 'Out of sample error {} : {} for n_objects {}'" + "ERROR_OUTPUT_STRING = 'Out of sample error {} : {} for n_objects {}'\n", + "his = History()\n", + "his.__name__ = \"History\"\n", + "objects = \"Objects\"" ] }, { @@ -67,8 +69,10 @@ "outputs": [], "source": [ "def generate_dataset(n_objects=5, random_state=42):\n", - " parameters = {\"n_features\": 2, \"n_objects\": n_objects, \"n_train_instances\": 10000, \"n_test_instances\": 100000,\n", - " \"dataset_type\": \"medoid\",\"random_state\":random_state}\n", + " parameters = {\"n_features\": 2, \"n_objects\": n_objects, \n", + " \"n_train_instances\": 10000, \"n_test_instances\": 100000,\n", + " \"dataset_type\": \"medoid\",\n", + " \"random_state\":random_state}\n", " generator = SyntheticDatasetGenerator(**parameters)\n", " return generator.get_single_train_test_split()" ] @@ -89,9 +93,9 @@ "outputs": [], "source": [ "def get_evaluation_result(gor, X_train, Y_train, epochs):\n", - " gor.fit(X_train, Y_train, log_callbacks=[DebugOutput(delta=10)], verbose=False, epochs=epochs)\n", + " gor.fit(X_train, Y_train, log_callbacks=[DebugOutput(delta=10), his], verbose=False, epochs=epochs)\n", " eval_results = {}\n", - " for n_objects in np.arange(3, 15):\n", + " for n_objects in np.arange(3, 20):\n", " _, _, X_test, Y_test = generate_dataset(n_objects=n_objects, random_state=seed + n_objects * 5)\n", " y_pred_scores = gor.predict_scores(X_test, batch_size=X_test.shape[0])\n", " metric_loss = get_tensor_value(zero_one_rank_loss_for_scores(Y_test, y_pred_scores))\n", @@ -144,7 +148,7 @@ "X_train, Y_train, _, _ = generate_dataset(n_objects=n_objects, random_state=seed)\n", "n_instances, n_objects, n_features = X_train.shape\n", "\n", - "epochs = 50\n", + "epochs = 700\n", "params = {\"n_objects\": n_objects, \n", " \"n_features\": n_features, \n", " \"n_object_features\": n_features, \n", @@ -161,19 +165,16 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ "logger.info(\"############################# With Best Parameters FETA ##############################\")\n", - "best_point = [1, 16, 4.2054947998521569e-05, 2.6263496065703243e-10, 777]\n", "gor = FETANetwork(**params)\n", - "gor.set_tunable_parameter_ranges({})\n", - "gor.set_tunable_parameters(best_point)\n", "result = get_evaluation_result(gor, X_train, Y_train, 
epochs)\n", - "result[MODEL] = \"FETARanker\"\n", + "result[MODEL] = \"FETARankerDefault\"\n", "rows_list.append(result)" ] }, @@ -188,73 +189,1265 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ "from csrank.losses import smooth_rank_loss\n", "logger.info(\"############################# With Best Parameters FATE ##############################\")\n", - "best_point = [1003, 0.0002908115170179143, 16, 132, 6, 247, 3.4195015492773324e-05]\n", "gor = FATEObjectRanker(**params)\n", - "gor.set_tunable_parameter_ranges({})\n", - "gor.set_tunable_parameters(best_point)\n", "result = get_evaluation_result(gor, X_train, Y_train, epochs)\n", - "result[MODEL] = \"FATERanker\"\n", + "result[MODEL] = \"FATERankerDefault\"\n", "rows_list.append(result)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Save the results into a dataframe" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 70, "metadata": { - "collapsed": true + "collapsed": false }, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
+       "[HTML rendering of the dataframe omitted; the same values appear in the text/plain output below]"
" + ], + "text/plain": [ + "aModel FATERankerDefault\n", + "3 0.218561\n", + "4 0.947871\n", + "5 0.692061\n", + "6 0.669744\n", + "7 0.932732\n", + "8 0.804914\n", + "9 0.941135\n", + "10 0.964858\n", + "11 0.970023\n", + "12 0.090049\n", + "13 0.503294\n", + "14 0.228598\n", + "15 0.287196\n", + "16 0.000157\n", + "17 0.060977\n", + "18 0.381702\n", + "19 0.617856" + ] + }, + "execution_count": 70, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "df = pd.DataFrame(rows_list)\n", + "df = df.set_index(MODEL).T\n", + "cols = list(df.columns.values)\n", "df" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the there is an eisting csv file saved then load it and add the new columns containing the results to it" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": { - "collapsed": true + "collapsed": false }, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
+       "[HTML rendering of the dataframe omitted; the same values appear in the text/plain output below]"
" + ], + "text/plain": [ + " Objects FETARankerBestParams FETARanker FATERanker \\\n", + "0 3 0.198587 0.194951 0.082433 \n", + "1 4 0.226475 0.228402 0.141888 \n", + "2 5 0.218070 0.224131 0.109125 \n", + "3 6 0.215545 0.227914 0.133907 \n", + "4 7 0.211587 0.228970 0.134549 \n", + "5 8 0.207721 0.230498 0.142830 \n", + "6 9 0.203656 0.230520 0.147908 \n", + "7 10 0.200119 0.230697 0.153753 \n", + "8 11 0.196479 0.230296 0.157704 \n", + "9 12 0.193027 0.230299 0.161388 \n", + "10 13 0.189883 0.229609 0.164661 \n", + "11 14 0.186184 0.229495 0.167531 \n", + "12 15 0.182840 0.227923 0.170489 \n", + "13 16 0.179923 0.227556 0.172129 \n", + "14 17 0.177795 0.226943 0.174690 \n", + "15 18 0.174808 0.226251 0.176415 \n", + "16 19 0.173107 0.225330 0.178538 \n", + "\n", + " FATERanker10SetLayers FATERanker8SetLayers \n", + "0 0.065285 0.094951 \n", + "1 0.120779 0.125498 \n", + "2 0.103804 0.107595 \n", + "3 0.109337 0.107254 \n", + "4 0.110699 0.107393 \n", + "5 0.112166 0.107801 \n", + "6 0.113533 0.110187 \n", + "7 0.115446 0.112828 \n", + "8 0.116400 0.115010 \n", + "9 0.116445 0.116744 \n", + "10 0.117337 0.119210 \n", + "11 0.117598 0.121214 \n", + "12 0.117623 0.123238 \n", + "13 0.117890 0.125474 \n", + "14 0.118051 0.127573 \n", + "15 0.117521 0.129286 \n", + "16 0.118120 0.131830 " + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "cols = list(df.columns.values)\n", - "cols = cols[-7:] + cols[:-7]\n", - "MODEL = \"aModel\"\n", - "for x in ['Unnamed: 0', 'aModel']:\n", - " if x in cols:\n", - " cols.remove(x)\n", - " cols.insert(0, x)\n", - "df = df[cols]\n", - "#del df['Unnamed: 0']\n", - "df = df.set_index(MODEL).T" + "if not os.path.isfile(df_path):\n", + " dataFrame = df\n", + "else:\n", + " dataFrame = pd.read_csv(df_path, index_col=0)\n", + " dataFrame = dataFrame.append(df, ignore_index=True)\n", + "dataFrame" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Save the dataframe to given file path" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 71, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ - "df.to_csv(df_path)" + "dataFrame.to_csv(df_path, index=objects)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot the results of the zero one rank accuracy" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": { - "collapsed": true + "collapsed": false }, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. 