Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
260 changes: 200 additions & 60 deletions src/ess/reflectometry/gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,19 @@
import pandas as pd
import plopp as pp
import scipp as sc
from ipydatagrid import DataGrid, VegaExpr
from ipydatagrid import DataGrid, TextRenderer, VegaExpr
from IPython.display import display
from ipytree import Node, Tree
from traitlets import Bool

from ess import amor
from ess.amor.types import ChopperPhase
from ess.reflectometry.figures import wavelength_z_figure
from ess.reflectometry.types import (
Filename,
QBins,
ReducedReference,
ReducibleData,
ReferenceRun,
ReflectivityOverQ,
SampleRun,
Expand All @@ -30,8 +34,95 @@
from ess.reflectometry.workflow import with_filenames


class NexusExplorer:
def __init__(self, runs_table: DataGrid, run_to_filepath: Callable[[str], str]):
class DetectorView(widgets.HBox):
is_active_tab = Bool(False).tag(sync=True)

    def __init__(
        self, runs_table: DataGrid, run_to_filepath: Callable[[str], str], **kwargs
    ):
        """Build the detector-view tab.

        Parameters
        ----------
        runs_table:
            Shared grid of runs; the currently selected row determines which
            run is loaded and displayed.
        run_to_filepath:
            Maps a run identifier from the table to a file path.
        **kwargs:
            Forwarded to the ``widgets.HBox`` base constructor.
        """
        super().__init__([], **kwargs)
        self.runs_table = runs_table
        self.run_to_filepath = run_to_filepath
        # Holds the current figure; its children are replaced on every run.
        self.plot_log = widgets.VBox([])
        # Hidden by default; made visible while the workflow is computing.
        self.working_label = widgets.Label(
            "...working", layout=widgets.Layout(display='none')
        )
        self.children = (
            widgets.VBox(
                [
                    widgets.Label("Runs Table"),
                    self.runs_table,
                ],
                layout={"width": "35%"},
            ),
            widgets.VBox(
                [
                    widgets.HBox(
                        [
                            widgets.Label("Wavelength z-index counts distribution"),
                            self.working_label,
                        ]
                    ),
                    self.plot_log,
                ],
                layout={"width": "60%"},
            ),
        )

        def run_when_selected_row_changes(change):
            # Only recompute when the selected row index ('r1') actually
            # changed, to avoid redundant workflow runs.
            if not change['old'] or change['old'][0]['r1'] != change['new'][0]['r1']:
                self.run_workflow()

        self.runs_table.observe(run_when_selected_row_changes, names='selections')

        def run_when_active_tab(change):
            # Refresh the figure when this tab becomes the active one.
            if change['new']:
                self.run_workflow()

        self.observe(run_when_active_tab, 'is_active_tab')

def run_workflow(self):
selections = self.runs_table.selections
if not self.is_active_tab or not selections:
return

self.working_label.layout.display = ''
row_idx = selections[0]['r1']
run = self.runs_table.data.iloc[row_idx]['Run']

workflow = amor.AmorWorkflow()
workflow[SampleSize[SampleRun]] = sc.scalar(10, unit='mm')
workflow[SampleSize[ReferenceRun]] = sc.scalar(10, unit='mm')

workflow[ChopperPhase[ReferenceRun]] = sc.scalar(7.5, unit='deg')
workflow[ChopperPhase[SampleRun]] = sc.scalar(7.5, unit='deg')

workflow[YIndexLimits] = (0, 64)
workflow[ZIndexLimits] = (0, 16 * 32)
workflow[WavelengthBins] = sc.geomspace(
'wavelength',
2,
13.5,
2001,
unit='angstrom',
)
Comment on lines +101 to +108
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess these are not taking into account what is set in the tab where you have Z and L parameters in a table.
Should it be in sync?

Also, 2000 wavelength bins is quite a lot, we could make the figures lighter by reducing that?

Copy link
Contributor Author

@jokasimr jokasimr Apr 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess these are not taking into account what is set in the tab where you have Z and L parameters in a table.
Should it be in sync?

Yes that's right. No I don't have the impression they should be in sync. This view is independent from the reduction settings.

Also, 2000 wavelength bins is quite a lot, we could make the figures lighter by reducing that?

I think 2000 is a good setting for now. In the future this might be a user defined setting. For now I think we shouldn't tweak this before getting some feedback from the ids.

workflow[Filename[SampleRun]] = self.run_to_filepath(run)
da = workflow.compute(ReducibleData[SampleRun])
da.bins.data[...] = sc.scalar(1.0, variance=1.0, unit=da.bins.unit)
da.bins.unit = 'counts'
Comment on lines +111 to +112
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not sure why we need to do this here? Can we not histogram the data in ReducibleData ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure what you mean here. If you're asking why we're setting the weights to 1, it's because we don't want any weights applied, we just want the detector counts.

da.masks.clear()
da.bins.masks.clear()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need to remove masks? Is the workflow setting masks somewhere?
I guess we have no interface to add masks in the GUI right now.
If there are masks on the data, they should be visible on the figure, no?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes the workflow is adding masks to the data.

In this case we don't want to view the masks, we only want the raw count data.
That's also the reason why the counts are set to 1.0, we don't want any corrections applied.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you just want raw counts, it sounds like we should compute another result from the workflow than the ReducibleData[SampleRun] (e.g. RawDetectorData)?

Computing something and then settings values to 1 seems strange to me. I still don't understand why you want to do that above...
Is it because you need the coordinates to compute the wavelengths to make the figure that you are using ReducibleData?
Maybe it's a sign that there should be an additional step in the workflow that splits this up, so you can compute a raw data with coordinates before masks and corrections are added?
Not sure if this would work with the current order in which operations are applied.

Copy link
Contributor Author

@jokasimr jokasimr Apr 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you just want raw counts, it sounds like we should compute another result from the workflow than the

Problem is that RawDetectorData doesn't have wavelengths.

Is it because you need the coordinates to compute the wavelengths to make the figure that you are using ReducibleData?

Yes

Maybe it's a sign that there should be an additional step in the workflow that splits this up, so you can compute a raw data with coordinates before masks and corrections are added?

We could do that. But then you could say, what if I just want those coordinates and not those, what if I want those corrections but not those, what if I want those masks but not those, etc, and we would need to define a whole range of "domain types" for each of the combinations. That said, maybe this particular case is common enough that we do want to introduce a separate type that can be used here.

Not sure if this would work with the current order in which operations are applied.

It would work 👍

I agree it's a bit odd to set the weights to 1 and clear all masks. But on the other hand, concretely, what is the drawback? (besides performance, which is probably not that relevant)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The drawback is that the GUI is starting to feel (at least to me) like it contains quite a few hacks.
However, because I don't want to get too deeply involved in the GUI side of things, I'll let you decide what you want to do.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The drawback is that the GUI is starting to feel (at least to me) like it contains quite a few hacks.

That's a concern we should take seriously and try to address.

If the issues you're referring to are not directly related to the changes in this PR I think we should address them in separate PRs. If you have concrete problems or changes in mind feel free to open issues to fix those. If your concerns are too vague to be directly converted to issues, let's sit down and have a chat about them and see if we can figure something out.

Like I said above, I'm not against adding a DetectorDataWithCoords domain type. I just don't see the advantage, and I see that adding more domain types complicates the package and makes it more confusing for users, so if possible I'd prefer to avoid it.

p = wavelength_z_figure(da, wavelength_bins=workflow.compute(WavelengthBins))
self.plot_log.children = (p,)
self.working_label.layout.display = 'none'


class NexusExplorer(widgets.VBox):
def __init__(
self, runs_table: DataGrid, run_to_filepath: Callable[[str], str], **kwargs
):
kwargs.setdefault('layout', {"width": "100%"})
super().__init__(**kwargs)
self.runs_table = runs_table
self.run_to_filepath = run_to_filepath

Expand All @@ -58,47 +149,44 @@ def __init__(self, runs_table: DataGrid, run_to_filepath: Callable[[str], str]):
self.nexus_tree.observe(self.on_tree_select, names='selected_nodes')

# Create the Nexus Explorer tab content
self.widget = widgets.VBox(
[
widgets.Label("Nexus Explorer"),
widgets.HBox(
[
widgets.VBox(
[
widgets.Label("Runs Table"),
self.runs_table,
],
layout={"width": "30%"},
),
widgets.VBox(
[
widgets.Label("File Structure"),
widgets.VBox(
[self.nexus_tree],
layout=widgets.Layout(
width='100%',
height='600px',
min_height='100px', # Min resize height
max_height='1000px', # Max resize height
overflow_y='scroll',
border='1px solid lightgray',
resize='vertical', # Add resize handle
),
self.children = (
widgets.Label("Nexus Explorer"),
widgets.HBox(
[
widgets.VBox(
[
widgets.Label("Runs Table"),
self.runs_table,
],
layout={"width": "30%"},
),
widgets.VBox(
[
widgets.Label("File Structure"),
widgets.VBox(
[self.nexus_tree],
layout=widgets.Layout(
width='100%',
height='600px',
min_height='100px', # Min resize height
max_height='1000px', # Max resize height
overflow_y='scroll',
border='1px solid lightgray',
resize='vertical', # Add resize handle
),
],
layout={"width": "35%"},
),
widgets.VBox(
[
widgets.Label("Content"),
self.nexus_content,
],
layout={"width": "35%"},
),
]
),
],
layout={"width": "100%"},
),
],
layout={"width": "35%"},
),
widgets.VBox(
[
widgets.Label("Content"),
self.nexus_content,
],
layout={"width": "35%"},
),
]
),
)

def create_hdf5_tree(self, filepath):
Expand Down Expand Up @@ -241,6 +329,8 @@ def set_table_colors(self, table):
if self.get_row_key(row) == row_key:
expr += template.format(i=i, reduced_color="'lightgreen'")
expr += "default_value"
for renderer in table.renderers.values():
renderer.background_color = VegaExpr(expr)
table.default_renderer.background_color = VegaExpr(expr)

@staticmethod
Expand All @@ -254,6 +344,18 @@ def set_result(self, metadata, result):
self.set_table_colors(self.custom_reduction_table)
self.set_table_colors(self.reference_table)

def get_renderers_for_reduction_table(self):
return {}

def get_renderers_for_reference_table(self):
return {}

def get_renderers_for_custom_reduction_table(self):
return {}

def get_renderers_for_runs_table(self):
return {}

def log(self, message):
out = widgets.Output()
with out:
Expand Down Expand Up @@ -306,27 +408,31 @@ def __init__(self):
auto_fit_columns=True,
column_visibility={"key": False},
selection_mode="cell",
renderers=self.get_renderers_for_runs_table(),
)
self.reduction_table = DataGrid(
pd.DataFrame([]),
editable=True,
auto_fit_columns=True,
column_visibility={"key": False},
selection_mode="cell",
renderers=self.get_renderers_for_reduction_table(),
)
self.reference_table = DataGrid(
pd.DataFrame([]),
editable=True,
auto_fit_columns=True,
column_visibility={"key": False},
selection_mode="cell",
renderers=self.get_renderers_for_reference_table(),
)
self.custom_reduction_table = DataGrid(
pd.DataFrame([]),
editable=True,
auto_fit_columns=True,
column_visibility={"key": False},
selection_mode="cell",
renderers=self.get_renderers_for_custom_reduction_table(),
)

self.runs_table.on_cell_change(self.sync)
Expand Down Expand Up @@ -515,9 +621,17 @@ def delete_row(_):
tab_settings,
tab_log,
]
self.tabs.set_title(0, "Reduce")
self.tabs.set_title(1, "Settings")
self.tabs.set_title(2, "Log")
self.tabs.titles = ["Reduce", "Settings", "Log"]

def on_tab_change(change):
old = self.tabs.children[change['old']]
new = self.tabs.children[change['new']]
if hasattr(old, 'is_active_tab'):
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess you could even leave the hasattr check out, just setting the attribute on the old object, even if it doesn't exist to begin with, should be allowed?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What's the benefit?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Simpler code?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the simplification is removing the hasattr checks, at the cost of adding attributes to random objects, that's not worth it in my opinion.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wouldn't call them completely 'random', but it was just a suggestion. You can take it or leave it.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry that came off as dismissive, I didn't mean it like that. I appreciate your suggestions and the effort you put into the review.

old.is_active_tab = False
if hasattr(new, 'is_active_tab'):
new.is_active_tab = True

self.tabs.observe(on_tab_change, names='selected_index')

self.main = widgets.VBox(
[
Expand Down Expand Up @@ -555,8 +669,16 @@ class AmorBatchReductionGUI(ReflectometryBatchReductionGUI):
def __init__(self):
super().__init__()
self.nexus_explorer = NexusExplorer(self.runs_table, self.get_filepath_from_run)
self.tabs.children = (*self.tabs.children, self.nexus_explorer.widget)
self.tabs.set_title(len(self.tabs.children) - 1, "Nexus Explorer")
self.detector_display = DetectorView(
self.runs_table, self.get_filepath_from_run
)
self.tabs.children = (
*self.tabs.children,
self.nexus_explorer,
self.detector_display,
)
# Empty titles are automatically added for the new children
self.tabs.titles = [*self.tabs.titles[:-2], "Nexus Explorer", "Detector View"]

def read_meta_data(self, path):
with h5py.File(path) as f:
Expand All @@ -566,6 +688,21 @@ def read_meta_data(self, path):
"Angle": f['entry1']['Amor']['master_parameters']['mu']['value'][0, 0],
}

def get_renderers_for_reduction_table(self):
return {
'Angle': TextRenderer(text_value=VegaExpr("format(cell.value, ',.3f')"))
}

def get_renderers_for_custom_reduction_table(self):
return {
'Angle': TextRenderer(text_value=VegaExpr("format(cell.value, ',.3f')"))
}

def get_renderers_for_runs_table(self):
return {
'Angle': TextRenderer(text_value=VegaExpr("format(cell.value, ',.3f')"))
}

@staticmethod
def _merge_old_and_new_state(new, old, on, how='left'):
old = old if on in old else old.assign(**{on: None})
Expand Down Expand Up @@ -623,22 +760,24 @@ def sync_reference_table(self, db):
df = db["user_runs"]
df = (
df[df["Sample"] == "sm5"][~df["Exclude"]]
.groupby(["Sample"], as_index=False)
.groupby(["Sample", "Angle"], as_index=False)
.agg(Runs=("Run", tuple))
.sort_values(by="Sample")
.sort_values(["Sample", "Angle"])
)
# We don't want changes to Sample
# in the user_reference table to persist
user_reference = db['user_reference'].drop(columns=["Sample"], errors='ignore')
user_reference = db['user_reference'].drop(
columns=["Sample", "Angle"], errors='ignore'
)
df = self._merge_old_and_new_state(df, user_reference, on='Runs')
self._setdefault(df, "Ymin", 17)
self._setdefault(df, "Ymax", 47)
self._setdefault(df, "Zmin", 60)
self._setdefault(df, "Zmax", 380)
self._setdefault(df, "Lmin", 3.0)
self._setdefault(df, "Lmax", 12.5)
df = self._ordercolumns(df, 'Sample', 'Runs')
return df.sort_values(by="Sample")
df = self._ordercolumns(df, 'Sample', 'Angle', 'Runs')
return df.sort_values(["Sample", "Angle"])

def sync_custom_reduction_table(self):
df = self.custom_reduction_table.data.copy()
Expand All @@ -656,17 +795,17 @@ def display_results(self):
if len(df) == 0:
self.log('There was nothing to display')
return
try:
for _ in range(2):
results = [
next(v for (m, _), v in self.results.items() if m == key)
for key in (tuple(row) for _, row in df.iterrows())
self.results[key]
for _, row in df.iterrows()
if (key := self.get_row_key(row)) in self.results
]
except StopIteration:
# No results were found for the selected row
if len(results) == len(df):
break
# No results were found for some of the selected rows.
# It hasn't been computed yet, so compute it and try again.
self.run_workflow()
self.display_results()
return

def get_unique_names(df):
# Create labels with Sample name and runs
Expand Down Expand Up @@ -694,6 +833,7 @@ def get_unique_names(df):
results,
norm='log',
figsize=(12, 6),
vmin=1e-6,
)
]
)
Expand Down
Loading