-
Notifications
You must be signed in to change notification settings - Fork 96
/
atwork_subtour_destination.py
265 lines (202 loc) · 9.06 KB
/
atwork_subtour_destination.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
# ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
import logging
import pandas as pd
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import pipeline
from activitysim.core import simulate
from activitysim.core import inject
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core.interaction_sample import interaction_sample
from activitysim.core.util import assign_in_place
from .util import logsums as logsum
from activitysim.abm.tables.size_terms import tour_destination_size_terms
logger = logging.getLogger(__name__)
DUMP = False
def atwork_subtour_destination_sample(
        tours,
        persons_merged,
        skim_dict,
        destination_size_terms,
        chunk_size, trace_hh_id):
    """
    Build a sampled alternative-destination table for atwork subtours.

    For each atwork subtour, draws SAMPLE_SIZE candidate destination TAZs
    (with pick counts) via interaction_sample against the destination size
    terms, using the sample spec in atwork_subtour_destination_sample.csv.

    Parameters
    ----------
    tours : pandas.DataFrame
        atwork subtours (choosers); must have a person_id column
    persons_merged : pandas.DataFrame
        persons table indexed by person_id, merged onto tours
    skim_dict : skim dict
        used to wrap workplace_taz (chooser) -> TAZ (alternative) skims
    destination_size_terms : pandas.DataFrame
        alternatives, indexed by TAZ
    chunk_size : int
    trace_hh_id : int or None

    Returns
    -------
    choices : pandas.DataFrame
        one row per (tour, sampled dest) with pick_count etc., plus a
        person_id column carried over for later merges
    """
    # FIX: this label (and the log message below) previously read
    # 'atwork_subtour_location_sample', inconsistent with the function name
    # and with every other trace label in this model ('atwork_subtour_destination_*').
    trace_label = 'atwork_subtour_destination_sample'
    model_settings = config.read_model_settings('atwork_subtour_destination.yaml')
    model_spec = simulate.read_model_spec(file_name='atwork_subtour_destination_sample.csv')

    # merge persons into tours
    choosers = pd.merge(tours, persons_merged, left_on='person_id', right_index=True)

    # FIXME - MEMORY HACK - only include columns actually used in spec
    # NOTE(review): assumes SIMULATE_CHOOSER_COLUMNS includes person_id,
    # which is read again below — confirm against the yaml settings
    chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
    choosers = choosers[chooser_columns]

    constants = config.get_model_constants(model_settings)
    sample_size = model_settings["SAMPLE_SIZE"]
    alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]

    logger.info("Running atwork_subtour_destination_sample with %d tours", len(choosers))

    # create wrapper with keys for this lookup - in this case there is a workplace_taz
    # in the choosers and a TAZ in the alternatives which get merged during interaction
    # the skims will be available under the name "skims" for any @ expressions
    skims = skim_dict.wrap('workplace_taz', 'TAZ')

    locals_d = {
        'skims': skims
    }
    if constants is not None:
        locals_d.update(constants)

    choices = interaction_sample(
        choosers,
        alternatives=destination_size_terms,
        sample_size=sample_size,
        alt_col_name=alt_dest_col_name,
        spec=model_spec,
        skims=skims,
        locals_d=locals_d,
        chunk_size=chunk_size,
        trace_label=trace_label)

    # remember person_id in chosen alts so we can merge with persons in subsequent steps
    choices['person_id'] = choosers.person_id

    return choices
def atwork_subtour_destination_logsums(
        persons_merged,
        destination_sample,
        skim_dict, skim_stack,
        chunk_size, trace_hh_id):
    """
    Add a 'mode_choice_logsum' column to the existing destination_sample table.

    The logsum for each (person, sampled dest_taz) row is computed by running
    the tour mode choice model over the sample and taking the logsum of the
    resulting utilities. destination_sample is modified in place and returned.

    Example resulting table::

        person_id  dest_TAZ  rand            pick_count  logsum (added)
        23750      14        0.565502716034  4           1.85659498857
        23750      16        0.711135838871  6           1.92315598631
        ...
        23751      12        0.408038878552  1           2.40612135416
        23751      14        0.972732479292  2           1.44009018355
    """
    trace_label = 'atwork_subtour_destination_logsums'

    model_settings = config.read_model_settings('atwork_subtour_destination.yaml')
    logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])

    # FIXME - MEMORY HACK - only include columns actually used in spec
    persons_merged = logsum.filter_chooser_columns(persons_merged, logsum_settings, model_settings)

    # left-merge person attributes onto each sampled (person, dest) row
    sample_choosers = pd.merge(
        destination_sample,
        persons_merged,
        left_on='person_id',
        right_index=True,
        how="left")

    logger.info("Running %s with %s rows", trace_label, len(sample_choosers))

    tracing.dump_df(DUMP, persons_merged, trace_label, 'persons_merged')
    tracing.dump_df(DUMP, sample_choosers, trace_label, 'choosers')

    destination_sample['mode_choice_logsum'] = logsum.compute_logsums(
        sample_choosers,
        'atwork',
        logsum_settings, model_settings,
        skim_dict, skim_stack,
        chunk_size, trace_hh_id,
        trace_label)

    return destination_sample
def atwork_subtour_destination_simulate(
        subtours,
        persons_merged,
        destination_sample,
        skim_dict,
        destination_size_terms,
        chunk_size, trace_hh_id):
    """
    atwork_subtour_destination model on atwork_subtour_destination_sample
    annotated with mode_choice logsum to select a destination from sample alternatives
    """
    trace_label = 'atwork_subtour_destination_simulate'

    model_settings = config.read_model_settings('atwork_subtour_destination.yaml')
    model_spec = simulate.read_model_spec(file_name='atwork_subtour_destination.csv')

    # interaction_sample_simulate insists choosers appear in same order as alts
    # merge persons into the (sorted) subtours to form the chooser table
    choosers = pd.merge(
        subtours.sort_index(),
        persons_merged,
        left_on='person_id', right_index=True)

    # FIXME - MEMORY HACK - only include columns actually used in spec
    choosers = choosers[model_settings['SIMULATE_CHOOSER_COLUMNS']]

    alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
    chooser_col_name = 'workplace_taz'

    # alternatives are pre-sampled and annotated with logsums and pick_count
    # but we have to merge destination_size_terms columns into alt sample list
    alternatives = pd.merge(
        destination_sample, destination_size_terms,
        left_on=alt_dest_col_name, right_index=True, how="left")

    tracing.dump_df(DUMP, alternatives, trace_label, 'alternatives')

    constants = config.get_model_constants(model_settings)

    logger.info("Running atwork_subtour_destination_simulate with %d persons", len(choosers))

    # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers
    # and a TAZ in the alternatives which get merged during interaction
    # the skims will be available under the name "skims" for any @ expressions
    skims = skim_dict.wrap(chooser_col_name, alt_dest_col_name)

    locals_d = {'skims': skims}
    if constants is not None:
        locals_d.update(constants)

    tracing.dump_df(DUMP, choosers, trace_label, 'choosers')

    return interaction_sample_simulate(
        choosers,
        alternatives,
        spec=model_spec,
        choice_column=alt_dest_col_name,
        skims=skims,
        locals_d=locals_d,
        chunk_size=chunk_size,
        trace_label=trace_label,
        trace_choice_name='workplace_location')
@inject.step()
def atwork_subtour_destination(
        tours,
        persons_merged,
        skim_dict,
        skim_stack,
        land_use, size_terms,
        chunk_size, trace_hh_id):
    """
    Pipeline step: choose a destination TAZ for each atwork subtour.

    Runs the three-stage destination model (sample, logsums, simulate) on the
    atwork subtours, writes the chosen destination back onto the tours table,
    and replaces the 'tours' pipeline table.
    """

    persons_merged = persons_merged.to_frame()

    tours = tours.to_frame()
    subtours = tours[tours.tour_category == 'atwork']

    # - if no atwork subtours
    # FIX: previously tested tours.shape[0], so the early exit only fired when
    # there were no tours at all; with tours present but zero atwork subtours
    # the downstream models would run on an empty chooser table.
    if subtours.shape[0] == 0:
        tracing.no_results('atwork_subtour_destination')
        return

    destination_size_terms = tour_destination_size_terms(land_use, size_terms, 'atwork')

    destination_sample = atwork_subtour_destination_sample(
        subtours,
        persons_merged,
        skim_dict,
        destination_size_terms,
        chunk_size, trace_hh_id)

    destination_sample = atwork_subtour_destination_logsums(
        persons_merged,
        destination_sample,
        skim_dict, skim_stack,
        chunk_size, trace_hh_id)

    choices = atwork_subtour_destination_simulate(
        subtours,
        persons_merged,
        destination_sample,
        skim_dict,
        destination_size_terms,
        chunk_size, trace_hh_id)

    subtours['destination'] = choices

    # write chosen destinations back onto the full tours table and checkpoint it
    assign_in_place(tours, subtours[['destination']])
    pipeline.replace_table("tours", tours)

    tracing.print_summary('subtour destination', subtours.destination, describe=True)

    if trace_hh_id:
        tracing.trace_df(tours,
                         label='atwork_subtour_destination',
                         columns=['destination'])