3.2.2_assign_primary_zone_work.py
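"""Assign a primary work zone to each person in the activity chains.

The script loads the feasible work zones produced in 3.1_assign_possible_zones.py
together with 2021 census commuting (origin-destination) matrices, selects one work
zone per person by solving the optimization in WorkZoneAssignment so that assigned
flows track the observed flows, evaluates the result with RMSE statistics and plots,
and saves the updated activity chains to data/interim/assigning/.
"""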

import os

import geopandas as gpd
import pandas as pd

import acbm
from acbm.assigning.plots import (
    plot_workzone_assignment_heatmap,
    plot_workzone_assignment_line,
)
from acbm.assigning.select_zone_work import WorkZoneAssignment
from acbm.assigning.utils import (
    activity_chains_for_assignment,
    cols_for_assignment_work,
    filter_matrix_to_boundary,
)
from acbm.cli import acbm_cli
from acbm.config import load_config
from acbm.logger_config import assigning_primary_zones_logger as logger
from acbm.preprocessing import add_locations_to_activity_chains
from acbm.utils import calculate_rmse


@acbm_cli
def main(config_file):
    config = load_config(config_file)
    config.init_rng()

    #### LOAD DATA ####

    # --- Possible zones for each activity (calculated in 3.1_assign_possible_zones.py)
    possible_zones_work = pd.read_pickle(
        acbm.root_path / "data/interim/assigning/possible_zones_work.pkl"
    )
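
    # possible_zones_work is expected to map each person / work activity to the set of
    # zones they could plausibly work in (as computed in 3.1_assign_possible_zones.py).
    # The illustrative shape below is an assumption, not taken from that script:
    #   {person_id: {zone_id_1, zone_id_2, ...}, ...}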

    # --- boundaries
    logger.info("Loading study area boundaries")

    boundaries = gpd.read_file(
        acbm.root_path / "data/external/boundaries/study_area_zones.geojson"
    )

    logger.info("Study area boundaries loaded")

    # osm POI data
    osm_data_gdf = pd.read_pickle(
        acbm.root_path / "data/interim/assigning/osm_poi_with_zones.pkl"
    )
    # Convert the DataFrame into a GeoDataFrame, and assign a coordinate reference system (CRS)
    osm_data_gdf = gpd.GeoDataFrame(osm_data_gdf, geometry="geometry", crs="EPSG:4326")

    # --- Activity chains
    activity_chains = activity_chains_for_assignment(cols_for_assignment_work())
    activity_chains = add_locations_to_activity_chains(activity_chains)
    activity_chains = activity_chains[activity_chains["TravDay"] == 3]  # Wednesday
    activity_chains_work = activity_chains[activity_chains["dact"] == "work"]
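
    # Keep a single representative weekday (TravDay == 3) and only trips whose
    # destination activity ("dact", assumed here to mean destination activity purpose)
    # is work; these are the trips that need a work zone assigned below.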

    # --- WORK: existing travel demand data
    # Commuting matrices (from 2021 census)

    # TODO: consider making this configurable
    commute_level = "OA"  # "OA" or "MSOA" data

    logger.info(f"Loading commuting matrices at {commute_level} level")

    # Clean the data
    if commute_level == "MSOA":
        print("Step 1: Reading in the zipped csv file")
        travel_demand = pd.read_csv(
            acbm.root_path / "data/external/ODWP15EW_MSOA_v1.zip"
        )

        print("Step 2: Creating commute_mode_dict")
        commute_mode_dict = {
            "Bus, minibus or coach": "pt",
            "Driving a car or van": "car",
            "Train": "pt",
            "Underground, metro, light rail, tram": "pt",
            "On foot": "walk",
            "Taxi": "car",
            "Other method of travel to work": "other",
            "Bicycle": "cycle",
            "Passenger in a car or van": "car",
            "Motorcycle, scooter or moped": "car",
            "Work mainly at or from home": "home",
        }
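
        # The 12 census travel-to-work categories are collapsed into the model's
        # broader modes (pt, car, walk, cycle, home, other); e.g. taxi, car passengers
        # and motorcycles are all treated as "car" for this purpose.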
print("Step 3: Mapping commute mode to model mode")
travel_demand["mode"] = travel_demand[
"Method used to travel to workplace (12 categories) label"
].map(commute_mode_dict)
print("Step 4: Filtering rows and dropping unnecessary columns")
travel_demand_clipped = travel_demand[
travel_demand["Place of work indicator (4 categories) code"].isin([1, 3])
]
travel_demand_clipped = travel_demand_clipped.drop(
columns=[
"Middle layer Super Output Areas label",
"MSOA of workplace label",
"Method used to travel to workplace (12 categories) label",
"Method used to travel to workplace (12 categories) code",
"Place of work indicator (4 categories) code",
"Place of work indicator (4 categories) label",
]
)
print("Step 5: Renaming columns and grouping")
travel_demand_clipped = travel_demand_clipped.rename(
columns={
"Middle layer Super Output Areas code": "MSOA21CD_home",
"MSOA of workplace code": "MSOA21CD_work",
}
)
travel_demand_clipped = (
travel_demand_clipped.groupby(["MSOA21CD_home", "MSOA21CD_work", "mode"])
.agg({"Count": "sum"})
.reset_index()
)
print("Step 6: Filtering matrix to boundary")
travel_demand_clipped = filter_matrix_to_boundary(
boundary=boundaries,
matrix=travel_demand_clipped,
boundary_id_col="MSOA21CD",
matrix_id_col="MSOA21CD",
type="both",
)
elif commute_level == "OA":
print("Step 1: Reading in the zipped csv file")
travel_demand = pd.read_csv(acbm.root_path / "data/external/ODWP01EW_OA.zip")
print("Step 2: Filtering rows and dropping unnecessary columns")
travel_demand_clipped = travel_demand[
travel_demand["Place of work indicator (4 categories) code"].isin([1, 3])
]
travel_demand_clipped = travel_demand_clipped.drop(
columns=[
"Place of work indicator (4 categories) code",
"Place of work indicator (4 categories) label",
]
)
print("Step 3: Renaming columns and grouping")
travel_demand_clipped = travel_demand_clipped.rename(
columns={
"Output Areas code": "OA21CD_home",
"OA of workplace code": "OA21CD_work",
}
)
travel_demand_clipped = (
travel_demand_clipped.groupby(["OA21CD_home", "OA21CD_work"])
.agg({"Count": "sum"})
.reset_index()
)
print("Step 4: Filtering matrix to boundary")
travel_demand_clipped = filter_matrix_to_boundary(
boundary=boundaries,
matrix=travel_demand_clipped,
boundary_id_col=config.zone_id,
matrix_id_col=config.zone_id,
type="both",
)
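
    # filter_matrix_to_boundary with type="both" is understood to keep only OD pairs
    # whose home and workplace zones both fall inside the study area boundaries, so
    # the observed flows match the zones available for assignment.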
logger.info(f"Commuting matrices at {commute_level} level loaded")
# Get dictionary of commuting matrices
logger.info("Converting commuting matrices to dictionaries")
if commute_level == "MSOA":
# TODO: check, currently unsused
_travel_demand_dict_mode = (
travel_demand_clipped.groupby(["MSOA21CD_home", "MSOA21CD_work"])
.apply(lambda x: dict(zip(x["mode"], x["Count"])))
.to_dict()
)
travel_demand_dict_nomode = (
travel_demand_clipped.groupby(["MSOA21CD_home", "MSOA21CD_work"])["Count"]
.sum()
.to_dict()
)
elif commute_level == "OA":
travel_demand_dict_nomode = (
travel_demand_clipped.groupby(["OA21CD_home", "OA21CD_work"])["Count"]
.sum()
.to_dict()
)
logger.info("Commuting matrices converted to dictionaries")

    #### ASSIGN TO ZONE FROM FEASIBLE ZONES ####

    zone_assignment = WorkZoneAssignment(
        activities_to_assign=possible_zones_work, actual_flows=travel_demand_dict_nomode
    )

    assignments_df = zone_assignment.select_work_zone_optimization(
        use_percentages=config.work_assignment.use_percentages,
        weight_max_dev=config.work_assignment.weight_max_dev,
        weight_total_dev=config.work_assignment.weight_total_dev,
        max_zones=config.work_assignment.max_zones,
    )
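
    # As the parameter names suggest, the optimization picks one work zone per person
    # from their feasible set while penalising both the largest and the total deviation
    # of assigned flows from the observed flows (optionally compared as percentages),
    # considering at most max_zones candidate zones. See WorkZoneAssignment for details.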

    # Add assigned zones to activity_chains_work. Replace dzone with assigned_zone
    activity_chains_work["dzone"] = activity_chains_work["id"].map(
        assignments_df.set_index("person_id")["assigned_zone"]
    )
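
    # pd.Series.map leaves NaN for any person id that did not receive an assignment,
    # so unassigned work trips end up with a missing dzone rather than raising an error.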

    # --- Evaluating assignment quality

    # - RMSE
    # Step 1: Convert both the actual demand and the assigned demand data to the correct format
    # df: origin_zone, assigned_zone, demand_assigned

    # a: Aggregate assignments_df by OD pair
    assignment_agg = (
        assignments_df.groupby(["origin_zone", "assigned_zone"])
        .size()
        .reset_index(name="demand_assigned")
    )

    # b: Convert travel_demand_dict_nomode to a DataFrame
    demand_df = pd.DataFrame(
        list(travel_demand_dict_nomode.items()), columns=["zone_pair", "demand_actual"]
    )
    demand_df[["origin_zone", "assigned_zone"]] = pd.DataFrame(
        demand_df["zone_pair"].tolist(), index=demand_df.index
    )
    demand_df.drop(columns=["zone_pair"], inplace=True)

    # Step 2: Merge the two DataFrames
    workzone_assignment_opt = pd.merge(
        assignment_agg, demand_df, on=["origin_zone", "assigned_zone"], how="outer"
    ).fillna(0)
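
    # The outer merge keeps OD pairs that appear in only one of the two sources
    # (observed but never assigned, or assigned but not observed); fillna(0) treats
    # the missing side as zero demand so those pairs still count in the comparison.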

    # (1) % of Total Demand
    workzone_assignment_opt["pct_of_total_demand_actual"] = (
        workzone_assignment_opt["demand_actual"]
        / workzone_assignment_opt["demand_actual"].sum()
    ) * 100
    workzone_assignment_opt["pct_of_total_demand_assigned"] = (
        workzone_assignment_opt["demand_assigned"]
        / workzone_assignment_opt["demand_assigned"].sum()
    ) * 100

    # (2) For each OD pair, demand as % of total demand from the same origin
    workzone_assignment_opt["pct_of_o_total_actual"] = workzone_assignment_opt.groupby(
        "origin_zone"
    )["demand_actual"].transform(lambda x: (x / x.sum()) * 100)
    workzone_assignment_opt[
        "pct_of_o_total_assigned"
    ] = workzone_assignment_opt.groupby("origin_zone")["demand_assigned"].transform(
        lambda x: (x / x.sum()) * 100
    )

    # (3) For each OD pair, demand as % of total demand to each destination
    workzone_assignment_opt["pct_of_d_total_actual"] = workzone_assignment_opt.groupby(
        "assigned_zone"
    )["demand_actual"].transform(lambda x: (x / x.sum()) * 100)
    workzone_assignment_opt[
        "pct_of_d_total_assigned"
    ] = workzone_assignment_opt.groupby("assigned_zone")["demand_assigned"].transform(
        lambda x: (x / x.sum()) * 100
    )
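
    # This gives three views of each OD pair for both actual and assigned demand:
    # its share of all commuting demand, its share of demand leaving the same origin,
    # and its share of demand arriving at the same destination. The RMSEs below
    # compare actual vs assigned shares under each view.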

    # Define the output file path
    os.makedirs(acbm.root_path / "data/processed/", exist_ok=True)
    output_file_path = acbm.root_path / "data/processed/workzone_rmse_results.txt"

    # Open the file in write mode
    with open(output_file_path, "w") as file:
        # (1) RMSE for % of Total Demand
        predictions = workzone_assignment_opt["pct_of_total_demand_assigned"]
        targets = workzone_assignment_opt["pct_of_total_demand_actual"]
        rmse_pct_of_total_demand = calculate_rmse(predictions, targets)
        file.write(f"RMSE for % of Total Demand: {rmse_pct_of_total_demand}\n")

        # (2) RMSE for demand as % of total demand from the same origin
        predictions = workzone_assignment_opt["pct_of_o_total_assigned"]
        targets = workzone_assignment_opt["pct_of_o_total_actual"]
        rmse_pct_of_o_total = calculate_rmse(predictions, targets)
        file.write(
            f"RMSE for % of Total Demand from the Same Origin: {rmse_pct_of_o_total}\n"
        )

        # (3) RMSE for demand as % of total demand to each destination
        predictions = workzone_assignment_opt["pct_of_d_total_assigned"]
        targets = workzone_assignment_opt["pct_of_d_total_actual"]
        rmse_pct_of_d_total = calculate_rmse(predictions, targets)
        file.write(
            f"RMSE for % of Total Demand to Each Destination: {rmse_pct_of_d_total}\n"
        )
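
    # calculate_rmse is assumed to return the usual root-mean-square error,
    # sqrt(mean((predictions - targets) ** 2)), over all OD pairs; values are in
    # percentage points, and lower values mean the assignment tracks the census
    # commuting pattern more closely.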

    # --- Plots

    # Plot the demand_actual and demand_assigned values as a line graph for n origin_zones.
    plot_workzone_assignment_line(
        assignment_results=workzone_assignment_opt,
        n=10,
        selection_type="top",
        sort_by="actual",
        save_dir=acbm.root_path / "data/processed/plots/assigning/",
    )

    # Plot the demand_actual and demand_assigned values as a heatmap for n origin_zones.
    plot_workzone_assignment_heatmap(
        workzone_assignment_opt,
        n=20,
        selection_type="top",
        sort_by="assigned",
        save_dir=acbm.root_path / "data/processed/plots/assigning/",
    )

    # save the activity chains as a pickle
    activity_chains_work.to_pickle(
        acbm.root_path / "data/interim/assigning/activity_chains_work.pkl"
    )
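
    # activity_chains_work now carries the assigned work zone in dzone; the pickle is
    # presumably picked up by the subsequent assignment/validation scripts in the pipeline.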


if __name__ == "__main__":
    main()