"""
PUBLIC low-level utility functions for Tax-Calculator.
"""
# CODING-STYLE CHECKS:
# pycodestyle utils.py
# pylint --disable=locally-disabled utils.py
#
# pylint: disable=too-many-lines
import os
import math
import json
import copy
import collections
import importlib.resources as implibres
import numpy as np
import pandas as pd
import bokeh.plotting as bp
from bokeh.models import PrintfTickFormatter
from taxcalc.utilsprvt import (weighted_mean,
wage_weighted, agi_weighted,
expanded_income_weighted)
# Items in the DIST_TABLE_COLUMNS list below correspond to the items in the
# DIST_TABLE_LABELS list below; this correspondence allows us to use this
# labels list to map a label to the correct column in a distribution table.
DIST_VARIABLES = ['expanded_income', 'c00100', 'aftertax_income', 'standard',
'c04470', 'c04600', 'c04800', 'taxbc', 'c62100', 'c09600',
'c05800', 'surtax', 'othertaxes', 'refund', 'c07100',
'iitax', 'payrolltax', 'combined', 's006', 'ubi',
'benefit_cost_total', 'benefit_value_total', 'XTOT']
DIST_TABLE_COLUMNS = ['count',
'c00100',
'count_StandardDed',
'standard',
'count_ItemDed',
'c04470',
'c04600',
'c04800',
'taxbc',
'c62100',
'count_AMT',
'c09600',
'c05800',
'c07100',
'othertaxes',
'refund',
'iitax',
'payrolltax',
'combined',
'ubi',
'benefit_cost_total',
'benefit_value_total',
'expanded_income',
'aftertax_income']
DIST_TABLE_LABELS = ['Number of Returns',
'AGI',
'Number of Returns Claiming Standard Deduction',
'Standard Deduction',
'Number of Returns Itemizing',
'Itemized Deduction',
'Personal Exemption',
'Taxable Income',
'Regular Tax',
'AMTI',
'Number of Returns with AMT',
'AMT',
'Tax before Credits',
'Non-refundable Credits',
'Other Taxes',
'Refundable Credits',
'Individual Income Tax Liabilities',
'Payroll Tax Liabilities',
'Combined Payroll and Individual Income Tax Liabilities',
'Universal Basic Income',
'Total Cost of Benefits',
'Consumption Value of Benefits',
'Expanded Income',
'After-Tax Expanded Income']
# Items in the DIFF_TABLE_COLUMNS list below correspond to the items in the
# DIFF_TABLE_LABELS list below; this correspondence allows us to use this
# labels list to map a label to the correct column in a difference table.
DIFF_VARIABLES = ['expanded_income', 'c00100', 'aftertax_income',
'iitax', 'payrolltax', 'combined', 's006', 'XTOT',
'ubi', 'benefit_cost_total', 'benefit_value_total']
DIFF_TABLE_COLUMNS = ['count',
'tax_cut',
'perc_cut',
'tax_inc',
'perc_inc',
'mean',
'tot_change',
'share_of_change',
'ubi',
'benefit_cost_total',
'benefit_value_total',
'pc_aftertaxinc']
DIFF_TABLE_LABELS = ['Number of Returns',
'Number of Returns with Tax Cut',
'Percent with Tax Cut',
'Number of Returns with Tax Increase',
'Percent with Tax Increase',
'Average Tax Change',
'Total Tax Difference',
'Share of Overall Change',
'Universal Basic Income',
'Total Cost of Benefits',
'Consumption Value of Benefits',
'% Change in After-Tax Income']
DECILE_ROW_NAMES = ['0-10n', '0-10z', '0-10p',
'10-20', '20-30', '30-40', '40-50',
'50-60', '60-70', '70-80', '80-90', '90-100',
'ALL',
'90-95', '95-99', 'Top 1%']
STANDARD_ROW_NAMES = ['<$0K', '=$0K', '$0-10K', '$10-20K', '$20-30K',
'$30-40K', '$40-50K', '$50-75K', '$75-100K',
'$100-200K', '$200-500K', '$500-1000K', '>$1000K', 'ALL']
STANDARD_INCOME_BINS = [-9e99, -1e-9, 1e-9, 10e3, 20e3, 30e3, 40e3, 50e3,
75e3, 100e3, 200e3, 500e3, 1e6, 9e99]
SOI_AGI_BINS = [-9e99, 1.0, 5e3, 10e3, 15e3, 20e3, 25e3, 30e3, 40e3, 50e3,
75e3, 100e3, 200e3, 500e3, 1e6, 1.5e6, 2e6, 5e6, 10e6, 9e99]
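# Example of the column/label correspondence described above (values shown
# come directly from the two lists as defined in this file):
#   dict(zip(DIST_TABLE_COLUMNS, DIST_TABLE_LABELS))['c04800']
#   # -> 'Taxable Income'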
def unweighted_sum(dframe, col_name):
"""
Return unweighted sum of Pandas DataFrame col_name items.
"""
return dframe[col_name].sum()
def weighted_sum(dframe, col_name):
"""
Return weighted sum of Pandas DataFrame col_name items.
"""
return (dframe[col_name] * dframe['s006']).sum()
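# Usage sketch for the two summation helpers above (hypothetical data; any
# DataFrame carrying an 's006' sampling-weight column works):
#   df = pd.DataFrame({'e00200': [10e3, 50e3], 's006': [1.5, 2.0]})
#   unweighted_sum(df, 'e00200')  # 60000.0
#   weighted_sum(df, 'e00200')    # 10e3 * 1.5 + 50e3 * 2.0 = 115000.0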
def add_quantile_table_row_variable(dframe, income_measure, num_quantiles,
pop_quantiles=False,
decile_details=False,
weight_by_income_measure=False):
"""
Add a variable to specified Pandas DataFrame, dframe, that specifies
the table row and is called 'table_row'.
When weight_by_income_measure=False, the rows hold an equal number of
people if pop_quantiles=True or an equal number of filing units if
pop_quantiles=False.
When weight_by_income_measure=True, the rows hold an equal number
of income dollars.
This function assumes that specified dframe contains columns for
the specified income_measure and for sample weights, s006, and when
pop_quantiles=True, number of exemptions, XTOT.
    When num_quantiles is 10 and decile_details is True,
the bottom decile is broken up into three subgroups
(neg, zero, and pos income_measure)
and the top decile is broken into three subgroups
(90-95, 95-99, and top 1%).
"""
# pylint: disable=too-many-arguments,too-many-positional-arguments
# pylint: disable=too-many-locals
assert isinstance(dframe, pd.DataFrame)
assert income_measure in dframe
assert 's006' in dframe
if decile_details and num_quantiles != 10:
        msg = 'decile_details is True but num_quantiles is {} (not 10)'
raise ValueError(msg.format(num_quantiles))
if pop_quantiles:
assert not weight_by_income_measure
assert 'XTOT' in dframe
# adjust income measure by square root of filing unit size
adj = np.sqrt(np.where(dframe['XTOT'] == 0, 1, dframe['XTOT']))
dframe['adj_income_measure'] = np.divide(dframe[income_measure], adj)
else:
dframe['adj_income_measure'] = dframe[income_measure]
dframe.sort_values(by='adj_income_measure', inplace=True)
if weight_by_income_measure:
dframe['cumsum_temp'] = np.cumsum(
np.multiply(dframe[income_measure].values, dframe['s006'].values)
)
min_cumsum = dframe['cumsum_temp'].values[0]
else:
if pop_quantiles:
dframe['cumsum_temp'] = np.cumsum(
np.multiply(dframe['XTOT'].values, dframe['s006'].values)
)
else:
dframe['cumsum_temp'] = np.cumsum(
dframe['s006'].values
)
min_cumsum = 0. # because s006 and XTOT values are non-negative
max_cumsum = dframe['cumsum_temp'].values[-1]
cumsum_range = max_cumsum - min_cumsum
bin_width = cumsum_range / float(num_quantiles)
bin_edges = list(min_cumsum +
np.arange(0, (num_quantiles + 1)) * bin_width)
bin_edges[-1] = 9e99 # raise top of last bin to include all observations
bin_edges[0] = -9e99 # lower bottom of 1st bin to include all observations
num_bins = num_quantiles
if decile_details:
assert bin_edges[1] > 1e-9 # bin_edges[1] is top of bottom decile
neg_im = np.less_equal(dframe[income_measure], -1e-9)
neg_wght = dframe['s006'][neg_im].sum()
zer_im = np.logical_and(
np.greater(dframe[income_measure], -1e-9),
np.less(dframe[income_measure], 1e-9)
)
zer_wght = dframe['s006'][zer_im].sum()
bin_edges.insert(1, neg_wght + zer_wght) # top of zeros
bin_edges.insert(1, neg_wght) # top of negatives
bin_edges.insert(-1, bin_edges[-2] + 0.5 * bin_width) # top of 90-95
bin_edges.insert(-1, bin_edges[-2] + 0.4 * bin_width) # top of 95-99
num_bins += 4
labels = range(1, (num_bins + 1))
dframe['table_row'] = pd.cut(dframe['cumsum_temp'], bin_edges,
right=False, labels=labels)
dframe.drop('cumsum_temp', axis=1, inplace=True)
return dframe
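# Illustrative call (hypothetical arrays): tag each filing unit with a
# weighted expanded-income decile label in a new 'table_row' column.
#   df = pd.DataFrame({'expanded_income': income_array,  # hypothetical
#                      's006': weight_array})            # hypothetical
#   df = add_quantile_table_row_variable(df, 'expanded_income', 10)
#   df.groupby('table_row', observed=False)  # one group per decile row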
def add_income_table_row_variable(dframe, income_measure, bin_edges):
"""
Add a variable to specified Pandas DataFrame, dframe, that specifies the
table row and is called 'table_row'. The rows are defined by the
specified bin_edges function argument. Note that the bin groupings
are LEFT INCLUSIVE, which means that bin_edges=[1,2,3,4] implies these
three bin groupings: [1,2), [2,3), [3,4).
Parameters
----------
dframe: Pandas DataFrame
the object to which we are adding bins
income_measure: String
specifies income variable used to construct bins
bin_edges: list of scalar bin edges
Returns
-------
dframe: Pandas DataFrame
the original input plus the added 'table_row' column
"""
assert isinstance(dframe, pd.DataFrame)
assert income_measure in dframe
assert isinstance(bin_edges, list)
dframe['table_row'] = pd.cut(dframe[income_measure],
bin_edges, right=False)
return dframe
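# Illustrative call (hypothetical data): bins are left inclusive, so an
# income of exactly 10e3 lands in the [10e3, 9e99) bin, not the bin below.
#   df = pd.DataFrame({'c00100': [0.5, 10e3, 9.5e3]})
#   df = add_income_table_row_variable(df, 'c00100',
#                                      [-9e99, 1e-9, 10e3, 9e99])
#   df['table_row']  # [1e-9, 10e3), [10e3, 9e99), [1e-9, 10e3)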
def get_sums(dframe):
"""
Compute unweighted sum of items in each column of Pandas DataFrame, dframe.
Returns
-------
Pandas Series object containing column sums indexed by dframe column names.
"""
sums = {}
for col in dframe.columns.values.tolist():
if col != 'table_row':
sums[col] = dframe[col].sum()
return pd.Series(sums, name='ALL')
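# Sketch (hypothetical data): get_sums returns a Series named 'ALL' that
# the table-building functions below append as the all-returns row.
#   tbl = pd.DataFrame({'iitax': [1.0, 2.0], 'combined': [3.0, 4.0]})
#   get_sums(tbl)  # Series with iitax=3.0 and combined=7.0, name='ALL'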
def create_distribution_table(vdf, groupby, income_measure,
pop_quantiles=False, scaling=True):
"""
Get results from vdf, sort them by expanded_income based on groupby,
and return them as a table.
Parameters
----------
vdf : Pandas DataFrame including columns named in DIST_TABLE_COLUMNS list
for example, an object returned from the distribution_table_dataframe
function in the Calculator distribution_tables method
groupby : String object
options for input: 'weighted_deciles' or
'standard_income_bins' or 'soi_agi_bins'
determines how the rows in the resulting Pandas DataFrame are sorted
income_measure: String object
options for input: 'expanded_income' or 'expanded_income_baseline'
determines which variable is used to sort rows
pop_quantiles : boolean
specifies whether or not weighted_deciles contain an equal number
of people (True) or an equal number of filing units (False)
scaling : boolean
specifies whether or not table entry values are scaled
Returns
-------
distribution table as a Pandas DataFrame with DIST_TABLE_COLUMNS and
groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
# pylint: disable=too-many-statements,too-many-branches
# nested function that returns calculated column statistics as a DataFrame
def stat_dataframe(gdf):
"""
Returns calculated distribution table column statistics derived from
the specified grouped Dataframe object, gdf.
"""
unweighted_columns = ['count', 'count_StandardDed',
'count_ItemDed', 'count_AMT']
sdf = pd.DataFrame()
for col in DIST_TABLE_COLUMNS:
if col in unweighted_columns:
sdf[col] = gdf.apply(
unweighted_sum, col, include_groups=False
).values[:, 1]
else:
sdf[col] = gdf.apply(
weighted_sum, col, include_groups=False
).values[:, 1]
return sdf
# main logic of create_distribution_table
assert isinstance(vdf, pd.DataFrame)
assert groupby in ('weighted_deciles',
'standard_income_bins',
'soi_agi_bins')
assert income_measure in ('expanded_income', 'expanded_income_baseline')
assert income_measure in vdf
assert 'table_row' not in vdf
if pop_quantiles:
assert groupby == 'weighted_deciles'
# sort the data given specified groupby and income_measure
dframe = None
if groupby == 'weighted_deciles':
dframe = add_quantile_table_row_variable(vdf, income_measure, 10,
pop_quantiles=pop_quantiles,
decile_details=True)
elif groupby == 'standard_income_bins':
dframe = add_income_table_row_variable(vdf, income_measure,
STANDARD_INCOME_BINS)
elif groupby == 'soi_agi_bins':
dframe = add_income_table_row_variable(vdf, income_measure,
SOI_AGI_BINS)
# construct grouped DataFrame
gdf = dframe.groupby('table_row', observed=False, as_index=False)
dist_table = stat_dataframe(gdf)
del dframe['table_row']
# compute sum row
sum_row = get_sums(dist_table)[dist_table.columns]
# handle placement of sum_row in table
if groupby == 'weighted_deciles':
# compute top-decile row
lenindex = len(dist_table.index)
assert lenindex == 14 # rows should be indexed from 0 to 13
topdec_row = get_sums(dist_table[11:lenindex])[dist_table.columns]
# move top-decile detail rows to make room for topdec_row and sum_row
dist_table = dist_table.reindex(index=range(0, lenindex + 2))
dist_table.iloc[15] = dist_table.iloc[13]
dist_table.iloc[14] = dist_table.iloc[12]
dist_table.iloc[13] = dist_table.iloc[11]
dist_table.iloc[12] = sum_row
dist_table.iloc[11] = topdec_row
del topdec_row
else:
dist_table.loc["ALL"] = sum_row
del sum_row
# ensure dist_table columns are in correct order
assert dist_table.columns.values.tolist() == DIST_TABLE_COLUMNS
# add row names to table if using weighted_deciles or standard_income_bins
if groupby == 'weighted_deciles':
rownames = DECILE_ROW_NAMES
elif groupby == 'standard_income_bins':
rownames = STANDARD_ROW_NAMES
else:
rownames = None
if rownames:
assert len(dist_table.index) == len(rownames)
dist_table.index = rownames
del rownames
# delete intermediate Pandas DataFrame objects
del gdf
del dframe
# scale table elements
if scaling:
count_vars = ['count',
'count_StandardDed',
'count_ItemDed',
'count_AMT']
        for col in dist_table.columns:
            # assign rounded values back; DataFrame.round is not in-place
            if col in count_vars:
                dist_table[col] = (dist_table[col] * 1e-6).round(2)
            else:
                dist_table[col] = (dist_table[col] * 1e-9).round(3)
    # restore original vdf row order (changed by sorting above) and return
vdf.sort_index(inplace=True)
return dist_table
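# Typical use (sketch; assumes `vdf` was built with the columns named in
# DIST_TABLE_COLUMNS, for example as in Calculator.distribution_tables):
#   dist = create_distribution_table(vdf, 'weighted_deciles',
#                                    'expanded_income')
#   dist.loc['Top 1%']  # scaled statistics for the 0.99-1.00 quantile range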
def create_difference_table(vdf1, vdf2, groupby, tax_to_diff,
pop_quantiles=False):
"""
Get results from two different vdf, construct tax difference results,
and return the difference statistics as a table.
Parameters
----------
vdf1 : Pandas DataFrame including columns named in DIFF_VARIABLES list
for example, object returned from a dataframe(DIFF_VARIABLES) call
        on the baseline Calculator object
vdf2 : Pandas DataFrame including columns in the DIFF_VARIABLES list
for example, object returned from a dataframe(DIFF_VARIABLES) call
on the reform Calculator object
groupby : String object
options for input: 'weighted_deciles' or
'standard_income_bins' or 'soi_agi_bins'
determines how the rows in the resulting Pandas DataFrame are sorted
tax_to_diff : String object
options for input: 'iitax', 'payrolltax', 'combined'
specifies which tax to difference
pop_quantiles : boolean
specifies whether or not weighted_deciles contain an equal number
of people (True) or an equal number of filing units (False)
Returns
-------
difference table as a Pandas DataFrame with DIFF_TABLE_COLUMNS and
groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
# nested function that creates dataframe containing additive statistics
def additive_stats_dataframe(gdf):
"""
Nested function that returns additive stats DataFrame derived from gdf
"""
def count_lt_zero(dframe, col_name, tolerance=-0.001):
"""
            Return sum of 'count' for rows in which col_name is negative
            (below the specified tolerance).
"""
return dframe[dframe[col_name] < tolerance]['count'].sum()
def count_gt_zero(dframe, col_name, tolerance=0.001):
"""
            Return sum of 'count' for rows in which col_name is positive
            (above the specified tolerance).
"""
return dframe[dframe[col_name] > tolerance]['count'].sum()
# start of additive_stats_dataframe code
sdf = pd.DataFrame()
sdf['count'] = gdf.apply(
unweighted_sum, 'count', include_groups=False
).values[:, 1]
sdf['tax_cut'] = gdf.apply(
count_lt_zero, 'tax_diff', include_groups=False
).values[:, 1]
sdf['tax_inc'] = gdf.apply(
count_gt_zero, 'tax_diff', include_groups=False
).values[:, 1]
sdf['tot_change'] = gdf.apply(
weighted_sum, 'tax_diff', include_groups=False
).values[:, 1]
sdf['ubi'] = gdf.apply(
weighted_sum, 'ubi', include_groups=False
).values[:, 1]
sdf['benefit_cost_total'] = gdf.apply(
weighted_sum, 'benefit_cost_total', include_groups=False
).values[:, 1]
sdf['benefit_value_total'] = gdf.apply(
weighted_sum, 'benefit_value_total', include_groups=False
).values[:, 1]
sdf['atinc1'] = gdf.apply(
weighted_sum, 'atinc1', include_groups=False
).values[:, 1]
sdf['atinc2'] = gdf.apply(
weighted_sum, 'atinc2', include_groups=False
).values[:, 1]
return sdf
# main logic of create_difference_table
assert groupby in ('weighted_deciles',
'standard_income_bins',
'soi_agi_bins')
if pop_quantiles:
assert groupby == 'weighted_deciles'
assert 'expanded_income' in vdf1
assert tax_to_diff in ('iitax', 'payrolltax', 'combined')
assert 'table_row' not in vdf1
assert 'table_row' not in vdf2
assert isinstance(vdf1, pd.DataFrame)
assert isinstance(vdf2, pd.DataFrame)
assert np.allclose(vdf1['XTOT'], vdf2['XTOT']) # check rows are the same
assert np.allclose(vdf1['s006'], vdf2['s006']) # units and in same order
baseline_expanded_income = 'expanded_income_baseline'
df2 = copy.deepcopy(vdf2)
df2[baseline_expanded_income] = vdf1['expanded_income']
df2['tax_diff'] = df2[tax_to_diff] - vdf1[tax_to_diff]
for col in ['ubi', 'benefit_cost_total', 'benefit_value_total']:
df2[col] = df2[col] - vdf1[col]
df2['atinc1'] = vdf1['aftertax_income']
df2['atinc2'] = vdf2['aftertax_income']
# specify count variable in df2
if pop_quantiles:
df2['count'] = np.multiply(df2['s006'], df2['XTOT'])
else:
df2['count'] = df2['s006']
# add table_row column to df2 given specified groupby and income_measure
dframe = None
if groupby == 'weighted_deciles':
dframe = add_quantile_table_row_variable(
df2, baseline_expanded_income, 10,
pop_quantiles=pop_quantiles, decile_details=True)
elif groupby == 'standard_income_bins':
dframe = add_income_table_row_variable(
df2, baseline_expanded_income, STANDARD_INCOME_BINS)
elif groupby == 'soi_agi_bins':
dframe = add_income_table_row_variable(
df2, baseline_expanded_income, SOI_AGI_BINS)
del df2
# create grouped Pandas DataFrame
gdf = dframe.groupby('table_row', as_index=False, observed=False)
# create additive difference table statistics from gdf
diff_table = additive_stats_dataframe(gdf)
# calculate additive statistics on sums row
sum_row = get_sums(diff_table)[diff_table.columns]
# handle placement of sum_row in table
if groupby == 'weighted_deciles':
# compute top-decile row
lenindex = len(diff_table.index)
assert lenindex == 14 # rows should be indexed from 0 to 13
topdec_row = get_sums(diff_table[11:lenindex])[diff_table.columns]
# move top-decile detail rows to make room for topdec_row and sum_row
diff_table = diff_table.reindex(index=range(0, lenindex + 2))
diff_table.iloc[15] = diff_table.iloc[13]
diff_table.iloc[14] = diff_table.iloc[12]
diff_table.iloc[13] = diff_table.iloc[11]
diff_table.iloc[12] = sum_row
diff_table.iloc[11] = topdec_row
del topdec_row
else:
diff_table.loc["ALL"] = sum_row
# delete intermediate Pandas DataFrame objects
del gdf
del dframe
# compute non-additive stats in each table cell
count = diff_table['count'].values
diff_table['perc_cut'] = np.divide(
100 * diff_table['tax_cut'].values, count,
out=np.zeros_like(diff_table['tax_cut'].values),
where=count > 0)
diff_table['perc_inc'] = np.divide(
100 * diff_table['tax_inc'].values, count,
out=np.zeros_like(diff_table['tax_inc'].values),
where=count > 0)
diff_table['mean'] = np.divide(
diff_table['tot_change'].values, count,
out=np.zeros_like(diff_table['tot_change'].values),
where=count > 0)
total_change = sum_row['tot_change']
diff_table['share_of_change'] = np.divide(
100 * diff_table['tot_change'].values, total_change,
out=np.zeros_like(diff_table['tot_change'].values),
where=total_change > 0)
quotient = np.divide(
diff_table['atinc2'].values, diff_table['atinc1'].values,
out=np.zeros_like(diff_table['atinc2'].values),
where=diff_table['atinc1'].values != 0)
diff_table['pc_aftertaxinc'] = np.where(
diff_table['atinc1'].values == 0., np.nan, 100 * (quotient - 1))
# delete intermediate Pandas DataFrame objects
del diff_table['atinc1']
del diff_table['atinc2']
del count
del sum_row
# put diff_table columns in correct order
diff_table = diff_table.reindex(columns=DIFF_TABLE_COLUMNS)
# add row names to table if using weighted_deciles or standard_income_bins
if groupby == 'weighted_deciles':
rownames = DECILE_ROW_NAMES
elif groupby == 'standard_income_bins':
rownames = STANDARD_ROW_NAMES
else:
rownames = None
if rownames:
assert len(diff_table.index) == len(rownames)
diff_table.index = rownames
del rownames
# scale table elements
count_vars = ['count', 'tax_cut', 'tax_inc']
scale_vars = ['tot_change', 'ubi',
'benefit_cost_total', 'benefit_value_total']
    for col in diff_table.columns:
        # assign rounded values back; DataFrame.round is not in-place
        if col in count_vars:
            diff_table[col] = (diff_table[col] * 1e-6).round(2)
        elif col in scale_vars:
            diff_table[col] = (diff_table[col] * 1e-9).round(3)
        else:
            diff_table[col] = diff_table[col].round(1)
return diff_table
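# Typical use (sketch; assumes baseline and reform Calculator objects,
# `calc1` and `calc2`, advanced to the same year and already calculated):
#   vdf1 = calc1.dataframe(DIFF_VARIABLES)
#   vdf2 = calc2.dataframe(DIFF_VARIABLES)
#   diff = create_difference_table(vdf1, vdf2, 'standard_income_bins',
#                                  'combined')
#   diff['tot_change']  # billions of dollars of combined-tax change per bin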
def create_diagnostic_table(dframe_list, year_list):
"""
Extract diagnostic table from list of Pandas DataFrame objects
returned from a Calculator dataframe(DIST_VARIABLES) call for
each year in the specified list of years.
Parameters
----------
dframe_list : list of Pandas DataFrame objects containing the variables
year_list : list of calendar years corresponding to the dframe_list
Returns
-------
Pandas DataFrame object containing the diagnostic table
"""
# pylint: disable=too-many-statements
def diagnostic_table_odict(vdf):
"""
Nested function that extracts diagnostic table dictionary from
the specified Pandas DataFrame object, vdf.
Parameters
----------
vdf : Pandas DataFrame object containing the variables
Returns
-------
ordered dictionary of variable names and aggregate weighted values
"""
# aggregate weighted values expressed in millions or billions
in_millions = 1.0e-6
in_billions = 1.0e-9
odict = collections.OrderedDict()
# total number of filing units
wghts = vdf['s006']
odict['Returns (#m)'] = round(wghts.sum() * in_millions, 2)
# adjusted gross income
agi = vdf['c00100']
odict['AGI ($b)'] = round((agi * wghts).sum() * in_billions, 3)
# number of itemizers
val = wghts[vdf['c04470'] > 0.].sum()
odict['Itemizers (#m)'] = round(val * in_millions, 2)
# itemized deduction
ided1 = vdf['c04470'] * wghts
val = ided1[vdf['c04470'] > 0.].sum()
odict['Itemized Deduction ($b)'] = round(val * in_billions, 3)
# number of standard deductions
val = wghts[vdf['standard'] > 0.].sum()
odict['Standard Deduction Filers (#m)'] = round(val * in_millions, 2)
# standard deduction
sded1 = vdf['standard'] * wghts
val = sded1[vdf['standard'] > 0.].sum()
odict['Standard Deduction ($b)'] = round(val * in_billions, 3)
# personal exemption
val = (vdf['c04600'] * wghts).sum()
odict['Personal Exemption ($b)'] = round(val * in_billions, 3)
# taxable income
val = (vdf['c04800'] * wghts).sum()
odict['Taxable Income ($b)'] = round(val * in_billions, 3)
# regular tax liability
val = (vdf['taxbc'] * wghts).sum()
odict['Regular Tax ($b)'] = round(val * in_billions, 3)
# AMT taxable income
val = (vdf['c62100'] * wghts).sum()
odict['AMT Income ($b)'] = round(val * in_billions, 3)
# total AMT liability
val = (vdf['c09600'] * wghts).sum()
odict['AMT Liability ($b)'] = round(val * in_billions, 3)
# number of people paying AMT
val = wghts[vdf['c09600'] > 0.].sum()
odict['AMT Filers (#m)'] = round(val * in_millions, 2)
# tax before credits
val = (vdf['c05800'] * wghts).sum()
odict['Tax before Credits ($b)'] = round(val * in_billions, 3)
# refundable credits
val = (vdf['refund'] * wghts).sum()
odict['Refundable Credits ($b)'] = round(val * in_billions, 3)
# nonrefundable credits
val = (vdf['c07100'] * wghts).sum()
odict['Nonrefundable Credits ($b)'] = round(val * in_billions, 3)
# reform surtaxes (part of federal individual income tax liability)
val = (vdf['surtax'] * wghts).sum()
odict['Reform Surtaxes ($b)'] = round(val * in_billions, 3)
# other taxes on Form 1040
val = (vdf['othertaxes'] * wghts).sum()
odict['Other Taxes ($b)'] = round(val * in_billions, 3)
# federal individual income tax liability
val = (vdf['iitax'] * wghts).sum()
odict['Ind Income Tax ($b)'] = round(val * in_billions, 3)
# OASDI+HI payroll tax liability (including employer share)
val = (vdf['payrolltax'] * wghts).sum()
odict['Payroll Taxes ($b)'] = round(val * in_billions, 3)
# combined income and payroll tax liability
val = (vdf['combined'] * wghts).sum()
odict['Combined Liability ($b)'] = round(val * in_billions, 3)
# number of tax units with non-positive income tax liability
val = (wghts[vdf['iitax'] <= 0]).sum()
odict['With Income Tax <= 0 (#m)'] = round(val * in_millions, 2)
# number of tax units with non-positive combined tax liability
val = (wghts[vdf['combined'] <= 0]).sum()
odict['With Combined Tax <= 0 (#m)'] = round(val * in_millions, 2)
# UBI benefits
val = (vdf['ubi'] * wghts).sum()
odict['UBI Benefits ($b)'] = round(val * in_billions, 3)
# Total consumption value of benefits
val = (vdf['benefit_value_total'] * wghts).sum()
odict['Total Benefits, Consumption Value ($b)'] = round(
val * in_billions, 3)
# Total dollar cost of benefits
val = (vdf['benefit_cost_total'] * wghts).sum()
odict['Total Benefits Cost ($b)'] = round(val * in_billions, 3)
return odict
# check function arguments
assert isinstance(dframe_list, list)
assert dframe_list
assert isinstance(year_list, list)
assert year_list
assert len(dframe_list) == len(year_list)
assert isinstance(year_list[0], int)
assert isinstance(dframe_list[0], pd.DataFrame)
# construct diagnostic table
tlist = []
for year, vardf in zip(year_list, dframe_list):
odict = diagnostic_table_odict(vardf)
ddf = pd.DataFrame(data=odict, index=[year], columns=odict.keys())
ddf = ddf.transpose()
tlist.append(ddf)
del odict
return pd.concat(tlist, axis=1)
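# Typical use (sketch; assumes a Calculator object `calc` that is advanced
# and calculated for each year of interest):
#   dframes, years = [], []
#   for year in range(2024, 2027):
#       calc.advance_to_year(year)
#       calc.calc_all()
#       dframes.append(calc.dataframe(DIST_VARIABLES))
#       years.append(year)
#   dtable = create_diagnostic_table(dframes, years)  # one column per year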
def mtr_graph_data(vdf, year,
mars='ALL',
mtr_measure='combined',
mtr_variable='e00200p',
alt_e00200p_text='',
mtr_wrt_full_compen=False,
income_measure='expanded_income',
pop_quantiles=False,
dollar_weighting=False):
"""
Prepare marginal tax rate data needed by xtr_graph_plot utility function.
Parameters
----------
vdf : a Pandas DataFrame object containing variables and marginal tax rates
(See Calculator.mtr_graph method for required elements of vdf.)
year : integer
specifies calendar year of the data in vdf
mars : integer or string
specifies which filing status subgroup to show in the graph
- 'ALL': include all filing units in sample
- 1: include only single filing units
- 2: include only married-filing-jointly filing units
- 3: include only married-filing-separately filing units
- 4: include only head-of-household filing units
mtr_measure : string
specifies which marginal tax rate to show on graph's y axis
- 'itax': marginal individual income tax rate
- 'ptax': marginal payroll tax rate
- 'combined': sum of marginal income and payroll tax rates
mtr_variable : string
any string in the Calculator.VALID_MTR_VARS set
specifies variable to change in order to compute marginal tax rates
alt_e00200p_text : string
text to use in place of mtr_variable when mtr_variable is 'e00200p';
if empty string then use 'e00200p'
mtr_wrt_full_compen : boolean
see documentation of Calculator.mtr() argument wrt_full_compensation
(value has an effect only if mtr_variable is 'e00200p')
income_measure : string
specifies which income variable to show on the graph's x axis
- 'wages': wage and salary income (e00200)
- 'agi': adjusted gross income, AGI (c00100)
- 'expanded_income': sum of AGI, non-taxable interest income,
non-taxable social security benefits, and employer share of
FICA taxes.
pop_quantiles : boolean
specifies whether or not quantiles contain an equal number
of people (True) or an equal number of filing units (False)
dollar_weighting : boolean
False implies both income_measure percentiles on x axis and
mtr values for each percentile on the y axis are computed
without using dollar income_measure weights (just sampling weights);
True implies both income_measure percentiles on x axis and
mtr values for each percentile on the y axis are computed
using dollar income_measure weights (in addition to sampling weights).
Specifying True produces a graph x axis that shows income_measure
(not filing unit) percentiles.
Returns
-------
dictionary object suitable for passing to xtr_graph_plot utility function
"""
    # pylint: disable=too-many-arguments,too-many-positional-arguments
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
# check validity of function arguments
# . . check income_measure value
weighting_function = weighted_mean
if income_measure == 'wages':
income_var = 'e00200'
income_str = 'Wage'
if dollar_weighting:
weighting_function = wage_weighted
elif income_measure == 'agi':
income_var = 'c00100'
income_str = 'AGI'
if dollar_weighting:
weighting_function = agi_weighted
elif income_measure == 'expanded_income':
income_var = 'expanded_income'
income_str = 'Expanded-Income'
if dollar_weighting:
weighting_function = expanded_income_weighted
else:
msg = ('income_measure="{}" is neither '
'"wages", "agi", nor "expanded_income"')
raise ValueError(msg.format(income_measure))
# . . check mars value
if isinstance(mars, str):
if mars != 'ALL':
msg = 'string value of mars="{}" is not "ALL"'
raise ValueError(msg.format(mars))
elif isinstance(mars, int):
if mars < 1 or mars > 4:
msg = 'integer mars="{}" is not in [1,4] range'
raise ValueError(msg.format(mars))
else:
msg = 'mars="{}" is neither a string nor an integer'
raise ValueError(msg.format(mars))
# . . check mars value if mtr_variable is e00200s
if mtr_variable == 'e00200s' and mars != 2:
msg = 'mtr_variable == "e00200s" but mars != 2'
raise ValueError(msg)
# . . check mtr_measure value
if mtr_measure == 'itax':
mtr_str = 'Income-Tax'
elif mtr_measure == 'ptax':
mtr_str = 'Payroll-Tax'
elif mtr_measure == 'combined':
mtr_str = 'Income+Payroll-Tax'
else:
msg = ('mtr_measure="{}" is neither '
'"itax" nor "ptax" nor "combined"')
raise ValueError(msg.format(mtr_measure))
# . . check vdf
assert isinstance(vdf, pd.DataFrame)
# . . check pop_quantiles and dollar_weighting
if pop_quantiles:
assert not dollar_weighting
# create 'table_row' column given specified income_var and dollar_weighting
dfx = add_quantile_table_row_variable(
vdf, income_var, 100,
pop_quantiles=pop_quantiles,
weight_by_income_measure=dollar_weighting
)
# split dfx into groups specified by 'table_row' column
gdfx = dfx.groupby('table_row', observed=False, as_index=False)
# apply the weighting_function to percentile-grouped mtr values
mtr1_series = gdfx.apply(
weighting_function, 'mtr1', include_groups=False
).values[:, 1]
mtr2_series = gdfx.apply(
weighting_function, 'mtr2', include_groups=False
).values[:, 1]
# construct DataFrame containing the two mtr?_series
lines = pd.DataFrame()
lines['base'] = mtr1_series
lines['reform'] = mtr2_series
# construct dictionary containing merged data and auto-generated labels
data = {}
data['lines'] = lines
if dollar_weighting:
income_str = f'Dollar-weighted {income_str}'
mtr_str = f'Dollar-weighted {mtr_str}'
data['ylabel'] = f'{mtr_str} MTR'
xlabel_str = f'Baseline {income_str} Percentile'
if mars != 'ALL':
xlabel_str = f'{xlabel_str} for MARS={mars}'
data['xlabel'] = xlabel_str
var_str = f'{mtr_variable}'
if mtr_variable == 'e00200p' and alt_e00200p_text != '':
var_str = f'{alt_e00200p_text}'
if mtr_variable == 'e00200p' and mtr_wrt_full_compen:
var_str = f'{var_str} wrt full compensation'
title_str = f'Mean Marginal Tax Rate for {var_str} by Income Percentile'
if mars != 'ALL':
title_str = f'{title_str} for MARS={mars}'
title_str = f'{title_str} for {year}'
data['title'] = title_str
return data
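# Typical use (sketch; vdf must contain 'mtr1' and 'mtr2' columns plus the
# chosen income variable, as supplied by the Calculator.mtr_graph method):
#   data = mtr_graph_data(vdf, 2024, mars=2, mtr_measure='itax')
#   data['lines']['base']  # mean baseline MTR for each income percentile
#   data['title']          # auto-generated graph title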
def atr_graph_data(vdf, year,
mars='ALL',
atr_measure='combined',
pop_quantiles=False):
"""
Prepare average tax rate data needed by xtr_graph_plot utility function.
Parameters
----------
vdf : a Pandas DataFrame object containing variables and tax liabilities
(See Calculator.atr_graph method for required elements of vdf.)
year : integer
specifies calendar year of the data in vdf
mars : integer or string
specifies which filing status subgroup to show in the graph
- 'ALL': include all filing units in sample
- 1: include only single filing units
- 2: include only married-filing-jointly filing units
- 3: include only married-filing-separately filing units
- 4: include only head-of-household filing units
atr_measure : string
specifies which average tax rate to show on graph's y axis
- 'itax': average individual income tax rate
- 'ptax': average payroll tax rate
- 'combined': sum of average income and payroll tax rates
pop_quantiles : boolean
specifies whether or not quantiles contain an equal number
of people (True) or an equal number of filing units (False)
Returns