Set data when building Linearmodel #249
@@ -220,6 +220,68 @@ def test_sample_posterior_predictive(fitted_model_instance, combined):
     assert np.issubdtype(pred[fitted_model_instance.output_var].dtype, np.floating)
 
 
+@pytest.mark.parametrize("extend_idata", [True, False])
+def test_sample_prior_extend_idata_param(fitted_model_instance, extend_idata):

Inline review thread:

Reviewer: I'm concerned about mutating the `fitted_model_instance` fixture. Also, can we parametrize the sample method to merge this and the posterior predictive tests? They seem to share most of the logic anyway. Otherwise I think it's more than ready.

Author: I see what you're saying about modifying the `fitted_model_instance` fixture. I originally wrote the prior and posterior predictive tests in the same test, but there were so many …

+    output_var = fitted_model_instance.output_var
+    idata_prev = fitted_model_instance.idata.prior_predictive[output_var]
+
+    # Since coordinates are provided, the dimension must match
+    n_pred = 100  # Must match toy_x
+    x_pred = np.random.uniform(0, 1, n_pred)
+
+    prediction_data = pd.DataFrame({"input": x_pred})
+    pred = fitted_model_instance.sample_prior_predictive(
+        prediction_data["input"], combined=False, extend_idata=extend_idata
+    )
+
+    pred_unstacked = pred[output_var].values
+    idata_now = fitted_model_instance.idata.prior_predictive[output_var].values
+
+    if extend_idata:
+        # After sampling, data in the model should be the same as the predictions
+        np.testing.assert_array_equal(idata_now, pred_unstacked)
+        # Data in the model should NOT be the same as before
+        if idata_now.shape == idata_prev.values.shape:
+            assert np.sum(np.abs(idata_now - idata_prev.values) < 1e-5) <= 2
+    else:
+        # After sampling, data in the model should be the same as it was before
+        np.testing.assert_array_equal(idata_now, idata_prev.values)
+        # Data in the model should NOT be the same as the predictions
+        if idata_now.shape == pred_unstacked.shape:
+            assert np.sum(np.abs(idata_now - pred_unstacked) < 1e-5) <= 2
+
+
+@pytest.mark.parametrize("extend_idata", [True, False])
+def test_sample_posterior_extend_idata_param(fitted_model_instance, extend_idata):
+    output_var = fitted_model_instance.output_var
+    idata_prev = fitted_model_instance.idata.posterior_predictive[output_var]
+
+    # Since coordinates are provided, the dimension must match
+    n_pred = 100  # Must match toy_x
+    x_pred = np.random.uniform(0, 1, n_pred)
+
+    prediction_data = pd.DataFrame({"input": x_pred})
+    pred = fitted_model_instance.sample_posterior_predictive(
+        prediction_data["input"], combined=False, extend_idata=extend_idata
+    )
+
+    pred_unstacked = pred[output_var].values
+    idata_now = fitted_model_instance.idata.posterior_predictive[output_var].values
+
+    if extend_idata:
+        # After sampling, data in the model should be the same as the predictions
+        np.testing.assert_array_equal(idata_now, pred_unstacked)
+        # Data in the model should NOT be the same as before
+        if idata_now.shape == idata_prev.values.shape:
+            assert np.sum(np.abs(idata_now - idata_prev.values) < 1e-5) <= 2
+    else:
+        # After sampling, data in the model should be the same as it was before
+        np.testing.assert_array_equal(idata_now, idata_prev.values)
+        # Data in the model should NOT be the same as the predictions
+        if idata_now.shape == pred_unstacked.shape:
+            assert np.sum(np.abs(idata_now - pred_unstacked) < 1e-5) <= 2
+
+
 def test_model_config_formatting():
     model_config = {
         "a": {
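
On the suggestion in the thread above to merge the two tests: a minimal sketch of what parametrizing over the sample method could look like. This is an illustration, not code from the PR — the test name and the `getattr` dispatch are assumptions; `fitted_model_instance`, `output_var`, and the `n_pred = 100` / `toy_x` constraint are taken from the diff.

import numpy as np
import pandas as pd
import pytest


@pytest.mark.parametrize(
    "method, group",
    [
        ("sample_prior_predictive", "prior_predictive"),
        ("sample_posterior_predictive", "posterior_predictive"),
    ],
)
@pytest.mark.parametrize("extend_idata", [True, False])
def test_sample_extend_idata_param(fitted_model_instance, method, group, extend_idata):
    output_var = fitted_model_instance.output_var
    # Copy the group's current values so mutation of the fixture is detectable later
    idata_prev = getattr(fitted_model_instance.idata, group)[output_var].values.copy()

    n_pred = 100  # Must match toy_x
    x_pred = np.random.uniform(0, 1, n_pred)
    prediction_data = pd.DataFrame({"input": x_pred})

    # Dispatch to sample_prior_predictive or sample_posterior_predictive
    sample = getattr(fitted_model_instance, method)
    pred = sample(prediction_data["input"], combined=False, extend_idata=extend_idata)

    pred_unstacked = pred[output_var].values
    idata_now = getattr(fitted_model_instance.idata, group)[output_var].values

    if extend_idata:
        # The stored group should now hold exactly the new predictions
        np.testing.assert_array_equal(idata_now, pred_unstacked)
    else:
        # The stored group should be untouched
        np.testing.assert_array_equal(idata_now, idata_prev)

Note that copying `idata_prev` up front sidesteps the shape-comparison workaround in the diff, and a function-scoped fixture would address the mutation concern by giving each parametrization a fresh model.
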
Reviewer: Can we test that all these `join="right"` calls are doing the right thing (i.e., discarding the old value and replacing it with the new one), and that `extend_idata=False` is being respected?

Author: It should be possible. I'll work on it and update the PR accordingly.
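
For reference, a minimal, self-contained sketch of the `join="right"` behavior in question, assuming the model extends its `idata` via arviz's `InferenceData.extend` (the `make_idata` helper and the constant fill values are ad-hoc to this sketch):

import arviz as az
import numpy as np
import xarray as xr


def make_idata(value):
    # One-group InferenceData whose prior_predictive is a constant array,
    # so it is obvious which side "won" after the extend
    ds = xr.Dataset({"y": (("chain", "draw", "obs"), np.full((1, 10, 5), value))})
    return az.InferenceData(prior_predictive=ds)


old = make_idata(0.0)
new = make_idata(1.0)

# join="right": on a group-name clash, the incoming data replaces the existing data
old.extend(new, join="right")
np.testing.assert_array_equal(old.prior_predictive["y"].values, np.full((1, 10, 5), 1.0))

With `join="left"` (the default) the existing values would have been kept instead, which is exactly the failure mode the requested test should catch.
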