Skip to content

Commit

Permalink
Behnam: count glom added, phenocycler code added
Browse files Browse the repository at this point in the history
  • Loading branch information
behnam-yousefi committed Aug 28, 2024
1 parent d180805 commit 9f9736a
Show file tree
Hide file tree
Showing 6 changed files with 1,577 additions and 103 deletions.
4 changes: 2 additions & 2 deletions notebooks/03_image_registration/README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Image registration
# H&E-DAPI registration

For each sample,
1. Cut the DAPI via ```seperate_samples_LEVEL1.ipynb``` and save TIFF and anndata objects.
Expand All @@ -8,7 +8,7 @@ For each sample,
For visualization, we need to read the anndata for each sample, then attach the corresponding registered H&E image to it as in ```plot_example.ipynb```.


# Phenocycler Registration
# Phenocycler-DAPI Registration

1. Cut the DAPI via ```seperate_samples_LEVEL1_slide.ipynb```
2. Cut Phenocycler image ```Xenium Slide_Scan1.qptiff``` via Fiji.
Expand Down
303 changes: 303 additions & 0 deletions notebooks/03_image_registration/add_phenocycler_as_column.ipynb

Large diffs are not rendered by default.

51 changes: 37 additions & 14 deletions notebooks/03_image_registration/plot_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,10 @@
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 29,
"id": "41f32fd4-3393-49ef-839a-100649382c42",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The autoreload extension is already loaded. To reload it, use:\n",
" %reload_ext autoreload\n"
]
}
],
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
Expand All @@ -31,7 +22,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 30,
"id": "b70c8c05-922d-46bb-af71-9d29b7398a7b",
"metadata": {},
"outputs": [],
Expand All @@ -42,7 +33,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 31,
"id": "cf619637-9ced-4975-9d9e-788e1bf91409",
"metadata": {},
"outputs": [],
Expand All @@ -54,7 +45,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 72,
"id": "56e014fb-93f0-40e6-be1f-44a2f980d00b",
"metadata": {},
"outputs": [],
Expand Down Expand Up @@ -142,6 +133,38 @@
"\n",
" adata.write_h5ad(save_dir + f'{sample}.h5ad')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "28107df7-9793-4e36-9354-083bc1332249",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "e38d6302-ed2f-47da-8eb5-0b6a954bf155",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "b321caa7-ab45-4d53-b1e4-8a63d96a9433",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "238fbd8c-e775-4233-b715-5e6bb9dc3ea5",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -84,18 +84,18 @@
},
{
"cell_type": "code",
"execution_count": 56,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"he_dir = \"../Phenocycler/\"\n",
"dapi_dir = \"/data/projects/robin/robin_xenium_cgn_from_epyc/seperate_samples/imgs_LEVEL1_new/\"\n",
"moving_dapi_dir = \"data_level_1/phenocycler_croped/\"\n",
"fixed_dapi_dir = \"data_level_1/dapi/\"\n",
"\n",
"save_dir = \"data/registered_phenocycler/\"\n",
"save_dir = \"data_level_1/phenocycler_registered/\"\n",
"\n",
"sample = 'X1'\n",
"he_image = he_dir + sample + '.tif'\n",
"dapi_image = dapi_dir + sample + '.tiff'\n",
"moving_dapi = moving_dapi_dir + sample + '_dapi.tif'\n",
"fixed_dapi = fixed_dapi_dir + sample + '.tiff'\n",
"\n",
"# dapi_image = \"/projectbig/jupyternotebook/xenium_cgn/robin_xenium_cgn_from_epyc/seperate_samples/imgs1/slide1_X1_Cntrl_0.tiff\""
]
Expand All @@ -117,7 +117,7 @@
}
],
"source": [
"fixed_image_original = imread(dapi_image)\n",
"fixed_image_original = imread(fixed_dapi)\n",
"# fixed_image = rescale(fixed_image, 1, preserve_range=True)\n",
"# rescale to 0-255\n",
"# fixed_image = (fixed_image - fixed_image.min()) / (fixed_image.max() - fixed_image.min()) * 255\n",
Expand All @@ -142,7 +142,7 @@
}
],
"source": [
"moving_image_all = imread(he_image)\n",
"moving_image_all = imread(moving_dapi)\n",
"# moving_image = rescale(moving_image, 1, preserve_range=True)\n",
"# rescale to 0-255\n",
"# moving_image = (moving_image - moving_image.min()) / (moving_image.max() - moving_image.min()) * 255\n",
Expand Down Expand Up @@ -412,90 +412,25 @@
}
],
"source": [
"# reg = np.zeros_like(moving_image_original)\n",
"n_phenocycler_channel = 5\n",
"\n",
"for i in range(1): # Loop over the RGB channels\n",
" channel = moving_image_original\n",
"for i in range(n_phenocycler_channel): # phenocycler channel\n",
" \n",
"    channel = moving_dapi_dir + sample + f'_channel_{i}.tif'\n",
" warped_channel_1 = warp_via_vectorfield(channel, field1)\n",
" warped_channel_2 = warp_via_vectorfield(warped_channel_1, field2)\n",
" reg = warped_channel_2\n",
"reg.shape"
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {},
"outputs": [],
"source": [
"reg_image = save_dir + sample + '_reg.tiff'\n",
"imsave(reg_image, reg)"
"    reg_image = save_dir + sample + f'_reg_channel_{i}.tiff'\n",
" imsave(reg_image, reg)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"# imsave(f'reg_x21_level_1.tiff', reg)"
]
},
{
"cell_type": "code",
"execution_count": 74,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"reg_image = save_dir + sample + '_dapi.tiff'\n",
"imsave(reg_image, fixed_image_original)"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(6939, 5252)"
]
},
"execution_count": 84,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moving_image_original.shape"
]
},
{
"cell_type": "code",
"execution_count": 91,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[0, 1, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [1, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [1, 1, 1, ..., 1, 1, 1],\n",
" [0, 1, 1, ..., 1, 1, 1],\n",
" [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)"
]
},
"execution_count": 91,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moving_image_original"
]
"source": []
},
{
"cell_type": "markdown",
Expand Down
10 changes: 5 additions & 5 deletions notebooks/03_image_registration/seperate_samples.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -183,15 +183,15 @@
" #plt.clf()\n",
"\n",
" img = sub.uns[\"spatial\"][ID][\"images\"][f\"dapi_LEVEL{LEVEL}\"]\n",
" cur_coords = np.concatenate([xlim, ylim])\n",
" img_cropped = img[int(cur_coords[2]): int(np.ceil(cur_coords[3])),\\\n",
" int(cur_coords[0]): int(np.ceil(cur_coords[1]))]\n",
" cut_coords = np.concatenate([xlim, ylim])\n",
" img_cropped = img[int(cut_coords[2]): int(np.ceil(cut_coords[3])),\\\n",
" int(cut_coords[0]): int(np.ceil(cut_coords[1]))]\n",
"\n",
" sub.uns[\"spatial\"][ID][\"images\"][f\"dapi_LEVEL{LEVEL}\"+\"_cropped\"] = img_cropped\n",
" sub.uns[\"spatial\"][ID][\"scalefactors\"][\"tissue_dapi_LEVEL{LEVEL}_cropped_scalef\"] = 1\n",
"\n",
" sub.obsm[\"spatial\"][:,0] = sub.obsm[\"spatial\"][:,0]*scale_factor-int(cur_coords[0])\n",
" sub.obsm[\"spatial\"][:,1] = sub.obsm[\"spatial\"][:,1]*scale_factor-int(cur_coords[2])\n",
" sub.obsm[\"spatial\"][:,0] = sub.obsm[\"spatial\"][:,0]*scale_factor-int(cut_coords[0])\n",
" sub.obsm[\"spatial\"][:,1] = sub.obsm[\"spatial\"][:,1]*scale_factor-int(cut_coords[2])\n",
"\n",
" keys = list(sub.uns[\"spatial\"].keys())\n",
" newdict = deepcopy(sub.uns[\"spatial\"])\n",
Expand Down
Loading

0 comments on commit 9f9736a

Please sign in to comment.