## The basic files and libraries needed for most presentations
# creates the libraries and common-functions sections
read_chunk("../common/utility_functions.R")
require(ggplot2) # for plots
require(lattice) # nicer scatter plots
require(plyr) # for processing data.frames
require(grid) # contains the arrow function
require(doMC) # for parallel code
require(png) # for reading png images
require(gridExtra)
require(reshape2) # for the melt function
#if (!require("biOps")) {
#  # for basic image processing
#  devtools::install_github("cran/biOps")
#  library("biOps")
#}
## To install EBImage
if (!require("EBImage")) { # for more image processing
  source("http://bioconductor.org/biocLite.R")
biocLite("EBImage")
}
used.libraries<-c("ggplot2","lattice","plyr","reshape2","grid","gridExtra","biOps","png","EBImage")
# start parallel environment
registerDoMC()
# functions for converting images back and forth
im.to.df<-function(in.img,out.col="val") {
out.im<-expand.grid(x=1:nrow(in.img),y=1:ncol(in.img))
out.im[,out.col]<-as.vector(in.img)
out.im
}
df.to.im<-function(in.df,val.col="val",inv=F) {
in.vals<-in.df[[val.col]]
if(class(in.vals[1])=="logical") in.vals<-as.integer(in.vals*255)
if(inv) in.vals<-255-in.vals
out.mat<-matrix(in.vals,nrow=length(unique(in.df$x)),byrow=F)
attr(out.mat,"type")<-"grey"
out.mat
}
ddply.cutcols<-function(...,cols=1) {
# run standard ddply command
cur.table<-ddply(...)
cutlabel.fixer<-function(oVal) {
sapply(oVal,function(x) {
cnv<-as.character(x)
mean(as.numeric(strsplit(substr(cnv,2,nchar(cnv)-1),",")[[1]]))
})
}
cutname.fixer<-function(c.str) {
s.str<-strsplit(c.str,"(",fixed=T)[[1]]
t.str<-strsplit(paste(s.str[c(2:length(s.str))],collapse="("),",")[[1]]
paste(t.str[c(1:length(t.str)-1)],collapse=",")
}
for(i in c(1:cols)) {
cur.table[,i]<-cutlabel.fixer(cur.table[,i])
names(cur.table)[i]<-cutname.fixer(names(cur.table)[i])
}
cur.table
}
show.pngs.as.grid<-function(file.list,title.fun,zoom=1) {
preparePng<-function(x) rasterGrob(readPNG(x,native=T,info=T),width=unit(zoom,"npc"),interp=F)
labelPng<-function(x,title="junk") (qplot(1:300, 1:300, geom="blank",xlab=NULL,ylab=NULL,asp=1)+
annotation_custom(preparePng(x))+
labs(title=title)+theme_bw(24)+
theme(axis.text.x = element_blank(),
axis.text.y = element_blank()))
imgList<-llply(file.list,function(x) labelPng(x,title.fun(x)) )
do.call(grid.arrange,imgList)
}
## Standard image processing tools which I use for visualizing the examples in the script
commean.fun<-function(in.df) {
ddply(in.df,.(val), function(c.cell) {
weight.sum<-sum(c.cell$weight)
data.frame(xv=mean(c.cell$x),
yv=mean(c.cell$y),
xm=with(c.cell,sum(x*weight)/weight.sum),
ym=with(c.cell,sum(y*weight)/weight.sum)
)
})
}
colMeans.df<-function(x,...) as.data.frame(t(colMeans(x,...)))
pca.fun<-function(in.df) {
ddply(in.df,.(val), function(c.cell) {
c.cell.cov<-cov(c.cell[,c("x","y")])
c.cell.eigen<-eigen(c.cell.cov)
c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
out.df<-cbind(c.cell.mean,
data.frame(vx=c.cell.eigen$vectors[1,],
vy=c.cell.eigen$vectors[2,],
vw=sqrt(c.cell.eigen$values),
th.off=atan2(c.cell.eigen$vectors[2,],c.cell.eigen$vectors[1,]))
)
})
}
vec.to.ellipse<-function(pca.df) {
ddply(pca.df,.(val),function(cur.pca) {
# assume there are two vectors now
create.ellipse.points(x.off=cur.pca[1,"x"],y.off=cur.pca[1,"y"],
b=sqrt(5)*cur.pca[1,"vw"],a=sqrt(5)*cur.pca[2,"vw"],
th.off=pi/2-atan2(cur.pca[1,"vy"],cur.pca[1,"vx"]),
x.cent=cur.pca[1,"x"],y.cent=cur.pca[1,"y"])
})
}
# test function for ellipse generation
# ggplot(ldply(seq(-pi,pi,length.out=100),function(th) create.ellipse.points(a=1,b=2,th.off=th,th.val=th)),aes(x=x,y=y))+geom_path()+facet_wrap(~th.val)+coord_equal()
create.ellipse.points<-function(x.off=0,y.off=0,a=1,b=NULL,th.off=0,th.max=2*pi,pts=36,...) {
if (is.null(b)) b<-a
th<-seq(0,th.max,length.out=pts)
data.frame(x=a*cos(th.off)*cos(th)+b*sin(th.off)*sin(th)+x.off,
y=-1*a*sin(th.off)*cos(th)+b*cos(th.off)*sin(th)+y.off,
id=as.factor(paste(x.off,y.off,a,b,th.off,pts,sep=":")),...)
}
deform.ellipse.draw<-function(c.box) {
create.ellipse.points(x.off=c.box$x[1],
y.off=c.box$y[1],
a=c.box$a[1],
b=c.box$b[1],
th.off=c.box$th[1],
col=c.box$col[1])
}
bbox.fun<-function(in.df) {
ddply(in.df,.(val), function(c.cell) {
c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
xmn<-emin(c.cell$x)
xmx<-emax(c.cell$x)
ymn<-emin(c.cell$y)
ymx<-emax(c.cell$y)
out.df<-cbind(c.cell.mean,
data.frame(xi=c(xmn,xmn,xmx,xmx,xmn),
yi=c(ymn,ymx,ymx,ymn,ymn),
xw=xmx-xmn,
yw=ymx-ymn
))
})
}
# since the edge of the pixel is 0.5 away from the middle of the pixel
emin<-function(...) min(...)-0.5
emax<-function(...) max(...)+0.5
extents.fun<-function(in.df) {
ddply(in.df,.(val), function(c.cell) {
c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
out.df<-cbind(c.cell.mean,data.frame(xmin=c(c.cell.mean$x,emin(c.cell$x)),
xmax=c(c.cell.mean$x,emax(c.cell$x)),
ymin=c(emin(c.cell$y),c.cell.mean$y),
ymax=c(emax(c.cell$y),c.cell.mean$y)))
})
}
common.image.path<-"../common"
qbi.file<-function(file.name) file.path(common.image.path,"figures",file.name)
qbi.data<-function(file.name) file.path(common.image.path,"data",file.name)
th_fillmap.fn<-function(max.val) scale_fill_gradientn(colours=rainbow(10),limits=c(0,max.val))
Quantitative Big Imaging
author: Kevin Mader
date: 30 April 2015
width: 1440
height: 900
css: ../common/template.css
transition: rotate
ETHZ: 227-0966-00L
Dynamic Experiments
Course Outline
source('../common/schedule.R')
19th February - Introduction and Workflows
26th February - Image Enhancement (A. Kaestner)
5th March - Basic Segmentation, Discrete Binary Structures
12th March - Advanced Segmentation
19th March - Applying Graphical Models and Machine Learning (A. Lucchi)
26th March - Analyzing Single Objects
2nd April - Analyzing Complex Objects
16th April - Groups and Spatial Distribution
23rd April - Statistics and Reproducibility
30th April - Dynamic Experiments
7th May - Scaling Up / Big Data
21st May - Guest Lecture, Applications in High-content Screening and Wood
John C. Russ, “The Image Processing Handbook”, (Boca Raton, CRC Press)
Available online within domain ethz.ch (or proxy.ethz.ch / public VPN)
Papers / Sites
Comparison of Tracking Methods in Biology
Chenouard, N., Smal, I., de Chaumont, F., Maška, M., Sbalzarini, I. F., Gong, Y., … Meijering, E. (2014). Objective comparison of particle tracking methods. Nature Methods, 11(3), 281–289. doi:10.1038/nmeth.2808
Maška, M., Ulman, V., Svoboda, D., Matula, P., Matula, P., Ederra, C., … Ortiz-de-Solorzano, C. (2014). A benchmark for comparison of cell tracking algorithms. Bioinformatics (Oxford, England), btu080. doi:10.1093/bioinformatics/btu080
Multiple Hypothesis Testing
Coraluppi, S. & Carthel, C. Multi-stage multiple-hypothesis tracking. J. Adv. Inf. Fusion 6, 57–67 (2011).
Chenouard, N., Bloch, I. & Olivo-Marin, J.-C. Multiple hypothesis tracking in microscopy images. in Proc. IEEE Int. Symp. Biomed. Imaging 1346–1349 (IEEE, 2009).
Previously on QBI ...
Image Enhancement
Highlighting the contrast of interest in images
Minimizing Noise
Understanding image histograms
Automatic Methods
Component Labeling
Single Shape Analysis
Complicated Shapes
Distribution Analysis
Quantitative "Big" Imaging
The course has covered imaging extensively and introduced a few quantitative metrics, but "big" has not really entered the picture yet.
What does big mean?
Not just / even large
it means being ready for big data
volume, velocity, variety (3 V's)
scalable, fast, easy to customize
So what is "big" imaging
doing analyses in a disciplined manner
fixed steps
easy to regenerate results
no magic
having everything automated
100 samples is as easy as 1 sample
being able to adapt and reuse analyses
one script that works really well, where only the parameters need to be modified
different types of cells
different regions
Objectives
What sort of dynamic experiments do we have?
How can we design good dynamic experiments?
How can we track objects between points?
How can we track shape?
How can we track distribution?
How can we track topology?
How can we track voxels?
How can we assess deformation and strain?
How can we assess more general cases?
Outline
Motivation (Why and How?)
Scientific Goals
Experiments
Simulations
Experiment Design
Object Tracking
Distribution
Topology
Voxel-based Methods
Cross Correlation
DIC
DIC + Physics
General Problems
Thickness - Lung Tissue
Curvature - Metal Systems
Two Point Correlation - Volcanic Rock
Motivation
3D images are already difficult to interpret on their own
3D movies (4D) are almost impossible
2D movies (3D) can also be challenging
We can say what it looks like, but many pieces of quantitative information are difficult to extract
how fast is it going?
how many particles are present?
are their sizes constant?
are some moving faster?
are they rearranging?
Scientific Goals
Rheology
Understanding the flow of liquids and mixtures is important for many processes
blood movement in arteries, veins, and capillaries
oil movement through porous rock
air through dough when cooking bread
magma and gas in a volcano
Deformation
Deformation is similarly important since it plays a significant role in the following scenarios
red blood cell lysis in artificial heart valves
microfractures growing into stress fractures in bone
toughening in certain wood types
Experiments
The first step of any of these analyses is proper experimental design. Since there is always
a limited field of view
a voxel size
a maximum rate of measurements
a non-zero cost for each measurement
There are always trade-offs to be made between getting the best possible high-resolution nanoscale dynamics and capturing the system level behavior.
If we measure too fast
sample damage
miss out on long term changes
have noisy data
Too slow
miss small, rapid changes
blurring and other motion artifacts
Too high resolution
not enough unique structures in field of view to track
Too low resolution
not sensitive to small changes
Simulation
In many cases, experimental data is inherited and little can be done about the design, but when there is still the opportunity, simulations provide a powerful tool for tuning and balancing a large number of parameters
Simulations also provide the ability to pair post-processing to the experiments and determine the limits of tracking.
What do we start with?
Going back to our original cell image
We have been able to get rid of the noise in the image and find all the cells (lecture 2-4)
We have analyzed the shape of the cells using the shape tensor (lecture 5)
We even separated cells joined together using Watershed (lecture 6)
We have created even more metrics characterizing the distribution (lecture 7)
We have at least a few samples (or different regions), a large number of metrics, and an almost as large number of parameters to tune
How do we do something meaningful with it?
Basic Simulation
We start with a starting image
# Fill Image code
# ... is for extra columns in the data set
fill.img.fn<-function(in.img,step.size=1,...) {
xr<-range(in.img$x)
yr<-range(in.img$y)
ddply(expand.grid(x=seq(xr[1],xr[2],step.size),
y=seq(yr[1],yr[2],step.size)),
.(x,y),
function(c.pos) {
ix<-c.pos$x[1]
iy<-c.pos$y[1]
nset<-subset(in.img,x==ix & y==iy)
if(nrow(nset)<1) nset<-data.frame(x=ix,y=iy,val=0,...)
nset
})
}
make.spheres<-function(sph.list,base.gr=seq(-1,1,length.out=40)) {
start.image<-expand.grid(x=base.gr,y=base.gr)
start.image$val<-c(0)
for(i in 1:nrow(sph.list)) {
start.image$val<-with(start.image,
val + (
((x-sph.list[i,"x"])^2+(y-sph.list[i,"y"])^2)<
sph.list[i,"r"]^2)
)
}
start.image$phase<-with(start.image,ifelse(val>0,TRUE,FALSE))
start.image
}
rand.list<-function(n.pts,r=0.15,min=-1,max=1) data.frame(x=runif(n.pts,min=min,max=max),y=runif(n.pts,min=min,max=max),r=r)
grid.list<-function(n.pts,r=0.15,min=-1,max=1) cbind(expand.grid(x=seq(min,max,length.out=n.pts),y=seq(min,max,length.out=n.pts)),r=r)
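For example, the helpers above can be combined to generate and display such a phantom (a small usage sketch; the actual figure in the slides may have used different settings):
# usage sketch: 9 spheres of radius 0.15 on a regular 3x3 grid
demo.img<-make.spheres(grid.list(3,r=0.15))
ggplot(subset(demo.img,phase),aes(x,y))+
  geom_raster(fill="red")+
  coord_equal()+
  theme_bw(20)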
A number of sphere objects with the same radius scattered evenly across the field of view
Even perfect spherical objects do not move in a straight line. The jitter can be seen as a stochastic variable with a random magnitude (a) and angle (b). This is then sampled at every point in the field
The simulation can be represented more clearly by using single lines to represent each spheroid
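The many.grids table used in the plot below is generated elsewhere in the lecture code; a hedged sketch of how such a table could be built (a constant flow in y plus random jitter with magnitude a and angle b, producing columns x, y, and a numeric id as assumed from the plotting call) is:
# illustrative generator for a jittered linear-flow path table (assumed structure)
jitter.paths<-function(n.pts=16,n.frames=10,flow=0.1,jitter=0.05) {
  start.pos<-rand.list(n.pts)
  ldply(1:n.pts,function(i) {
    a<-runif(n.frames,0,jitter) # random jitter magnitude
    b<-runif(n.frames,-pi,pi)   # random jitter angle
    data.frame(x=start.pos$x[i]+cumsum(a*cos(b)),
               y=start.pos$y[i]+cumsum(flow+a*sin(b)),
               id=i)
  })
}
# many.grids<-jitter.paths() # rough stand-in for the data plotted below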
ggplot(many.grids,aes(x,y))+
geom_path(aes(color=id,group=id))+
coord_equal()+
labs(title="Different Paths in Linear Jittered Flow Image")+
scale_color_gradientn(colours=rainbow(10))+
theme_bw(20)
Limits of Tracking
We see that visually tracking samples can be difficult and there are a number of parameters which affect our ability to clearly see the tracking.
flow rate
flow type
density
appearance and disappearance rate
jitter
particle uniqueness
We thus try to quantify the limits of these parameters for different tracking methods in order to design experiments better.
While there exist a number of different methods and complicated approaches for tracking, for experimental design it is best to start with the simplest, most easily understood method. The limits of this can be found and components added as needed until it is possible to realize the experiment.
If a dataset can only be analyzed with a multiple-hypothesis testing neural network model then it might not be so reliable
We then return to nearest neighbor which means we track a point ($\vec{P}_0$) from an image ($I_0$) at $t_0$ to a point ($\vec{P}_1$) in image ($I_1$) at $t_1$ by
$$ \vec{P}_1 = \textrm{argmin}(||\vec{P}_0 - \vec{y}|| \;\forall \vec{y} \in I_1) $$
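A minimal sketch of this matching in R (the function and data-frame names here are made up for illustration; the inputs are assumed to be data frames of object centroids with x and y columns):
# hypothetical nearest-neighbor matcher: for every point in frame.0.pts,
# find the closest point in frame.1.pts
nn.match<-function(frame.0.pts,frame.1.pts) {
  ddply(cbind(frame.0.pts,pt.id=1:nrow(frame.0.pts)),.(pt.id),function(c.pt) {
    # distance from this P_0 to every candidate y in I_1
    all.dists<-with(frame.1.pts,sqrt((x-c.pt$x)^2+(y-c.pt$y)^2))
    best<-which.min(all.dists)
    data.frame(x0=c.pt$x,y0=c.pt$y,
               x1=frame.1.pts$x[best],y1=frame.1.pts$y[best],
               dist=all.dists[best])
  })
}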
Scoring Tracking
In the following examples we will use simple metrics for scoring fits where the objects are matched and the number of misses is counted.
There are a number of more sensitive scoring metrics which can be used by finding the best submatch for a given particle, since the number of matches and particles does not always correspond. See the papers at the beginning for more information.
Before any meaningful tracking tasks can be performed, the first step is to register the measurements so they are all on the same coordinate system.
Often the registration can be done along with the tracking by separating the movement into actual sample movement and other (camera, setup, etc) if the motion of either the sample or the other components can be well modeled.
Quantifying Tracking Rate
We can then quantify the success rate of each algorithm on the data set using the very simple match and mismatch metrics
n.iters<-20
registerDoMC(8) # divide the jobs better
jitter.vals<-seq(0,2,length.out=4)
irseq<-function(a,b,length.out) {1/seq(1/b^(1/3),1/a^(1/3),length.out=length.out)^3} # seq for inverted numbers
obj.count<-irseq(25,2500,length.out=3)
jit.bub<-merge(obj.count,jitter.vals)
jd.vals<-mapply(list,rep(jit.bub[,1],n.iters),rep(jit.bub[,2],n.iters),SIMPLIFY=F)
jd.full<-ldply(jd.vals,.parallel=T,
function(c.in) jd.gen.fun(c.in[[2]],c.in[[1]]))
jd.summary<-ddply(jd.full,.(jitter,mean_obj_spacing,Matching),function(c.subset) {
data.frame(obj.count=c.subset$obj.count[1],
Matched=100*sum(c.subset$Obj.Matched)/(sum(c.subset$Obj.Missed)+sum(c.subset$Obj.Matched)),
obj.matched=sum(c.subset$Obj.Matched),
obj.found=100*with(c.subset,sum(Obj.Matched)/(n.iters*(n.frames-1)*obj.count[1])))
})
ggplot(jd.summary,aes(x=100*jitter,y=Matched,color=as.factor(round(100*mean_obj_spacing))))+
geom_line()+geom_point()+facet_grid(~Matching)+
theme_bw(24)+labs(x="Position Jitter (% of Velocity)",y="% of Obj Matched",color="Obj.Spacing\n(% of Velocity)")
ggplot(jd.summary,aes(x=100*jitter,y=100*mean_obj_spacing,fill=Matched))+
geom_tile()+facet_grid(~Matching)+
labs(x="Position Jitter (% of Velocity)",fill="% of Obj Matched",y="Obj.Spacing (% of Velocity)")+
theme_bw(10)
Designing Experiments
How does this help us to design experiments?
density can be changed by adjusting the concentration of the substances being examined or the field of view
flow per frame (image velocity) can usually be adjusted by changing pressure or acquisition time
jitter can be estimated from images
How much is enough?
difficult to create one number for every experiment
5% error in bubble position
→ <5% in flow field
→ >20% error in topology
5% error in shape or volume
→ 5% in distribution or changes
→ >5% in individual bubble changes
→ >15% for single bubble strain tensor calculations
Extending Nearest Neighbor
Bijective Requirement
We define $\vec{P}_f$ as the result of performing the nearest neighbor tracking on $\vec{P}_0$
$$ \vec{P}_f = \textrm{argmin}(||\vec{P}_0 - \vec{y}|| \;\forall \vec{y} \in I_1) $$
We define $\vec{P}_i$ as the result of performing the nearest neighbor tracking on $\vec{P}_f$
$$ \vec{P}_i = \textrm{argmin}(||\vec{P}_f - \vec{y}|| \;\forall \vec{y} \in I_0) $$
We say the tracking is bijective if these two points are the same
A global drift (offset) can then be calculated in an iterative fashion, where the offset is the average of all of the $\vec{P}_1 - \vec{P}_0$ vectors. The tracking can then be performed again with this offset included
$$ \vec{P}_1 = \textrm{argmin}(||\vec{P}_0 + \vec{v}_{offset} - \vec{y}|| \;\forall \vec{y} \in I_1) $$
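Continuing the hypothetical nn.match sketch from earlier, the bijective requirement can be checked by tracking forward, tracking the matched points back, and keeping only the points that land where they started:
# bijective filtering sketch (assumes the nn.match helper defined above)
bijective.match<-function(frame.0.pts,frame.1.pts,tol=1e-6) {
  fwd<-nn.match(frame.0.pts,frame.1.pts)                    # P_0 -> P_f
  bwd<-nn.match(data.frame(x=fwd$x1,y=fwd$y1),frame.0.pts)  # P_f -> P_i
  # keep matches where P_i lands back on P_0 (within a small tolerance)
  keep<-sqrt((bwd$x1-fwd$x0)^2+(bwd$y1-fwd$y0)^2)<tol
  fwd[keep,]
}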
Beyond Nearest Neighbor
While nearest neighbor provides a useful starting tool it is not sufficient for truly complicated flows and datasets.
Better Approaches
Multiple Hypothesis Testing
Nearest neighbor just compares the points between two frames, and there is much more information available in most time-resolved datasets. This approach allows multiple possible paths to be explored at the same time, with the best one chosen only after all frames have been examined
Shortcomings
Merging and Splitting Particles
The simplicity of the nearest neighbor model does not really allow for particles to merge and split (relaxing the bijective requirement allows such behavior, but the method is still not suited for such tracking). For such systems a more specific, physically-based model is required to encapsulate this behavior.
Voxel-based Approaches
For voxel-based approaches the most common analyses are digital image correlation (or for 3D images digital volume correlation), where the correlation is calculated between two images or volumes.
Standard Image Correlation
Given images $I_0(\vec{x})$ and $I_1(\vec{x})$ at times $t_0$ and $t_1$ respectively, the correlation between these two images can be calculated as
$$ C_{I_0,I_1}(\vec{r}) = \langle I_0(\vec{x}) I_1(\vec{x}+\vec{r}) \rangle $$
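For intuition, the formula can be evaluated directly on two equally sized matrices (a brute-force sketch, separate from the data-frame based cc.imfun helper used below): shift one image by (rx, ry) and average the product over the overlapping region.
# illustrative correlation of matrices im0 and im1 at an integer offset (rx, ry)
corr.at.offset<-function(im0,im1,rx,ry) {
  nx<-nrow(im0); ny<-ncol(im0)
  ix<-max(1,1-rx):min(nx,nx-rx) # pixels whose partner x+rx stays inside im1
  iy<-max(1,1-ry):min(ny,ny-ry)
  mean(im0[ix,iy]*im1[ix+rx,iy+ry])
}
# e.g. corr.at.offset(img.mat.0, img.mat.1, rx=2, ry=0) for hypothetical matrices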
# so everything is on an integer lattice (interpolation makes everything messier)
fix.grid<-function(in.grid) {
cols.nopos<-!(names(in.grid) %in% c("x","y"))
out.grid<-in.grid[,cols.nopos]
out.grid$x<-as.numeric(as.factor(in.grid$x))
out.grid$y<-as.numeric(as.factor(in.grid$y))
out.grid
}
start.grid<-grid.list(5)
start.img<-fix.grid(make.spheres(start.grid,base.gr=seq(-1,1,length.out=40)))
final.grid<-data.frame(x=with(start.grid,x),y=with(start.grid,y+0.1),r=start.grid$r)
final.img<-fix.grid(make.spheres(final.grid,base.gr=seq(-1,1,length.out=40)))
ggplot()+
geom_raster(data=subset(start.img,phase),aes(x,y,fill="0"),alpha=0.75)+
geom_raster(data=subset(final.img,phase),aes(x,y,fill="1"),alpha=0.75)+
coord_equal()+
labs(fill="time")+
theme_bw(20)
#' Calculate the cross correlation
#' @author Kevin Mader (kevin.mader@gmail.com)
#' Generates flow with given object count, frame count and randomness
#' the box and crop are introduced to allow for objects entering and
#' leaving the field of view
#'
#' @param img.a is the starting or I_0 image
#' @param img.b is the destination or I_1 image
#' @param tr.x is the function transforming the x coordinate
#' @param
#'
cc.imfun<-function(img.a,img.b,tr.x=function(x,y) x,tr.y=function(x,y) y) {
# get the positions in image a
x.vals<-unique(img.a$x)
y.vals<-unique(img.a$y)
# transform image b
tr.img.b<-img.b
# round is used to put everything back on an integer lattice
tr.img.b$x<-round(with(img.b,tr.x(x,y)))
tr.img.b$y<-round(with(img.b,tr.y(x,y)))
# count the overlapping pixels in the window to normalize
tr.img.b<-subset(tr.img.b,
((x %in% x.vals) & (y %in% y.vals))
)
norm.f<-nrow(tr.img.b)
if(norm.f<1) norm.f<-1
# keep only the in-phase objects
tr.img.a<-subset(img.a,phase)
tr.img.b<-subset(tr.img.b,phase)
if (nrow(tr.img.a)>0 & nrow(tr.img.b)>0) {
matches<-ddply(rbind(cbind(tr.img.a,label="A"),cbind(tr.img.b,label="B")),.(x,y),
function(c.pos) {
if(nrow(c.pos)>1) data.frame(e.val=1)
else data.frame(e.val=c())
})
data.frame(e.val=sum(matches$e.val)/norm.f,count=nrow(matches),size=norm.f)
} else {
data.frame(e.val=0,count=0,size=norm.f)
}
}
ggplot(cc.img,aes(vx,vy,fill=e.val))+
geom_raster()+geom_density2d(data=subset(cc.img,e.val>0),aes(weight=e.val^2))+
labs(x="u",y="v",fill="Correlation",title="Correlation vs r")+
scale_fill_gradient2(high="red")+
theme_bw(25)
Extending Correlation
The correlation function can be extended by adding rotation and scaling terms to the offset, making the tool more flexible but also more computationally expensive for large search spaces (a sketch of such an extension follows the block-matching code below).
#' Calculate the cross correlation
#' @author Kevin Mader (kevin.mader@gmail.com)
#' Generates flow with given object count, frame count and randomness
#' the box and crop are introduced to allow for objects entering and
#' leaving the field of view
#'
#' @param bulk.img the image passed from the blockify command
#' @param f.img is the template image to compare against (since we want the whole image not just a region)
#' @param nsize is the size of the region to search
#' @param nstep is the step to use
block.corr.fun<-function(bulk.img,s.img,nsize=6,nstep=2) {
cc.points<-expand.grid(vx=seq(-nsize,nsize,nstep),vy=seq(-nsize,nsize,nstep))
ddply(cc.points,.(vx,vy),function(c.pt) {
tr.x<-function(x,y) (x+c.pt[1,"vx"])
tr.y<-function(x,y) (y+c.pt[1,"vy"])
cc.imfun(s.img,bulk.img,tr.x=tr.x,tr.y=tr.y)
})
}
w.bcf<-function(s.img,nsize=7,nstep=2) function(cur.block) block.corr.fun(cur.block,s.img,nsize=nsize,nstep=nstep)
max.corr.val<-function(in.blocks) ddply(in.blocks,.(x.center,y.center),function(in.block) {
subset(in.block,e.val>=max(in.block$e.val) & count>0)
})
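A hedged sketch of the rotation/scaling extension mentioned above, reusing the transform-function interface of cc.imfun: build tr.x/tr.y from an offset, a rotation angle, and a scale factor, and sweep over those parameters as well (the helper names here are illustrative).
# illustrative affine (rotate + scale + shift) transform factory for cc.imfun
make.affine.tr<-function(vx=0,vy=0,th=0,sc=1) {
  list(tr.x=function(x,y) sc*(cos(th)*x-sin(th)*y)+vx,
       tr.y=function(x,y) sc*(sin(th)*x+cos(th)*y)+vy)
}
# sweep a small grid of offsets and rotation angles (scale fixed at 1 here)
affine.search<-function(img.a,img.b,nsize=4,nstep=2,th.range=pi/8) {
  search.pts<-expand.grid(vx=seq(-nsize,nsize,nstep),
                          vy=seq(-nsize,nsize,nstep),
                          th=seq(-th.range,th.range,length.out=3))
  ddply(search.pts,.(vx,vy,th),function(c.pt) {
    tr<-make.affine.tr(c.pt$vx[1],c.pt$vy[1],c.pt$th[1])
    cc.imfun(img.a,img.b,tr.x=tr$tr.x,tr.y=tr$tr.y)
  })
}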
DIC or DVC by themselves include no sanity check for realistic offsets in the correlation itself. The method can, however, be integrated with physical models to find a more optimal solution using
information from surrounding points
smoothness criteria
maximum deformation / force
material properties
$$ C_{cost} = \underbrace{C_{I_0,I_1}(\vec{r})}_{\textrm{Correlation Term}} + \underbrace{\lambda ||\vec{r}||}_{\textrm{Deformation Term}} $$
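A minimal sketch of applying such a penalty to the block-matching output above (the lambda value and the helper name are made up for this example): each candidate offset is scored by its correlation minus a deformation penalty, which is equivalent to minimizing the cost above with the correlation term negated.
# illustrative regularized selection: keep the offset maximizing e.val - lambda*||r||
max.corr.val.reg<-function(in.blocks,lambda=0.05) {
  ddply(in.blocks,.(x.center,y.center),function(in.block) {
    in.block$cost<-with(in.block,e.val-lambda*sqrt(vx^2+vy^2))
    subset(in.block,cost>=max(cost) & count>0)
  })
}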
Distribution Metrics
As we covered before, distribution metrics like the distribution tensor can be used for tracking changes inside a sample. Of these the most relevant is the texture tensor from cellular materials and liquid foam. The texture tensor is the same as the distribution tensor except that the edges (or faces) represent physically connected / touching objects rather than touching Voronoi faces (or, conversely, Delaunay triangles).
These metrics can also be used for tracking the behavior of a system without tracking the single points since most deformations of a system also deform the distribution tensor and can thus be extracted by comparing the distribution tensor at different time steps.
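As a hedged sketch (assuming a table of link vectors with columns dx and dy, one row per pair of touching objects), the 2D texture tensor can be computed and compared between time steps:
# texture tensor as the mean outer product of the link vectors
texture.tensor<-function(edge.df) {
  with(edge.df,matrix(c(mean(dx*dx),mean(dx*dy),
                        mean(dx*dy),mean(dy*dy)),2,2))
}
# deformation can then be read off by comparing eigen(texture.tensor(edges.t0))
# with eigen(texture.tensor(edges.t1)) for two (hypothetical) time steps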
Quantifying Deformation: Strain
We can take any of these approaches and quantify the deformation using a tool called the strain tensor. Strain is defined in mechanics for the simple 1D case as the change in length relative to the original length:
$$ e = \frac{\Delta L}{L} $$
While this defines the 1D case well, it is difficult to apply such metrics to voxel, shape, and tensor data.
Strain Tensor
There are a number of different ways to calculate strain and the strain tensor, but the most applicable for general image based applications is called the infinitesimal strain tensor, because the element matches well to square pixels and cubic voxels.
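For reference, the standard definition of the infinitesimal strain tensor in terms of the displacement field $\vec{u}(\vec{x})$ (the same displacement estimated by the tracking and correlation approaches above) is
$$ \epsilon_{ij} = \frac{1}{2}\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right) $$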
Data provided by Mattia Pistone and Julie Fife
The air phase changes from small, very anisotropic bubbles to one large connected pore network. The same tools cannot be used to quantify both of these systems. Furthermore, there are motion artifacts which are difficult to correct.
We can utilize the two point correlation function of the material to characterize the shape generically for each time step and then compare.
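A hedged sketch of estimating a radially averaged two-point correlation by random sampling (assuming a segmented data frame on an integer lattice with columns x, y and a logical phase, like the output of fix.grid(make.spheres(...)) above; the function name is made up):
# two-point correlation: probability that two points a distance r apart are both in the phase
two.pt.corr<-function(in.img,r.vals=seq(1,20,2),n.samples=5000) {
  phase.map<-with(in.img,setNames(phase,paste(x,y)))
  ldply(r.vals,function(r) {
    # random starting pixels and a random direction for each
    idx<-sample(nrow(in.img),n.samples,replace=TRUE)
    th<-runif(n.samples,0,2*pi)
    p1<-in.img[idx,]
    p2.key<-paste(round(p1$x+r*cos(th)),round(p1$y+r*sin(th)))
    p2.phase<-phase.map[p2.key]
    p2.phase[is.na(p2.phase)]<-FALSE # outside the image counts as background
    data.frame(r=r,s2=mean(p1$phase & p2.phase))
  })
}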