1 Unsupervised pattern discovery through segmentation. Shoaib Amini, Bioinformatician. Contact: shoaibamini@gmail.com

2
Pre-processing: high dimensional continuous data (rows: measurements, columns: variables); principal component analysis on the variables.
Processing: model based clustering on the first three principal components.
Post-processing: high dimensional discrete data (rows: variables, columns: segments); principal component analysis on the segments; extract the first K principal components (to reduce technical variability); reconstruct the original data from the first K principal components; hierarchical clustering.
Integration: grouped segments (rows: variables, columns: segments); principal component analysis on the segments; correlation analysis with the metadata.
Prediction: prediction using (multiple) linear regression; artificial neural network (ANN) prediction.
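A minimal, self-contained sketch of the first two stages (PCA followed by model based clustering). The toy data and object names here are purely illustrative and not from the slides; the mclust package is assumed:

# Toy example: cluster measurements in principal component space and treat
# each cluster as a segment (sketch only; names are illustrative).
library(mclust)
set.seed(1)
toy <- cbind(a=c(rnorm(50), rnorm(50, mean=5)),
             b=c(rnorm(50), rnorm(50, mean=5)))
toy_pca  <- prcomp(toy, center=TRUE, scale=TRUE)
toy_mfit <- Mclust(toy_pca$x[,1:2], G=1:3)   # model based clustering on the PCs
table(toy_mfit$classification)               # measurements per segment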

3
> var1 = rnorm(1000,mean=1)
> var1[100:199] <- rnorm(100,mean=60,sd=1)
> var1[500:599] <- rnorm(100,mean=10,sd=1)
> var2 = rnorm(1000,mean=1)
> var2[100:199] <- rnorm(100,mean=50,sd=1)
> var2[500:599] <- rnorm(100,mean=20,sd=1)
> var3 = rnorm(1000,mean=1)
> var3[100:199] <- rnorm(100,mean=40,sd=1)
> var3[500:599] <- rnorm(100,mean=30,sd=1)
> var4 = rnorm(1000,mean=1)
> var4[100:199] <- rnorm(100,mean=30,sd=1)
> var4[500:599] <- rnorm(100,mean=40,sd=1)
> var5 = rnorm(1000,mean=1)
> var5[100:199] <- rnorm(100,mean=20,sd=1)
> var5[500:599] <- rnorm(100,mean=50,sd=1)
> var6 = rnorm(1000,mean=1)
> var6[100:199] <- rnorm(100,mean=10,sd=1)
> var6[500:599] <- rnorm(100,mean=60,sd=1)
> # one metadata value per variable
> metadata=c(rnorm(1,mean=10,sd=1),rnorm(1,mean=20,sd=1),rnorm(1,mean=30,sd=1),rnorm(1,mean=40,sd=1),rnorm(1,mean=50,sd=1),rnorm(1,mean=60,sd=1))
> all_var=cbind(var1,var2,var3,var4,var5,var6)
> head(all_var)
            var1      var2         var3       var4      var5        var6
[1,]  0.03806658 0.3982997  2.675511570  0.9291286 0.7773115  0.97503318
[2,]  0.70747428 1.8644934 -0.231888568  2.4432613 0.9042833  0.07779383
[3,]  1.25878822 0.7313337  1.210849907  1.9844810 1.4122895  0.93523563
[4,] -0.15213189 1.5592865  1.253262944  1.2078608 1.5093718  0.60844136
[5,]  1.19578283 1.2349121 -0.004358039  1.1955679 0.8521005  0.01028496
[6,]  1.03012394 0.9740306  1.056100821 -0.1404599 1.1502263 -0.40404076
> # pre-processing: PCA on the centered, scaled variables
> pca <- prcomp(all_var, retx=TRUE, center=TRUE, scale=TRUE)
> png("fig1.png")
> par(mfrow=c(1,2))
> barplot(summary(pca)$importance[3,],main="Cumulative Proportion of Variance",cex.main=0.8,cex.names=0.6)
> barplot(summary(pca)$importance[2,],main="Proportion of Variance",cex.main=0.8,cex.names=0.6)
> dev.off()
> pc1.pc2.pc3=data.frame(PC1=pca$x[,1],PC2=pca$x[,2],PC3=pca$x[,3])
> # processing: model based clustering on the first three principal components
> library(mclust)
> mfit=Mclust(pc1.pc2.pc3,G=1:4)
> COLOR=c(1:mfit$G)
> png("fig2.png")
> par(mfrow=c(2,2))
> plot(pc1.pc2.pc3[,c(1,2)],col=COLOR[mfit$classification])
> plot(pc1.pc2.pc3[,c(2,3)],col=COLOR[mfit$classification])
> plot(pc1.pc2.pc3[,c(1,3)],col=COLOR[mfit$classification])
> plot.new()
> # the legend assumes Mclust settled on three clusters, as it does for these simulated data
> legend("center", legend=c("Cluster 1","Cluster 2","Cluster 3"),fill=COLOR)
> dev.off()
Pre-processing: principal component analysis on the variables of the high dimensional continuous data (rows: measurements, columns: variables). Processing: model based clustering.
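The next slide calls prcomp.recon(), which is not a base R function and is not defined anywhere in the transcript. A minimal sketch of such a helper, assuming it rebuilds the data matrix from a chosen subset of principal components and undoes the centering and scaling applied by prcomp():

# Sketch only: reconstruct the data from the selected PCs, then undo prcomp()'s
# scaling and centering (both steps are no-ops when scale/center were FALSE).
prcomp.recon <- function(pca, pcs) {
  recon <- pca$x[, pcs, drop=FALSE] %*% t(pca$rotation[, pcs, drop=FALSE])
  if (is.numeric(pca$scale))  recon <- scale(recon, center=FALSE, scale=1/pca$scale)
  if (is.numeric(pca$center)) recon <- scale(recon, center=-pca$center, scale=FALSE)
  recon
}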

4
> # panel plot: the six variables, the first three PCs, and the segmentation track
> par(mar=c(5.1,4.1,0.1,2.1),mfrow=c(10,1))
> plot(var1,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(var2,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(var3,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(var4,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(var5,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(var6,type='h',ylim=c(0,70),xlab="",xaxs="i", yaxs="i")
> plot(pc1.pc2.pc3[,3],type='h',ylab="PC3",xlab="",xaxs="i", yaxs="i")
> plot(pc1.pc2.pc3[,2],type='h',ylab="PC2",xlab="",xaxs="i", yaxs="i")
> plot(pc1.pc2.pc3[,1],type='h',ylab="PC1",xlab="",xaxs="i", yaxs="i")
> image(as.matrix(mfit$classification),axes=FALSE,col=c(1:mfit$G),xlab="measurements")
> # per-segment means of the six variables
> all_var_seg=cbind(var1,var2,var3,var4,var5,var6,pc1.pc2.pc3,states=mfit$classification)
> all_var_seg_mean=rbind(sapply(all_var_seg[all_var_seg$states==1,][,c(1,2,3,4,5,6)],mean),
+                        sapply(all_var_seg[all_var_seg$states==2,][,c(1,2,3,4,5,6)],mean),
+                        sapply(all_var_seg[all_var_seg$states==3,][,c(1,2,3,4,5,6)],mean))
> head(all_var_seg_mean)
          var1       var2      var3       var4       var5      var6
[1,]  1.025863  0.9857527  0.998589  0.9448508  0.9804805  1.035885
[2,] 59.983324 50.0217464 39.964781 29.8815236 19.8794280 10.015864
[3,]  9.788259 20.0648438 30.169022 40.1060414 49.8751476 60.002879
> # PCA on the segments (rows: variables, columns: segments); the right-hand side
> # of this assignment was lost in the transcript and is reconstructed here
> all_var_seg_mean_pca <- prcomp(t(all_var_seg_mean), retx=TRUE, center=TRUE, scale=TRUE)
> png("fig4.png")
> par(mfrow=c(1,2))
> barplot(summary(all_var_seg_mean_pca)$importance[3,],main="Cumulative Proportion of Variance",cex.main=0.8,cex.names=0.7)
> barplot(summary(all_var_seg_mean_pca)$importance[2,],main="Proportion of Variance",cex.main=0.8,cex.names=0.7)
> dev.off()
> png("fig5.png")
> # the palette definition was lost in the transcript; any color ramp will do
> my_palette <- colorRampPalette(c("red","yellow","green"))(n=299)
> # reconstruct the data from the first two principal components
> all_var_seg_mean_pca_recon=round(prcomp.recon(all_var_seg_mean_pca,pcs=c(1,2)),1)
> colnames(all_var_seg_mean_pca_recon)=c("Segment 1","Segment 2","Segment 3")
> library(gplots)   # provides heatmap.2()
> heatmap.2(t(all_var_seg_mean_pca_recon),
+   main = "Heatmap",        # heat map title
+   notecol="black",         # change font color of cell labels to black
+   density.info="none",     # turns off density plot inside color legend
+   trace="none",            # turns off trace lines inside the heat map
+   margins=c(12,9),         # widens margins around plot
+   col=my_palette,          # use the color palette defined earlier
+   #breaks=col_breaks,      # enable color transition at specified limits
+   #dendrogram="row",       # only draw a row dendrogram
+   cexRow=0.9)
> dev.off()
Post-processing: the segments form a high dimensional discrete matrix (rows: variables, columns: segments); principal component analysis on the segments, extract the first K principal components (to reduce technical variability), reconstruct the original data from the first K principal components, then hierarchical clustering and visualization.
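The outline above mentions hierarchical clustering; heatmap.2() performs it internally when drawing its dendrograms, but the step can also be made explicit. A small sketch, reusing the reconstructed segment means from this slide:

# Explicit hierarchical clustering of the segment profiles (heatmap.2 does the
# equivalent internally for its row dendrogram).
seg_hclust <- hclust(dist(t(all_var_seg_mean_pca_recon)))
plot(seg_hclust, main="Hierarchical clustering of segments")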

5 Figure: principal component analysis and model based clustering; the three segments (Segment 1, Segment 2, Segment 3).

6
library(corrplot)   # corrplot()
library(nnet)       # nnet()
# metadata_cat is not defined anywhere in the transcript; it is assumed to be a
# numerically coded categorical covariate with one value per variable
metadata_all=cbind(metadata_num=metadata,metadata_cat)
final_data=data.frame(all_var_seg_mean_pca_recon,metadata_num=metadata,metadata_cat=metadata_cat)
# integration: correlate the metadata with the reconstructed segment profiles
png("fig6.png")
corrplot(cor(metadata_all,all_var_seg_mean_pca_recon), method="color")
dev.off()
# prediction: (multiple) linear regression vs a small neural network
lm.fit_seg2=lm(Segment.2 ~ metadata_num, data=final_data)
lm.predict_seg2=predict(lm.fit_seg2)
summary(final_data$Segment.2)
# the response is divided by (roughly) its maximum so that it lies in [0,1],
# as required by nnet()'s default logistic output unit
nnet.fit_seg2=nnet(Segment.2/68.50 ~ metadata_num, data=final_data,size=2)
nnet.predict_seg2=predict(nnet.fit_seg2)
lm.fit_seg3=lm(Segment.3 ~ metadata_cat, data=final_data)
lm.predict_seg3=predict(lm.fit_seg3)
summary(final_data$Segment.3)
nnet.fit_seg3=nnet(Segment.3/60 ~ metadata_cat, data=final_data,size=2)
nnet.predict_seg3=predict(nnet.fit_seg3)
png("fig7.png")
par(mfrow=c(2,2))
plot(final_data$Segment.3, lm.predict_seg3, xlab="Actual", main="Linear regression predictions vs actual",cex.main=0.9)
plot(final_data$Segment.3, nnet.predict_seg3, xlab="Actual", main="Neural network predictions vs actual",cex.main=0.9)
plot(final_data$Segment.2, lm.predict_seg2, xlab="Actual", main="Linear regression predictions vs actual",cex.main=0.9)
plot(final_data$Segment.2, nnet.predict_seg2, xlab="Actual", main="Neural network predictions vs actual",cex.main=0.9)
dev.off()
Integration: principal component analysis on the grouped segments (rows: variables, columns: segments) and correlation analysis with the metadata. Prediction: prediction using (multiple) linear regression and an artificial neural network (ANN).
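Because the responses were scaled into [0,1] before fitting, the neural network predictions come back on that scaled scale. A small sketch of mapping them back to the original units, reusing the scaling constants from the code above, so they are directly comparable with the linear regression predictions:

# Map the neural network predictions back to the original response scale (sketch)
nnet.predict_seg2_orig <- nnet.predict_seg2 * 68.50
nnet.predict_seg3_orig <- nnet.predict_seg3 * 60
plot(final_data$Segment.2, nnet.predict_seg2_orig, xlab="Actual", ylab="Predicted",
     main="Neural network predictions, original units", cex.main=0.9)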

7 Figure: correlation analysis, metadata vs segments.

