// Multi-resolution wind field data visualization

/************中心环形矢量场*马鞍矢量场*****卷积白噪声纹理***********/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include "netcdfcpp.h"
using namespace std;

#define  SQUARE_FLOW_FIELD_SZ	400
#define	 DISCRETE_FILTER_SIZE	2048        //离散的滤波尺寸
#define  LOWPASS_FILTR_LENGTH	10.00000f	//低通滤波长度
#define	 LINE_SQUARE_CLIP_MAX	100000.0f	//线性平方夹
#define	 VECTOR_COMPONENT_MIN   0.050000f //矢量分量最小值


///	forward declarations     ///
void     SyntheszSaddle(int  n_xres,  int     n_yres,  float*   pVectr);	// read the wind vector field from a NetCDF file
void	 NormalizVectrs(int  n_xres,  int     n_yres,  float*   pVectr,float* vecSize);	// normalize vectors, record per-pixel magnitudes
void     GenBoxFiltrLUT(int  LUTsiz,  float*  p_LUT0,  float*   p_LUT1);	// build the box-filter lookup tables
void     MakeWhiteNoise(int  n_xres,  int     n_yres,  unsigned char*  pNoise);	// generate the white-noise input texture
void	 FlowImagingLIC(int  n_xres,  int     n_yres,  float*   pVectr,   unsigned char*  pNoise,  
	unsigned char*  pImage,  float*  p_LUT0,  float*  p_LUT1,  float  krnlen);	// line integral convolution
void 	 WriteImage2PPM(int  n_xres,  int     n_yres,  unsigned char*  pImage,char*  f_name);	// save gray image as a PPM file
void  color(int n_xres, int n_yres,unsigned char *pImage,float* vecSize);	// optional colored rendering via OpenCV

// largest vector magnitude below 100 seen by NormalizVectrs(); used by color() for mapping
double maxvecmag;

///	program entry: load the wind field, synthesize the LIC texture, and save it as a PPM image.
///	Fixes vs. original: standard `int main` return type, allocation-failure check,
///	and the previously leaked `vecSize` buffer is now freed.
int	main()
{
	// grid resolution of the NetCDF file read by SyntheszSaddle()
	// (alternative resolutions kept for reference)
// 	int				n_xres = 721;	int  n_yres = 281;
// 	int				n_xres = 2881;	int  n_yres = 1121;
// 	int				n_xres = 5761;	int  n_yres = 2241;
// 	int				n_xres = 11521;	int  n_yres = 2241;
	int				n_xres = 1441;
	int				n_yres = 561;

	float*			pVectr = (float*         ) malloc( sizeof(float        ) * n_xres * n_yres * 2 );	// interleaved (u, v) per pixel
	float*			p_LUT0 = (float*		 ) malloc( sizeof(float        ) * DISCRETE_FILTER_SIZE);	// filter LUT, forward direction
	float*			p_LUT1 = (float*		 ) malloc( sizeof(float        ) * DISCRETE_FILTER_SIZE);	// filter LUT, backward direction
	unsigned char*	pNoise = (unsigned char* ) malloc( sizeof(unsigned char) * n_xres * n_yres     );	// white-noise input texture
	unsigned char*	pImage = (unsigned char* ) malloc( sizeof(unsigned char) * n_xres * n_yres     );	// output LIC gray image
	float*			vecSize = (float*        ) malloc( sizeof(float        ) * n_xres * n_yres     );	// per-pixel vector magnitude

	if( !pVectr || !p_LUT0 || !p_LUT1 || !pNoise || !pImage || !vecSize )
	{
		printf("out of memory\n");
		return 1;
	}

	SyntheszSaddle(n_xres, n_yres, pVectr);				// read the wind field from the NetCDF file
	NormalizVectrs(n_xres, n_yres, pVectr, vecSize);	// normalize vectors; record magnitudes for color mapping
	MakeWhiteNoise(n_xres, n_yres, pNoise);
	GenBoxFiltrLUT(DISCRETE_FILTER_SIZE, p_LUT0, p_LUT1);
	FlowImagingLIC(n_xres, n_yres, pVectr, pNoise, pImage, p_LUT0, p_LUT1, LOWPASS_FILTR_LENGTH);
//	color(n_xres, n_yres, pImage, vecSize);				// optional colored rendering

	// output filename matches the chosen resolution
	WriteImage2PPM(n_xres, n_yres, pImage, "LIC1441_561.ppm");

	free(pVectr);	pVectr = NULL;
	free(p_LUT0);	p_LUT0 = NULL;
	free(p_LUT1);	p_LUT1 = NULL;
	free(pNoise);	pNoise = NULL;
	free(pImage);	pImage = NULL;
	free(vecSize);	vecSize = NULL;	// was leaked in the original
	return 0;
}


///	中心环形矢量场图形	synthesize a saddle-shaped vector field     ///
///	read the wind vector field from a NetCDF file into pVectr     ///
///	pVectr stores, per pixel, the (u, v) components interleaved; rows are
///	flipped vertically so that image row 0 corresponds to the last data row.
///	Fixes vs. original: early return when the file cannot be opened, and the
///	previously leaked Tmp_LON buffer is now freed.
void	SyntheszSaddle(int  n_xres,  int  n_yres,  float*  pVectr)
{   	
	static const int LatNum = n_yres;
	static const int LonNum = n_xres;
	static const int Time = 1;
	static const int TMP = Time*LonNum*LatNum;	// total number of samples per variable
	// alternative resolutions kept for reference
	//NcFile dataReadFile("global_721_281.nc",NcFile::ReadOnly);
 	NcFile dataReadFile("global_1441_561.nc",NcFile::ReadOnly);
// 	NcFile dataReadFile("global_2881_1121.nc",NcFile::ReadOnly);
 //	NcFile dataReadFile("global_5761_2241.nc",NcFile::ReadOnly);
 	//NcFile dataReadFile("global_11521_2241.nc",NcFile::ReadOnly);

	if (!dataReadFile.is_valid())
	{
		std::cout<<"couldn't open file!"<<std::endl;
		return;	// reading variables from an invalid NcFile would be undefined
	}
	double *Tmp_UX = new double[TMP];
	double *Tmp_VY = new double[TMP];
	double *Tmp_LAT = new double[TMP];
	double *Tmp_LON = new double[TMP];

	NcVar *dataTmp_LAT = dataReadFile.get_var("LAT");	
	NcVar *dataTmp_LON = dataReadFile.get_var("LON");	
	NcVar *dataTmp_UX = dataReadFile.get_var("UX");	
	NcVar *dataTmp_VY = dataReadFile.get_var("VY");	

	// NOTE(review): LAT/LON are fetched with counts (LatNum, LatNum) and
	// (LonNum, LonNum); presumably 1-D coordinate variables — confirm against
	// the NetCDF schema. LAT/LON values are currently unused below.
	dataTmp_LAT->get(Tmp_LAT,LatNum,LatNum);
	dataTmp_LON->get(Tmp_LON,LonNum,LonNum);
	dataTmp_UX->get(Tmp_UX,Time,LatNum,LonNum);
	dataTmp_VY->get(Tmp_VY,Time,LatNum,LonNum);
	for(int  j = 0;  j < n_yres;  j ++)
		for(int  i = 0;  i < n_xres;  i ++)
		{	
			// vertical flip; << 1 because u and v are interleaved
			int	 index = (  (n_yres - 1 - j) * n_xres + i  )  <<  1;
			pVectr[index    ] = Tmp_UX[j*LonNum+i];
			pVectr[index + 1] = Tmp_VY[j*LonNum+i];
		}	
		delete []Tmp_UX;
		delete []Tmp_VY;
		delete []Tmp_LAT;
		delete []Tmp_LON;	// was leaked in the original
}

///		normalize the vector field     ///
// void    NormalizVectrs(int  n_xres,  int  n_yres,  float*  pVectr)
// {	
// 
// 
// 	for(int	 j = 0;  j < n_yres;  j ++)
// 		for(int	 i = 0;  i < n_xres;  i ++)
// 		{	
// 			int		index = (j * n_xres + i) << 1;
// 			float	vcMag = float(  sqrt( double(pVectr[index] * pVectr[index] + pVectr[index + 1] * pVectr[index + 1]) )  );
// 
// 			float	scale = (vcMag == 0.0f) ? 0.0f : 1.0f / vcMag;//矢量大小归一化后的矢量值
// 			//pVectr[index    ] *= scale;
// 			pVectr[index    ]=pVectr[index    ]*scale;
// 			//	cout<<"pVectr[index    ]="<<pVectr[index    ];
// 			pVectr[index + 1] *= scale;
// 			//cout<<"pVectr[index    ]="<<pVectr[index  +1  ];
// 
// 		}
// }
///		normalize the vector field     ///
///		Records each pixel's vector magnitude in vecSize, tracks the global
///		maximum magnitude below 100 (maxvecmag, used later for color mapping),
///		and rescales every vector to unit length stretched by a factor of 5.5.
void    NormalizVectrs(int n_xres,  int  n_yres,  float*  pVectr,float* vecSize)
{	
	for (int row = 0; row < n_yres; row++)
	{
		for (int col = 0; col < n_xres; col++)
		{
			int   cell = row * n_xres + col;
			int   idx  = cell << 1;	// interleaved (u, v) layout
			float u    = pVectr[idx];
			float v    = pVectr[idx + 1];
			float mag  = float( sqrt( double(u * u + v * v) ) );

			vecSize[cell] = mag;

			// keep the largest magnitude under 100 for later normalization
			if (mag < 100 && mag > maxvecmag)
				maxvecmag = mag;

			// unit-normalize, then stretch by 5.5 (double-precision product,
			// matching the original `scale*5.5` arithmetic exactly)
			float  invMag  = (mag == 0.0f) ? 0.0f : 1.0f / mag;
			double stretch = invMag * 5.5;
			pVectr[idx    ] = float(pVectr[idx    ] * stretch);
			pVectr[idx + 1] = float(pVectr[idx + 1] * stretch);
		}
	}
}

///		make white noise as the LIC input texture     ///
// void	MakeWhiteNoise(int  n_xres,  int  n_yres,  unsigned char*  pNoise)
// {		
// 	IplImage * NoiseImg=cvCreateImage(cvSize(n_xres,n_yres),IPL_DEPTH_8U,1);
// 	CvScalar s;
// 
// 	for(int  j = 0;   j < n_yres;  j ++)
// 	{
// 		for(int  i = 0;   i < n_xres;  i ++)
// 
// // 	for(int  j = 0;   j < n_yres;  j=j +10)//产生稀疏白噪声
// // 	{
// // 		for(int  i = 0;   i < n_xres;  i=i+10)
// 
// 		{	
// 			int  r = rand();
// 			r = (  (r & 0xff) + ( (r & 0xff00) >> 8 )  ) & 0xff;
// 			pNoise[j * n_xres + i] = (unsigned char) r;
// 			s = cvGet2D(NoiseImg,i,j);
// 			s.val[0]=r;
// 			s.val[1]=r;
// 			s.val[2]=r;
// 			cvSet2D(NoiseImg,i,j,s);
// 		}
// 	}
// 
// }

///		make white noise as the LIC input texture     ///
///		Fills pNoise (row-major, n_xres * n_yres bytes) with pseudo-random
///		gray values; folding the two low bytes of rand() improves uniformity.
void	MakeWhiteNoise(int  n_xres,  int  n_yres,  unsigned char*  pNoise)
{		
	const int total = n_yres * n_xres;
	for (int idx = 0; idx < total; idx++)
	{	
		int sample = rand();
		sample = ( (sample & 0xff) + ( (sample & 0xff00) >> 8 ) ) & 0xff;
		pNoise[idx] = (unsigned char) sample;
	}
}
///		generate box filter LUTs     ///
///		generate box filter LUTs     ///
///		Entry i of each LUT holds the accumulated box-filter weight i,
///		so the weight of a segment is the difference of two entries.
void    GenBoxFiltrLUT(int  LUTsiz,  float*  p_LUT0,  float*  p_LUT1)
{  		
	for (int k = 0; k < LUTsiz; k++)
	{
		float weight = (float) k;
		p_LUT0[k] = weight;
		p_LUT1[k] = weight;
	}
}

///		color the LIC gray image with a gradient palette read from "11.jpg"
///		(row 1 of that image supplies the color table), weighting each pixel's
///		color by its nonlinearly remapped vector magnitude, then display the
///		result in an OpenCV window.
void color(int n_xres,int n_yres, unsigned char* pImage,float* vecSize)
{
	IplImage * licImage = cvCreateImage(cvSize(n_xres,n_yres),IPL_DEPTH_8U,3);
	IplImage* img = cvLoadImage("11.jpg",1);	// palette image providing the color gradient
	int k = 0;	// NOTE(review): shadowed by the inner `int k` below, so the k>=img->width reset only ever sees 0

	double magind;	// NOTE(review): uninitialized until the first pixel with mag < 100 is encountered
	double mag;
	double newMag;
	double x = 0.1;	// x is the nonlinear mapping factor; must satisfy x != 1 (division by x-1 below)
	CvScalar colorTable[500];	// NOTE(review): fixed capacity — assumes img->width <= 500
	CvScalar s,s1;
	// sample row 1 of the palette image into the color table
	for (int i = 0;i < img->width;i++)
	{
		s = cvGet2D(img,1,i);
		colorTable[i] =s;
	}
	for (int j=0;j<n_yres;j++)
	{
		for (int i= 0;i<n_xres;i++)
		{
			
				if (k>=img->width)
				{
					k=0;
				}
			double	scale=	 pImage[j * n_xres + i];	// LIC gray value used as intensity
			mag = vecSize[j * n_xres + i];
						//******** normalize the vector magnitude ******
			if (mag<100)
			{
					magind = (mag/maxvecmag);
			}
						// nonlinear color enhancement of the LIC: maps [0,1] through (x^m - 1)/(x - 1)
						newMag =(pow(x,magind)-1)/(x-1);
						s = cvGet2D(licImage,j,i);	// NOTE(review): this read is unused — s1 is written below
				// gradient color mapping: interpolate between adjacent palette entries
				int k = int(newMag*446);	// NOTE(review): assumes 447 palette entries; k+1 can exceed colorTable bounds — verify
				s1.val[0]=colorTable[k].val[0]*(k+1-newMag*446)+colorTable[k+1].val[0]*(newMag*446-k);
				s1.val[1]=colorTable[k].val[1]*(k+1-newMag*446)+colorTable[k+1].val[1]*(newMag*446-k);
				s1.val[2]=colorTable[k].val[2]*(k+1-newMag*446)+colorTable[k+1].val[2]*(newMag*446-k);
 				s1.val[0]*=scale;
				s1.val[1]*=scale;
 				s1.val[2]*=scale;
				/*cout<<"s1.val[3]="<<s1.val[3]<<endl;*/
				cvSet2D(licImage,j,i,s1);
		}
	}
	//Mat AlphaImage= imread("s.jpg");
	
	//cv::Mat AlphaImage = imread("licImage",1);
	cvNamedWindow("lic_three channles",0);
	cvShowImage("lic_three channles",licImage);
	cvWaitKey(0);
	system("pause");
	cvDestroyWindow("lic_three channles");
	cvReleaseImage(&licImage);
}
///		write the LIC image to a PPM (P6, binary) file     ///
///		pImage holds n_xres * n_yres gray bytes (row-major); each gray value
///		is expanded to R = G = B. Prints an error and returns if the file
///		cannot be opened.
void	WriteImage2PPM(int  n_xres,  int  n_yres,  unsigned char*  pImage,  char*  f_name)
{	
	FILE*	o_file;

	// P6 carries raw pixel bytes: the file MUST be opened in binary mode,
	// otherwise newline translation (e.g. on Windows) corrupts the image
	// (the original used "w")
	if(   ( o_file = fopen(f_name, "wb") )  ==  NULL   )  
	{  
		printf("Can't open output file\n");  
		return;  
	}

	fprintf(o_file, "P6\n%d %d\n255\n", n_xres, n_yres);

	for(int  j = 0;  j < n_yres;  j ++)
		for(int  i = 0;  i < n_xres;  i ++)
		{
			unsigned char	unchar = pImage[j * n_xres + i];	// gray texture value of the pixel
			fputc(unchar, o_file);	// R
			fputc(unchar, o_file);	// G
			fputc(unchar, o_file);	// B
		}

		fclose (o_file);	o_file = NULL;
}


///		flow imaging (visualization) through Line Integral Convolution     ///
///		For each pixel, a streamline is advected forward and backward through
///		the vector field; the white-noise texture is integrated along it with
///		the box-filter LUTs, producing the pixel's gray LIC value in pImage.
void	FlowImagingLIC(int     n_xres,  int     n_yres,  float*  pVectr,  unsigned char*  pNoise,  unsigned char*  pImage,  
	float*  p_LUT0,  float*  p_LUT1,  float   krnlen)
{	
	int		vec_id;						///ID in the VECtor buffer (for the input flow field)
	int		advDir;						///ADVection DIRection (0: positive;  1: negative)
	int		advcts;						///number of ADVeCTion stepS per direction (a step counter)
	int		ADVCTS = int(krnlen * 3);	///MAXIMUM number of advection steps per direction to break dead loops

	float	vctr_x;						///x-component  of the VeCToR at the forefront point
	float	vctr_y;						///y-component  of the VeCToR at the forefront point
	float	clp0_x;						///x-coordinate of CLiP point 0 (current)
	float	clp0_y;						///y-coordinate of CLiP point 0	(current)
	float	clp1_x;						///x-coordinate of CLiP point 1 (next   )
	float	clp1_y;						///y-coordinate of CLiP point 1 (next   )
	float	samp_x;						///x-coordinate of the SAMPle in the current pixel
	float	samp_y;						///y-coordinate of the SAMPle in the current pixel
	float	tmpLen;						///TeMPorary LENgth of a trial clipped-segment
	float	segLen;						///SEGment   LENgth
	float	curLen;						///CURrent   LENgth of the streamline
	float	prvLen;						///PReVious  LENgth of the streamline		
	float	W_ACUM;						///ACcuMulated Weight from the seed to the current streamline forefront
	float	texVal;						///TEXture VALue
	float	smpWgt;						///WeiGhT of the current SaMPle
	float	t_acum[2];					///two ACcUMulated composite Textures for the two directions (convolution sums)
	float	w_acum[2];					///two ACcUMulated Weighting values   for the two directions (weight sums)
	float*	wgtLUT = NULL;				///WeiGhT Look Up Table pointing to the target filter LUT
	float	len2ID = (DISCRETE_FILTER_SIZE - 1) / krnlen;	///map a curve LENgth TO an ID in the LUT

	///for each pixel in the 2D output LIC image///
	for(int  j = 0;	 j < n_yres;  j ++)
		for(int  i = 0;	 i < n_xres;  i ++)
		{	
			///init the composite texture accumulators and the weight accumulators
			///(re-initialized once per seed pixel, for both directions)
			t_acum[0] = t_acum[1] = w_acum[0] = w_acum[1] = 0.0f;

			///for either advection direction (forward and backward)///
			for(advDir = 0;  advDir < 2;  advDir ++)
			{	
				///init the step counter, curve-length measurer, and streamline seed
				///(seeded at the center of pixel (i, j))
				advcts = 0;
				curLen = 0.0f;
				clp0_x = i + 0.5f;
				clp0_y = j + 0.5f;

				///access the target filter LUT for this direction///
				wgtLUT = (advDir == 0) ? p_LUT0 : p_LUT1;

				///until the streamline is advected long enough or a tightly  spiralling center / focus is encountered///
				while( curLen < krnlen && advcts < ADVCTS )
				{
					///access the vector at the sample///
					vec_id = ( int(clp0_y) * n_xres + int(clp0_x) ) << 1;
					vctr_x = pVectr[vec_id    ];
					vctr_y = pVectr[vec_id + 1];

					///in case of a critical point (zero vector): stop advecting this direction///
					if( vctr_x == 0.0f && vctr_y == 0.0f )
					{	
						t_acum[advDir] = (advcts == 0) ? 0.0f : t_acum[advDir];		   ///this line is indeed unnecessary
						w_acum[advDir] = (advcts == 0) ? 1.0f : w_acum[advDir];
						break;
					}

					///negate the vector for the backward-advection case///
					vctr_x = (advDir == 0) ? vctr_x : -vctr_x;
					vctr_y = (advDir == 0) ? vctr_y : -vctr_y;

					///clip the segment against the pixel boundaries --- find the shorter from the two clipped segments///
					///replace  all  if-statements  whenever  possible  as  they  might  affect the computational speed///
					segLen = LINE_SQUARE_CLIP_MAX;
					//cout<<"segLen="<<segLen<<endl;

					//cout<<"VECTOR_COMPONENT_MIN="<<LINE_SQUARE_CLIP_MAX<<endl;
					segLen = (vctr_x < -VECTOR_COMPONENT_MIN) ? ( int(     clp0_x         ) - clp0_x ) / vctr_x : segLen;//int(0.5)=0
					segLen = (vctr_x >  VECTOR_COMPONENT_MIN) ? ( int( int(clp0_x) + 1.5f ) - clp0_x ) / vctr_x : segLen;
					segLen = (vctr_y < -VECTOR_COMPONENT_MIN) ?	
						(      (    (  tmpLen = ( int(     clp0_y)          - clp0_y ) / vctr_y  )  <  segLen    ) ? tmpLen : segLen      ) 
						: segLen;
					segLen = (vctr_y >  VECTOR_COMPONENT_MIN) ?
						(      (    (  tmpLen = ( int( int(clp0_y) + 1.5f ) - clp0_y ) / vctr_y  )  <  segLen    ) ? tmpLen : segLen      ) 
						: segLen;

					///update the curve-length measurers///
					prvLen = curLen;
					curLen+= segLen;
					segLen+= 0.0004f;	// small epsilon so the next clip point crosses the pixel boundary

					///check if the filter has reached either end///
					segLen = (curLen > krnlen) ? ( (curLen = krnlen) - prvLen ) : segLen;

					///obtain the next clip point///
					clp1_x = clp0_x + vctr_x * segLen;
					clp1_y = clp0_y + vctr_y * segLen;

					///obtain the middle point of the segment as the texture-contributing sample///
					samp_x = (clp0_x + clp1_x) * 0.5f;
					samp_y = (clp0_y + clp1_y) * 0.5f;

					///obtain the texture value of the sample///
					texVal = pNoise[ int(samp_y) * n_xres + int(samp_x) ];

					///update the accumulated weight and the accumulated composite texture (texture x weight)///
					W_ACUM = wgtLUT[ int(curLen * len2ID) ];
					smpWgt = W_ACUM - w_acum[advDir];			
					w_acum[advDir]  = W_ACUM;								
					t_acum[advDir] += texVal * smpWgt;

					///update the step counter and the "current" clip point///
					advcts ++;
					clp0_x = clp1_x;
					clp0_y = clp1_y;

					///check if the streamline has gone beyond the flow field///
					if( clp0_x < 0.0f || clp0_x >= n_xres || clp0_y < 0.0f || clp0_y >= n_yres)  break;
				}	
			}

			///normalize the accumulated composite texture///
			///NOTE(review): assumes w_acum[0] + w_acum[1] != 0 — holds because a
			///zero-step direction sets its weight to 1.0f above; confirm for LUTs
			///whose first entries are all zero
			texVal = (t_acum[0] + t_acum[1]) / (w_acum[0] + w_acum[1]);

			///clamp the texture value against the displayable intensity range [0, 255]
			texVal = (texVal <   0.0f) ?   0.0f : texVal;
			texVal = (texVal > 255.0f) ? 255.0f : texVal; 
			pImage[j * n_xres + i] = (unsigned char) texVal;
		} 	
}

// Reposted from: https://blog.csdn.net/syy_1263041983/article/details/41082833