forked from fercook/SciViz
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathnetcdf2binary.py
142 lines (126 loc) · 4.95 KB
/
netcdf2binary.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#!/usr/bin/python
#
# This program converts a variable in NetCDF format to binary individual files
# one for each time frame
# It requires the program ncdump to export a single variable into ASCII CSV format,
# and the program uses the unix pipeline to avoid creating large temporary files.
# Make sure you have ncdump in your path.
#
# Syntax:
#
# ncdump -v variable NetCDFfile.nc | netcdf2binary.py
#
# where variable is what you want to export to Blender voxels
#
#
import struct;
import math;
import sys;
import array;
from optparse import OptionParser;
# Command-line options. Geometry (-x/-y/-z), frame count (-t) and the
# normalization window (-m/-M) must be supplied by the user, because the
# ASCII stream piped in from ncdump does not carry them in a parseable form.
usage = "Convert output from ncdump (NetCDF) to raw binary.\n syntax: ncdump -v var file.nc | netcdf2binary.py [options]"
parser = OptionParser(usage=usage)
parser.add_option("-V", "--verbose", action="store_true",
                  dest="Verbose", default=False,
                  help="Switch on verbose mode.")
parser.add_option("-x", "--xlength", type="int", action="store", dest="Xres", default=50,
                  help="X length of the data.")
parser.add_option("-y", "--ylength", type="int", action="store", dest="Yres", default=50,
                  help="Y length of the data.")
parser.add_option("-z", "--zlength", type="int", action="store", dest="Zres", default=50,
                  help="Z length of the data.")
parser.add_option("-t", "--times", type="int", action="store", dest="Times", default=1,
                  help="Number of frames to process.")
parser.add_option("-f", "--file", action="store", dest="Filename", default="out.XXXXX",
                  help="Output file name.")
parser.add_option("-m", "--min", action="store", dest="minvalue", type="float", default=0.0,
                  help="Minimum data value to output as zero.")
parser.add_option("-M", "--max", action="store", dest="maxvalue", type="float", default=0.0,
                  help="Maximum data value to output as one")
parser.add_option("-s", "--scale", action="store", dest="ScaleFactor", type="float", default=1.0,
                  help="Scale Data by Factor")

# Parse arguments and unpack them into plain module-level names used below.
(options, args) = parser.parse_args()
times = options.Times
xres = options.Xres
yres = options.Yres
zres = options.Zres
Verbose = options.Verbose
MinValue = options.minvalue
MaxValue = options.maxvalue
filename = options.Filename
scalefactor = options.ScaleFactor
# Pick the normalization strategy once, before the streaming loop starts.
if MinValue == MaxValue:
    # No explicit window given (-m equals -M): pass values through as-is.
    def Normalize(val):
        return val
else:
    def Normalize(val):
        # Linear map of [MinValue, MaxValue] onto [0, 1], clamped at both ends.
        fraction = (val - MinValue) / (MaxValue - MinValue)
        return min(max(fraction, 0.0), 1.0)
# Place any change of scales here (linear to log or whatever); by default
# this is just a linear multiplication by the -s/--scale factor.
def scale(x):
    return x * scalefactor
# Streaming bookkeeping: how much data is expected, how much has been seen,
# and the observed value range (reported at the end of the run).
TotalDataToRead = xres * yres * zres * times
ChunkSize = xres * yres * zres
TotalDataRead = 0
AccumulatedRead = 0
ChunksRead = 0
MinValueFound = 1.0E100
MaxValueFound = -1.0E100

# Echo the effective configuration when verbose mode is on.
if Verbose:
    print("Out as " + filename)
    print("x as " + str(xres))
    print("y as " + str(yres))
    print("z as " + str(zres))
    print("T as " + str(times))
    if MinValue != MaxValue:
        print("min as " + str(MinValue))
        print("Max as " + str(MaxValue))
# Skip the ncdump header: read up to the "data:" marker, then skip the two
# lines that follow it (variable name and opening of the value list).
# Stop at EOF as well, so a truncated or wrong input cannot hang forever.
line = sys.stdin.readline()
while line and line.find("data:") == -1:
    line = sys.stdin.readline()
sys.stdin.readline()
sys.stdin.readline()

# One raw-float32 output file per frame: <filename>.00000, <filename>.00001, ...
fil = open(filename + "." + str(ChunksRead).zfill(5), 'wb')
for line in sys.stdin:
    linedata = line.split(',')
    prepareddata = array.array('f')
    for Strnumber in linedata:
        # The final value of the variable is terminated by ";" instead of ","
        if Strnumber.find(";") > -1:
            Strnumber = Strnumber.replace(";", "")
        try:
            RawData = scale(float(Strnumber.strip()))
        except ValueError:
            # Token is not a number (closing brace line, blank fragment, ...):
            # skip it, but do NOT swallow real errors such as write failures.
            continue
        NormalizedValue = Normalize(RawData)
        prepareddata.append(NormalizedValue)
        TotalDataRead = TotalDataRead + 1
        AccumulatedRead = AccumulatedRead + 1
        if AccumulatedRead == ChunkSize:
            # Frame complete: flush it to its own file and start a new one.
            prepareddata.tofile(fil)
            fil.flush()
            fil.close()
            ChunksRead = ChunksRead + 1
            AccumulatedRead = 0
            prepareddata = array.array('f')
            if TotalDataRead < TotalDataToRead:
                # Only open the next frame's file when more data is expected,
                # so no spurious empty file is left behind after the last frame.
                fil = open(filename + "." + str(ChunksRead).zfill(5), 'wb')
        # Coarse progress report; guard the modulo against -t 0 style input.
        if TotalDataToRead > 0 and (100 * TotalDataRead) % TotalDataToRead == 0:
            print(str((100.0 * TotalDataRead) / TotalDataToRead) + " % done")
        MinValueFound = min(MinValueFound, RawData)
        MaxValueFound = max(MaxValueFound, RawData)
        if TotalDataRead == TotalDataToRead:
            break
    if len(prepareddata) > 0:
        # Flush the partial frame accumulated from this input line.
        prepareddata.tofile(fil)
    if TotalDataRead == TotalDataToRead:
        break
if not fil.closed:
    fil.flush()
    fil.close()

# Final report; print() calls keep the script consistent with the rest of the
# file and compatible with Python 3 (the original used Python 2 print statements).
print("Maximum Value Found :" + str(MaxValueFound))
print("Minimum Value Found :" + str(MinValueFound))
if MaxValue != MinValue:
    print(" but wrote in the range " + str(MinValue) + "--" + str(MaxValue))
if TotalDataToRead != TotalDataRead:
    print("Some error occurred, expected " + str(TotalDataToRead) + " points and found " + str(TotalDataRead))