netcdf-c/ncgen/ncgen.l

%{
/*********************************************************************
* Copyright 1993, UCAR/Unidata
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
* $Id: ncgen.l,v 1.24 2009/09/25 18:22:35 dmh Exp $
*********************************************************************/
/* Problems:
1. We assume the input is true utf8.
Unfortunately, we may actually get iso-latin-8859-1.
This means that there will be ambiguity about the characters
in the range 128-255 because they will look like n-byte unicode
when they are 1-byte 8859 characters. Because of our encoding,
8859 characters above 128 will be handled as n-byte utf8 and so
will probably not lex correctly.
Solution: assume utf8 and note in the documentation that
ISO8859 is specifically unsupported.
2. The netcdf function NC_check_name in string.c must be modified to
conform to the use of UTF8.
3. We actually have three tests for UTF8 of increasing correctness
(in the sense that the least correct will allow some sequences that
are technically illegal UTF8).
The tests are derived from the table at
http://www.w3.org/2005/03/23-lex-U
We include lexical definitions for all three, but use the second version.
4. Single character constants enclosed in '...' cannot be
utf-8, so we assume they are by default encoded using the 1-byte
subset of utf-8. It turns out that this subset is in fact
equivalent to US-ASCII (7-bit).
We could use ISO-8859-1, but that conflicts with UTF-8 above value 127.
*/
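/* Illustrative example of the ambiguity (not part of the grammar):
an accented name is, in true utf8, a sequence like 72 C3 A9 73 75 6D C3 A9,
which the {UTF8} pattern below accepts as two 2-byte sequences; the same
name in ISO-8859-1 is 72 E9 73 75 6D E9, where each lone E9 byte looks
like the start of a multi-byte utf8 sequence and so fails to lex. */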
/* lex specification for tokens for ncgen */
/* Fill value used by ncdump from version 2.4 and later. Should match
definition of FILL_STRING in ../ncdump/vardata.h */
#include "ncgen.h"
#include "ncgeny.h"
#include "isnan.h"
#define FILL_STRING "_"
#define XDR_INT32_MIN (-2147483647-1)
#define XDR_INT32_MAX 2147483647
#define XDR_INT64_MIN (-9223372036854775807LL-1)
#define XDR_INT64_MAX (9223372036854775807LL)
#undef DEBUG
#ifdef DEBUG
static int MIN_BYTE = NC_MIN_BYTE;
static int MIN_SHORT = NC_MIN_SHORT;
static int MIN_INT = NC_MIN_INT;
static int MAX_BYTE = NC_MAX_BYTE;
static int MAX_SHORT = NC_MAX_SHORT;
static int MAX_INT = NC_MAX_INT;
static int MAX_UBYTE = NC_MAX_UBYTE;
static int MAX_USHORT = NC_MAX_USHORT;
static unsigned int MAX_UINT = NC_MAX_UINT;
#undef NC_MIN_BYTE
#undef NC_MIN_SHORT
#undef NC_MIN_INT
#undef NC_MAX_BYTE
#undef NC_MAX_SHORT
#undef NC_MAX_INT
#undef NC_MAX_UBYTE
#undef NC_MAX_USHORT
#undef NC_MAX_UINT
#define NC_MIN_BYTE MIN_BYTE
#define NC_MIN_SHORT MIN_SHORT
#define NC_MIN_INT MIN_INT
#define NC_MAX_BYTE MAX_BYTE
#define NC_MAX_SHORT MAX_SHORT
#define NC_MAX_INT MAX_INT
#define NC_MAX_UBYTE MAX_UBYTE
#define NC_MAX_USHORT MAX_USHORT
#define NC_MAX_UINT MAX_UINT
#endif
#define TAGCHARS "BbSsLlUu"

#define tstdecimal(ch) ((ch) >= '0' && (ch) <= '9')
#define tstoctal(ch) ((ch) == '0')

/*Mnemonics*/
#define ISIDENT 1

/* Define a fake constant indicating that
no tag was specified */
#define NC_NOTAG (-1)
char errstr[100]; /* for short error messages */
int lineno; /* line number for error messages */
Bytebuffer* lextext; /* name or string with escapes removed */

#define YY_BREAK /* defining as nothing eliminates unreachable
statement warnings from flex output,
but make sure every action ends with
"return" or "break"! */
int specialconstants; /* 1 if nan, nanf, infinity, etc is used */
double double_val; /* last double value read */
float float_val; /* last float value read */
long long int64_val; /* last int64 value read */
int int32_val; /* last int32 value read */
short int16_val; /* last short value read */
unsigned long long uint64_val; /* last uint64 value read */
unsigned int uint32_val; /* last uint32 value read */
unsigned short uint16_val; /* last ushort value read */
char char_val; /* last char value read */
signed char byte_val; /* last byte value read */
unsigned char ubyte_val; /* last ubyte value read */
/* Forward */
static Symbol* makepath(char* text);
static int lexdebug(int);
static unsigned long long parseULL(int radix, char* text, int*);
static nc_type downconvert(unsigned long long uint64, int*, int, int);
static int tagmatch(nc_type nct, int tag);
static int nct2lexeme(nc_type nct);
static int collecttag(char* text, char** stagp);
static int identcheck(int token);
struct Specialtoken specials[] = {
{"_FillValue",_FILLVALUE,_FILLVALUE_FLAG},
{"_Format",_FORMAT,_FORMAT_FLAG},
{"_Storage",_STORAGE,_STORAGE_FLAG},
{"_ChunkSizes",_CHUNKSIZES,_CHUNKSIZES_FLAG},
{"_Fletcher32",_FLETCHER32,_FLETCHER32_FLAG},
{"_DeflateLevel",_DEFLATELEVEL,_DEFLATE_FLAG},
{"_Shuffle",_SHUFFLE,_SHUFFLE_FLAG},
{"_Endianness",_ENDIANNESS,_ENDIAN_FLAG},
{"_NoFill",_NOFILL,_NOFILL_FLAG},
{"_NCProperties",_NCPROPS,_NCPROPS_FLAG},
{"_IsNetcdf4",_ISNETCDF4,_ISNETCDF4_FLAG},
{"_SuperblockVersion",_SUPERBLOCK,_SUPERBLOCK_FLAG},
{"_Filter",_FILTER,_FILTER_FLAG},
{"_Codecs",_CODECS,_CODECS_FLAG},
{NULL,0} /* null terminate */
};
%}
%x ST_C_COMMENT
%x TEXT
%p 6000
/* The most correct (validating) version of UTF8 character set
(Taken from: http://www.w3.org/2005/03/23-lex-U)
The lines of the expression cover the UTF8 characters as follows:
1. non-overlong 2-byte
2. excluding overlongs
3. straight 3-byte
4. excluding surrogates
5. straight 3-byte
6. planes 1-3
7. planes 4-15
8. plane 16
UTF8 ([\xC2-\xDF][\x80-\xBF]) \
| (\xE0[\xA0-\xBF][\x80-\xBF]) \
| ([\xE1-\xEC][\x80-\xBF][\x80-\xBF]) \
| (\xED[\x80-\x9F][\x80-\xBF]) \
| ([\xEE-\xEF][\x80-\xBF][\x80-\xBF]) \
| (\xF0[\x90-\xBF][\x80-\xBF][\x80-\xBF]) \
| ([\xF1-\xF3][\x80-\xBF][\x80-\xBF][\x80-\xBF]) \
| (\xF4[\x80-\x8F][\x80-\xBF][\x80-\xBF]) \
*/
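/* For example, the UTF-16 surrogate U+D800, encoded as ED A0 80, is
rejected by alternative 4 of the validating pattern above, but is
accepted by the relaxed {UTF8} pattern actually used below. */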
/* Wish there was some way to ifdef lex files */
/*The most relaxed version of UTF8 (not used)
UTF8 ([\xC0-\xD6].)|([\xE0-\xEF]..)|([\xF0-\xF7]...)
*/
/*The partially relaxed version of UTF8, and the one used here */
UTF8 ([\xC0-\xD6][\x80-\xBF])|([\xE0-\xEF][\x80-\xBF][\x80-\xBF])|([\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])
/* The old definition of ID
ID ([A-Za-z_]|{UTF8})([A-Z.@#\[\]a-z_0-9+-]|{UTF8})*
*/
/* Don't permit control characters or '/' in names, but other special
chars OK if escaped. Note that to preserve backwards
compatibility, none of the characters _.@+- should be escaped, as
they were previously permitted in names without escaping. */
idescaped \\[ !"#$%&'()*,:;<=>?\[\\\]^`{|}~]
numescaped \\[0-9]
/* New definition to conform to a subset of string.c */
ID ([a-zA-Z_]|{UTF8}|{numescaped})([a-zA-Z0-9_.@+-]|{UTF8}|{idescaped})*
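/* Examples (illustrative): "a-b" lexes as an {ID} with no escaping
(the characters _.@+- are permitted unescaped for backwards
compatibility); a name containing a colon must be escaped as "a\:b"
(matching {idescaped}); a leading digit must be escaped as in "\0abc"
(matching {numescaped}). */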
escaped \\.
/* Capture a datasetidentifier */
/* DATASETID ([a-zA-Z0-9!#$%&*:;<=>?/^|~_.@+-]|{UTF8})* */
DATASETID [^{][^{]*
/* Note: this definition of string will work for utf8 as well,
although it is a very relaxed definition
*/
nonquotes ([^"\\]|{escaped})*
exp ([eE][+-]?[0-9]+)
OPAQUESTRING (0[xX][0-9A-Fa-f][0-9A-Fa-f]*)
PATH ([/]|([/]{ID})([/]{ID})*)
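/* e.g. "/", "/g1", and "/g1/g2/var" all match {PATH} */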
XUNUMBER {OPAQUESTRING}([Ss]|[Ll]|[Ll][Ll])?
NUMBER [+-]?[0-9][0-9]*[Uu]?([BbSs]|[Ll]|[Ll][Ll])?
DBLNUMBER [+-]?[0-9]*\.[0-9]*{exp}?[LlDd]?|[+-]?[0-9]*{exp}[LlDd]?
FLTNUMBER [+-]?[0-9]*\.[0-9]*{exp}?[Ff]|[+-]?[0-9]*{exp}[Ff]
SPECIAL "_FillValue"|"_Format"|"_Storage"|"_ChunkSizes"|"_Fletcher32"|"_DeflateLevel"|"_Shuffle"|"_Endianness"|"_NoFill"|"_NCProperties"|"_IsNetcdf4"|"_SuperblockVersion"|"_Filter"|"_Codecs"
USASCII [\x01-\x7F]
%%
[ \r\t\f]+ { /* whitespace */
break;
}
\/\/.* { /* comment */
break;
}
\"{nonquotes}\" {int len; char* s = NULL;
/* In netcdf4, this will be used in a variety
of places, so only remove escapes */
/*
if(yyleng > MAXTRST) {
yyerror("string too long, truncated\n");
yytext[MAXTRST-1] = '\0';
}
*/
len = unescape((char *)yytext+1,yyleng-2,!ISIDENT,&s);
if(len < 0) {
sprintf(errstr,"Illegal character: %s",yytext);
yyerror(errstr);
}
bbClear(lextext);
bbAppendn(lextext,s,len);
bbNull(lextext);
if(s) efree(s);
return lexdebug(TERMSTRING);
}
{OPAQUESTRING} { /* drop leading 0x; pad to even number of chars */
char* p = yytext+2;
int len = yyleng - 2;
bbClear(lextext);
bbAppendn(lextext,p,len);
if((len % 2) == 1) bbAppend(lextext,'0');
bbNull(lextext);
/* convert all chars to lower case */
for(p=bbContents(lextext);(int)*p;p++) *p = tolower(*p);
return lexdebug(OPAQUESTRING);
}
compound|struct|structure {return lexdebug(COMPOUND);}
enum {return lexdebug(ENUM);}
opaque {return lexdebug(OPAQUE_);}
float {return lexdebug(FLOAT_K);}
double {return lexdebug(DOUBLE_K);}
char {return lexdebug(CHAR_K);}
byte {return lexdebug(BYTE_K);}
short {return lexdebug(SHORT_K);}
int {return lexdebug(INT_K);}
ubyte {return lexdebug(identcheck(UBYTE_K));}
ushort {return lexdebug(identcheck(USHORT_K));}
uint {return lexdebug(identcheck(UINT_K));}
int64 {return lexdebug(identcheck(INT64_K));}
uint64 {return lexdebug(identcheck(UINT64_K));}
string {return lexdebug(identcheck(STRING_K));}
real {return lexdebug(FLOAT_K);}
long {return lexdebug(INT_K);}
integer {return lexdebug(INT_K);}
ulong {return lexdebug(identcheck(UINT_K));}
uinteger {return lexdebug(identcheck(UINT_K));}
unlimited|UNLIMITED {int32_val = -1;
return lexdebug(NC_UNLIMITED_K);}
types: {return lexdebug(TYPES);}
dimensions: {return lexdebug(DIMENSIONS);}
variables: {return lexdebug(VARIABLES);}
data: {return lexdebug(DATA);}
group: {return lexdebug(GROUP);}
(netcdf|NETCDF|netCDF) {BEGIN(TEXT);return lexdebug(NETCDF);}
DoubleInf|-?Infinity { /* missing value (pre-2.4 backward compatibility) */
if (yytext[0] == '-') {
double_val = -INFINITY;
} else {
double_val = INFINITY;
}
specialconstants = 1;
return lexdebug(DOUBLE_CONST);
}
NaN|nan { /* missing value (pre-2.4 backward compatibility) */
double_val = NAN;
specialconstants = 1;
return lexdebug(DOUBLE_CONST);
}
FloatInf|-?Infinityf|-?Inff {/* missing value (pre-2.4 backward compatibility)*/
if (yytext[0] == '-') {
float_val = -INFINITYF;
} else {
float_val = INFINITYF;
}
}
specialconstants = 1;
return lexdebug(FLOAT_CONST);
}
NaNf|nanf { /* missing value (pre-2.4 backward compatibility) */
float_val = NANF;
specialconstants = 1;
return lexdebug(FLOAT_CONST);
}
NIL|nil|Nil {
#ifdef USE_NETCDF4
if(l_flag == L_C || l_flag == L_BINARY)
return lexdebug(NIL);
yyerror("NIL only allowed for netcdf-4 and for -lc or -lb");
#else
yyerror("NIL only allowed for netcdf-4 and for -lc or -lb");
#endif
}
{PATH} {
bbClear(lextext);
bbAppendn(lextext,(char*)yytext,yyleng+1); /* include null */
bbNull(lextext);
yylval.sym = makepath(bbContents(lextext));
return lexdebug(PATH);
}
{SPECIAL} {struct Specialtoken* st;
bbClear(lextext);
bbAppendn(lextext,(char*)yytext,yyleng+1); /* include null */
bbNull(lextext);
for(st=specials;st->name;st++) {
if(strcmp(bbContents(lextext),st->name)==0) {return lexdebug(st->token);}
}
return 0;
}
<TEXT>{DATASETID} {
int c;
char* p; char* q;
/* copy the trimmed name */
bbClear(lextext);
bbAppendn(lextext,(char*)yytext,yyleng+1); /* include null */
bbNull(lextext);
p = bbContents(lextext);
q = p;
while((c=*p++)) {if(c > ' ') *q++ = c;}
*q = '\0';
if(datasetname == NULL)
datasetname = bbDup(lextext);
BEGIN(INITIAL);
return lexdebug(DATASETID);
}
{ID} { char* id = NULL; int len;
len = strlen(yytext);
len = unescape(yytext,len,ISIDENT,&id);
if(NCSTREQ(id, FILL_STRING)) {
efree(id);
return lexdebug(FILLMARKER);
}
yylval.sym = install(id);
efree(id);
return lexdebug(IDENT);
}
{NUMBER} {
/*
We need to infer what size of integer ((u)int) this is.
Technically, the user should specify via a tag, but...
If it is out of any integer range, then complain.
Also, if the digits begin with 0, then assume octal.
*/
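/* Illustrative cases: "255" has no tag and is down-converted to the
smallest enclosing type (NC_INT); "255UB" carries the unsigned-byte
tag; "0777" begins with 0 and is parsed as octal (decimal 511). */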
int slen = strlen(ncgtext);
char* stag = NULL;
int tag = NC_NAT;
int isneg = 0;
int c = ncgtext[0];
int fail = 0;
nc_type nct = 0;
char* pos = NULL;
int hasU = 0;
int radix = 10;
pos = ncgtext;
2015-11-20 04:44:07 +08:00
/* capture the tag string */
tag = collecttag(pos,&stag);
if(tag == NC_NAT) {
sprintf(errstr,"Illegal integer suffix: %s",stag);
yyerror(errstr);
goto done;
}
/* drop the tag from the input text */
ncgtext[slen - strlen(stag)] = '\0';
hasU = isuinttype(tag);
/* Capture the sign, if any */
isneg = (c == '-');
/* skip leading sign */
if(c == '-' || c == '+')
pos++;
c = pos[0];
if(tstoctal(c))
radix = 8;
else
radix = 10;
if(isneg && hasU) {
sprintf(errstr,"Unsigned integer cannot be signed: %s",ncgtext);
yyerror(errstr);
goto done;
}
uint64_val = parseULL(radix, pos,&fail);
if(fail) {
sprintf(errstr,"integer constant out of range: %s",ncgtext);
yyerror(errstr);
goto done;
}
/* Down convert to smallest possible range */
nct = downconvert(uint64_val,&tag,isneg,hasU);
switch (k_flag) {
case NC_FORMAT_64BIT_DATA:
case NC_FORMAT_NETCDF4:
return lexdebug(nct2lexeme(nct));
case NC_FORMAT_CLASSIC:
case NC_FORMAT_64BIT_OFFSET:
case NC_FORMAT_NETCDF4_CLASSIC:
if(nct > NC_INT) {
sprintf(errstr,"Illegal integer constant for classic format: %s",ncgtext);
yyerror(errstr);
goto done;
}
}
if(!tagmatch(nct,tag)) {
semwarn(lineno,"Warning: Integer out of range for tag: %s; tag treated as changed.",ncgtext);
}
return lexdebug(nct2lexeme(nct));
done: return 0;
}
{XUNUMBER} {
int c;
int token = 0;
int slen = strlen(yytext);
char* stag = NULL;
int tag = NC_NAT;
char* hex = yytext+2; /* point to first true hex digit */
int xlen = (slen - 3); /* true hex length */
2010-06-03 21:24:43 +08:00
yytext[slen-1] = '\0';
/* capture the tag string */
tag = collecttag(yytext,&stag);
if(tag == NC_NAT) {
sprintf(errstr,"Illegal integer suffix: %s",stag);
yyerror(errstr);
goto done;
}
yytext[slen - strlen(stag)] = '\0';
if(xlen > 16) { /* truncate hi order digits */
hex += (xlen - 16);
}
/* convert to an unsigned long long */
uint64_val = 0;
while((c=*hex++)) {
/* tolower so that upper-case hex digits (allowed by {XUNUMBER}) convert correctly */
unsigned int hexdigit = (c <= '9' ? (c - '0') : (tolower(c) - 'a') + 0xa);
uint64_val = ((uint64_val << 4) | hexdigit);
}
switch (tag) {
case NC_USHORT:
uint16_val = (unsigned short)uint64_val;
token = USHORT_CONST;
break;
case NC_UINT:
uint32_val = (unsigned int)uint64_val;
token = UINT_CONST;
break;
case NC_UINT64:
token = UINT64_CONST;
break;
default: /* should never happen */
if (sscanf((char*)yytext, "%i", &uint32_val) != 1) {
sprintf(errstr,"bad unsigned int constant: %s",(char*)yytext);
yyerror(errstr);
}
token = UINT_CONST;
}
return lexdebug(token);
done: return 0;
}
{DBLNUMBER} {
if (sscanf((char*)yytext, "%le", &double_val) != 1) {
sprintf(errstr,"bad long or double constant: %s",(char*)yytext);
yyerror(errstr);
}
return lexdebug(DOUBLE_CONST);
}
{FLTNUMBER} {
if (sscanf((char*)yytext, "%e", &float_val) != 1) {
sprintf(errstr,"bad float constant: %s",(char*)yytext);
yyerror(errstr);
}
return lexdebug(FLOAT_CONST);
}
\'[^\\]\' {
(void) sscanf((char*)&yytext[1],"%c",&byte_val);
return lexdebug(BYTE_CONST);
}
\'\\[0-7][0-7][0-7]\' {
int oct = unescapeoct(&yytext[2]);
if(oct < 0) {
sprintf(errstr,"bad octal character constant: %s",(char*)yytext);
yyerror(errstr);
}
byte_val = (unsigned int)oct;
return lexdebug(BYTE_CONST);
}
\'\\[xX][0-9a-fA-F][0-9a-fA-F]\' {
int hex = unescapehex(&yytext[3]);
if(hex < 0) {
sprintf(errstr,"bad hex character constant: %s",(char*)yytext);
yyerror(errstr);
}
byte_val = (unsigned int)hex;
return lexdebug(BYTE_CONST);
}
\'\\.\' {
switch ((char)yytext[2]) {
case 'a': byte_val = '\007'; break; /* not everyone under-
* stands '\a' yet */
case 'b': byte_val = '\b'; break;
case 'f': byte_val = '\f'; break;
case 'n': byte_val = '\n'; break;
case 'r': byte_val = '\r'; break;
case 't': byte_val = '\t'; break;
case 'v': byte_val = '\v'; break;
case '\\': byte_val = '\\'; break;
case '?': byte_val = '\177'; break;
case '\'': byte_val = '\''; break;
default: byte_val = (char)yytext[2];
}
return lexdebug(BYTE_CONST);
}
\n {
lineno++ ;
break;
}
"/""*" {/*initial*/
BEGIN(ST_C_COMMENT);
break;
}
<ST_C_COMMENT>([^*]|"*"[^/])* {/* continuation */
break;
}
<ST_C_COMMENT>"*/" {/* final */
BEGIN(INITIAL);
break;
}
<ST_C_COMMENT><<EOF>> {/* final, error */
fprintf(stderr,"unterminated /**/ comment");
BEGIN(INITIAL);
break;
}
. {/* Note: this next rule will not work for UTF8 characters */
return lexdebug(yytext[0]) ;
}
%%
static int
lexdebug(int token)
{
if(debug >= 2)
{
fprintf(stderr,"Token=%d |%s| line=%d\n",token,yytext,lineno);
}
return token;
}
int
lex_init(void)
{
lineno = 1;
lextext = bbNew();
if(0) unput(0); /* keep -Wall quiet */
return 0;
}
static Symbol*
makepath(char* text0)
{
/* Create a reference symbol.
Convert path to a sequence of symbols.
Use last name as symbol name (with root group reference ('/') as exception).
*/
Symbol* refsym = NULL;
/* walk the path converting to a sequence of symbols */
if(strcmp(text0,"/")==0) {
/* special case of root reference */
refsym = rootgroup;
} else {
List* prefix = listnew();
/* split the text into IDENT chunks, convert to symbols */
Symbol* container = rootgroup;
char *ident, *p, *match;
char* text = estrdup(text0);
int lastident;
ident=text+1; p=ident; /* skip leading '/' */
lastident = 0;
do {
match = esc_strchr(p,'/',0);
lastident = (*match == '\0');
*match='\0';
(void)unescape(p,strlen(p),ISIDENT,&ident);
refsym = lookupingroup(NC_GRP,ident,container);
if(!lastident) {
if(refsym == NULL) {
sprintf(errstr,"Undefined or forward referenced group: %s",ident);
yyerror(errstr);
refsym = rootgroup;
} else
listpush(prefix,(void*)refsym);
} else {/* lastident is true */
refsym = install(ident); /* create as symbol */
refsym->objectclass = NC_GRP;/* tentative */
refsym->ref.is_ref = 1;
refsym->container = container;
refsym->subnodes = listnew();
}
container = refsym;
p = (lastident?match:match+1);
if(ident) efree(ident);
} while(!lastident);
refsym->prefix = prefix;
efree(text);
}
return refsym;
}

/*
Parse a simple string of digits in the given radix into an
unsigned long long and return the value.
*/
static unsigned long long
parseULL(int radix, char* text, int* failp)
{
char* endptr;
unsigned long long uint64 = 0;
errno = 0; endptr = NULL;
#ifdef HAVE_STRTOULL
uint64 = strtoull(text,&endptr,radix);
if(errno == ERANGE) {
if(failp) *failp = ERANGE;
return 0;
}
#else /*!defined HAVE_STRTOULL*/
/* Have no useful way to detect out of range */
if(radix == 8)
sscanf((char*)text, "%llo", &uint64);
else
sscanf((char*)text, "%llu", &uint64);
#endif /*!defined HAVE_STRTOULL*/
return uint64;
}
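/* Illustrative usage: parseULL(8,"777",&fail) yields 511; when
HAVE_STRTOULL is defined and the text overflows an unsigned long long,
*failp is set to ERANGE and 0 is returned. */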
/**
Given the raw bits, the sign, the tag, and hasU,
fill in the appropriate *_val field
and return the type.
Note that we cannot return unsigned types if running pure netcdf classic.
The rule is to pick the smallest enclosing type.
The rule used here is that the tag (the suffix, if any)
always takes precedence and the value is modified to conform
if possible; otherwise out-of-range is signalled.
For historical reasons (ncgen3), values that fit as unsigned
are acceptable for the signed tag and conversion is attempted;
e.g. 65535s is legal and is returned as a negative short.
*/
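/* Worked examples of the above rules: "255b" fits the signed byte tag
only after conversion, so byte_val becomes -1 and NC_BYTE is returned;
"255ub" stores ubyte_val = 255 and returns NC_UBYTE; untagged
"3000000000" exceeds NC_MAX_INT but is non-negative and <= NC_MAX_UINT,
so it becomes NC_UINT. */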
static nc_type
downconvert(unsigned long long uint64, int* tagp, int isneg, int hasU)
{
nc_type nct = NC_NAT;
int tag = *tagp;
int bit63set = (uint64 >> 63);
long long int64 = *((long long*)&uint64);

if(isneg && hasU) {
return (*tagp = NC_NAT);
}
/* To simplify the code, we look for the special case of NC_UINT64
constants that will not fit into an NC_INT64 constant.
*/
if(tag == NC_UINT64 && bit63set) {
uint64_val = uint64;
return tag;
}
/* At this point we need only deal with the int64 value */
/* Apply the isneg */
if(isneg)
int64 = - int64;
if(tag == NC_NOTAG) {
/* If we have no other info, then assume NC_(U)INT(64) */
if(int64 >= NC_MIN_INT && int64 <= NC_MAX_INT) {
nct = (tag = NC_INT);
int32_val = (signed int)int64;
} else if(int64 >= 0 && int64 <= NC_MAX_UINT) {
nct = (tag = NC_UINT);
uint32_val = (unsigned int)int64;
} else if(int64 < 0) {
nct = (tag = NC_INT64);
int64_val = (signed long long)int64;
} else {
nct = (tag = NC_UINT64);
uint64_val = (unsigned long long)int64;
}
goto done;
}
if(isuinttype(tag) && int64 < 0)
goto outofrange;
switch (tag) {
case NC_UBYTE:
if(int64 <= NC_MAX_UBYTE) {
nct = NC_UBYTE;
ubyte_val = (unsigned char)int64;
} else
goto outofrange;
break;
case NC_USHORT:
if(int64 <= NC_MAX_USHORT) {
nct = NC_USHORT;
uint16_val = (unsigned short)int64;
} else
goto outofrange;
break;
case NC_UINT:
if(int64 <= NC_MAX_UINT) {
nct = NC_UINT;
uint32_val = (unsigned int)int64;
} else
goto outofrange;
break;
case NC_UINT64:
if(int64 <= NC_MAX_UINT64) {
nct = NC_UINT64;
uint64_val = uint64;
} else
goto outofrange;
break;
case NC_INT64:
nct = NC_INT64;
int64_val = int64;
break;
case NC_BYTE:
nct = NC_BYTE;
byte_val = (signed char)int64;
break;
case NC_SHORT:
nct = NC_SHORT;
int16_val = (signed short)int64;
break;
case NC_INT:
nct = NC_INT;
int32_val = (signed int)int64;
break;
default:
goto outofrange;
}
done:
*tagp = tag;
return nct;
outofrange:
yyerror("Value out of range");
return NC_NAT;
}
static int
nct2lexeme(nc_type nct)
{
switch(nct) {
case NC_BYTE: return BYTE_CONST;
case NC_CHAR: return CHAR_CONST;
case NC_SHORT: return SHORT_CONST;
case NC_INT: return INT_CONST;
case NC_UBYTE: return UBYTE_CONST;
case NC_USHORT: return USHORT_CONST;
case NC_UINT: return UINT_CONST;
case NC_INT64: return INT64_CONST;
case NC_UINT64: return UINT64_CONST;
}
return 0;
}
static int
tagmatch(nc_type nct, int tag)
{
if(tag == NC_NAT || tag == NC_NOTAG)
return 1;
return nct == tag;
}
/* capture the tag string */
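/* Illustrative behavior: for "1234ull" *stagp is left pointing at "ull"
and NC_UINT64 is returned; for "77s" the tag is NC_SHORT; for a plain
"10" there is no suffix and NC_NOTAG is returned. */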
static int
collecttag(char* text, char** stagp)
{
char* stag0;
#define MAXTAGLEN 3
char stag[MAXTAGLEN+1];
int slen = strlen(text);
int staglen;
int tag = NC_NAT;
int hasU = 0;
for(stag0 = text+(slen-1);stag0 > text;stag0--) {
if(strchr(TAGCHARS,*stag0) == NULL) {stag0++; break;}
}
if(stagp) *stagp = stag0;
staglen = strlen(stag0);
if(staglen == 0)
return NC_NOTAG;
if(staglen > MAXTAGLEN)
return tag;
strncpy(stag,stag0,sizeof(stag));
stag[MAXTAGLEN] = '\0';
if(stag[0] == 'U' || stag[0] == 'u') {
hasU = 1;
memmove(stag,stag+1,MAXTAGLEN);
staglen--;
} else if(stag[staglen-1] == 'U' || stag[staglen-1] == 'u') {
hasU = 1;
staglen--;
stag[staglen] = '\0';
}
if(strlen(stag) == 0 && hasU) {
tag = NC_UINT;
} else if(strlen(stag) == 1) {
switch (stag[0]) {
case 'B': case 'b': tag = (hasU ? NC_UBYTE : NC_BYTE); break;
case 'S': case 's': tag = (hasU ? NC_USHORT : NC_SHORT); break;
case 'L': case 'l': tag = (hasU ? NC_UINT : NC_INT); break;
default: break;
}
} else if(strcasecmp(stag,"ll") == 0) {
tag = (hasU ? NC_UINT64 : NC_INT64);
}
return tag;
}
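/* In classic-model output formats the netcdf-4 type names are not
reserved words, so identcheck demotes the corresponding keyword tokens
to ordinary identifiers; e.g. a CDL file targeting classic format may
declare a variable named "string". */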
static int
identcheck(int token)
{
switch (token) {
case UBYTE_K:
case USHORT_K:
case UINT_K:
case INT64_K:
case UINT64_K:
if(k_flag != NC_FORMAT_NETCDF4 && k_flag != NC_FORMAT_64BIT_DATA) {
yylval.sym = install(yytext);
token = IDENT;
}
break;
case STRING_K:
if(k_flag != NC_FORMAT_NETCDF4) {
yylval.sym = install(yytext);
token = IDENT;
}
break;
default:
break;
}
return token;
}