diff --git a/Dockerfile b/Dockerfile index d463363..92bd75d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN git clone https://github.com/sunyinqi0508/AQuery2 RUN python3 -m pip install -r AQuery2/requirements.txt -ENV IS_DOCKER_IMAGE=1 CXX=clang-14 +ENV IS_DOCKER_IMAGE=1 CXX=clang++-14 CMD cd AQuery2 && python3 prompt.py diff --git a/Makefile b/Makefile index 4a16eb8..291a2a6 100644 --- a/Makefile +++ b/Makefile @@ -97,6 +97,6 @@ docker: docker build -t aquery . clean: - rm *.shm *.o dll.so server.so server.bin libaquery.a libaquery.lib -rf 2> $(NULL_DEVICE) || true + rm .cached *.shm *.o dll.so server.so server.bin libaquery.a libaquery.lib -rf 2> $(NULL_DEVICE) || true diff --git a/README.md b/README.md index 83a072b..df2e038 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,92 @@ AQuery++ Database is a cross-platform, In-Memory Column-Store Database that incorporates compiled query execution. +## Requirements +1. Recent version of Linux, Windows or MacOS, with recent C++ compiler that has C++17 (1z) support. (however c++20 is recommended if available for heterogeneous lookup on unordered containers) + - GCC: 9.0 or above (g++ 7.x, 8.x fail to handle fold-expressions due to a compiler bug) + - Clang: 5.0 or above (Recommended) + - MSVC: 2019 or later (2022 or above is recommended) + +2. Monetdb for Hybrid Engine + - On windows, the required libraries and headers are already included in the repo. + - On Linux, see [Monetdb Easy Setup](https://www.monetdb.org/easy-setup/) for instructions. + - On MacOS, Monetdb can be easily installed in homebrew `brew install monetdb`. + +3. Python 3.6 or above and install required packages in requirements.txt by `python3 -m pip install -r requirements.txt` + +## Installation +AQuery is tested on mainstream operating systems such as Windows, macOS and Linux +### Windows +There're multiple options to run AQuery on Windows. 
You can use the native toolchain from Microsoft Visual Studio or gcc from Cygwin/MinGW or run it under Windows Subsystem for Linux. + +- For WSL, Docker or Linux virtual machines, see Linux, Docker sections below +- For Visual Studio (Recommended): + 1. Install python3.6 or above from [official website](https://www.python.org/downloads/windows/) or Microsoft Store. + 2. Install Microsoft Visual Studio 2022 or later with **Desktop development with C++** selected. + 3. Clone AQuery repo from [Github](https://github.com/sunyinqi0508/AQuery2) + 4. Install python requirements with pip `python3 -m pip install -r requirements.txt` + 5. Change the build_driver variable in aquery_config.py to "MSBuild" + 6. The libraries and headers for Monetdb are already included in msc-plugins, however you can also choose to download them from [Monetdb Easy Setup](https://www.monetdb.org/easy-setup/) and put them in the same place. + +- For Winlibs (Recommended): + - Download latest winlibs toolchain from the [official website](https://winlibs.com/) + - Since winlibs is linked with native windows runtime libraries (UCRT or MSVCRT), it offers better interoperability with other libraries built with MSVC such as python and monetdb. + - Other steps can be either the same as Visual Studio or Cygwin/Mingw (below) without ABI break. + - Copy or link `mingw64/libexec/gcc///liblto-plugin.dll` to `mingw64/lib/bfd-plugins/` for Link time optimization support on gcc-ar and gcc-ranlib + +- For CygWin/MinGW: + 1. Install gcc and python3 using its **builtin package manager** instead of the one from python.org or windows store. (For Msys2, `pacman -S gcc python3`). Otherwise, ABI breakage may happen. + 2. Clone AQuery repo from Github + 3. Install python requirements + 4. The prebuilt binaries are included in ./lib directory. However, you could also rebuild them from [source](https://github.com/MonetDB/MonetDB). + +### macOS +- If you're using an arm-based mac (e.g. M1, M2 processors). 
Please go to the Application folder and right-click on the Terminal app, select 'Get Info' and ensure that the 'Open using Rosetta' option is unchecked. See the section below for more notes for arm-based macs. +- Install a package manager such as [homebrew](https://brew.sh) +- Install python3 and monetdb using homebrew `brew install python3 monetdb` +- Install the C++ compiler that comes with the Xcode command-line tools by `xcode-select --install` or from homebrew +- If you have multiple C++ compilers on the system, specify the C++ compiler by setting the **CXX** environment variable. e.g. `export CXX=clang` +- Install python packages from **requirements.txt** + +**for arm64 macOS users** +- In theory, AQuery++ can work on both native arm64 and x86_64 through Rosetta. But for maximum performance, running native is preferred. +- However, they can't be mixed up, i.e. make sure every component, `python` , `C++ compiler`, `monetdb` library and system commandline utilities such as `uname` should have the same architecture. +- Use the script `./arch-check.sh` to check if relevant binaries all have the same architecture. +- In the case where binaries have different architectures, install the software with desired architecture and make an alias or link to ensure the newly installed binary is referred to. +- Because I can't get access to an arm-based mac to fully test this setup, there might still be issues. Please open an issue if you encounter any problems. + +### Linux +- Install monetdb, see [Monetdb Easy Setup](https://www.monetdb.org/easy-setup/) for instructions. +- Install python3, C++ compiler and git. (For Ubuntu, run `apt update && apt install -y python3 python3-pip clang-14 libmonetdbe-dev git `) +- Install required python packages by `python3 -m pip install -r requirements.txt` +- If you have multiple C++ compilers on the system, specify the C++ compiler by setting the **CXX** environment variable. e.g. 
`export CXX=clang++-14` +- Note for anaconda users: the system libraries included in anaconda might differ from the ones your compiler is using. In this case, you might get errors similar to: + >ImportError: libstdc++.so.6: version `GLIBCXX_3.4.26' not found + + In this case, upgrade anaconda or your compiler or use the python from your OS or package manager instead. Or (**NOT recommended**) copy/link the library from your system (e.g. /usr/lib/x86_64-linux-gnu/libstdc++.so.6) to anaconda's library directory (e.g. ~/Anaconda3/lib/). + +### Docker: + - Alternatively, you can also use docker to run AQuery. + - Type `make docker` to build the docker image from scratch. + - For Arm-based Mac users, you would have to build and run the x86_64 docker image because MonetDB doesn't offer official binaries for arm64 Linux. +## Usage +`python3 prompt.py` will launch the interactive command prompt. The server binary will be automatically rebuilt and started. +#### Commands: +- ``: parse AQuery statement +- `f `: parse all AQuery statements in file +- `exec`: execute last parsed statement(s) with Hybrid Execution Engine. Hybrid Execution Engine decouples the query into two parts. The standard SQL (MonetDB dialect) part is executed by an Embedded version of Monetdb and everything else is executed by a post-process module which is generated by AQuery++ Compiler in C++ and then compiled and executed. +- `dbg` start debugging session +- `print`: printout parsed AQuery statements +- `save `: save current code snippet. will use random filename if not specified. +- `exit`: quit the prompt +- `r`: run the last generated code snippet +### Example: + `f moving_avg.a`
+ `xexec` + +See ./tests/ for more examples. + + ## Architecture ![Architecture](./docs/arch-hybrid.svg) @@ -40,53 +126,12 @@ AQuery++ Database is a cross-platform, In-Memory Column-Store Database that inco ## Known Issues: -- [x] User Module test - [ ] Interval based triggers -- [x] Hot reloading server binary +- [ ] Hot reloading server binary - [x] Bug fixes: type deduction misaligned in Hybrid Engine - [ ] Investigation: Using postproc only for q1 in Hybrid Engine (make is_special always on) -- [x] Limitation: putting ColRefs back to monetdb. (Comparison) - [ ] C++ Meta-Programming: Eliminate template recursions as much as possible. -- [x] Limitation: Date and Time, String operations, Funcs in groupby agg. - [ ] Functionality: Basic helper functions in aquery -- [ ] Improvement: More DDLs, e.g. drop table, update table, etc. +- [x] Improvement: More DDLs, e.g. drop table, update table, etc. - [ ] Bug: Join-Aware Column management - [ ] Bug: Order By after Group By - - -# Installation -## Requirements -1. Recent version of Linux, Windows or MacOS, with recent C++ compiler that has C++17 (1z) support. (however c++20 is recommended if available for heterogeneous lookup on unordered containers) - - GCC: 9.0 or above (g++ 7.x, 8.x fail to handle fold-expressions due to a compiler bug) - - Clang: 5.0 or above (Recommended) - - MSVC: 2017 or later (2022 or above is recommended) - -2. Monetdb for Hybrid Engine - - On windows, the required libraries and headers are already included in the repo. - - On Linux, see [Monetdb Easy Setup](https://www.monetdb.org/easy-setup/) for instructions. - - On MacOS, Monetdb can be easily installed in homebrew `brew install monetdb`. - -3. Python 3.6 or above and install required packages in requirements.txt by `python3 -m pip install -r requirements.txt` -## Usage -`python3 prompt.py` will launch the interactive command prompt. The server binary will be autometically rebuilt and started. 
-#### Commands: -- ``: parse AQuery statement -- `f `: parse all AQuery statements in file -- `dbg` start debugging session -- `print`: printout parsed AQuery statements - -- `xexec`: execute last parsed statement(s) with Hybrid Execution Engine. Hybrid Execution Engine decouples the query into two parts. The standard SQL (MonetDB dialect) part is executed by an Embedded version of Monetdb and everything else is executed by a post-process module which is generated by AQuery++ Compiler in C++ and then compiled and executed. -- `save `: save current code snippet. will use random filename if not specified. -- `exit`: quit the prompt -- `exec`: execute last parsed statement(s) with AQuery Execution Engine (Old). AQuery Execution Engine executes query by compiling it to C++ code and then executing it. -- `r`: run the last generated code snippet -### Example: - `f moving_avg.a`
- `xexec` - -See ./tests/ for more examples. - -## Notes for arm64 macOS users -- In theory, AQuery++ could work on both native arm64 and x86_64 through Rosetta. But for maximum performance, running native is preferred. -- However, they can't be mixed up, i.e. make sure every component, `python` binary, `C++ compiler`, `monetdb` library and system commandline utilities such as `uname` should have the same architecture. -- Because I can't get access to an arm-based mac to fully test this setup, there might still be issues. Please open an issue if you encounter any problems. \ No newline at end of file diff --git a/aquery_config.py b/aquery_config.py index 3330b6e..c2ede14 100644 --- a/aquery_config.py +++ b/aquery_config.py @@ -2,7 +2,7 @@ ## GLOBAL CONFIGURATION FLAGS -version_string = '0.4.5a' +version_string = '0.4.7a' add_path_to_ldpath = True rebuild_backend = False run_backend = True @@ -13,7 +13,7 @@ os_platform = 'unknown' build_driver = 'Makefile' def init_config(): - global __config_initialized__, os_platform, msbuildroot + global __config_initialized__, os_platform, msbuildroot, build_driver ## SETUP ENVIRONMENT VARIABLES # __config_initialized__ = False #os_platform = 'unkown' @@ -44,15 +44,23 @@ def init_config(): if os_platform == 'win': add_dll_dir(cygroot) add_dll_dir(os.path.abspath('./msc-plugin')) - import vswhere - vsloc = vswhere.find(prerelease = True, latest = True, prop = 'installationPath') - if vsloc: - msbuildroot = vsloc[0] + '/MSBuild/Current/Bin/MSBuild.exe' - else: - print('Warning: No Visual Studio installation found.') + if build_driver == 'Auto': + try: + import vswhere + vsloc = vswhere.find(prerelease = True, latest = True, prop = 'installationPath') + if vsloc: + msbuildroot = vsloc[0] + '/MSBuild/Current/Bin/MSBuild.exe' + build_driver = 'MSBuild' + else: + print('Warning: No Visual Studio installation found.') + build_driver = 'Makefile' + except ModuleNotFoundError: + build_driver = 'Makefile' # print("adding path") else: 
import readline + if build_driver == 'Auto': + build_driver = 'Makefile' if os_platform == 'cygwin': add_dll_dir('./lib') __config_initialized__ = True diff --git a/arch-check.sh b/arch-check.sh new file mode 100644 index 0000000..a472028 --- /dev/null +++ b/arch-check.sh @@ -0,0 +1,21 @@ +ARCH=`uname -m` +ARCH2=`arch` +echo Current architechure: $ARCH $ARCH2 +echo Current shell: $SHELL +PASSED=1 +for i in python3 c++ make ranlib libtool $SHELL +do + FILEPATH=`which $i` + FILEINFO=`file $FILEPATH` + if [[ $FILEINFO =~ $ARCH ]]; then + echo $i@$FILEPATH: passed + else + echo "\033[1;31mERROR\033[0m: Architecture of $i is not $ARCH: $FILEINFO" + PASSED=0 + fi +done + +if [[ PASSED -eq 1 ]]; then + echo "\033[1;32mBinary archtechure check passed\033[0m" +fi + diff --git a/build.py b/build.py index 3312a0b..172b30c 100644 --- a/build.py +++ b/build.py @@ -14,16 +14,17 @@ class checksums: libaquery_a : Optional[Union[bytes, bool]] = None pch_hpp_gch : Optional[Union[bytes, bool]] = None server : Optional[Union[bytes, bool]] = None - sources : Union[Dict[str, bytes], bool] = None + sources : Optional[Union[Dict[str, bytes], bool]] = None env : str = '' - def calc(self, libaquery_a = 'libaquery.a' , + def calc(self, compiler_name, libaquery_a = 'libaquery.a' , pch_hpp_gch = 'server/pch.hpp.gch', server = 'server.so' ): from platform import machine self.env = (aquery_config.os_platform + machine() + - aquery_config.build_driver + aquery_config.build_driver + + compiler_name ) for key in self.__dict__.keys(): try: @@ -69,7 +70,7 @@ class checksums: class build_manager: sourcefiles = [ - 'build.py', + 'build.py', 'Makefile', 'server/server.cpp', 'server/io.cpp', 'server/monetdb_conn.cpp', 'server/threading.cpp', 'server/winhelper.cpp' @@ -93,8 +94,8 @@ class build_manager: ret = True for c in self.build_cmd: if c: - try: - ret = subprocess.call(c, stdout = stdout, stderr = stderr) and ret + try: # only last success matters + ret = not subprocess.call(c, stdout = stdout, 
stderr = stderr) # and ret except (FileNotFoundError): ret = False pass @@ -104,7 +105,10 @@ class build_manager: def __init__(self, mgr : 'build_manager') -> None: super().__init__(mgr) os.environ['PCH'] = f'{mgr.PCH}' - os.environ['CXX'] = mgr.cxx if mgr.cxx else 'c++' + if 'CXX' not in os.environ: + os.environ['CXX'] = mgr.cxx if mgr.cxx else 'c++' + else: + mgr.cxx = os.environ['CXX'] def libaquery_a(self): self.build_cmd = [['rm', 'libaquery.a'],['make', 'libaquery.a']] @@ -129,6 +133,10 @@ class build_manager: class MSBuildDriver(DriverBase): platform_map = {'amd64':'x64', 'arm64':'arm64', 'x86':'win32'} opt_map = {'0':'Debug', '1':'RelWithDebugInfo', '2':'Release', '3':'Release', '4':'Release'} + def __init__(self, mgr : 'build_manager') -> None: + super().__init__(mgr) + mgr.cxx = aquery_config.msbuildroot + def get_flags(self): self.platform = self.platform_map[self.mgr.Platform] self.platform = f'/p:platform={self.platform}' @@ -142,7 +150,7 @@ class build_manager: return self.build() def pch(self): - pass + return True def server(self): loc = os.path.abspath('./msc-plugin/server.vcxproj') @@ -184,7 +192,7 @@ class build_manager: libaquery_a = 'libaquery.a' if aquery_config.os_platform == 'win': libaquery_a = 'libaquery.lib' - current.calc(libaquery_a) + current.calc(self.cxx, libaquery_a) try: with open('.cached', 'rb') as cache_sig: cached = pickle.loads(cache_sig.read()) @@ -193,22 +201,27 @@ class build_manager: self.cache_status = current != cached success = True - if force or self.cache_status.sources: - self.driver.pch() - self.driver.libaquery_a() - self.driver.server() + if (force or + self.cache_status.sources or + self.cache_status.env + ): + success &= self.driver.pch() + success &= self.driver.libaquery_a() + success &= self.driver.server() else: if self.cache_status.libaquery_a: - success = self.driver.libaquery_a() and success + success &= self.driver.libaquery_a() if self.cache_status.pch_hpp_gch: - success = self.driver.pch() and success 
+ success &= self.driver.pch() if self.cache_status.server: - success = self.driver.server() and success + success &= self.driver.server() if success: - current.calc(libaquery_a) + current.calc(self.cxx, libaquery_a) with open('.cached', 'wb') as cache_sig: cache_sig.write(pickle.dumps(current)) else: + if aquery_config.os_platform == 'mac': + os.system('./arch-check.sh') try: os.remove('./.cached') except: diff --git a/engine/types.py b/engine/types.py index 3083795..74541c6 100644 --- a/engine/types.py +++ b/engine/types.py @@ -106,11 +106,13 @@ ULongT = Types(8, name = 'uint64', sqlname = 'UINT64', fp_type=DoubleT) UIntT = Types(7, name = 'uint32', sqlname = 'UINT32', long_type=ULongT, fp_type=FloatT) UShortT = Types(6, name = 'uint16', sqlname = 'UINT16', long_type=ULongT, fp_type=FloatT) UByteT = Types(5, name = 'uint8', sqlname = 'UINT8', long_type=ULongT, fp_type=FloatT) -StrT = Types(200, name = 'str', cname = 'const char*', sqlname='VARCHAR', ctype_name = 'types::ASTR') +StrT = Types(200, name = 'str', cname = 'const char*', sqlname='TEXT', ctype_name = 'types::ASTR') +TextT = Types(200, name = 'text', cname = 'const char*', sqlname='TEXT', ctype_name = 'types::ASTR') +VarcharT = Types(200, name = 'varchar', cname = 'const char*', sqlname='VARCHAR', ctype_name = 'types::ASTR') VoidT = Types(200, name = 'void', cname = 'void', sqlname='Null', ctype_name = 'types::None') class VectorT(Types): - def __init__(self, inner_type : Types, vector_type:str = 'ColRef'): + def __init__(self, inner_type : Types, vector_type:str = 'vector_type'): self.inner_type = inner_type self.vector_type = vector_type @@ -119,7 +121,7 @@ class VectorT(Types): return f'{self.vector_type}<{self.inner_type.name}>' @property def sqlname(self) -> str: - return 'BINARY' + return 'BIGINT' @property def cname(self) -> str: return self.name @@ -138,7 +140,10 @@ int_types : Dict[str, Types] = _ty_make_dict('t.sqlname.lower()', LongT, ByteT, uint_types : Dict[str, Types] = 
_ty_make_dict('t.sqlname.lower()', ULongT, UByteT, UShortT, UIntT) fp_types : Dict[str, Types] = _ty_make_dict('t.sqlname.lower()', FloatT, DoubleT) temporal_types : Dict[str, Types] = _ty_make_dict('t.sqlname.lower()', DateT, TimeT, TimeStampT) -builtin_types : Dict[str, Types] = {**_ty_make_dict('t.sqlname.lower()', AnyT, StrT), **int_types, **fp_types, **temporal_types} +builtin_types : Dict[str, Types] = { + 'string' : StrT, + **_ty_make_dict('t.sqlname.lower()', AnyT, TextT, VarcharT), + **int_types, **fp_types, **temporal_types} def get_int128_support(): for t in int_types.values(): @@ -267,7 +272,9 @@ def windowed_fn_behavor(op: OperatorBase, c_code, *x): # arithmetic opadd = OperatorBase('add', 2, auto_extension, cname = '+', sqlname = '+', call = binary_op_behavior) -opdiv = OperatorBase('div', 2, fp(auto_extension), cname = '/', sqlname = '/', call = binary_op_behavior) +# monetdb wont extend int division to fp type +# opdiv = OperatorBase('div', 2, fp(auto_extension), cname = '/', sqlname = '/', call = binary_op_behavior) +opdiv = OperatorBase('div', 2, auto_extension, cname = '/', sqlname = '/', call = binary_op_behavior) opmul = OperatorBase('mul', 2, fp(auto_extension), cname = '*', sqlname = '*', call = binary_op_behavior) opsub = OperatorBase('sub', 2, auto_extension, cname = '-', sqlname = '-', call = binary_op_behavior) opmod = OperatorBase('mod', 2, auto_extension_int, cname = '%', sqlname = '%', call = binary_op_behavior) @@ -288,7 +295,9 @@ opdistinct = OperatorBase('distinct', 1, as_is, cname = '.distinct()', sqlname = fnmax = OperatorBase('max', 1, as_is, cname = 'max', sqlname = 'MAX', call = fn_behavior) fnmin = OperatorBase('min', 1, as_is, cname = 'min', sqlname = 'MIN', call = fn_behavior) fndeltas = OperatorBase('deltas', 1, as_is, cname = 'deltas', sqlname = 'DELTAS', call = fn_behavior) +fnratios = OperatorBase('ratios', [1, 2], fp(ty_clamp(as_is, -1)), cname = 'ratios', sqlname = 'RATIOS', call = windowed_fn_behavor) fnlast = 
OperatorBase('last', 1, as_is, cname = 'last', sqlname = 'LAST', call = fn_behavior) +fnfirst = OperatorBase('first', 1, as_is, cname = 'frist', sqlname = 'FRIST', call = fn_behavior) #fnsum = OperatorBase('sum', 1, ext(auto_extension), cname = 'sum', sqlname = 'SUM', call = fn_behavior) #fnavg = OperatorBase('avg', 1, fp(ext(auto_extension)), cname = 'avg', sqlname = 'AVG', call = fn_behavior) fnsum = OperatorBase('sum', 1, long_return, cname = 'sum', sqlname = 'SUM', call = fn_behavior) @@ -324,7 +333,7 @@ builtin_unary_logical = _op_make_dict(opnot) builtin_unary_arith = _op_make_dict(opneg) builtin_unary_special = _op_make_dict(spnull, opdistinct) builtin_cstdlib = _op_make_dict(fnsqrt, fnlog, fnsin, fncos, fntan, fnpow) -builtin_func = _op_make_dict(fnmax, fnmin, fnsum, fnavg, fnmaxs, fnmins, fndeltas, fnlast, fnsums, fnavgs, fncnt) +builtin_func = _op_make_dict(fnmax, fnmin, fnsum, fnavg, fnmaxs, fnmins, fndeltas, fnratios, fnlast, fnfirst, fnsums, fnavgs, fncnt) user_module_func = {} builtin_operators : Dict[str, OperatorBase] = {**builtin_binary_arith, **builtin_binary_logical, **builtin_unary_arith, **builtin_unary_logical, **builtin_unary_special, **builtin_func, **builtin_cstdlib, diff --git a/engine/utils.py b/engine/utils.py index 1a8b403..065f8c8 100644 --- a/engine/utils.py +++ b/engine/utils.py @@ -137,3 +137,10 @@ def add_dll_dir(dll: str): os.environ['PATH'] = os.path.abspath(dll) + os.pathsep + os.environ['PATH'] nullstream = open(os.devnull, 'w') + + +def clamp(val, minval, maxval): + return min(max(val, minval), maxval) + +def escape_qoutes(string : str): + return re.sub(r'^\'', r'\'',re.sub(r'([^\\])\'', r'\1\'', string)) diff --git a/monetdb/msvc/monetdb_config.h b/monetdb/msvc/monetdb_config.h index d47fef2..730c019 100644 --- a/monetdb/msvc/monetdb_config.h +++ b/monetdb/msvc/monetdb_config.h @@ -435,7 +435,7 @@ gmtime_r(const time_t *__restrict__ timep, struct tm *__restrict__ result) #define HAVE_SOCKLEN_T 1 #ifndef _MSC_VER -#define 
SOCKET int +// #define SOCKET int #define closesocket close #endif diff --git a/prompt.py b/prompt.py index 0f59d33..3598c62 100644 --- a/prompt.py +++ b/prompt.py @@ -29,7 +29,7 @@ prompt_help = '''\ ******** AQuery Prompt Help ********* help: - print out this message + print this help message help commandline: print help message for AQuery Commandline : @@ -43,7 +43,7 @@ dbg: print: printout parsed sql statements exec: - execute last parsed statement(s) with AQuery Execution Engine + execute last parsed statement(s) with AQuery Execution Engine (disabled) xexec: execute last parsed statement(s) with Hybrid Execution Engine r: @@ -336,7 +336,7 @@ def prompt(running = lambda:True, next = lambda:input('> '), state = None): time.sleep(.00001) og_q : str = next() q = og_q.lower().strip() - if q == 'exec': # generate build and run (AQuery Engine) + if False and q == 'exec': # generate build and run (AQuery Engine) state.cfg.backend_type = Backend_Type.BACKEND_AQuery.value cxt = engine.exec(state.stmts, cxt, keep) if state.buildmgr.build_dll() == 0: @@ -352,7 +352,7 @@ def prompt(running = lambda:True, next = lambda:input('> '), state = None): else: print(prompt_help) continue - elif q.startswith('xexec'): # generate build and run (MonetDB Engine) + elif q.startswith('xexec') or q.startswith('exec'): # generate build and run (MonetDB Engine) state.cfg.backend_type = Backend_Type.BACKEND_MonetDB.value cxt = xengine.exec(state.stmts, cxt, keep) @@ -420,7 +420,7 @@ def prompt(running = lambda:True, next = lambda:input('> '), state = None): continue elif q == 'format' or q == 'fmt': subprocess.call(['clang-format', 'out.cpp']) - elif q == 'exit': + elif q == 'exit' or q == 'exit()': rm(state) exit() elif q == 'r': # build and run diff --git a/reconstruct/__init__.py b/reconstruct/__init__.py index c27a9da..fd02f61 100644 --- a/reconstruct/__init__.py +++ b/reconstruct/__init__.py @@ -18,6 +18,8 @@ def generate(ast, cxt): ast_node.types[k](None, ast, cxt) def exec(stmts, cxt 
= None, keep = False): + if 'stmts' not in stmts: + return cxt = initialize(cxt, keep) stmts_stmts = stmts['stmts'] if type(stmts_stmts) is list: diff --git a/reconstruct/ast.py b/reconstruct/ast.py index d6293c4..44fb969 100644 --- a/reconstruct/ast.py +++ b/reconstruct/ast.py @@ -2,6 +2,7 @@ from copy import deepcopy from dataclasses import dataclass from enum import Enum, auto from typing import Set, Tuple, Dict, Union, List, Optional + from engine.types import * from engine.utils import enlist, base62uuid, base62alp, get_legal_name from reconstruct.storage import Context, TableInfo, ColRef @@ -133,7 +134,7 @@ class projection(ast_node): sql_expr = expr(self, e, c_code=False) this_type = proj_expr.type name = proj_expr.sql - compound = True # compound column + compound = [proj_expr.is_compound > 1] # compound column proj_expr.cols_mentioned = self.datasource.rec alias = '' if 'name' in proj: # renaming column by AS keyword @@ -142,23 +143,29 @@ class projection(ast_node): if not proj_expr.is_special: if proj_expr.node == '*': name = [c.get_full_name() for c in self.datasource.rec] + this_type = [c.type for c in self.datasource.rec] + compound = [c.compound for c in self.datasource.rec] + proj_expr = [expr(self, c.name) for c in self.datasource.rec] else: y = lambda x:x count = lambda : 'count(*)' name = enlist(sql_expr.eval(False, y, count=count)) - for n in name: + this_type = enlist(this_type) + proj_expr = enlist(proj_expr) + for t, n, pexpr, cp in zip(this_type, name, proj_expr, compound): + t = VectorT(t) if cp else t offset = len(col_exprs) if n not in self.var_table: self.var_table[n] = offset - if proj_expr.is_ColExpr and type(proj_expr.raw_col) is ColRef: - for _alias in (proj_expr.raw_col.table.alias): + if pexpr.is_ColExpr and type(pexpr.raw_col) is ColRef: + for _alias in (pexpr.raw_col.table.alias): self.var_table[f'{_alias}.'+n] = offset - proj_map[i] = [this_type, offset, proj_expr] + proj_map[i] = [t, offset, pexpr] col_expr = n + ' AS ' + alias 
if alias else n if alias: self.var_table[alias] = offset - col_exprs.append((col_expr, proj_expr.type)) + col_exprs.append((col_expr, t)) i += 1 else: self.context.headers.add('"./server/aggregations.h"') @@ -169,7 +176,8 @@ class projection(ast_node): i += 1 name = enlist(name) disp_name = [get_legal_name(alias if alias else n) for n in name] - + this_type = enlist(this_type) + elif type(proj) is str: col = self.datasource.get_col(proj) this_type = col.type @@ -178,8 +186,8 @@ class projection(ast_node): # name = col.name self.datasource.rec = None # TODO: Type deduction in Python - for n in disp_name: - cols.append(ColRef(this_type, self.out_table, None, n, len(cols), compound=compound)) + for t, n, c in zip(this_type, disp_name, compound): + cols.append(ColRef(t, self.out_table, None, n, len(cols), compound=c)) self.out_table.add_cols(cols, new = False) @@ -213,7 +221,7 @@ class projection(ast_node): self.add(self.group_node.sql) if self.col_ext or self.group_node and self.group_node.use_sp_gb: - self.use_postproc = True + self.has_postproc = True o = self.assumptions if 'orderby' in node: @@ -223,7 +231,7 @@ class projection(ast_node): if 'outfile' in node: self.outfile = outfile(self, node['outfile'], sql = self.sql) - if not self.use_postproc: + if not self.has_postproc: self.sql += self.outfile.sql else: self.outfile = None @@ -280,10 +288,11 @@ class projection(ast_node): self.datasource.all_cols().difference(self.group_node.refs)) ) and val[2].is_compound # compound val not in key # or + # val[2].is_compound > 1 # (not self.group_node and val[2].is_compound) ): - out_typenames[key] = f'ColRef<{out_typenames[key]}>' - + out_typenames[key] = f'vector_type<{out_typenames[key]}>' + self.out_table.columns[key].compound = True outtable_col_nameslist = ', '.join([f'"{c.name}"' for c in self.out_table.columns]) self.outtable_col_names = 'names_' + base62uuid(4) self.context.emitc(f'const char* {self.outtable_col_names}[] = {{{outtable_col_nameslist}}};') @@ -523,7 
+532,7 @@ class groupby_c(ast_node): materialize_builtin['_builtin_len'] = len_var if '_builtin_ret' in ex.udf_called.builtin_used: define_len_var() - gscanner.add(f'{ce[0]}.emplace_back({{{len_var}}});\n') + gscanner.add(f'{ce[0]}.emplace_back({len_var});\n') materialize_builtin['_builtin_ret'] = f'{ce[0]}.back()' gscanner.add(f'{ex.eval(c_code = True, y=get_var_names, materialize_builtin = materialize_builtin)};\n') continue @@ -608,6 +617,7 @@ class join(ast_node): self.join_conditions = [] # self.tmp_name = 'join_' + base62uuid(4) # self.datasource = TableInfo(self.tmp_name, [], self.context) + def append(self, tbls, __alias = ''): alias = lambda t : t + ' ' + __alias if len(__alias) else t if type(tbls) is join: @@ -652,8 +662,11 @@ class join(ast_node): self.have_sep = True j = join(self, node[keys[0]]) tablename = f' {keys[0]} {j}' - if len(keys) > 1 and keys[1].lower() == 'on': - tablename += f' on {expr(self, node[keys[1]])}' + if len(keys) > 1 : + if keys[1].lower() == 'on': + tablename += f' ON {expr(self, node[keys[1]])}' + elif keys[1].lower() == 'using': + tablename += f' USING {expr(self, node[keys[1]])}' self.joins.append((tablename, self.have_sep)) self.tables += j.tables self.tables_dir = {**self.tables_dir, **j.tables_dir} @@ -722,13 +735,25 @@ class join(ast_node): class filter(ast_node): name = 'where' def produce(self, node): - self.add(expr(self, node).sql) - + filter_expr = expr(self, node) + self.add(filter_expr.sql) + self.datasource.join_conditions += filter_expr.join_conditions class create_table(ast_node): name = 'create_table' first_order = name def init(self, node): + node = node[self.name] + if 'query' in node: + if 'name' not in node: + raise ValueError("Table name not specified") + projection_node = node['query'] + projection_node['into'] = node['name'] + projection(None, projection_node, self.context) + self.produce = lambda *_: None + self.spawn = lambda *_: None + self.consume = lambda *_: None + return if self.parent is None: 
self.context.sql_begin() self.sql = 'CREATE TABLE ' @@ -745,16 +770,45 @@ class create_table(ast_node): if self.context.use_columnstore: self.sql += ' engine=ColumnStore' +class drop(ast_node): + name = 'drop' + first_order = name + def produce(self, node): + node = node['drop'] + tbl_name = node['table'] + if tbl_name in self.context.tables_byname: + tbl_obj = self.context.tables_byname[tbl_name] + # TODO: delete in postproc engine + self.context.tables_byname.pop(tbl_name) + self.context.tables.remove(tbl_obj) + self.sql += 'TABLE IF EXISTS ' + tbl_name + return + elif 'if_exists' not in node or not node['if_exists']: + print(f'Error: table {tbl_name} not found.') + self.sql = '' + class insert(ast_node): name = 'insert' first_order = name - + def init(self, node): + values = node['query'] + complex_query_kw = ['from', 'where', 'groupby', 'having', 'orderby', 'limit'] + if any([kw in values for kw in complex_query_kw]): + values['into'] = node['insert'] + projection(None, values, self.context) + self.produce = lambda*_:None + self.spawn = lambda*_:None + self.consume = lambda*_:None + else: + super().init(node) + def produce(self, node): values = node['query']['select'] tbl = node['insert'] self.sql = f'INSERT INTO {tbl} VALUES(' # if len(values) != table.n_cols: # raise ValueError("Column Mismatch") + list_values = [] for i, s in enumerate(values): if 'value' in s: @@ -851,7 +905,7 @@ class outfile(ast_node): def init(self, _): assert(isinstance(self.parent, projection)) - if not self.parent.use_postproc: + if not self.parent.has_postproc: if self.context.dialect == 'MonetDB': self.produce = self.produce_monetdb else: diff --git a/reconstruct/expr.py b/reconstruct/expr.py index 504ab8e..0faf9a5 100644 --- a/reconstruct/expr.py +++ b/reconstruct/expr.py @@ -12,6 +12,11 @@ from engine.types import * class expr(ast_node): name='expr' + valid_joincond = { + 0 : ('and', 'eq', 'not'), + 1 : ('or', 'neq', 'not'), + 2 : ('', '', '') + } @property def 
udf_decltypecall(self): return self._udf_decltypecall if self._udf_decltypecall else self.sql @@ -46,6 +51,7 @@ class expr(ast_node): self.node = node self.supress_undefined = supress_undefined if(type(parent) is expr): + self.next_valid = parent.next_valid self.inside_agg = parent.inside_agg self.is_udfexpr = parent.is_udfexpr self.is_agg_func = parent.is_agg_func @@ -53,6 +59,8 @@ class expr(ast_node): self.c_code = parent.c_code self.builtin_vars = parent.builtin_vars else: + self.join_conditions = [] + self.next_valid = 0 self.is_agg_func = False self.is_udfexpr = type(parent) is udf self.root : expr = self @@ -80,7 +88,7 @@ class expr(ast_node): self.udf_map = parent.context.udf_map self.func_maps = {**builtin_func, **self.udf_map, **user_module_func} self.operators = {**builtin_operators, **self.udf_map, **user_module_func} - self.ext_aggfuncs = ['sum', 'avg', 'count', 'min', 'max', 'last'] + self.ext_aggfuncs = ['sum', 'avg', 'count', 'min', 'max', 'last', 'first'] def produce(self, node): from engine.utils import enlist @@ -92,9 +100,18 @@ class expr(ast_node): else: if len(node) > 1: print(f'Parser Error: {node} has more than 1 dict entry.') - + + is_joincond = False for key, val in node.items(): key = key.lower() + if key not in self.valid_joincond[self.next_valid]: + self.next_valid = 2 + else: + if key == self.valid_joincond[self.next_valid][2]: + self.next_valid = not self.next_valid + elif key == self.valid_joincond[self.next_valid][1]: + self.next_valid = 2 + is_joincond = True if key in self.operators: if key in builtin_func: if self.is_agg_func: @@ -114,9 +131,9 @@ class expr(ast_node): str_vals = [e.sql for e in exp_vals] type_vals = [e.type for e in exp_vals] - is_compound = any([e.is_compound for e in exp_vals]) + is_compound = max([e.is_compound for e in exp_vals]) if key in self.ext_aggfuncs: - self.is_compound = False + self.is_compound = max(0, is_compound - 1) else: self.is_compound = is_compound try: @@ -134,7 +151,7 @@ class 
expr(ast_node): self.sql = op(self.c_code, *str_vals) special_func = [*self.context.udf_map.keys(), *self.context.module_map.keys(), - "maxs", "mins", "avgs", "sums", "deltas", "last"] + "maxs", "mins", "avgs", "sums", "deltas", "last", "first", "ratios"] if self.context.special_gb: special_func = [*special_func, *self.ext_aggfuncs] @@ -200,6 +217,9 @@ class expr(ast_node): else: print(f'Undefined expr: {key}{val}') + if (is_joincond and len(self.children) == 2 + and all([c.is_ColExpr for c in self.children])) : + self.root.join_conditions.append((c.raw_col for c in self.children)) if type(node) is str: if self.is_udfexpr: @@ -259,6 +279,7 @@ class expr(ast_node): self.sql = table_name + self.raw_col.name self.type = self.raw_col.type self.is_compound = True + self.is_compound += self.raw_col.compound self.opname = self.raw_col else: self.sql = '\'' + node + '\'' if node != '*' else '*' @@ -341,9 +362,11 @@ class expr(ast_node): exec(f'loc["{b}"] = lambda : "{b}"') x = self.c_code if c_code is None else c_code + from engine.utils import escape_qoutes if decltypestr: - return eval('f\'' + self.udf_decltypecall + '\'') - return eval('f\'' + self.sql + '\'') + return eval('f\'' + escape_qoutes(self.udf_decltypecall) + '\'') + self.sql.replace("'", "\\'") + return eval('f\'' + escape_qoutes(self.sql) + '\'') if self.is_recursive_call_inudf or (self.need_decltypestr and self.is_udfexpr) or gettype: return call else: diff --git a/reconstruct/new_expr.py b/reconstruct/new_expr.py index d12ef56..b6b02cf 100644 --- a/reconstruct/new_expr.py +++ b/reconstruct/new_expr.py @@ -16,7 +16,7 @@ class expr_base(ast_node, metaclass = abc.ABCMeta): self.udf_map = self.context.udf_map self.func_maps = {**builtin_func, **self.udf_map, **user_module_func} self.operators = {**builtin_operators, **self.udf_map, **user_module_func} - self.narrow_funcs = ['sum', 'avg', 'count', 'min', 'max', 'last'] + self.narrow_funcs = ['sum', 'avg', 'count', 'min', 'max', 'last', 'first'] def 
get_variable(self): pass @@ -56,7 +56,7 @@ class expr_base(ast_node, metaclass = abc.ABCMeta): raise ValueError(f'Parse Error: more than 1 entry in {node}.') key, val = next(iter(node.items())) if key in self.operators: - self.child_exprs = [__class__(self, v) for v in val] + self.child_exprs = [self.__class__(self, v) for v in val] self.process_child_nodes() else: self.process_non_operator(key, val) diff --git a/reconstruct/storage.py b/reconstruct/storage.py index c43131c..790f073 100644 --- a/reconstruct/storage.py +++ b/reconstruct/storage.py @@ -59,7 +59,7 @@ class TableInfo: cxt.tables_byname[self.table_name] = self # construct reverse map def add_cols(self, cols, new = True): - for c in cols: + for c in enlist(cols): self.add_col(c, new) def add_col(self, c, new = True): diff --git a/sdk/aquery.h b/sdk/aquery.h index fbe6517..4c9c779 100644 --- a/sdk/aquery.h +++ b/sdk/aquery.h @@ -76,12 +76,11 @@ __AQEXPORT__(void) init_session(Context* cxt); #define __AQ_NO_SESSION__ __AQEXPORT__(void) init_session(Context*) {} -#ifdef _MSC_VER -void* _cdecl memcpy(void*, void*, size_t); +#ifdef _WIN32 +#include #else void* memcpy(void*, const void*, unsigned long long); #endif - struct ColRef_storage { void* container; unsigned int capacity, size; diff --git a/server/aggregations.h b/server/aggregations.h index 583f246..77b5cf5 100644 --- a/server/aggregations.h +++ b/server/aggregations.h @@ -107,18 +107,26 @@ decayed_t maxw(uint32_t w, const VT& arr) { } template class VT> -decayed_t> ratios(const VT& arr) { - uint32_t len = arr.size - 1; - if (!arr.size) +decayed_t> ratiow(uint32_t w, const VT& arr) { + typedef std::decay_t> FPType; + uint32_t len = arr.size; + if (arr.size <= w) len = 1; - decayed_t> ret(len); + w = w > len ? 
len : w; + decayed_t ret(arr.size); ret[0] = 0; - - for (uint32_t i = 1; i < arr.size; ++i) - ret[i - 1] = arr[i] / arr[i - 1]; + for (uint32_t i = 0; i < w; ++i) + ret[i] = arr[i] / (FPType)arr[0]; + for (uint32_t i = w; i < arr.size; ++i) + ret[i] = arr[i] / (FPType) arr[i - w]; return ret; } +template class VT> +decayed_t> ratios(const VT& arr) { + return ratiow(1, arr); +} + template class VT> decayed_t> sums(const VT& arr) { const uint32_t& len = arr.size; @@ -129,6 +137,7 @@ decayed_t> sums(const VT& arr) { ret[i] = ret[i - 1] + arr[i]; return ret; } + template class VT> decayed_t>> avgs(const VT& arr) { const uint32_t& len = arr.size; @@ -141,6 +150,7 @@ decayed_t>> avgs(const VT& arr) { ret[i] = (s += arr[i]) / (FPType)(i + 1); return ret; } + template class VT> decayed_t> sumw(uint32_t w, const VT& arr) { const uint32_t& len = arr.size; @@ -154,6 +164,7 @@ decayed_t> sumw(uint32_t w, const VT& arr) { ret[i] = ret[i - 1] + arr[i] - arr[i - w]; return ret; } + template class VT> decayed_t>> avgw(uint32_t w, const VT& arr) { typedef types::GetFPType> FPType; @@ -209,6 +220,7 @@ template constexpr inline T maxw(uint32_t, const T& v) { return v; } template constexpr inline T minw(uint32_t, const T& v) { return v; } template constexpr inline T avgw(uint32_t, const T& v) { return v; } template constexpr inline T sumw(uint32_t, const T& v) { return v; } +template constexpr inline T ratiow(uint32_t, const T& v) { return 1; } template constexpr inline T maxs(const T& v) { return v; } template constexpr inline T mins(const T& v) { return v; } template constexpr inline T avgs(const T& v) { return v; } diff --git a/server/hasher.h b/server/hasher.h index 526c168..70a97e8 100644 --- a/server/hasher.h +++ b/server/hasher.h @@ -2,6 +2,7 @@ #include #include +#include #include "types.h" // only works for 64 bit systems constexpr size_t _FNV_offset_basis = 14695981039346656037ULL; @@ -21,7 +22,31 @@ inline size_t append_bytes(const astring_view& view) noexcept { return 
append_bytes(view.str); } - +#ifdef __SIZEOF_INT128__ +union int128_struct +{ + struct { + uint64_t low, high; + }__struct; + __int128_t value = 0; + __uint128_t uvalue; + constexpr int128_struct() : value(0) {} + constexpr int128_struct(const __int128_t &value) noexcept : value(value) {} + constexpr int128_struct(const __uint128_t &value) noexcept : uvalue(value) {} + operator __int128_t () const { + return value; + } + operator __uint128_t () const { + return uvalue; + } + operator __int128_t& () { + return value; + } + operator __uint128_t& () { + return uvalue; + } +}; +#endif template struct hasher { template typename std::enable_if< i == sizeof...(Types), @@ -32,8 +57,15 @@ struct hasher { template typename std::enable_if < i < sizeof ...(Types), size_t>::type hashi(const std::tuple& record) const { using current_type = typename std::decay>::type>::type; - - return std::hash()(std::get(record)) ^ hashi(record); +#ifdef __SIZEOF_INT128__ + using _current_type = typename std::conditional_t< + std::is_same_v || + std::is_same_v, + int128_struct, current_type>; +#else + #define _current_type current_type +#endif + return std::hash<_current_type>()(std::get(record)) ^ hashi(record); } size_t operator()(const std::tuple& record) const { return hashi(record); @@ -75,7 +107,15 @@ namespace std{ std::hash()(_Keyval.time); } }; +#ifdef __SIZEOF_INT128__ + template<> + struct hash{ + size_t operator() (const int128_struct& _Keyval) const noexcept { + return std::hash()(_Keyval.__struct.low) ^ std::hash()(_Keyval.__struct.high); + } + }; +#endif template struct hash> : public hasher{ }; diff --git a/server/io.cpp b/server/io.cpp index 107829b..a47b3b3 100644 --- a/server/io.cpp +++ b/server/io.cpp @@ -265,16 +265,16 @@ string base62uuid(int l) { } -template -inline void vector_type<_Ty>::out(uint32_t n, const char* sep) const -{ - n = n > size ? 
size : n; - std::cout << '('; - { - uint32_t i = 0; - for (; i < n - 1; ++i) - std::cout << this->operator[](i) << sep; - std::cout << this->operator[](i); - } - std::cout << ')'; -} +// template +// inline void vector_type<_Ty>::out(uint32_t n, const char* sep) const +// { +// n = n > size ? size : n; +// std::cout << '('; +// { +// uint32_t i = 0; +// for (; i < n - 1; ++i) +// std::cout << this->operator[](i) << sep; +// std::cout << this->operator[](i); +// } +// std::cout << ')'; +// } diff --git a/server/server.cpp b/server/server.cpp index b10ca80..80691ff 100644 --- a/server/server.cpp +++ b/server/server.cpp @@ -176,10 +176,10 @@ int dll_main(int argc, char** argv, Context* cxt){ //getlasterror if (!user_module_handle) -#ifndef _MSC_VER +#ifndef _WIN32 puts(dlerror()); #else - printf("Fatal Error: Module %s failed to load with error code %d.\n", mname, GetLastError()); + printf("Fatal Error: Module %s failed to load with error code %d.\n", mname, dlerror()); #endif user_module_map[mname] = user_module_handle; initialize_module(mname, user_module_handle, cxt); diff --git a/server/table.h b/server/table.h index 76f5fb0..58691df 100644 --- a/server/table.h +++ b/server/table.h @@ -129,8 +129,8 @@ public: } // defined in table_ext_monetdb.hpp - void* monetdb_get_col(); - + void* monetdb_get_col(void** gc_vecs, uint32_t& cnt); + }; template<> class ColRef : public ColRef {}; @@ -391,11 +391,11 @@ struct TableInfo { constexpr auto num_date = count_type((tuple_type*)(0)); constexpr auto num_time = count_type((tuple_type*)(0)); constexpr auto num_timestamp = count_type((tuple_type*)(0)); - char cbuf[num_hge * 41 - + num_time * types::time_t::string_length() - + num_date * types::date_t::string_length() - + num_timestamp * types::timestamp_t::string_length() - + 1 + char cbuf[ num_hge * 41 + + num_time * types::time_t::string_length() + + num_date * types::date_t::string_length() + + num_timestamp * types::timestamp_t::string_length() + + 1 // padding for msvc not 
allowing empty arrays ]; setgbuf(cbuf); if (view) @@ -625,106 +625,164 @@ inline void TableInfo::print(const char* __restrict sep, const char* _ std::cout << end; } } -template class VT, template class VT2> -decayed_t::type> operator -(const VT& lhs, const VT2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT, + class TRet> +using test_vt_support = typename std::enable_if_t, ColRef> || + std::is_same_v, ColView> || + std::is_same_v, vector_type>, TRet>; + +template class VT> +using get_autoext_type = test_vt_support::type>>; + +template class VT> +using get_long_type = test_vt_support::type>>>; + +template class VT> +using get_fp_type = test_vt_support::type>>>; + +template class VT, template class VT2, + class TRet> +using test_vt_support2 = typename std::enable_if_t<(std::is_same_v, ColRef> || + std::is_same_v, ColView> || + std::is_same_v, vector_type>) && + (std::is_same_v, ColRef> || + std::is_same_v, ColView> || + std::is_same_v, vector_type>), TRet >; +template class VT, template class VT2> +using get_autoext_type2 = test_vt_support2::type>>; + +template class VT, template class VT2> +using get_long_type2 = test_vt_support2::type>>>; + +template class VT, template class VT2> +using get_fp_type2 = test_vt_support2::type>>>; + +template class VT, template class VT2> +get_autoext_type2 +operator -(const VT& lhs, const VT2& rhs) { + auto ret = get_autoext_type2(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] - rhs[i]; return ret; } -template class VT> -decayed_t::type> operator -(const VT& lhs, const T2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT> +get_autoext_type +operator -(const VT& lhs, const T2& rhs) { + auto ret = get_autoext_type(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] - rhs; return ret; } -template class VT> -decayed_t::type> operator -(const T2& lhs, const VT& rhs) { - auto ret = decayed_t::type>(rhs.size); +template class VT> +get_autoext_type +operator -(const T2& 
lhs, const VT& rhs) { + auto ret = get_autoext_type(rhs.size); for (uint32_t i = 0; i < rhs.size; ++i) ret[i] = lhs - rhs[i]; return ret; } -template class VT, template class VT2> -decayed_t::type> operator +(const VT& lhs, const VT2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT, template class VT2> +get_autoext_type2 +operator +(const VT& lhs, const VT2& rhs) { + auto ret = get_autoext_type2(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] + rhs[i]; return ret; } -template class VT> -decayed_t::type> operator +(const VT& lhs, const T2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT> +get_autoext_type +operator +(const VT& lhs, const T2& rhs) { + auto ret = get_autoext_type(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] + rhs; return ret; } -template class VT> -decayed_t::type> operator +(const T2& lhs, const VT& rhs) { - auto ret = decayed_t::type>(rhs.size); +template class VT> +get_autoext_type +operator +(const T2& lhs, const VT& rhs) { + auto ret = get_autoext_type (rhs.size); for (uint32_t i = 0; i < rhs.size; ++i) ret[i] = lhs + rhs[i]; return ret; } -template class VT, template class VT2> -decayed_t::type> operator *(const VT& lhs, const VT2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT, template class VT2> +get_long_type2 +operator *(const VT& lhs, const VT2& rhs) { + auto ret = get_long_type2(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] * rhs[i]; return ret; } -template class VT> -decayed_t::type> operator *(const VT& lhs, const T2& rhs) { - auto ret = decayed_t::type>(lhs.size); +template class VT> +get_long_type +operator *(const VT& lhs, const T2& rhs) { + auto ret = get_long_type(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] * rhs; return ret; } -template class VT> -decayed_t::type> operator *(const T2& lhs, const VT& rhs) { - auto ret = decayed_t::type>(rhs.size); +template class VT> +get_long_type +operator 
*(const T2& lhs, const VT& rhs) { + auto ret = get_long_type(rhs.size); for (uint32_t i = 0; i < rhs.size; ++i) ret[i] = lhs * rhs[i]; return ret; } -template class VT, template class VT2> -decayed_t::type>> operator /(const VT& lhs, const VT2& rhs) { - auto ret = decayed_t::type>>(lhs.size); +template class VT, template class VT2> +get_fp_type2 +operator /(const VT& lhs, const VT2& rhs) { + auto ret = get_fp_type2(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] / rhs[i]; return ret; } -template class VT> -decayed_t::type>> operator /(const VT& lhs, const T2& rhs) { - auto ret = decayed_t::type>>(lhs.size); +template class VT> +get_fp_type +operator /(const VT& lhs, const T2& rhs) { + auto ret = get_fp_type(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] / rhs; return ret; } -template class VT> -decayed_t::type>> operator /(const T2& lhs, const VT& rhs) { - auto ret = decayed_t::type>>(rhs.size); +template class VT> +get_fp_type +operator /(const T2& lhs, const VT& rhs) { + auto ret = get_fp_type(rhs.size); for (uint32_t i = 0; i < rhs.size; ++i) ret[i] = lhs / rhs[i]; return ret; } -template class VT, template class VT2> +template class VT, template class VT2> VT operator >(const VT& lhs, const VT2& rhs) { auto ret = VT(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] > rhs[i]; return ret; } -template class VT> +template class VT> VT operator >(const VT& lhs, const T2& rhs) { auto ret = VT(lhs.size); for (uint32_t i = 0; i < lhs.size; ++i) ret[i] = lhs[i] > rhs; return ret; } -template class VT> +template class VT> VT operator >(const T2& lhs, const VT& rhs) { auto ret = VT(rhs.size); for (uint32_t i = 0; i < rhs.size; ++i) diff --git a/server/table_ext_monetdb.hpp b/server/table_ext_monetdb.hpp index e74cfef..c128559 100644 --- a/server/table_ext_monetdb.hpp +++ b/server/table_ext_monetdb.hpp @@ -22,7 +22,12 @@ inline constexpr monetdbe_types AQType_2_monetdbe[] = { #else monetdbe_int64_t, #endif - 
monetdbe_int16_t, monetdbe_int8_t, monetdbe_bool, monetdbe_int64_t, + monetdbe_int16_t, monetdbe_int8_t, monetdbe_bool, +#ifdef HAVE_HGE + monetdbe_int128_t, +#else + monetdbe_int64_t, +#endif monetdbe_timestamp, monetdbe_int64_t, monetdbe_int64_t }; @@ -35,19 +40,22 @@ void TableInfo::monetdb_append_table(void* srv, const char* alt_name) { monetdbe_column** monetdbe_cols = new monetdbe_column * [sizeof...(Ts)]; uint32_t i = 0; + constexpr auto n_vecs = count_vector_type((tuple_type*)(0)); + void* gc_vecs[1 + n_vecs]; puts("getcols..."); - const auto get_col = [&monetdbe_cols, &i, *this](auto v) { + uint32_t cnt = 0; + const auto get_col = [&monetdbe_cols, &i, *this, &gc_vecs, &cnt](auto v) { printf("%d %d\n", i, (ColRef*)v - colrefs); - monetdbe_cols[i++] = (monetdbe_column*)v->monetdb_get_col(); + monetdbe_cols[i++] = (monetdbe_column*)v->monetdb_get_col(gc_vecs, cnt); }; (get_col((ColRef*)(colrefs + i)), ...); puts("getcols done"); for(int i = 0; i < sizeof...(Ts); ++i) { - printf("no:%d name: %s count:%d data: %p \n", - i, monetdbe_cols[i]->name, monetdbe_cols[i]->count, monetdbe_cols[i]->data); + printf("no:%d name: %s count:%d data: %p type:%d \n", + i, monetdbe_cols[i]->name, monetdbe_cols[i]->count, monetdbe_cols[i]->data, monetdbe_cols[i]->type); } - std::string create_table_str = "CREATE TABLE "; + std::string create_table_str = "CREATE TABLE IF NOT EXISTS "; create_table_str += alt_name; create_table_str += " ("; i = 0; @@ -70,12 +78,14 @@ void TableInfo::monetdb_append_table(void* srv, const char* alt_name) { return; } } + // for(uint32_t i = 0; i < n_vecs; ++i) + // free(gc_vecs[i]); puts("Error! 
Empty table."); } template -void* ColRef::monetdb_get_col() { +void* ColRef::monetdb_get_col(void** gc_vecs, uint32_t& cnt) { auto aq_type = AQType_2_monetdbe[types::Types::getType()]; monetdbe_column* col = (monetdbe_column*)malloc(sizeof(monetdbe_column)); @@ -83,7 +93,13 @@ void* ColRef::monetdb_get_col() { col->count = this->size; col->data = this->container; col->name = const_cast(this->name); - + // auto arr = (types::timestamp_t*) malloc (sizeof(types::timestamp_t)* this->size); + // if constexpr (is_vector_type){ + // for(uint32_t i = 0; i < this->size; ++i){ + // memcpy(arr + i, this->container + i, sizeof(types::timestamp_t)); + // } + // gc_vecs[cnt++] = arr; + // } return col; } diff --git a/server/types.h b/server/types.h index 8aa13ca..f1b041d 100644 --- a/server/types.h +++ b/server/types.h @@ -3,6 +3,7 @@ #include #include #include +using std::size_t; #if defined(__SIZEOF_INT128__) and not defined(_WIN32) #define __AQ__HAS__INT128__ @@ -29,9 +30,9 @@ namespace types { static constexpr const char* printf_str[] = { "%d", "%f", "%s", "%lf", "%Lf", "%ld", "%d", "%hi", "%s", "%s", "%c", "%u", "%lu", "%s", "%hu", "%hhu", "%s", "%s", "Vector<%s>", "%s", "NULL", "ERROR" }; static constexpr const char* SQL_Type[] = { "INT", "REAL", "TEXT", "DOUBLE", "DOUBLE", "BIGINT", "HUGEINT", "SMALLINT", "DATE", "TIME", "TINYINT", - "INT", "BIGINT", "HUGEINT", "SMALLINT", "TINYINT", "BOOL", "BLOB", "TIMESTAMP", "NULL", "ERROR" }; - - + "INT", "BIGINT", "HUGEINT", "SMALLINT", "TINYINT", "BOOL", "HUGEINT", "TIMESTAMP", "NULL", "ERROR"}; + + // TODO: deal with data/time <=> str/uint conversion struct date_t { unsigned char day = 0; @@ -168,23 +169,22 @@ namespace types { template using GetLongType = typename GetLongTypeImpl::type>::type; - template struct GetLongerTypeImpl { using type = Cond( __U(T), Cond(__Eq(char), unsigned short, - Cond(__Eq(short), unsigned int, - Cond(__Eq(int), unsigned long long, + Cond(__Eq(short), unsigned int, + Cond(__Eq(int), unsigned long 
long, ULL_Type - ))), + ))), - Cond(Fp(T), double, + Cond(Fp(T), double, - Cond(__Eq(char), short, - Cond(__Eq(short), int, - Cond(__Eq(int), long, - LL_Type + Cond(__Eq(char), short, + Cond(__Eq(short), int, + Cond(__Eq(int), long, + LL_Type )))) ); @@ -194,22 +194,22 @@ namespace types { } -struct astring_view { +union astring_view { const unsigned char* str = 0; - -#if defined(__clang__) || !defined(__GNUC__) + const signed char* sstr; + const char* rstr; + size_t ptr; + + + constexpr + astring_view(const char* str) noexcept : + rstr(str) {} constexpr -#endif - astring_view(const char* str) noexcept : - str((const unsigned char*)(str)) {} -#if defined(__clang__) || !defined(__GNUC__) - constexpr -#endif - astring_view(const signed char* str) noexcept : - str((const unsigned char*)(str)) {} + astring_view(const signed char* str) noexcept : + sstr(str) {} constexpr - astring_view(const unsigned char* str) noexcept : + astring_view(const unsigned char* str) noexcept : str(str) {} constexpr astring_view() noexcept = default; @@ -224,17 +224,28 @@ struct astring_view { } return !(*this_str || *other_str); } - bool operator >(const astring_view& r) const { - + bool operator >(const astring_view&r) const{ + auto this_str = str; + auto other_str = r.str; + bool ret = true; + while (*this_str && *other_str) { + if (*this_str <= *other_str) + ret = false; + this_str++; + other_str++; + } + + return (*this_str && !*other_str) || + (ret && !*this_str && *other_str); } operator const char* () const { - return reinterpret_cast(str); + return rstr; } operator const unsigned char* () const { - return reinterpret_cast(str); + return str; } operator const signed char* () const { - return reinterpret_cast(str); + return sstr; } }; @@ -373,4 +384,10 @@ constexpr size_t count_type(std::tuple* ts) { size_t t[] = { sum_type() ... 
}; return sum_type(t, sizeof...(Types)); } +template +constexpr size_t count_vector_type(std::tuple* ts) { + size_t t[] = {is_vector_type ...}; + return sum_type(t, sizeof...(Types)); +} + #endif // !_TYPES_H diff --git a/server/vector_type.hpp b/server/vector_type.hpp index 720db75..a73570d 100644 --- a/server/vector_type.hpp +++ b/server/vector_type.hpp @@ -12,11 +12,16 @@ #include #include #include +#include #include "hasher.h" #include "types.h" #pragma pack(push, 1) template +class slim_vector { + +}; +template class vector_type { public: typedef vector_type<_Ty> Decayed_t; @@ -249,7 +254,25 @@ public: } size = this->size + dist; } - void out(uint32_t n = 4, const char* sep = " ") const; + inline void out(uint32_t n = 4, const char* sep = " ") const + { + const char* more = ""; + if (n < this->size) + more = " ... "; + else + n = this->size; + + std::cout << '('; + if (n > 0) + { + uint32_t i = 0; + for (; i < n - 1; ++i) + std::cout << this->operator[](i) << sep; + std::cout << this->operator[](i); + } + std::cout<< more; + std::cout << ')'; + } vector_type<_Ty> subvec_memcpy(uint32_t start, uint32_t end) const { vector_type<_Ty> subvec(end - start); memcpy(subvec.container, container + start, sizeof(_Ty) * (end - start)); diff --git a/server/winhelper.cpp b/server/winhelper.cpp index 08fd1a2..ed418af 100644 --- a/server/winhelper.cpp +++ b/server/winhelper.cpp @@ -20,6 +20,10 @@ int dlclose(void* handle) return FreeLibrary(static_cast(handle)); } +int dlerror() { + return GetLastError(); +} + SharedMemory::SharedMemory(const char* fname) { this->hFileMap = CreateFileMappingA(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, 2, fname); diff --git a/server/winhelper.h b/server/winhelper.h index 5993943..df9231e 100644 --- a/server/winhelper.h +++ b/server/winhelper.h @@ -5,6 +5,8 @@ static constexpr int RTLD_LAZY = 1; void* dlopen(const char*, int); void* dlsym(void*, const char*); int dlclose(void*); +int dlerror(); + struct SharedMemory { void* hFileMap; diff 
--git a/test.aquery b/test.aquery index f9cd33c..7756ff4 100644 Binary files a/test.aquery and b/test.aquery differ diff --git a/tests/best_profit.a b/tests/best_profit.a index f6f3bf5..4d242ec 100644 --- a/tests/best_profit.a +++ b/tests/best_profit.a @@ -43,5 +43,4 @@ SELECT ID, avgs(10, ClosePrice) FROM td NATURAL JOIN HistoricQuotes ASSUMING ASC TradeDate -GROUP BY ID -ORDER BY ID \ No newline at end of file +GROUP BY ID \ No newline at end of file diff --git a/tests/network.a b/tests/network.a index 922fec4..169a8b6 100644 --- a/tests/network.a +++ b/tests/network.a @@ -10,4 +10,3 @@ FROM network ASSUMING ASC src, ASC dst, ASC _time GROUP BY src, dst, sums (deltas(_time) > 120) -