diff --git a/.gitignore b/.gitignore index 2a72be1..0fccad3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,70 +1,72 @@ -test.lib -test.exp -*.pdb -libaquery.a -libaquery.lib -server.lib -server.exp -*.idb -aq -aq.exe -.cached -*.json -!sample_ast.json -*.o -*.pch -*.gch -a.out.* -*.log -*.pyc -*.tab -out -.idea -.svn -*.iml -/mo_sql_parsing.egg-info -/build -/dist -/mo-sql-parsing -vendor/ -._* -.DS_Store -.eggs -.vscode -out.k -k -*.so -*.pdf -**/*.cmake -**/Debug -**/Release -test*.c* -*.csv -!test.csv -!test2.csv -!moving_avg.csv -!nyctx100.csv -*.out -*.asm -!mmw.so -*.k -!header.k -!join.k -**/.vs -**/x64 -*.user -*.filters -*.tmp -*.bin -*.shm -server/packages/** -*.ipynb -*.cmake -*.stackdump -saves -*.exe -out*.cpp -udf*.hpp -*.ipynb - - +test.lib +test.exp +*.pdb +dll.lib +dll.exp +libaquery.a +libaquery.lib +server.lib +server.exp +*.idb +aq +aq.exe +.cached +*.json +!sample_ast.json +*.o +*.pch +*.gch +a.out.* +*.log +*.pyc +*.tab +out +.idea +.svn +*.iml +/mo_sql_parsing.egg-info +/build +/dist +/mo-sql-parsing +vendor/ +._* +.DS_Store +.eggs +.vscode +out.k +k +*.so +*.pdf +**/*.cmake +**/Debug +**/Release +test*.c* +*.csv +!test.csv +!test2.csv +!moving_avg.csv +!nyctx100.csv +*.out +*.asm +!mmw.so +*.k +!header.k +!join.k +**/.vs +**/x64 +*.user +*.filters +*.tmp +*.bin +*.shm +server/packages/** +*.ipynb +*.cmake +*.stackdump +saves +*.exe +out*.cpp +udf*.hpp +*.ipynb + + diff --git a/LICENSE b/LICENSE index df5936e..e87a115 100644 --- a/LICENSE +++ b/LICENSE @@ -1,363 +1,363 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/Makefile b/Makefile index 646b466..056d051 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,11 @@ MonetDB_LIB = MonetDB_INC = Threading = CXXFLAGS = --std=c++1z -OPTFLAGS = -g3 #-O3 -fno-semantic-interposition +OPTFLAGS = -O3 -fno-semantic-interposition LINKFLAGS = -flto SHAREDFLAGS = -shared FPIC = -fPIC -COMPILER = $(shell $(CXX) --version | grep -q 'clang' && echo "clang"|| echo "gcc") +COMPILER = $(shell $(CXX) --version | grep -q clang && echo clang|| echo gcc) LIBTOOL = USELIB_FLAG = -Wl,--whole-archive,libaquery.a -Wl,-no-whole-archive LIBAQ_SRC = server/server.cpp server/monetdb_conn.cpp server/io.cpp @@ -18,12 +18,16 @@ else PCHFLAGS = endif + ifeq ($(OS),Windows_NT) NULL_DEVICE = NUL OS_SUPPORT += server/winhelper.cpp MonetDB_LIB += msc-plugin/monetdbe.dll MonetDB_INC += -Imonetdb/msvc LIBTOOL = gcc-ar rcs + ifeq ($(COMPILER), clang ) + FPIC = + endif else UNAME_S = $(shell uname -s) UNAME_M = $(shell uname -m) @@ -34,7 +38,7 @@ else USELIB_FLAG = -Wl,-force_load MonetDB_LIB += -L$(shell brew --prefix monetdb)/lib MonetDB_INC += -I$(shell brew --prefix monetdb)/include/monetdb - ifeq ($(COMPILER), clang) + ifeq ($(COMPILER),clang ) LIBTOOL = libtool -static -o endif ifneq ($(UNAME_M),arm64) @@ -61,7 +65,9 @@ info: $(info "test") $(info $(LIBTOOL)) $(info $(MonetDB_INC)) + $(info $(COMPILER)) $(info $(CXX)) + $(info $(FPIC)) pch: $(CXX) -x c++-header server/pch.hpp $(FPIC) $(MonetDB_INC) $(OPTFLAGS) $(CXXFLAGS) $(Threading) libaquery.a: @@ -88,6 +94,6 @@ docker: docker build -t aquery . clean: - rm *.shm *.o dll.so server.so server.bin libaquery.a .cached -rf 2> $(NULL_DEVICE) || true + rm *.shm *.o dll.so server.so server.bin -rf 2> $(NULL_DEVICE) || true diff --git a/README.md b/README.md index f8f0967..0941c14 100644 --- a/README.md +++ b/README.md @@ -1,83 +1,83 @@ - -# AQuery++ Database -## Introduction - -AQuery++ Database is a cross-platform, In-Memory Column-Store Database that incorporates compiled query execution. - -## Architecture -![Architecture](./docs/arch-hybrid.svg) - -### AQuery Compiler -- The query is first processed by the AQuery Compiler which is composed of a frontend that parses the query into AST and a backend that generates target code that delivers the query. -- Front end of AQuery++ Compiler is built on top of [mo-sql-parsing](https://github.com/klahnakoski/mo-sql-parsing) with modifications to handle AQuery dialect and extension. -- Backend of AQuery++ Compiler generates target code dependent on the Execution Engine. It can either be the C++ code for AQuery Execution Engine or sql and C++ post-processor for Hybrid Engine or k9 for the k9 Engine. -### Execution Engines -- AQuery++ supports different execution engines thanks to the decoupled compiler structure. -- AQuery Execution Engine: executes query by compiling the query plan to C++ code. Doesn't support joins and udf functions. -- Hybrid Execution Engine: decouples the query into two parts. The sql-compliant part is executed by an Embedded version of Monetdb and everything else is executed by a post-process module which is generated by AQuery++ Compiler in C++ and then compiled and executed. -- K9 Execution Engine: (discontinued). 
- -## Roadmap -- [x] SQL Parser -> AQuery Parser (Front End) -- [x] AQuery-C++ Compiler (Back End) - - [x] Schema and Data Model - - [x] Data acquisition/output from/to csv file -- [ ] Execution Engine - - [x] Projections and single-group Aggregations - - [x] Group by Aggregations - - [x] Filters - - [x] Order by - - [x] Assumption - - [x] Flatten - - [x] UDFs (Hybrid Engine only) - - [x] User Module - - [ ] Triggers - - [x] Join (Hybrid Engine only) - - [ ] Subqueries -- [x] Query Optimization - - [x] Selection/Order by push-down - - [x] Join Optimization (Only in Hybrid Engine) - -## TODO: - -- [x] User Module load syntax parsing (fn definition/registration) -- [x] User Module initialize location - -> User Module test - -> Interval based triggers - -> Optimize Compilation Process, using static libraries, hot reloading server binary -- [x] Bug fixes: type deduction misaligned in Hybrid Engine - -> Investigation: Using postproc only for q1 in Hybrid Engine (make is_special always on) -- [x] Limitation: putting ColRefs back to monetdb. -- [ ] Limitation: String operations and Date/Time data type. -- [ ] C++ Meta-Programming: Eliminate template recursions as much as possible. -- [ ] Limitation: Date and Time, String operations, Funcs in groupby agg. - -# Installation -## Requirements -1. Recent version of Linux, Windows or MacOS, with recent C++ compiler that has C++17 (1z) support. (however c++20 is recommended if available for heterogeneous lookup on unordered containers) - - GCC: 9.0 or above (g++ 7.x, 8.x fail to handle fold-expressions due to a compiler bug) - - Clang: 5.0 or above (Recommended) - - MSVC: 2017 or later (2022 or above is recommended) - -2. Monetdb for Hybrid Engine - - On windows, the required libraries and headers are already included in the repo. - - On Linux, see [Monetdb Easy Setup](https://www.monetdb.org/easy-setup/) for instructions. - - On MacOS, Monetdb can be easily installed in homebrew `brew install monetdb`. - -3. Python 3.6 or above and install required packages in requirements.txt by `python3 -m pip install -r requirements.txt` -## Usage -`python3 prompt.py` will launch the interactive command prompt. The server binary will be autometically rebuilt and started. -#### Commands: -- ``: parse sql statement -- `f `: parse all sql statements in file -- `dbg` start debugging session -- `print`: printout parsed sql statements -- `exec`: execute last parsed statement(s) with AQuery Execution Engine. AQuery Execution Engine executes query by compiling it to C++ code and then executing it. - -- `xexec`: execute last parsed statement(s) with Hybrid Execution Engine. Hybrid Execution Engine decouples the query into two parts. The sql-compliant part is executed by an Embedded version of Monetdb and everything else is executed by a post-process module which is generated by AQuery++ Compiler in C++ and then compiled and executed. -- `r`: run the last generated code snippet -- `save `: save current code snippet. will use random filename if not specified. -- `exit`: quit the prompt -#### Example: - `f moving_avg.a`
- `xexec`
+
+# AQuery++ Database
+## Introduction
+
+AQuery++ Database is a cross-platform, In-Memory Column-Store Database that incorporates compiled query execution.
+
+## Architecture
+![Architecture](./docs/arch-hybrid.svg)
+
+### AQuery Compiler
+- The query is first processed by the AQuery Compiler, which is composed of a frontend that parses the query into an AST and a backend that generates the target code that delivers the query.
+- The frontend of the AQuery++ Compiler is built on top of [mo-sql-parsing](https://github.com/klahnakoski/mo-sql-parsing), with modifications to handle the AQuery dialect and extensions.
+- The backend of the AQuery++ Compiler generates target code for the chosen execution engine: C++ code for the AQuery Execution Engine, SQL plus a C++ post-processor for the Hybrid Engine, or k9 code for the K9 Engine.
+### Execution Engines
+- AQuery++ supports different execution engines thanks to the decoupled compiler structure.
+- AQuery Execution Engine: executes a query by compiling the query plan to C++ code. Does not support joins or UDFs.
+- Hybrid Execution Engine: decouples the query into two parts. The SQL-compliant part is executed by an embedded version of MonetDB; everything else is executed by a post-process module that the AQuery++ Compiler generates in C++, which is then compiled and executed.
+- K9 Execution Engine: (discontinued).
+
+## Roadmap
+- [x] SQL Parser -> AQuery Parser (Front End)
+- [x] AQuery-C++ Compiler (Back End)
+  - [x] Schema and Data Model
+  - [x] Data acquisition/output from/to csv file
+- [ ] Execution Engine
+  - [x] Projections and single-group Aggregations
+  - [x] Group by Aggregations
+  - [x] Filters
+  - [x] Order by
+  - [x] Assumption
+  - [x] Flatten
+  - [x] UDFs (Hybrid Engine only)
+  - [x] User Module
+  - [ ] Triggers
+  - [x] Join (Hybrid Engine only)
+  - [ ] Subqueries
+- [x] Query Optimization
+  - [x] Selection/Order by push-down
+  - [x] Join Optimization (Only in Hybrid Engine)
+
+## TODO:
+
+- [x] User Module load syntax parsing (fn definition/registration)
+- [x] User Module initialize location
+  -> User Module test
+  -> Interval-based triggers
+  -> Optimize the compilation process: use static libraries, hot-reload the server binary
+- [x] Bug fixes: type deduction misaligned in Hybrid Engine
+  -> Investigation: use postproc only for q1 in Hybrid Engine (make is_special always on)
+- [x] Limitation: putting ColRefs back to MonetDB.
+- [ ] Limitation: String operations and Date/Time data types.
+- [ ] C++ Meta-Programming: Eliminate template recursions as much as possible.
+- [ ] Limitation: Date and Time, String operations, Funcs in groupby agg.
+
+# Installation
+## Requirements
+1. A recent version of Linux, Windows, or macOS, with a recent C++ compiler that has C++17 (1z) support. (C++20 is recommended, if available, for heterogeneous lookup on unordered containers.)
+   - GCC: 9.0 or above (g++ 7.x and 8.x fail to handle fold expressions due to a compiler bug)
+   - Clang: 5.0 or above (recommended)
+   - MSVC: 2017 or later (2022 or above is recommended)
+
+2. MonetDB for the Hybrid Engine
+   - On Windows, the required libraries and headers are already included in the repo.
+   - On Linux, see [MonetDB Easy Setup](https://www.monetdb.org/easy-setup/) for instructions.
+   - On macOS, MonetDB can be installed with Homebrew: `brew install monetdb`.
+
+3. Python 3.6 or above, with the required packages installed via `python3 -m pip install -r requirements.txt`.
+## Usage
+`python3 prompt.py` launches the interactive command prompt. The server binary will be automatically rebuilt and started.
+#### Commands:
+- `<sql>`: parse an SQL statement
+- `f <file>`: parse all SQL statements in a file
+- `dbg`: start a debugging session
+- `print`: print the parsed SQL statements
+- `exec`: execute the last parsed statement(s) with the AQuery Execution Engine, which compiles the query to C++ code and then executes it.
+
+- `xexec`: execute the last parsed statement(s) with the Hybrid Execution Engine, which decouples the query into two parts: the SQL-compliant part is executed by an embedded version of MonetDB, and everything else is executed by a post-process module that the AQuery++ Compiler generates in C++ and then compiles and executes.
+- `r`: run the last generated code snippet
+- `save <file>`: save the current code snippet; a random filename is used if none is specified.
+- `exit`: quit the prompt
+#### Example:
+ `f moving_avg.a`
+ `xexec` diff --git a/aquery_config.py b/aquery_config.py index 0470d20..cb475a3 100644 --- a/aquery_config.py +++ b/aquery_config.py @@ -1,59 +1,59 @@ -# put environment specific configuration here - -## GLOBAL CONFIGURATION FLAGS - -version_string = '0.4.4a' -add_path_to_ldpath = True -rebuild_backend = False -run_backend = True -have_hge = False -cygroot = 'c:/msys64/usr/bin' -msbuildroot = '' -os_platform = 'unknown' -build_driver = 'Makefile' - -def init_config(): - global __config_initialized__, os_platform, msbuildroot -## SETUP ENVIRONMENT VARIABLES - # __config_initialized__ = False - #os_platform = 'unkown' - #msbuildroot = 'd:/gg/vs22/MSBuild/Current/Bin' - import os - from engine.utils import add_dll_dir - # os.environ['CXX'] = 'C:/Program Files/LLVM/bin/clang.exe' - os.environ['THREADING'] = '1' - - if ('__config_initialized__' not in globals() or - not __config_initialized__): - import sys - if os.name == 'nt': - if sys.platform == 'win32': - os_platform = 'win' - elif sys.platform == 'cygwin' or sys.platform == 'msys': - os_platform = 'cygwin' - elif os.name == 'posix': - if sys.platform == 'darwin': - os_platform = 'mac' - elif 'linux' in sys.platform: - os_platform = 'linux' - elif 'bsd' in sys.platform: - os_platform = 'bsd' - elif sys.platform == 'cygwin' or sys.platform == 'msys': - os_platform = 'cygwin' - # deal with msys dependencies: - if os_platform == 'win': - add_dll_dir(cygroot) - add_dll_dir(os.path.abspath('./msc-plugin')) - import vswhere - vsloc = vswhere.find(prerelease = True, latest = True, prop = 'installationPath') - if vsloc: - msbuildroot = vsloc[0] + '/MSBuild/Current/Bin/MSBuild.exe' - else: - print('Warning: No Visual Studio installation found.') - # print("adding path") - else: - import readline - if os_platform == 'cygwin': - add_dll_dir('./lib') - __config_initialized__ = True - +# put environment specific configuration here + +## GLOBAL CONFIGURATION FLAGS + +version_string = '0.4.4a' +add_path_to_ldpath = True +rebuild_backend = False +run_backend = True +have_hge = False +cygroot = 'c:/msys64/usr/bin' +msbuildroot = '' +os_platform = 'unknown' +build_driver = 'Makefile' + +def init_config(): + global __config_initialized__, os_platform, msbuildroot +## SETUP ENVIRONMENT VARIABLES + # __config_initialized__ = False + #os_platform = 'unkown' + #msbuildroot = 'd:/gg/vs22/MSBuild/Current/Bin' + import os + from engine.utils import add_dll_dir + # os.environ['CXX'] = 'C:/Program Files/LLVM/bin/clang.exe' + os.environ['THREADING'] = '1' + + if ('__config_initialized__' not in globals() or + not __config_initialized__): + import sys + if os.name == 'nt': + if sys.platform == 'win32': + os_platform = 'win' + elif sys.platform == 'cygwin' or sys.platform == 'msys': + os_platform = 'cygwin' + elif os.name == 'posix': + if sys.platform == 'darwin': + os_platform = 'mac' + elif 'linux' in sys.platform: + os_platform = 'linux' + elif 'bsd' in sys.platform: + os_platform = 'bsd' + elif sys.platform == 'cygwin' or sys.platform == 'msys': + os_platform = 'cygwin' + # deal with msys dependencies: + if os_platform == 'win': + add_dll_dir(cygroot) + add_dll_dir(os.path.abspath('./msc-plugin')) + import vswhere + vsloc = vswhere.find(prerelease = True, latest = True, prop = 'installationPath') + if vsloc: + msbuildroot = vsloc[0] + '/MSBuild/Current/Bin/MSBuild.exe' + else: + print('Warning: No Visual Studio installation found.') + # print("adding path") + else: + import readline + if os_platform == 'cygwin': + add_dll_dir('./lib') + __config_initialized__ = 
True + diff --git a/aquery_parser/__init__.py b/aquery_parser/__init__.py index ea4401c..98c23e8 100644 --- a/aquery_parser/__init__.py +++ b/aquery_parser/__init__.py @@ -1,88 +1,88 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - -from __future__ import absolute_import, division, unicode_literals - -import json -from threading import Lock - -from aquery_parser.sql_parser import scrub -from aquery_parser.utils import ansi_string, simple_op, normal_op - -parse_locker = Lock() # ENSURE ONLY ONE PARSING AT A TIME -common_parser = None -mysql_parser = None -sqlserver_parser = None - -SQL_NULL = {"null": {}} - - -def parse(sql, null=SQL_NULL, calls=simple_op): - """ - :param sql: String of SQL - :param null: What value to use as NULL (default is the null function `{"null":{}}`) - :return: parse tree - """ - global common_parser - - with parse_locker: - if not common_parser: - common_parser = sql_parser.common_parser() - result = _parse(common_parser, sql, null, calls) - return result - - -def parse_mysql(sql, null=SQL_NULL, calls=simple_op): - """ - PARSE MySQL ASSUME DOUBLE QUOTED STRINGS ARE LITERALS - :param sql: String of SQL - :param null: What value to use as NULL (default is the null function `{"null":{}}`) - :return: parse tree - """ - global mysql_parser - - with parse_locker: - if not mysql_parser: - mysql_parser = sql_parser.mysql_parser() - return _parse(mysql_parser, sql, null, calls) - - -def parse_sqlserver(sql, null=SQL_NULL, calls=simple_op): - """ - PARSE MySQL ASSUME DOUBLE QUOTED STRINGS ARE LITERALS - :param sql: String of SQL - :param null: What value to use as NULL (default is the null function `{"null":{}}`) - :return: parse tree - """ - global sqlserver_parser - - with parse_locker: - if not sqlserver_parser: - sqlserver_parser = sql_parser.sqlserver_parser() - return _parse(sqlserver_parser, sql, null, calls) - - -parse_bigquery = parse_mysql - - -def _parse(parser, sql, null, calls): - utils.null_locations = [] - utils.scrub_op = calls - sql = sql.rstrip().rstrip(";") - parse_result = parser.parse_string(sql, parse_all=True) - output = scrub(parse_result) - for o, n in utils.null_locations: - o[n] = null - return output - - - -_ = json.dumps - -__all__ = ["parse", "format", "parse_mysql", "parse_bigquery", "normal_op", "simple_op"] +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) +# + +from __future__ import absolute_import, division, unicode_literals + +import json +from threading import Lock + +from aquery_parser.sql_parser import scrub +from aquery_parser.utils import ansi_string, simple_op, normal_op + +parse_locker = Lock() # ENSURE ONLY ONE PARSING AT A TIME +common_parser = None +mysql_parser = None +sqlserver_parser = None + +SQL_NULL = {"null": {}} + + +def parse(sql, null=SQL_NULL, calls=simple_op): + """ + :param sql: String of SQL + :param null: What value to use as NULL (default is the null function `{"null":{}}`) + :return: parse tree + """ + global common_parser + + with parse_locker: + if not common_parser: + common_parser = sql_parser.common_parser() + result = _parse(common_parser, sql, null, calls) + return result + + +def parse_mysql(sql, null=SQL_NULL, calls=simple_op): + """ + PARSE MySQL ASSUME DOUBLE QUOTED STRINGS ARE LITERALS + :param sql: String of SQL + :param null: What value to use as NULL (default is the null function `{"null":{}}`) + :return: parse tree + """ + global mysql_parser + + with parse_locker: + if not mysql_parser: + mysql_parser = sql_parser.mysql_parser() + return _parse(mysql_parser, sql, null, calls) + + +def parse_sqlserver(sql, null=SQL_NULL, calls=simple_op): + """ + PARSE MySQL ASSUME DOUBLE QUOTED STRINGS ARE LITERALS + :param sql: String of SQL + :param null: What value to use as NULL (default is the null function `{"null":{}}`) + :return: parse tree + """ + global sqlserver_parser + + with parse_locker: + if not sqlserver_parser: + sqlserver_parser = sql_parser.sqlserver_parser() + return _parse(sqlserver_parser, sql, null, calls) + + +parse_bigquery = parse_mysql + + +def _parse(parser, sql, null, calls): + utils.null_locations = [] + utils.scrub_op = calls + sql = sql.rstrip().rstrip(";") + parse_result = parser.parse_string(sql, parse_all=True) + output = scrub(parse_result) + for o, n in utils.null_locations: + o[n] = null + return output + + + +_ = json.dumps + +__all__ = ["parse", "format", "parse_mysql", "parse_bigquery", "normal_op", "simple_op"] diff --git a/aquery_parser/keywords.py b/aquery_parser/keywords.py index 479081b..807642b 100644 --- a/aquery_parser/keywords.py +++ b/aquery_parser/keywords.py @@ -1,417 +1,417 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - -# SQL CONSTANTS -from mo_parsing import * - -from aquery_parser.utils import SQL_NULL, keyword - -NULL = keyword("null") / (lambda: SQL_NULL) -TRUE = keyword("true") / (lambda: True) -FALSE = keyword("false") / (lambda: False) -NOCASE = keyword("nocase") -ASC = keyword("asc") -DESC = keyword("desc") - -# SIMPLE KEYWORDS -AS = keyword("as").suppress() -ASSUMING = keyword("assuming") -ALL = keyword("all") -BY = keyword("by").suppress() -CAST = keyword("cast") -CONSTRAINT = keyword("constraint").suppress() -CREATE = keyword("create").suppress() -CROSS = keyword("cross") -DISTINCT = keyword("distinct") -EXCEPT = keyword("except") -FETCH = keyword("fetch").suppress() -FROM = keyword("from").suppress() -FULL = keyword("full") -FUNCTION = keyword("function").suppress() -AGGREGATION = keyword("aggregation").suppress() -GROUP = keyword("group").suppress() -HAVING = keyword("having").suppress() -INNER = keyword("inner") -INTERVAL = keyword("interval") -JOIN = keyword("join") -LEFT = keyword("left") -LIKE = keyword("like") -LIMIT = keyword("limit").suppress() -MINUS = keyword("minus") -OFFSET = keyword("offset").suppress() -ON = keyword("on").suppress() -ORDER = keyword("order").suppress() -OUTER = keyword("outer") -OVER = keyword("over").suppress() -PARTITION = keyword("partition").suppress() -# PERCENT = keyword("percent").suppress() -RIGHT = keyword("right") -RLIKE = keyword("rlike") -SELECT = keyword("select").suppress() -TABLE = keyword("table").suppress() -THEN = keyword("then").suppress() -TOP = keyword("top").suppress() -UNION = keyword("union") -INTERSECT = keyword("intersect") -USING = keyword("using").suppress() -WHEN = keyword("when").suppress() -WHERE = keyword("where").suppress() -WITH = keyword("with").suppress() -WITHIN = keyword("within").suppress() -PRIMARY = keyword("primary").suppress() -FOREIGN = keyword("foreign").suppress() -KEY = keyword("key").suppress() -UNIQUE = keyword("unique").suppress() -INDEX = keyword("index").suppress() -REFERENCES = keyword("references").suppress() -RECURSIVE = keyword("recursive").suppress() -VALUES = keyword("values").suppress() -WINDOW = keyword("window") -INTO = keyword("into").suppress() -IF = keyword("if").suppress() -STATIC = keyword("static").suppress() -ELIF = keyword("elif").suppress() -ELSE = keyword("else").suppress() -FOR = keyword("for").suppress() - -PRIMARY_KEY = Group(PRIMARY + KEY).set_parser_name("primary_key") -FOREIGN_KEY = Group(FOREIGN + KEY).set_parser_name("foreign_key") - -# SIMPLE OPERATORS -CONCAT = Literal("||").set_parser_name("concat") -MUL = Literal("*").set_parser_name("mul") -DIV = Literal("/").set_parser_name("div") -MOD = Literal("%").set_parser_name("mod") -NEG = Literal("-").set_parser_name("neg") -ADD = Literal("+").set_parser_name("add") -SUB = Literal("-").set_parser_name("sub") -BINARY_NOT = Literal("~").set_parser_name("binary_not") -BINARY_AND = Literal("&").set_parser_name("binary_and") -BINARY_OR = Literal("|").set_parser_name("binary_or") -GTE = Literal(">=").set_parser_name("gte") -LTE = Literal("<=").set_parser_name("lte") -LT = Literal("<").set_parser_name("lt") -GT = Literal(">").set_parser_name("gt") -EEQ = ( - # conservative equality https://github.com/klahnakoski/jx-sqlite/blob/dev/docs/Logical%20Equality.md#definitions - Literal("==") | Literal("=") -).set_parser_name("eq") -DEQ = ( - # decisive equality - # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ - Literal("<=>").set_parser_name("eq!") -) 
-IDF = ( - # decisive equality - # https://prestodb.io/docs/current/functions/comparison.html#is-distinct-from-and-is-not-distinct-from - keyword("is distinct from").set_parser_name("eq!") -) -INDF = ( - # decisive equality - # https://prestodb.io/docs/current/functions/comparison.html#is-distinct-from-and-is-not-distinct-from - keyword("is not distinct from").set_parser_name("ne!") -) -FASSIGN = Literal(":=").set_parser_name("fassign") # Assignment in UDFs -PASSIGN = Literal("+=").set_parser_name("passign") -MASSIGN = Literal("-=").set_parser_name("massign") -MULASSIGN = Literal("*=").set_parser_name("mulassign") -DASSIGN = Literal("/=").set_parser_name("dassign") -COLON = Literal(":").set_parser_name("colon") -NEQ = (Literal("!=") | Literal("<>")).set_parser_name("neq") -LAMBDA = Literal("->").set_parser_name("lambda") -DOT = Literal(".").set_parser_name("dot") - -AND = keyword("and") -BETWEEN = keyword("between") -CASE = keyword("case").suppress() -COLLATE = keyword("collate") -END = keyword("end") -ELSE = keyword("else").suppress() -IN = keyword("in") -IS = keyword("is") -NOT = keyword("not") -OR = keyword("or") -LATERAL = keyword("lateral") -VIEW = keyword("view") - -# COMPOUND KEYWORDS - - -joins = ( - ( - Optional(CROSS | OUTER | INNER | ((FULL | LEFT | RIGHT) + Optional(INNER | OUTER))) - + JOIN - + Optional(LATERAL) - ) - | LATERAL + VIEW + Optional(OUTER) -) / (lambda tokens: " ".join(tokens).lower()) - -UNION_ALL = (UNION + ALL).set_parser_name("union_all") -WITHIN_GROUP = Group(WITHIN + GROUP).set_parser_name("within_group") -SELECT_DISTINCT = Group(SELECT + DISTINCT).set_parser_name("select distinct") -PARTITION_BY = Group(PARTITION + BY).set_parser_name("partition by") -GROUP_BY = Group(GROUP + BY).set_parser_name("group by") -ORDER_BY = Group(ORDER + BY).set_parser_name("order by") - -# COMPOUND OPERATORS -AT_TIME_ZONE = Group(keyword("at") + keyword("time") + keyword("zone")) -NOT_BETWEEN = Group(NOT + BETWEEN).set_parser_name("not_between") -NOT_LIKE = Group(NOT + LIKE).set_parser_name("not_like") -NOT_RLIKE = Group(NOT + RLIKE).set_parser_name("not_rlike") -NOT_IN = Group(NOT + IN).set_parser_name("nin") -IS_NOT = Group(IS + NOT).set_parser_name("is_not") - -_SIMILAR = keyword("similar") -_TO = keyword("to") -SIMILAR_TO = Group(_SIMILAR + _TO).set_parser_name("similar_to") -NOT_SIMILAR_TO = Group(NOT + _SIMILAR + _TO).set_parser_name("not_similar_to") - -RESERVED = MatchFirst([ - # ONY INCLUDE SINGLE WORDS - ALL, - AND, - AS, - ASC, - ASSUMING, - BETWEEN, - BY, - CASE, - COLLATE, - CONSTRAINT, - CREATE, - CROSS, - DESC, - DISTINCT, - EXCEPT, - ELSE, - END, - FALSE, - FETCH, - FOREIGN, - FROM, - FULL, - FUNCTION, - GROUP_BY, - GROUP, - HAVING, - IN, - INDEX, - INNER, - INTERSECT, - INTERVAL, - IS_NOT, - IS, - JOIN, - KEY, - LATERAL, - LEFT, - LIKE, - LIMIT, - MINUS, - NOCASE, - NOT, - NULL, - OFFSET, - ON, - OR, - ORDER, - OUTER, - OVER, - PARTITION, - PRIMARY, - REFERENCES, - RIGHT, - RLIKE, - SELECT, - THEN, - TRUE, - UNION, - UNIQUE, - USING, - WHEN, - WHERE, - WINDOW, - WITH, - WITHIN, - INTO, -]) -L_INLINE = Literal("").suppress() -R_INLINE = Literal("").suppress() -LBRACE = Literal("{").suppress() -RBRACE = Literal("}").suppress() -LSB = Literal("[").suppress() -RSB = Literal("]").suppress() -LB = Literal("(").suppress() -RB = Literal(")").suppress() -EQ = Char("=").suppress() - -join_keywords = { - "join", - "full join", - "cross join", - "inner join", - "left join", - "right join", - "full outer join", - "right outer join", - "left outer join", -} - -precedence = { 
- # https://www.sqlite.org/lang_expr.html - "literal": -1, - "interval": 0, - "cast": 0, - "collate": 0, - "concat": 1, - "mul": 2, - "div": 1.5, - "mod": 2, - "neg": 3, - "add": 3, - "sub": 2.5, - "binary_not": 4, - "binary_and": 4, - "binary_or": 4, - "gte": 5, - "lte": 5, - "lt": 5, - "gt": 6, - "eq": 7, - "neq": 7, - "missing": 7, - "exists": 7, - "at_time_zone": 8, - "between": 8, - "not_between": 8, - "in": 8, - "nin": 8, - "is": 8, - "like": 8, - "not_like": 8, - "rlike": 8, - "not_rlike": 8, - "similar_to": 8, - "not_similar_to": 8, - "and": 10, - "or": 11, - "lambda": 12, - "join": 18, - "list": 18, - "function": 30, - "select": 30, - "from": 30, - "window": 35, - "union": 40, - "union_all": 40, - "except": 40, - "minus": 40, - "intersect": 40, - "order": 50, -} - -KNOWN_OPS = [ - COLLATE, - CONCAT, - MUL | DIV | MOD, - NEG, - ADD | SUB, - BINARY_NOT, - BINARY_AND, - BINARY_OR, - GTE | LTE | LT | GT, - EEQ | NEQ | DEQ | IDF | INDF, - AT_TIME_ZONE, - (BETWEEN, AND), - (NOT_BETWEEN, AND), - IN, - NOT_IN, - IS_NOT, - IS, - LIKE, - NOT_LIKE, - RLIKE, - NOT_RLIKE, - SIMILAR_TO, - NOT_SIMILAR_TO, - NOT, - AND, - OR, - LAMBDA, -] - -times = ["now", "today", "tomorrow", "eod"] - -durations = { - "microseconds": "microsecond", - "microsecond": "microsecond", - "microsecs": "microsecond", - "microsec": "microsecond", - "useconds": "microsecond", - "usecond": "microsecond", - "usecs": "microsecond", - "usec": "microsecond", - "us": "microsecond", - "milliseconds": "millisecond", - "millisecond": "millisecond", - "millisecon": "millisecond", - "mseconds": "millisecond", - "msecond": "millisecond", - "millisecs": "millisecond", - "millisec": "millisecond", - "msecs": "millisecond", - "msec": "millisecond", - "ms": "millisecond", - "seconds": "second", - "second": "second", - "secs": "second", - "sec": "second", - "s": "second", - "minutes": "minute", - "minute": "minute", - "mins": "minute", - "min": "minute", - "m": "minute", - "hours": "hour", - "hour": "hour", - "hrs": "hour", - "hr": "hour", - "h": "hour", - "days": "day", - "day": "day", - "d": "day", - "dayofweek": "dow", - "dow": "dow", - "weekday": "dow", - "weeks": "week", - "week": "week", - "w": "week", - "months": "month", - "month": "month", - "mons": "month", - "mon": "month", - "quarters": "quarter", - "quarter": "quarter", - "years": "year", - "year": "year", - "decades": "decade", - "decade": "decade", - "decs": "decade", - "dec": "decade", - "centuries": "century", - "century": "century", - "cents": "century", - "cent": "century", - "c": "century", - "millennia": "millennium", - "millennium": "millennium", - "mils": "millennium", - "mil": "millennium", - "epoch": "epoch", -} +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) +# + +# SQL CONSTANTS +from mo_parsing import * + +from aquery_parser.utils import SQL_NULL, keyword + +NULL = keyword("null") / (lambda: SQL_NULL) +TRUE = keyword("true") / (lambda: True) +FALSE = keyword("false") / (lambda: False) +NOCASE = keyword("nocase") +ASC = keyword("asc") +DESC = keyword("desc") + +# SIMPLE KEYWORDS +AS = keyword("as").suppress() +ASSUMING = keyword("assuming") +ALL = keyword("all") +BY = keyword("by").suppress() +CAST = keyword("cast") +CONSTRAINT = keyword("constraint").suppress() +CREATE = keyword("create").suppress() +CROSS = keyword("cross") +DISTINCT = keyword("distinct") +EXCEPT = keyword("except") +FETCH = keyword("fetch").suppress() +FROM = keyword("from").suppress() +FULL = keyword("full") +FUNCTION = keyword("function").suppress() +AGGREGATION = keyword("aggregation").suppress() +GROUP = keyword("group").suppress() +HAVING = keyword("having").suppress() +INNER = keyword("inner") +INTERVAL = keyword("interval") +JOIN = keyword("join") +LEFT = keyword("left") +LIKE = keyword("like") +LIMIT = keyword("limit").suppress() +MINUS = keyword("minus") +OFFSET = keyword("offset").suppress() +ON = keyword("on").suppress() +ORDER = keyword("order").suppress() +OUTER = keyword("outer") +OVER = keyword("over").suppress() +PARTITION = keyword("partition").suppress() +# PERCENT = keyword("percent").suppress() +RIGHT = keyword("right") +RLIKE = keyword("rlike") +SELECT = keyword("select").suppress() +TABLE = keyword("table").suppress() +THEN = keyword("then").suppress() +TOP = keyword("top").suppress() +UNION = keyword("union") +INTERSECT = keyword("intersect") +USING = keyword("using").suppress() +WHEN = keyword("when").suppress() +WHERE = keyword("where").suppress() +WITH = keyword("with").suppress() +WITHIN = keyword("within").suppress() +PRIMARY = keyword("primary").suppress() +FOREIGN = keyword("foreign").suppress() +KEY = keyword("key").suppress() +UNIQUE = keyword("unique").suppress() +INDEX = keyword("index").suppress() +REFERENCES = keyword("references").suppress() +RECURSIVE = keyword("recursive").suppress() +VALUES = keyword("values").suppress() +WINDOW = keyword("window") +INTO = keyword("into").suppress() +IF = keyword("if").suppress() +STATIC = keyword("static").suppress() +ELIF = keyword("elif").suppress() +ELSE = keyword("else").suppress() +FOR = keyword("for").suppress() + +PRIMARY_KEY = Group(PRIMARY + KEY).set_parser_name("primary_key") +FOREIGN_KEY = Group(FOREIGN + KEY).set_parser_name("foreign_key") + +# SIMPLE OPERATORS +CONCAT = Literal("||").set_parser_name("concat") +MUL = Literal("*").set_parser_name("mul") +DIV = Literal("/").set_parser_name("div") +MOD = Literal("%").set_parser_name("mod") +NEG = Literal("-").set_parser_name("neg") +ADD = Literal("+").set_parser_name("add") +SUB = Literal("-").set_parser_name("sub") +BINARY_NOT = Literal("~").set_parser_name("binary_not") +BINARY_AND = Literal("&").set_parser_name("binary_and") +BINARY_OR = Literal("|").set_parser_name("binary_or") +GTE = Literal(">=").set_parser_name("gte") +LTE = Literal("<=").set_parser_name("lte") +LT = Literal("<").set_parser_name("lt") +GT = Literal(">").set_parser_name("gt") +EEQ = ( + # conservative equality https://github.com/klahnakoski/jx-sqlite/blob/dev/docs/Logical%20Equality.md#definitions + Literal("==") | Literal("=") +).set_parser_name("eq") +DEQ = ( + # decisive equality + # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ + Literal("<=>").set_parser_name("eq!") +) 
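+# Editor's note (illustrative addition, not part of the upstream change): the
+# "conservative" and "decisive" equality parsers above and below differ only
+# in how they treat NULL. Assuming standard three-valued SQL semantics:
+#
+#     1 = NULL                        -- NULL  (conservative "eq")
+#     1 <=> NULL                      -- false (decisive "eq!")
+#     NULL IS NOT DISTINCT FROM NULL  -- true  (decisive "eq!", ANSI spelling)
+#
+# so the decisive forms always yield a boolean, never NULL.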
+IDF = (
+    # decisive equality
+    # https://prestodb.io/docs/current/functions/comparison.html#is-distinct-from-and-is-not-distinct-from
+    keyword("is distinct from").set_parser_name("eq!")
+)
+INDF = (
+    # decisive equality
+    # https://prestodb.io/docs/current/functions/comparison.html#is-distinct-from-and-is-not-distinct-from
+    keyword("is not distinct from").set_parser_name("ne!")
+)
+FASSIGN = Literal(":=").set_parser_name("fassign") # Assignment in UDFs
+PASSIGN = Literal("+=").set_parser_name("passign")
+MASSIGN = Literal("-=").set_parser_name("massign")
+MULASSIGN = Literal("*=").set_parser_name("mulassign")
+DASSIGN = Literal("/=").set_parser_name("dassign")
+COLON = Literal(":").set_parser_name("colon")
+NEQ = (Literal("!=") | Literal("<>")).set_parser_name("neq")
+LAMBDA = Literal("->").set_parser_name("lambda")
+DOT = Literal(".").set_parser_name("dot")
+
+AND = keyword("and")
+BETWEEN = keyword("between")
+CASE = keyword("case").suppress()
+COLLATE = keyword("collate")
+END = keyword("end")
+ELSE = keyword("else").suppress()
+IN = keyword("in")
+IS = keyword("is")
+NOT = keyword("not")
+OR = keyword("or")
+LATERAL = keyword("lateral")
+VIEW = keyword("view")
+
+# COMPOUND KEYWORDS
+
+
+joins = (
+    (
+        Optional(CROSS | OUTER | INNER | ((FULL | LEFT | RIGHT) + Optional(INNER | OUTER)))
+        + JOIN
+        + Optional(LATERAL)
+    )
+    | LATERAL + VIEW + Optional(OUTER)
+) / (lambda tokens: " ".join(tokens).lower())
+
+UNION_ALL = (UNION + ALL).set_parser_name("union_all")
+WITHIN_GROUP = Group(WITHIN + GROUP).set_parser_name("within_group")
+SELECT_DISTINCT = Group(SELECT + DISTINCT).set_parser_name("select distinct")
+PARTITION_BY = Group(PARTITION + BY).set_parser_name("partition by")
+GROUP_BY = Group(GROUP + BY).set_parser_name("group by")
+ORDER_BY = Group(ORDER + BY).set_parser_name("order by")
+
+# COMPOUND OPERATORS
+AT_TIME_ZONE = Group(keyword("at") + keyword("time") + keyword("zone"))
+NOT_BETWEEN = Group(NOT + BETWEEN).set_parser_name("not_between")
+NOT_LIKE = Group(NOT + LIKE).set_parser_name("not_like")
+NOT_RLIKE = Group(NOT + RLIKE).set_parser_name("not_rlike")
+NOT_IN = Group(NOT + IN).set_parser_name("nin")
+IS_NOT = Group(IS + NOT).set_parser_name("is_not")
+
+_SIMILAR = keyword("similar")
+_TO = keyword("to")
+SIMILAR_TO = Group(_SIMILAR + _TO).set_parser_name("similar_to")
+NOT_SIMILAR_TO = Group(NOT + _SIMILAR + _TO).set_parser_name("not_similar_to")
+
+RESERVED = MatchFirst([
+    # ONLY INCLUDE SINGLE WORDS
+    ALL,
+    AND,
+    AS,
+    ASC,
+    ASSUMING,
+    BETWEEN,
+    BY,
+    CASE,
+    COLLATE,
+    CONSTRAINT,
+    CREATE,
+    CROSS,
+    DESC,
+    DISTINCT,
+    EXCEPT,
+    ELSE,
+    END,
+    FALSE,
+    FETCH,
+    FOREIGN,
+    FROM,
+    FULL,
+    FUNCTION,
+    GROUP_BY,
+    GROUP,
+    HAVING,
+    IN,
+    INDEX,
+    INNER,
+    INTERSECT,
+    INTERVAL,
+    IS_NOT,
+    IS,
+    JOIN,
+    KEY,
+    LATERAL,
+    LEFT,
+    LIKE,
+    LIMIT,
+    MINUS,
+    NOCASE,
+    NOT,
+    NULL,
+    OFFSET,
+    ON,
+    OR,
+    ORDER,
+    OUTER,
+    OVER,
+    PARTITION,
+    PRIMARY,
+    REFERENCES,
+    RIGHT,
+    RLIKE,
+    SELECT,
+    THEN,
+    TRUE,
+    UNION,
+    UNIQUE,
+    USING,
+    WHEN,
+    WHERE,
+    WINDOW,
+    WITH,
+    WITHIN,
+    INTO,
+])
+L_INLINE = Literal("<k>").suppress()
+R_INLINE = Literal("</k>").suppress()
+LBRACE = Literal("{").suppress()
+RBRACE = Literal("}").suppress()
+LSB = Literal("[").suppress()
+RSB = Literal("]").suppress()
+LB = Literal("(").suppress()
+RB = Literal(")").suppress()
+EQ = Char("=").suppress()
+
+join_keywords = {
+    "join",
+    "full join",
+    "cross join",
+    "inner join",
+    "left join",
+    "right join",
+    "full outer join",
+    "right outer join",
+    "left outer join",
+}
+
+precedence = {
+ # https://www.sqlite.org/lang_expr.html + "literal": -1, + "interval": 0, + "cast": 0, + "collate": 0, + "concat": 1, + "mul": 2, + "div": 1.5, + "mod": 2, + "neg": 3, + "add": 3, + "sub": 2.5, + "binary_not": 4, + "binary_and": 4, + "binary_or": 4, + "gte": 5, + "lte": 5, + "lt": 5, + "gt": 6, + "eq": 7, + "neq": 7, + "missing": 7, + "exists": 7, + "at_time_zone": 8, + "between": 8, + "not_between": 8, + "in": 8, + "nin": 8, + "is": 8, + "like": 8, + "not_like": 8, + "rlike": 8, + "not_rlike": 8, + "similar_to": 8, + "not_similar_to": 8, + "and": 10, + "or": 11, + "lambda": 12, + "join": 18, + "list": 18, + "function": 30, + "select": 30, + "from": 30, + "window": 35, + "union": 40, + "union_all": 40, + "except": 40, + "minus": 40, + "intersect": 40, + "order": 50, +} + +KNOWN_OPS = [ + COLLATE, + CONCAT, + MUL | DIV | MOD, + NEG, + ADD | SUB, + BINARY_NOT, + BINARY_AND, + BINARY_OR, + GTE | LTE | LT | GT, + EEQ | NEQ | DEQ | IDF | INDF, + AT_TIME_ZONE, + (BETWEEN, AND), + (NOT_BETWEEN, AND), + IN, + NOT_IN, + IS_NOT, + IS, + LIKE, + NOT_LIKE, + RLIKE, + NOT_RLIKE, + SIMILAR_TO, + NOT_SIMILAR_TO, + NOT, + AND, + OR, + LAMBDA, +] + +times = ["now", "today", "tomorrow", "eod"] + +durations = { + "microseconds": "microsecond", + "microsecond": "microsecond", + "microsecs": "microsecond", + "microsec": "microsecond", + "useconds": "microsecond", + "usecond": "microsecond", + "usecs": "microsecond", + "usec": "microsecond", + "us": "microsecond", + "milliseconds": "millisecond", + "millisecond": "millisecond", + "millisecon": "millisecond", + "mseconds": "millisecond", + "msecond": "millisecond", + "millisecs": "millisecond", + "millisec": "millisecond", + "msecs": "millisecond", + "msec": "millisecond", + "ms": "millisecond", + "seconds": "second", + "second": "second", + "secs": "second", + "sec": "second", + "s": "second", + "minutes": "minute", + "minute": "minute", + "mins": "minute", + "min": "minute", + "m": "minute", + "hours": "hour", + "hour": "hour", + "hrs": "hour", + "hr": "hour", + "h": "hour", + "days": "day", + "day": "day", + "d": "day", + "dayofweek": "dow", + "dow": "dow", + "weekday": "dow", + "weeks": "week", + "week": "week", + "w": "week", + "months": "month", + "month": "month", + "mons": "month", + "mon": "month", + "quarters": "quarter", + "quarter": "quarter", + "years": "year", + "year": "year", + "decades": "decade", + "decade": "decade", + "decs": "decade", + "dec": "decade", + "centuries": "century", + "century": "century", + "cents": "century", + "cent": "century", + "c": "century", + "millennia": "millennium", + "millennium": "millennium", + "mils": "millennium", + "mil": "millennium", + "epoch": "epoch", +} diff --git a/aquery_parser/sql_parser.py b/aquery_parser/sql_parser.py index c07aea3..d5fa5b5 100644 --- a/aquery_parser/sql_parser.py +++ b/aquery_parser/sql_parser.py @@ -1,723 +1,723 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - -from sre_parse import WHITESPACE -from mo_parsing.helpers import restOfLine -from mo_parsing.infix import delimited_list -from mo_parsing.whitespaces import NO_WHITESPACE, Whitespace - -from aquery_parser.keywords import * -from aquery_parser.types import get_column_type, time_functions -from aquery_parser.utils import * -from aquery_parser.windows import window - -digit = Char("0123456789") -simple_ident = ( - Char(FIRST_IDENT_CHAR) - + Char(IDENT_CHAR)[...] # let's not support dashes in var_names. -) -simple_ident = Regex(simple_ident.__regex__()[1]) - - -def common_parser(): - combined_ident = Combine(delimited_list( - ansi_ident | mysql_backtick_ident | simple_ident, separator=".", combine=True, - )).set_parser_name("identifier") - - return parser(ansi_string | mysql_doublequote_string, combined_ident) - - -def mysql_parser(): - mysql_string = ansi_string | mysql_doublequote_string - mysql_ident = Combine(delimited_list( - mysql_backtick_ident | sqlserver_ident | simple_ident, - separator=".", - combine=True, - )).set_parser_name("mysql identifier") - - return parser(mysql_string, mysql_ident) - - -def sqlserver_parser(): - combined_ident = Combine(delimited_list( - ansi_ident - | mysql_backtick_ident - | sqlserver_ident - | Word(FIRST_IDENT_CHAR, IDENT_CHAR), - separator=".", - combine=True, - )).set_parser_name("identifier") - - return parser(ansi_string, combined_ident, sqlserver=True) - - -def parser(literal_string, ident, sqlserver=False): - with Whitespace() as engine: - engine.add_ignore(Literal("--") + restOfLine) - engine.add_ignore(Literal("#") + restOfLine) - engine.add_ignore(Literal("/*") + SkipTo("*/", include=True)) - - var_name = ~RESERVED + ident - - inline_kblock = (L_INLINE + SkipTo(R_INLINE, include=True))("c") - # EXPRESSIONS - expr = Forward() - column_type, column_definition, column_def_references = get_column_type( - expr, var_name, literal_string - ) - - # CASE - case = ( - CASE - + Group(ZeroOrMore( - (WHEN + expr("when") + THEN + expr("then")) / to_when_call - ))("case") - + Optional(ELSE + expr("else")) - + END - ) / to_case_call - - switch = ( - CASE - + expr("value") - + Group(ZeroOrMore( - (WHEN + expr("when") + THEN + expr("then")) / to_when_call - ))("case") - + Optional(ELSE + expr("else")) - + END - ) / to_switch_call - - cast = ( - Group(CAST("op") + LB + expr("params") + AS + column_type("params") + RB) - / to_json_call - ) - - trim = ( - Group( - keyword("trim").suppress() - + LB - + Optional( - (keyword("both") | keyword("trailing") | keyword("leading")) - / (lambda t: t[0].lower()) - )("direction") - + ( - assign("from", expr) - | expr("chars") + Optional(assign("from", expr)) - ) - + RB - ).set_parser_name("trim") - / to_trim_call - ) - - _standard_time_intervals = MatchFirst([ - keyword(d) / (lambda t: durations[t[0].lower()]) for d in durations.keys() - ]).set_parser_name("duration")("params") - - duration = ( - real_num | int_num | literal_string - )("params") + _standard_time_intervals - - interval = ( - INTERVAL + ("'" + delimited_list(duration) + "'" | duration) - ) / to_interval_call - - timestamp = ( - time_functions("op") - + ( - literal_string("params") - | MatchFirst([ - keyword(t) / (lambda t: t.lower()) for t in times - ])("params") - ) - ) / to_json_call - - extract = ( - keyword("extract")("op") - + LB - + (_standard_time_intervals | expr("params")) - + FROM - + expr("params") - + RB - ) / to_json_call - - alias = Optional(( - ( - AS - + (var_name("name") + Optional(LB + 
delimited_list(ident("col")) + RB)) - | ( - var_name("name") - + Optional( - (LB + delimited_list(ident("col")) + RB) - | (AS + delimited_list(var_name("col"))) - ) - ) - ) - / to_alias - )("name")) - - named_column = Group(Group(expr)("value") + alias) - - stack = ( - keyword("stack")("op") - + LB - + int_num("width") - + "," - + delimited_list(expr)("args") - + RB - ) / to_stack - - # ARRAY[foo], - # ARRAY < STRING > [foo, bar], INVALID - # ARRAY < STRING > [foo, bar], - create_array = ( - keyword("array")("op") - + Optional(LT.suppress() + column_type("type") + GT.suppress()) - + ( - LB + delimited_list(Group(expr))("args") + RB - | (Literal("[") + delimited_list(Group(expr))("args") + Literal("]")) - ) - ) - - if not sqlserver: - # SQL SERVER DOES NOT SUPPORT [] FOR ARRAY CONSTRUCTION (USED FOR IDENTIFIERS) - create_array = ( - Literal("[") + delimited_list(Group(expr))("args") + Literal("]") - | create_array - ) - - create_array = create_array / to_array - - create_map = ( - keyword("map") - + Literal("[") - + expr("keys") - + "," - + expr("values") - + Literal("]") - ) / to_map - - create_struct = ( - keyword("struct")("op") - + Optional( - LT.suppress() + delimited_list(column_type)("types") + GT.suppress() - ) - + LB - + delimited_list(Group((expr("value") + alias) / to_select_call))("args") - + RB - ).set_parser_name("create struct") / to_struct - - distinct = ( - DISTINCT("op") + delimited_list(named_column)("params") - ) / to_json_call - - query = Forward().set_parser_name("query") - - call_function = ( - ident("op") - + LB - + Optional(Group(query) | delimited_list(Group(expr)))("params") - + Optional( - (keyword("respect") | keyword("ignore"))("nulls") - + keyword("nulls").suppress() - ) - + RB - ).set_parser_name("call function") / to_json_call - - with NO_WHITESPACE: - - def scale(tokens): - return {"mul": [tokens[0], tokens[1]]} - - scale_function = ((real_num | int_num) + call_function) / scale - scale_ident = ((real_num | int_num) + ident) / scale - - - - compound = ( - NULL - | TRUE - | FALSE - | NOCASE - | interval - | timestamp - | extract - | case - | switch - | cast - | distinct - | trim - | stack - | create_array - | create_map - | create_struct - | (LB + Group(query) + RB) - | (LB + Group(delimited_list(expr)) / to_tuple_call + RB) - | literal_string.set_parser_name("string") - | hex_num.set_parser_name("hex") - | scale_function - | scale_ident - | real_num.set_parser_name("float") - | int_num.set_parser_name("int") - | call_function - | Combine(var_name + Optional(".*")) - ) - - sort_column = ( - expr("value").set_parser_name("sort1") - + Optional(DESC("sort") | ASC("sort")) - + Optional(assign("nulls", keyword("first") | keyword("last"))) - ) - - window_clause, over_clause = window(expr, var_name, sort_column) - - expr << ( - ( - Literal("*") - | infix_notation( - compound, - [ - ( - Literal("[").suppress() + expr + Literal("]").suppress(), - 1, - LEFT_ASSOC, - to_offset, - ), - ( - Literal(".").suppress() + simple_ident, - 1, - LEFT_ASSOC, - to_offset, - ), - (window_clause, 1, LEFT_ASSOC, to_window_mod), - ( - assign("filter", LB + WHERE + expr + RB), - 1, - LEFT_ASSOC, - to_window_mod, - ), - ] - + [ - ( - o, - 1 if o in unary_ops else (3 if isinstance(o, tuple) else 2), - unary_ops.get(o, LEFT_ASSOC), - to_lambda if o is LAMBDA else to_json_operator, - ) - for o in KNOWN_OPS - ], - ) - )("value").set_parser_name("expression") - ) - - select_column = ( - Group( - expr("value") + alias | Literal("*")("value") - ).set_parser_name("column") - / to_select_call - ) 
- - table_source = Forward() - - join = ( - Group(joins)("op") - + table_source("join") - + Optional((ON + expr("on")) | (USING + expr("using"))) - | ( - Group(WINDOW)("op") - + Group(var_name("name") + AS + over_clause("value"))("join") - ) - ) / to_join_call - - - definable_name = Forward() - dindex = definable_name("l") + LSB + expr("idx") + RSB - definable_name << var_name | dindex - - # lname = Forward() - # ptr = (lname("l") + LAMBDA + var_name("r")) - # member = (lname("l") + DOT + var_name("r")) - # idx = (expr | COLON) - # index = (lname("l") + LSB + expr("lidx") + "," + idx("ridx") + RSB) - # lname << var_name | ptr | member | index - - assignment = expr("var") + (FASSIGN|PASSIGN|MASSIGN|MULASSIGN|DASSIGN)("op") + expr("expr") - declaration = definable_name("var") + Optional(Suppress(FASSIGN) + expr("expr")) - fassign = Group(assignment + Suppress(";"))("assignment") - static_decl = Group(STATIC + delimited_list(declaration))("static_decl") - stmt = Forward() - elifstmt = Group(ELIF + LB + expr("cond") + RB + stmt)("elif") - elsestmt = Group(ELSE + stmt)("else") - ifstmt = Group(IF + LB + expr("cond") + RB + stmt + - ZeroOrMore(elifstmt) + Optional(elsestmt))("if") - forstmt = Group(FOR + LB + ( delimited_list(assignment)("defs") - + Suppress(";") + expr("cond") + - Suppress(";") + delimited_list(assignment)("tail")) - + RB + stmt)("for") - block = Forward() - stmt << (fassign|ifstmt|forstmt|block|Suppress(";")) - stmts = (ZeroOrMore(stmt("stmt"), Whitespace())) - block << (LBRACE + Optional(stmts) + RBRACE)("code_block") - fbody = (Optional(static_decl) + Optional(stmts) + expr("ret")) - - udf = ( - Optional(AGGREGATION("Agg")) + - FUNCTION - + var_name("fname") - + LB - + Optional(delimited_list(var_name)("params")) - + RB - + LBRACE - + fbody - + RBRACE - )("udf") - - selection = ( - (SELECT + DISTINCT + ON + LB) - + delimited_list(select_column)("distinct_on") - + RB - + delimited_list(select_column)("select") - | SELECT + DISTINCT + delimited_list(select_column)("select_distinct") - | ( - SELECT - + Optional( - TOP - + expr("value") - + Optional(keyword("percent"))("percent") - + Optional(WITH + keyword("ties"))("ties") - )("top") - / to_top_clause - + delimited_list(select_column)("select") - ) - ) - - row = (LB + delimited_list(Group(expr)) + RB) / to_row - values = VALUES + delimited_list(row) / to_values - - unordered_sql = Group( - values - | selection - + Optional(INTO + table_source("into")) - + Optional( - (FROM + delimited_list(table_source) + ZeroOrMore(join))("from") - + Optional(WHERE + expr("where")) - + Optional(GROUP_BY + delimited_list(Group(named_column))("groupby")) - + Optional(HAVING + expr("having")) - ) - ).set_parser_name("unordered sql") - - with NO_WHITESPACE: - - def mult(tokens): - amount = tokens["bytes"] - scale = tokens["scale"].lower() - return { - "bytes": amount - * {"b": 1, "k": 1_000, "m": 1_000_000, "g": 1_000_000_000}[scale] - } - - ts_bytes = ( - (real_num | int_num)("bytes") + Char("bBkKmMgG")("scale") - ) / mult - - tablesample = assign( - "tablesample", - LB - + ( - ( - keyword("bucket")("op") - + int_num("params") - + keyword("out of") - + int_num("params") - + Optional(ON + expr("on")) - ) - / to_json_call - | (real_num | int_num)("percent") + keyword("percent") - | int_num("rows") + keyword("rows") - | ts_bytes - ) - + RB, - ) - - assumption = Group((ASC|DESC) ("sort") + var_name("value")) - assumptions = (ASSUMING + Group(delimited_list(assumption))("assumptions")) - - table_source << Group( - ((LB + query + RB) | stack | 
call_function | var_name)("value") - + Optional(assumptions) - + Optional(flag("with ordinality")) - + Optional(tablesample) - + alias - ).set_parser_name("table_source") / to_table - - rows = Optional(keyword("row") | keyword("rows")) - limit = ( - Optional(assign("offset", expr) + rows) - & Optional( - FETCH - + Optional(keyword("first") | keyword("next")) - + expr("fetch") - + rows - + Optional(keyword("only")) - ) - & Optional(assign("limit", expr)) - ) - - outfile = Optional( - ( - INTO - + keyword("outfile").suppress() - + literal_string ("loc") - + Optional ( - keyword("fields") - + keyword("terminated") - + keyword("by") - + literal_string ("term") - ) - )("outfile") - ) - ordered_sql = ( - ( - (unordered_sql | (LB + query + RB)) - + ZeroOrMore( - Group( - (UNION | INTERSECT | EXCEPT | MINUS) + Optional(ALL | DISTINCT) - )("op") - + (unordered_sql | (LB + query + RB)) - ) - )("union") - + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) - + limit - + outfile - ).set_parser_name("ordered sql") / to_union_call - - with_expr = delimited_list(Group( - ( - (var_name("name") + Optional(LB + delimited_list(ident("col")) + RB)) - / to_alias - )("name") - + (AS + LB + (query | expr)("value") + RB) - )) - - query << ( - Optional(assign("with recursive", with_expr) | assign("with", with_expr)) - + Group(ordered_sql)("query") - ) / to_query - - ##################################################################### - # DML STATEMENTS - ##################################################################### - - # MySQL's index_type := Using + ( "BTREE" | "HASH" ) - index_type = Optional(assign("using", ident("index_type"))) - - index_column_names = LB + delimited_list(var_name("columns")) + RB - - column_def_delete = assign( - "on delete", - (keyword("cascade") | keyword("set null") | keyword("set default")), - ) - - table_def_foreign_key = FOREIGN_KEY + Optional( - Optional(var_name("index_name")) - + index_column_names - + column_def_references - + Optional(column_def_delete) - ) - - index_options = ZeroOrMore(var_name)("table_constraint_options") - - table_constraint_definition = Optional(CONSTRAINT + var_name("name")) + ( - assign("primary key", index_type + index_column_names + index_options) - | ( - Optional(flag("unique")) - + Optional(INDEX | KEY) - + Optional(var_name("name")) - + index_type - + index_column_names - + index_options - )("index") - | assign("check", LB + expr + RB) - | table_def_foreign_key("foreign_key") - ) - - table_element = ( - column_definition("columns") | table_constraint_definition("constraint") - ) - - create_table = ( - keyword("create") - + Optional(keyword("or") + flag("replace")) - + Optional(flag("temporary")) - + TABLE - + Optional((keyword("if not exists") / (lambda: False))("replace")) - + var_name("name") - + Optional(LB + delimited_list(table_element) + RB) - + ZeroOrMore( - assign("engine", EQ + var_name) - | assign("collate", EQ + var_name) - | assign("auto_increment", EQ + int_num) - | assign("comment", EQ + literal_string) - | assign("default character set", EQ + var_name) - | assign("default charset", EQ + var_name) - ) - + Optional(AS.suppress() + infix_notation(query, [])("query")) - )("create_table") - - create_view = ( - keyword("create") - + Optional(keyword("or") + flag("replace")) - + Optional(flag("temporary")) - + VIEW.suppress() - + Optional((keyword("if not exists") / (lambda: False))("replace")) - + var_name("name") - + AS - + query("query") - )("create_view") - - # CREATE INDEX a ON u USING btree (e); - create_index = ( 
- keyword("create index") - + Optional(keyword("or") + flag("replace"))(INDEX | KEY) - + Optional((keyword("if not exists") / (lambda: False))("replace")) - + var_name("name") - + ON - + var_name("table") - + index_type - + index_column_names - + index_options - )("create index") - - cache_options = Optional(( - keyword("options").suppress() - + LB - + Dict(delimited_list(Group( - literal_string / (lambda tokens: tokens[0]["literal"]) - + Optional(EQ) - + var_name - ))) - + RB - )("options")) - - create_cache = ( - keyword("cache").suppress() - + Optional(flag("lazy")) - + TABLE - + var_name("name") - + cache_options - + Optional(AS + query("query")) - )("cache") - - drop_table = ( - keyword("drop table") + Optional(flag("if exists")) + var_name("table") - )("drop") - - drop_view = ( - keyword("drop view") + Optional(flag("if exists")) + var_name("view") - )("drop") - - drop_index = ( - keyword("drop index") + Optional(flag("if exists")) + var_name("index") - )("drop") - - insert = ( - keyword("insert").suppress() - + ( - flag("overwrite") + keyword("table").suppress() - | keyword("into").suppress() + Optional(keyword("table").suppress()) - ) - + var_name("table") - + Optional(LB + delimited_list(var_name)("columns") + RB) - + Optional(flag("if exists")) - + (values | query)("query") - ) / to_insert_call - - update = ( - keyword("update")("op") - + var_name("params") - + assign("set", Dict(delimited_list(Group(var_name + EQ + expr)))) - + Optional(assign("where", expr)) - ) / to_json_call - - delete = ( - keyword("delete")("op") - + keyword("from").suppress() - + var_name("params") - + Optional(assign("where", expr)) - ) / to_json_call - - load_data = ( - keyword("data") ("file_type") - + keyword("infile")("loc") - + literal_string ("file") - + INTO - + keyword("table").suppress() - + var_name ("table") - + Optional( - keyword("fields").suppress() - + keyword("terminated").suppress() - + keyword("by").suppress() - + literal_string ("term") - ) - ) - - module_func_def = ( - var_name("fname") - + LB - + delimited_list( - ( - var_name("arg") - + COLON - + var_name("type") - )("vars") - ) - + RB - + LAMBDA - + var_name("ret_type") - ) - - load_module = ( - keyword("module") ("file_type") - + FROM - + literal_string ("file") - + Optional( - keyword("FUNCTIONS").suppress() - + LB - + module_func_def("funcs") - + ZeroOrMore(Suppress(',') - + module_func_def("funcs"), - Whitespace() - ) - + RB - ) - ) - - load = ( - keyword("load") - + (load_data | load_module) - ) ("load") - - - sql_stmts = delimited_list( ( - query - | (insert | update | delete | load) - | (create_table | create_view | create_cache | create_index) - | (drop_table | drop_view | drop_index) - )("stmts"), ";") - - other_stmt = ( - inline_kblock - | udf - ) ("stmts") - - stmts = ZeroOrMore( - sql_stmts - |other_stmt - | keyword(";").suppress() # empty stmt - ) - - return stmts.finalize() +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) +# + +from sre_parse import WHITESPACE +from mo_parsing.helpers import restOfLine +from mo_parsing.infix import delimited_list +from mo_parsing.whitespaces import NO_WHITESPACE, Whitespace + +from aquery_parser.keywords import * +from aquery_parser.types import get_column_type, time_functions +from aquery_parser.utils import * +from aquery_parser.windows import window + +digit = Char("0123456789") +simple_ident = ( + Char(FIRST_IDENT_CHAR) + + Char(IDENT_CHAR)[...] # let's not support dashes in var_names. +) +simple_ident = Regex(simple_ident.__regex__()[1]) + + +def common_parser(): + combined_ident = Combine(delimited_list( + ansi_ident | mysql_backtick_ident | simple_ident, separator=".", combine=True, + )).set_parser_name("identifier") + + return parser(ansi_string | mysql_doublequote_string, combined_ident) + + +def mysql_parser(): + mysql_string = ansi_string | mysql_doublequote_string + mysql_ident = Combine(delimited_list( + mysql_backtick_ident | sqlserver_ident | simple_ident, + separator=".", + combine=True, + )).set_parser_name("mysql identifier") + + return parser(mysql_string, mysql_ident) + + +def sqlserver_parser(): + combined_ident = Combine(delimited_list( + ansi_ident + | mysql_backtick_ident + | sqlserver_ident + | Word(FIRST_IDENT_CHAR, IDENT_CHAR), + separator=".", + combine=True, + )).set_parser_name("identifier") + + return parser(ansi_string, combined_ident, sqlserver=True) + + +def parser(literal_string, ident, sqlserver=False): + with Whitespace() as engine: + engine.add_ignore(Literal("--") + restOfLine) + engine.add_ignore(Literal("#") + restOfLine) + engine.add_ignore(Literal("/*") + SkipTo("*/", include=True)) + + var_name = ~RESERVED + ident + + inline_kblock = (L_INLINE + SkipTo(R_INLINE, include=True))("c") + # EXPRESSIONS + expr = Forward() + column_type, column_definition, column_def_references = get_column_type( + expr, var_name, literal_string + ) + + # CASE + case = ( + CASE + + Group(ZeroOrMore( + (WHEN + expr("when") + THEN + expr("then")) / to_when_call + ))("case") + + Optional(ELSE + expr("else")) + + END + ) / to_case_call + + switch = ( + CASE + + expr("value") + + Group(ZeroOrMore( + (WHEN + expr("when") + THEN + expr("then")) / to_when_call + ))("case") + + Optional(ELSE + expr("else")) + + END + ) / to_switch_call + + cast = ( + Group(CAST("op") + LB + expr("params") + AS + column_type("params") + RB) + / to_json_call + ) + + trim = ( + Group( + keyword("trim").suppress() + + LB + + Optional( + (keyword("both") | keyword("trailing") | keyword("leading")) + / (lambda t: t[0].lower()) + )("direction") + + ( + assign("from", expr) + | expr("chars") + Optional(assign("from", expr)) + ) + + RB + ).set_parser_name("trim") + / to_trim_call + ) + + _standard_time_intervals = MatchFirst([ + keyword(d) / (lambda t: durations[t[0].lower()]) for d in durations.keys() + ]).set_parser_name("duration")("params") + + duration = ( + real_num | int_num | literal_string + )("params") + _standard_time_intervals + + interval = ( + INTERVAL + ("'" + delimited_list(duration) + "'" | duration) + ) / to_interval_call + + timestamp = ( + time_functions("op") + + ( + literal_string("params") + | MatchFirst([ + keyword(t) / (lambda t: t.lower()) for t in times + ])("params") + ) + ) / to_json_call + + extract = ( + keyword("extract")("op") + + LB + + (_standard_time_intervals | expr("params")) + + FROM + + expr("params") + + RB + ) / to_json_call + + alias = Optional(( + ( + AS + + (var_name("name") + Optional(LB + 
delimited_list(ident("col")) + RB)) + | ( + var_name("name") + + Optional( + (LB + delimited_list(ident("col")) + RB) + | (AS + delimited_list(var_name("col"))) + ) + ) + ) + / to_alias + )("name")) + + named_column = Group(Group(expr)("value") + alias) + + stack = ( + keyword("stack")("op") + + LB + + int_num("width") + + "," + + delimited_list(expr)("args") + + RB + ) / to_stack + + # ARRAY[foo], + # ARRAY < STRING > [foo, bar], INVALID + # ARRAY < STRING > [foo, bar], + create_array = ( + keyword("array")("op") + + Optional(LT.suppress() + column_type("type") + GT.suppress()) + + ( + LB + delimited_list(Group(expr))("args") + RB + | (Literal("[") + delimited_list(Group(expr))("args") + Literal("]")) + ) + ) + + if not sqlserver: + # SQL SERVER DOES NOT SUPPORT [] FOR ARRAY CONSTRUCTION (USED FOR IDENTIFIERS) + create_array = ( + Literal("[") + delimited_list(Group(expr))("args") + Literal("]") + | create_array + ) + + create_array = create_array / to_array + + create_map = ( + keyword("map") + + Literal("[") + + expr("keys") + + "," + + expr("values") + + Literal("]") + ) / to_map + + create_struct = ( + keyword("struct")("op") + + Optional( + LT.suppress() + delimited_list(column_type)("types") + GT.suppress() + ) + + LB + + delimited_list(Group((expr("value") + alias) / to_select_call))("args") + + RB + ).set_parser_name("create struct") / to_struct + + distinct = ( + DISTINCT("op") + delimited_list(named_column)("params") + ) / to_json_call + + query = Forward().set_parser_name("query") + + call_function = ( + ident("op") + + LB + + Optional(Group(query) | delimited_list(Group(expr)))("params") + + Optional( + (keyword("respect") | keyword("ignore"))("nulls") + + keyword("nulls").suppress() + ) + + RB + ).set_parser_name("call function") / to_json_call + + with NO_WHITESPACE: + + def scale(tokens): + return {"mul": [tokens[0], tokens[1]]} + + scale_function = ((real_num | int_num) + call_function) / scale + scale_ident = ((real_num | int_num) + ident) / scale + + + + compound = ( + NULL + | TRUE + | FALSE + | NOCASE + | interval + | timestamp + | extract + | case + | switch + | cast + | distinct + | trim + | stack + | create_array + | create_map + | create_struct + | (LB + Group(query) + RB) + | (LB + Group(delimited_list(expr)) / to_tuple_call + RB) + | literal_string.set_parser_name("string") + | hex_num.set_parser_name("hex") + | scale_function + | scale_ident + | real_num.set_parser_name("float") + | int_num.set_parser_name("int") + | call_function + | Combine(var_name + Optional(".*")) + ) + + sort_column = ( + expr("value").set_parser_name("sort1") + + Optional(DESC("sort") | ASC("sort")) + + Optional(assign("nulls", keyword("first") | keyword("last"))) + ) + + window_clause, over_clause = window(expr, var_name, sort_column) + + expr << ( + ( + Literal("*") + | infix_notation( + compound, + [ + ( + Literal("[").suppress() + expr + Literal("]").suppress(), + 1, + LEFT_ASSOC, + to_offset, + ), + ( + Literal(".").suppress() + simple_ident, + 1, + LEFT_ASSOC, + to_offset, + ), + (window_clause, 1, LEFT_ASSOC, to_window_mod), + ( + assign("filter", LB + WHERE + expr + RB), + 1, + LEFT_ASSOC, + to_window_mod, + ), + ] + + [ + ( + o, + 1 if o in unary_ops else (3 if isinstance(o, tuple) else 2), + unary_ops.get(o, LEFT_ASSOC), + to_lambda if o is LAMBDA else to_json_operator, + ) + for o in KNOWN_OPS + ], + ) + )("value").set_parser_name("expression") + ) + + select_column = ( + Group( + expr("value") + alias | Literal("*")("value") + ).set_parser_name("column") + / to_select_call + ) 
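+        # Editor's sketch (assumes the mo-sql-parsing style output that the
+        # to_* helpers in aquery_parser.utils produce; not part of the original
+        # commit). Feeding the finalized parser a statement such as
+        #
+        #     SELECT a + 1 AS b FROM t
+        #
+        # yields, after scrub()-ing, a JSON-like structure roughly of the form
+        #
+        #     {"select": {"value": {"add": ["a", 1]}, "name": "b"}, "from": "t"}
+        #
+        # select_column attaches the alias, and the infix_notation table above
+        # rewrites each operator into an {op: [args]} call.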
+
+        table_source = Forward()
+
+        join = (
+            Group(joins)("op")
+            + table_source("join")
+            + Optional((ON + expr("on")) | (USING + expr("using")))
+            | (
+                Group(WINDOW)("op")
+                + Group(var_name("name") + AS + over_clause("value"))("join")
+            )
+        ) / to_join_call
+
+
+        definable_name = Forward()
+        dindex = definable_name("l") + LSB + expr("idx") + RSB
+        definable_name << (var_name | dindex)
+
+        # lname = Forward()
+        # ptr = (lname("l") + LAMBDA + var_name("r"))
+        # member = (lname("l") + DOT + var_name("r"))
+        # idx = (expr | COLON)
+        # index = (lname("l") + LSB + expr("lidx") + "," + idx("ridx") + RSB)
+        # lname << var_name | ptr | member | index
+
+        assignment = expr("var") + (FASSIGN|PASSIGN|MASSIGN|MULASSIGN|DASSIGN)("op") + expr("expr")
+        declaration = definable_name("var") + Optional(Suppress(FASSIGN) + expr("expr"))
+        fassign = Group(assignment + Suppress(";"))("assignment")
+        static_decl = Group(STATIC + delimited_list(declaration))("static_decl")
+        stmt = Forward()
+        elifstmt = Group(ELIF + LB + expr("cond") + RB + stmt)("elif")
+        elsestmt = Group(ELSE + stmt)("else")
+        ifstmt = Group(IF + LB + expr("cond") + RB + stmt +
+                       ZeroOrMore(elifstmt) + Optional(elsestmt))("if")
+        forstmt = Group(FOR + LB + ( delimited_list(assignment)("defs")
+                        + Suppress(";") + expr("cond") +
+                        Suppress(";") + delimited_list(assignment)("tail"))
+                        + RB + stmt)("for")
+        block = Forward()
+        stmt << (fassign|ifstmt|forstmt|block|Suppress(";"))
+        stmts = (ZeroOrMore(stmt("stmt"), Whitespace()))
+        block << (LBRACE + Optional(stmts) + RBRACE)("code_block")
+        fbody = (Optional(static_decl) + Optional(stmts) + expr("ret"))
+
+        udf = (
+            Optional(AGGREGATION("Agg")) +
+            FUNCTION
+            + var_name("fname")
+            + LB
+            + Optional(delimited_list(var_name)("params"))
+            + RB
+            + LBRACE
+            + fbody
+            + RBRACE
+        )("udf")
+
+        selection = (
+            (SELECT + DISTINCT + ON + LB)
+            + delimited_list(select_column)("distinct_on")
+            + RB
+            + delimited_list(select_column)("select")
+            | SELECT + DISTINCT + delimited_list(select_column)("select_distinct")
+            | (
+                SELECT
+                + Optional(
+                    TOP
+                    + expr("value")
+                    + Optional(keyword("percent"))("percent")
+                    + Optional(WITH + keyword("ties"))("ties")
+                )("top")
+                / to_top_clause
+                + delimited_list(select_column)("select")
+            )
+        )
+
+        row = (LB + delimited_list(Group(expr)) + RB) / to_row
+        values = VALUES + delimited_list(row) / to_values
+
+        unordered_sql = Group(
+            values
+            | selection
+            + Optional(INTO + table_source("into"))
+            + Optional(
+                (FROM + delimited_list(table_source) + ZeroOrMore(join))("from")
+                + Optional(WHERE + expr("where"))
+                + Optional(GROUP_BY + delimited_list(Group(named_column))("groupby"))
+                + Optional(HAVING + expr("having"))
+            )
+        ).set_parser_name("unordered sql")
+
+        with NO_WHITESPACE:
+
+            def mult(tokens):
+                amount = tokens["bytes"]
+                scale = tokens["scale"].lower()
+                return {
+                    "bytes": amount
+                    * {"b": 1, "k": 1_000, "m": 1_000_000, "g": 1_000_000_000}[scale]
+                }
+
+            ts_bytes = (
+                (real_num | int_num)("bytes") + Char("bBkKmMgG")("scale")
+            ) / mult
+
+        tablesample = assign(
+            "tablesample",
+            LB
+            + (
+                (
+                    keyword("bucket")("op")
+                    + int_num("params")
+                    + keyword("out of")
+                    + int_num("params")
+                    + Optional(ON + expr("on"))
+                )
+                / to_json_call
+                | (real_num | int_num)("percent") + keyword("percent")
+                | int_num("rows") + keyword("rows")
+                | ts_bytes
+            )
+            + RB,
+        )
+
+        assumption = Group((ASC|DESC) ("sort") + var_name("value"))
+        assumptions = (ASSUMING + Group(delimited_list(assumption))("assumptions"))
+
+        table_source << Group(
+            ((LB + query + RB) | stack | 
call_function | var_name)("value") + + Optional(assumptions) + + Optional(flag("with ordinality")) + + Optional(tablesample) + + alias + ).set_parser_name("table_source") / to_table + + rows = Optional(keyword("row") | keyword("rows")) + limit = ( + Optional(assign("offset", expr) + rows) + & Optional( + FETCH + + Optional(keyword("first") | keyword("next")) + + expr("fetch") + + rows + + Optional(keyword("only")) + ) + & Optional(assign("limit", expr)) + ) + + outfile = Optional( + ( + INTO + + keyword("outfile").suppress() + + literal_string ("loc") + + Optional ( + keyword("fields") + + keyword("terminated") + + keyword("by") + + literal_string ("term") + ) + )("outfile") + ) + ordered_sql = ( + ( + (unordered_sql | (LB + query + RB)) + + ZeroOrMore( + Group( + (UNION | INTERSECT | EXCEPT | MINUS) + Optional(ALL | DISTINCT) + )("op") + + (unordered_sql | (LB + query + RB)) + ) + )("union") + + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) + + limit + + outfile + ).set_parser_name("ordered sql") / to_union_call + + with_expr = delimited_list(Group( + ( + (var_name("name") + Optional(LB + delimited_list(ident("col")) + RB)) + / to_alias + )("name") + + (AS + LB + (query | expr)("value") + RB) + )) + + query << ( + Optional(assign("with recursive", with_expr) | assign("with", with_expr)) + + Group(ordered_sql)("query") + ) / to_query + + ##################################################################### + # DML STATEMENTS + ##################################################################### + + # MySQL's index_type := Using + ( "BTREE" | "HASH" ) + index_type = Optional(assign("using", ident("index_type"))) + + index_column_names = LB + delimited_list(var_name("columns")) + RB + + column_def_delete = assign( + "on delete", + (keyword("cascade") | keyword("set null") | keyword("set default")), + ) + + table_def_foreign_key = FOREIGN_KEY + Optional( + Optional(var_name("index_name")) + + index_column_names + + column_def_references + + Optional(column_def_delete) + ) + + index_options = ZeroOrMore(var_name)("table_constraint_options") + + table_constraint_definition = Optional(CONSTRAINT + var_name("name")) + ( + assign("primary key", index_type + index_column_names + index_options) + | ( + Optional(flag("unique")) + + Optional(INDEX | KEY) + + Optional(var_name("name")) + + index_type + + index_column_names + + index_options + )("index") + | assign("check", LB + expr + RB) + | table_def_foreign_key("foreign_key") + ) + + table_element = ( + column_definition("columns") | table_constraint_definition("constraint") + ) + + create_table = ( + keyword("create") + + Optional(keyword("or") + flag("replace")) + + Optional(flag("temporary")) + + TABLE + + Optional((keyword("if not exists") / (lambda: False))("replace")) + + var_name("name") + + Optional(LB + delimited_list(table_element) + RB) + + ZeroOrMore( + assign("engine", EQ + var_name) + | assign("collate", EQ + var_name) + | assign("auto_increment", EQ + int_num) + | assign("comment", EQ + literal_string) + | assign("default character set", EQ + var_name) + | assign("default charset", EQ + var_name) + ) + + Optional(AS.suppress() + infix_notation(query, [])("query")) + )("create_table") + + create_view = ( + keyword("create") + + Optional(keyword("or") + flag("replace")) + + Optional(flag("temporary")) + + VIEW.suppress() + + Optional((keyword("if not exists") / (lambda: False))("replace")) + + var_name("name") + + AS + + query("query") + )("create_view") + + # CREATE INDEX a ON u USING btree (e); + create_index = ( 
+ keyword("create index") + + Optional(keyword("or") + flag("replace"))(INDEX | KEY) + + Optional((keyword("if not exists") / (lambda: False))("replace")) + + var_name("name") + + ON + + var_name("table") + + index_type + + index_column_names + + index_options + )("create index") + + cache_options = Optional(( + keyword("options").suppress() + + LB + + Dict(delimited_list(Group( + literal_string / (lambda tokens: tokens[0]["literal"]) + + Optional(EQ) + + var_name + ))) + + RB + )("options")) + + create_cache = ( + keyword("cache").suppress() + + Optional(flag("lazy")) + + TABLE + + var_name("name") + + cache_options + + Optional(AS + query("query")) + )("cache") + + drop_table = ( + keyword("drop table") + Optional(flag("if exists")) + var_name("table") + )("drop") + + drop_view = ( + keyword("drop view") + Optional(flag("if exists")) + var_name("view") + )("drop") + + drop_index = ( + keyword("drop index") + Optional(flag("if exists")) + var_name("index") + )("drop") + + insert = ( + keyword("insert").suppress() + + ( + flag("overwrite") + keyword("table").suppress() + | keyword("into").suppress() + Optional(keyword("table").suppress()) + ) + + var_name("table") + + Optional(LB + delimited_list(var_name)("columns") + RB) + + Optional(flag("if exists")) + + (values | query)("query") + ) / to_insert_call + + update = ( + keyword("update")("op") + + var_name("params") + + assign("set", Dict(delimited_list(Group(var_name + EQ + expr)))) + + Optional(assign("where", expr)) + ) / to_json_call + + delete = ( + keyword("delete")("op") + + keyword("from").suppress() + + var_name("params") + + Optional(assign("where", expr)) + ) / to_json_call + + load_data = ( + keyword("data") ("file_type") + + keyword("infile")("loc") + + literal_string ("file") + + INTO + + keyword("table").suppress() + + var_name ("table") + + Optional( + keyword("fields").suppress() + + keyword("terminated").suppress() + + keyword("by").suppress() + + literal_string ("term") + ) + ) + + module_func_def = ( + var_name("fname") + + LB + + delimited_list( + ( + var_name("arg") + + COLON + + var_name("type") + )("vars") + ) + + RB + + LAMBDA + + var_name("ret_type") + ) + + load_module = ( + keyword("module") ("file_type") + + FROM + + literal_string ("file") + + Optional( + keyword("FUNCTIONS").suppress() + + LB + + module_func_def("funcs") + + ZeroOrMore(Suppress(',') + + module_func_def("funcs"), + Whitespace() + ) + + RB + ) + ) + + load = ( + keyword("load") + + (load_data | load_module) + ) ("load") + + + sql_stmts = delimited_list( ( + query + | (insert | update | delete | load) + | (create_table | create_view | create_cache | create_index) + | (drop_table | drop_view | drop_index) + )("stmts"), ";") + + other_stmt = ( + inline_kblock + | udf + ) ("stmts") + + stmts = ZeroOrMore( + sql_stmts + |other_stmt + | keyword(";").suppress() # empty stmt + ) + + return stmts.finalize() diff --git a/aquery_parser/types.py b/aquery_parser/types.py index acdd428..24d642a 100644 --- a/aquery_parser/types.py +++ b/aquery_parser/types.py @@ -1,223 +1,223 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - - -# KNOWN TYPES -from mo_parsing import Forward, Group, Optional, MatchFirst, Literal, ZeroOrMore, export -from mo_parsing.infix import delimited_list, RIGHT_ASSOC, LEFT_ASSOC - -from aquery_parser.keywords import ( - RB, - LB, - NEG, - NOT, - BINARY_NOT, - NULL, - EQ, - KNOWN_OPS, - LT, - GT, -) -from aquery_parser.utils import ( - keyword, - to_json_call, - int_num, - ansi_string, - ansi_ident, - assign, - flag, -) - -_size = Optional(LB + int_num("params") + RB) -_sizes = Optional(LB + delimited_list(int_num("params")) + RB) - -simple_types = Forward() - -BIGINT = Group(keyword("bigint")("op") + Optional(_size)+Optional(flag("unsigned"))) / to_json_call -BOOL = Group(keyword("bool")("op")) / to_json_call -BOOLEAN = Group(keyword("boolean")("op")) / to_json_call -DOUBLE = Group(keyword("double")("op")) / to_json_call -FLOAT64 = Group(keyword("float64")("op")) / to_json_call -FLOAT = Group(keyword("float")("op")) / to_json_call -GEOMETRY = Group(keyword("geometry")("op")) / to_json_call -INTEGER = Group(keyword("integer")("op")) / to_json_call -INT = (keyword("int")("op") + _size) / to_json_call -INT32 = Group(keyword("int32")("op")) / to_json_call -INT64 = Group(keyword("int64")("op")) / to_json_call -REAL = Group(keyword("real")("op")) / to_json_call -TEXT = Group(keyword("text")("op")) / to_json_call -SMALLINT = Group(keyword("smallint")("op")) / to_json_call -STRING = Group(keyword("string")("op")) / to_json_call - -BLOB = (keyword("blob")("op") + _size) / to_json_call -BYTES = (keyword("bytes")("op") + _size) / to_json_call -CHAR = (keyword("char")("op") + _size) / to_json_call -NCHAR = (keyword("nchar")("op") + _size) / to_json_call -VARCHAR = (keyword("varchar")("op") + _size) / to_json_call -VARCHAR2 = (keyword("varchar2")("op") + _size) / to_json_call -VARBINARY = (keyword("varbinary")("op") + _size) / to_json_call -TINYINT = (keyword("tinyint")("op") + _size) / to_json_call -UUID = Group(keyword("uuid")("op")) / to_json_call - -DECIMAL = (keyword("decimal")("op") + _sizes) / to_json_call -DOUBLE_PRECISION = ( - Group((keyword("double precision") / (lambda: "double_precision"))("op")) - / to_json_call -) -NUMERIC = (keyword("numeric")("op") + _sizes) / to_json_call -NUMBER = (keyword("number")("op") + _sizes) / to_json_call - -MAP_TYPE = ( - keyword("map")("op") + LB + delimited_list(simple_types("params")) + RB -) / to_json_call -ARRAY_TYPE = (keyword("array")("op") + LB + simple_types("params") + RB) / to_json_call - -DATE = keyword("date") -DATETIME = keyword("datetime") -DATETIME_W_TIMEZONE = keyword("datetime with time zone") -TIME = keyword("time") -TIMESTAMP = keyword("timestamp") -TIMESTAMP_W_TIMEZONE = keyword("timestamp with time zone") -TIMESTAMPTZ = keyword("timestamptz") -TIMETZ = keyword("timetz") - -time_functions = DATE | DATETIME | TIME | TIMESTAMP | TIMESTAMPTZ | TIMETZ - -# KNOWNN TIME TYPES -_format = Optional((ansi_string | ansi_ident)("params")) - -DATE_TYPE = (DATE("op") + _format) / to_json_call -DATETIME_TYPE = (DATETIME("op") + _format) / to_json_call -DATETIME_W_TIMEZONE_TYPE = (DATETIME_W_TIMEZONE("op") + _format) / to_json_call -TIME_TYPE = (TIME("op") + _format) / to_json_call -TIMESTAMP_TYPE = (TIMESTAMP("op") + _format) / to_json_call -TIMESTAMP_W_TIMEZONE_TYPE = (TIMESTAMP_W_TIMEZONE("op") + _format) / to_json_call -TIMESTAMPTZ_TYPE = (TIMESTAMPTZ("op") + _format) / to_json_call -TIMETZ_TYPE = (TIMETZ("op") + _format) / to_json_call - -simple_types << MatchFirst([ - ARRAY_TYPE, - BIGINT, - 
BOOL, - BOOLEAN, - BLOB, - BYTES, - CHAR, - DATE_TYPE, - DATETIME_W_TIMEZONE_TYPE, - DATETIME_TYPE, - DECIMAL, - DOUBLE_PRECISION, - DOUBLE, - FLOAT64, - FLOAT, - GEOMETRY, - MAP_TYPE, - INTEGER, - INT, - INT32, - INT64, - NCHAR, - NUMBER, - NUMERIC, - REAL, - TEXT, - SMALLINT, - STRING, - TIME_TYPE, - TIMESTAMP_W_TIMEZONE_TYPE, - TIMESTAMP_TYPE, - TIMESTAMPTZ_TYPE, - TIMETZ_TYPE, - TINYINT, - UUID, - VARCHAR, - VARCHAR2, - VARBINARY, -]) - -CASTING = (Literal("::").suppress() + simple_types("params")).set_parser_name("cast") -KNOWN_OPS.insert(0, CASTING) - -unary_ops = { - NEG: RIGHT_ASSOC, - NOT: RIGHT_ASSOC, - BINARY_NOT: RIGHT_ASSOC, - CASTING: LEFT_ASSOC, -} - - -def get_column_type(expr, var_name, literal_string): - column_definition = Forward() - column_type = Forward().set_parser_name("column type") - - struct_type = ( - keyword("struct")("op") - + LT.suppress() - + Group(delimited_list(column_definition))("params") - + GT.suppress() - ) / to_json_call - - row_type = ( - keyword("row")("op") - + LB - + Group(delimited_list(column_definition))("params") - + RB - ) / to_json_call - - array_type = ( - keyword("array")("op") - + ( - ( - LT.suppress() - + Group(delimited_list(column_type))("params") - + GT.suppress() - ) - | (LB + Group(delimited_list(column_type))("params") + RB) - ) - ) / to_json_call - - column_type << (struct_type | row_type | array_type | simple_types) - - column_def_identity = ( - assign( - "generated", - (keyword("always") | keyword("by default") / (lambda: "by_default")), - ) - + keyword("as identity").suppress() - + Optional(assign("start with", int_num)) - + Optional(assign("increment by", int_num)) - ) - - column_def_references = assign( - "references", var_name("table") + LB + delimited_list(var_name)("columns") + RB, - ) - - column_options = ZeroOrMore( - ((NOT + NULL) / (lambda: False))("nullable") - | (NULL / (lambda t: True))("nullable") - | flag("unique") - | flag("auto_increment") - | assign("comment", literal_string) - | assign("collate", Optional(EQ) + var_name) - | flag("primary key") - | column_def_identity("identity") - | column_def_references - | assign("check", LB + expr + RB) - | assign("default", expr) - ).set_parser_name("column_options") - - column_definition << Group( - var_name("name") + (column_type | var_name)("type") + column_options - ).set_parser_name("column_definition") - - return column_type, column_definition, column_def_references - - -export("aquery_parser.utils", unary_ops) +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+#
+# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
+#
+
+
+# KNOWN TYPES
+from mo_parsing import Forward, Group, Optional, MatchFirst, Literal, ZeroOrMore, export
+from mo_parsing.infix import delimited_list, RIGHT_ASSOC, LEFT_ASSOC
+
+from aquery_parser.keywords import (
+    RB,
+    LB,
+    NEG,
+    NOT,
+    BINARY_NOT,
+    NULL,
+    EQ,
+    KNOWN_OPS,
+    LT,
+    GT,
+)
+from aquery_parser.utils import (
+    keyword,
+    to_json_call,
+    int_num,
+    ansi_string,
+    ansi_ident,
+    assign,
+    flag,
+)
+
+_size = Optional(LB + int_num("params") + RB)
+_sizes = Optional(LB + delimited_list(int_num("params")) + RB)
+
+simple_types = Forward()
+
+BIGINT = Group(keyword("bigint")("op") + Optional(_size)+Optional(flag("unsigned"))) / to_json_call
+BOOL = Group(keyword("bool")("op")) / to_json_call
+BOOLEAN = Group(keyword("boolean")("op")) / to_json_call
+DOUBLE = Group(keyword("double")("op")) / to_json_call
+FLOAT64 = Group(keyword("float64")("op")) / to_json_call
+FLOAT = Group(keyword("float")("op")) / to_json_call
+GEOMETRY = Group(keyword("geometry")("op")) / to_json_call
+INTEGER = Group(keyword("integer")("op")) / to_json_call
+INT = (keyword("int")("op") + _size) / to_json_call
+INT32 = Group(keyword("int32")("op")) / to_json_call
+INT64 = Group(keyword("int64")("op")) / to_json_call
+REAL = Group(keyword("real")("op")) / to_json_call
+TEXT = Group(keyword("text")("op")) / to_json_call
+SMALLINT = Group(keyword("smallint")("op")) / to_json_call
+STRING = Group(keyword("string")("op")) / to_json_call
+
+BLOB = (keyword("blob")("op") + _size) / to_json_call
+BYTES = (keyword("bytes")("op") + _size) / to_json_call
+CHAR = (keyword("char")("op") + _size) / to_json_call
+NCHAR = (keyword("nchar")("op") + _size) / to_json_call
+VARCHAR = (keyword("varchar")("op") + _size) / to_json_call
+VARCHAR2 = (keyword("varchar2")("op") + _size) / to_json_call
+VARBINARY = (keyword("varbinary")("op") + _size) / to_json_call
+TINYINT = (keyword("tinyint")("op") + _size) / to_json_call
+UUID = Group(keyword("uuid")("op")) / to_json_call
+
+DECIMAL = (keyword("decimal")("op") + _sizes) / to_json_call
+DOUBLE_PRECISION = (
+    Group((keyword("double precision") / (lambda: "double_precision"))("op"))
+    / to_json_call
+)
+NUMERIC = (keyword("numeric")("op") + _sizes) / to_json_call
+NUMBER = (keyword("number")("op") + _sizes) / to_json_call
+
+MAP_TYPE = (
+    keyword("map")("op") + LB + delimited_list(simple_types("params")) + RB
+) / to_json_call
+ARRAY_TYPE = (keyword("array")("op") + LB + simple_types("params") + RB) / to_json_call
+
+DATE = keyword("date")
+DATETIME = keyword("datetime")
+DATETIME_W_TIMEZONE = keyword("datetime with time zone")
+TIME = keyword("time")
+TIMESTAMP = keyword("timestamp")
+TIMESTAMP_W_TIMEZONE = keyword("timestamp with time zone")
+TIMESTAMPTZ = keyword("timestamptz")
+TIMETZ = keyword("timetz")
+
+time_functions = DATE | DATETIME | TIME | TIMESTAMP | TIMESTAMPTZ | TIMETZ
+
+# KNOWN TIME TYPES
+_format = Optional((ansi_string | ansi_ident)("params"))
+
+DATE_TYPE = (DATE("op") + _format) / to_json_call
+DATETIME_TYPE = (DATETIME("op") + _format) / to_json_call
+DATETIME_W_TIMEZONE_TYPE = (DATETIME_W_TIMEZONE("op") + _format) / to_json_call
+TIME_TYPE = (TIME("op") + _format) / to_json_call
+TIMESTAMP_TYPE = (TIMESTAMP("op") + _format) / to_json_call
+TIMESTAMP_W_TIMEZONE_TYPE = (TIMESTAMP_W_TIMEZONE("op") + _format) / to_json_call
+TIMESTAMPTZ_TYPE = (TIMESTAMPTZ("op") + _format) / to_json_call
+TIMETZ_TYPE = (TIMETZ("op") + _format) / to_json_call
+
+simple_types << MatchFirst([
+    ARRAY_TYPE,
+    BIGINT,
+    
BOOL, + BOOLEAN, + BLOB, + BYTES, + CHAR, + DATE_TYPE, + DATETIME_W_TIMEZONE_TYPE, + DATETIME_TYPE, + DECIMAL, + DOUBLE_PRECISION, + DOUBLE, + FLOAT64, + FLOAT, + GEOMETRY, + MAP_TYPE, + INTEGER, + INT, + INT32, + INT64, + NCHAR, + NUMBER, + NUMERIC, + REAL, + TEXT, + SMALLINT, + STRING, + TIME_TYPE, + TIMESTAMP_W_TIMEZONE_TYPE, + TIMESTAMP_TYPE, + TIMESTAMPTZ_TYPE, + TIMETZ_TYPE, + TINYINT, + UUID, + VARCHAR, + VARCHAR2, + VARBINARY, +]) + +CASTING = (Literal("::").suppress() + simple_types("params")).set_parser_name("cast") +KNOWN_OPS.insert(0, CASTING) + +unary_ops = { + NEG: RIGHT_ASSOC, + NOT: RIGHT_ASSOC, + BINARY_NOT: RIGHT_ASSOC, + CASTING: LEFT_ASSOC, +} + + +def get_column_type(expr, var_name, literal_string): + column_definition = Forward() + column_type = Forward().set_parser_name("column type") + + struct_type = ( + keyword("struct")("op") + + LT.suppress() + + Group(delimited_list(column_definition))("params") + + GT.suppress() + ) / to_json_call + + row_type = ( + keyword("row")("op") + + LB + + Group(delimited_list(column_definition))("params") + + RB + ) / to_json_call + + array_type = ( + keyword("array")("op") + + ( + ( + LT.suppress() + + Group(delimited_list(column_type))("params") + + GT.suppress() + ) + | (LB + Group(delimited_list(column_type))("params") + RB) + ) + ) / to_json_call + + column_type << (struct_type | row_type | array_type | simple_types) + + column_def_identity = ( + assign( + "generated", + (keyword("always") | keyword("by default") / (lambda: "by_default")), + ) + + keyword("as identity").suppress() + + Optional(assign("start with", int_num)) + + Optional(assign("increment by", int_num)) + ) + + column_def_references = assign( + "references", var_name("table") + LB + delimited_list(var_name)("columns") + RB, + ) + + column_options = ZeroOrMore( + ((NOT + NULL) / (lambda: False))("nullable") + | (NULL / (lambda t: True))("nullable") + | flag("unique") + | flag("auto_increment") + | assign("comment", literal_string) + | assign("collate", Optional(EQ) + var_name) + | flag("primary key") + | column_def_identity("identity") + | column_def_references + | assign("check", LB + expr + RB) + | assign("default", expr) + ).set_parser_name("column_options") + + column_definition << Group( + var_name("name") + (column_type | var_name)("type") + column_options + ).set_parser_name("column_definition") + + return column_type, column_definition, column_def_references + + +export("aquery_parser.utils", unary_ops) diff --git a/aquery_parser/utils.py b/aquery_parser/utils.py index 6aeaec5..6c161db 100644 --- a/aquery_parser/utils.py +++ b/aquery_parser/utils.py @@ -1,618 +1,618 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. 
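The type grammar above funnels every SQL type through `to_json_call` into one `{op: params}` shape, which `scrub()` (in utils.py below) then simplifies by collapsing singleton parameter lists. A minimal hand-rolled sketch of that output contract, using a regex stand-in rather than mo_parsing; the `parse_simple_type` helper is hypothetical, not part of the parser:

```python
import re

def parse_simple_type(text):
    # hypothetical stand-in for the mo_parsing grammar above: accept a type
    # name with an optional parenthesized list of integer size parameters
    m = re.fullmatch(r"\s*(\w+)\s*(?:\(\s*([\d\s,]*?)\s*\))?\s*", text)
    if not m:
        raise ValueError(f"not a simple type: {text!r}")
    op = m.group(1).lower()
    params = [int(p) for p in m.group(2).split(",")] if m.group(2) else []
    if not params:
        return {op: {}}            # bare name, e.g. BOOLEAN
    if len(params) == 1:
        return {op: params[0]}     # singleton list collapsed, e.g. VARCHAR(255)
    return {op: params}            # several sizes kept, e.g. DECIMAL(10, 2)

assert parse_simple_type("boolean") == {"boolean": {}}
assert parse_simple_type("varchar(255)") == {"varchar": 255}
assert parse_simple_type("decimal(10, 2)") == {"decimal": [10, 2]}
```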
-# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - -import ast - -from mo_dots import is_data, is_null, Data, from_data -from mo_future import text, number_types, binary_type, flatten -from mo_imports import expect -from mo_parsing import * -from mo_parsing.utils import is_number, listwrap - -unary_ops = expect("unary_ops") - - -class Call(object): - __slots__ = ["op", "args", "kwargs"] - - def __init__(self, op, args, kwargs): - self.op = op - self.args = args - self.kwargs = kwargs - - -IDENT_CHAR = Regex("[@_$0-9A-Za-zÀ-ÖØ-öø-ƿ]").expr.parser_config.include -FIRST_IDENT_CHAR = "".join(set(IDENT_CHAR) - set("0123456789")) -SQL_NULL = Call("null", [], {}) - -null_locations = [] - - -def keyword(keywords): - return And([ - Keyword(k, caseless=True) for k in keywords.split(" ") - ]).set_parser_name(keywords) / (lambda: keywords.replace(" ", "_")) - - -def flag(keywords): - """ - RETURN {keywords: True} - """ - return (keyword(keywords) / (lambda: True))(keywords.replace(" ", "_")) - - -def assign(key: str, value: ParserElement): - return keyword(key).suppress() + value(key.replace(" ", "_")) - - -def simple_op(op, args, kwargs): - if args is None: - kwargs[op] = {} - else: - kwargs[op] = args - return kwargs - - -def normal_op(op, args, kwargs): - output = Data(op=op) - args = listwrap(args) - if args and (not isinstance(args[0], dict) or args[0]): - output.args = args - if kwargs: - output.kwargs = kwargs - return from_data(output) - - -scrub_op = simple_op - - -def scrub(result): - if result is SQL_NULL: - return SQL_NULL - elif result == None: - return None - elif isinstance(result, text): - return result - elif isinstance(result, binary_type): - return result.decode("utf8") - elif isinstance(result, number_types): - return result - elif isinstance(result, Call): - kwargs = scrub(result.kwargs) - args = scrub(result.args) - if args is SQL_NULL: - null_locations.append((kwargs, result.op)) - return scrub_op(result.op, args, kwargs) - elif isinstance(result, dict) and not result: - return result - elif isinstance(result, list): - output = [rr for r in result for rr in [scrub(r)]] - - if not output: - return None - elif len(output) == 1: - return output[0] - else: - for i, v in enumerate(output): - if v is SQL_NULL: - null_locations.append((output, i)) - return output - else: - # ATTEMPT A DICT INTERPRETATION - try: - kv_pairs = list(result.items()) - except Exception as c: - print(c) - output = {k: vv for k, v in kv_pairs for vv in [scrub(v)] if not is_null(vv)} - if isinstance(result, dict) or output: - for k, v in output.items(): - if v is SQL_NULL: - null_locations.append((output, k)) - return output - return scrub(list(result)) - - -def _chunk(values, size): - acc = [] - for v in values: - acc.append(v) - if len(acc) == size: - yield acc - acc = [] - if acc: - yield acc - - -def to_lambda(tokens): - params, op, expr = list(tokens) - return Call("lambda", [expr], {"params": list(params)}) - - -def to_json_operator(tokens): - # ARRANGE INTO {op: params} FORMAT - length = len(tokens.tokens) - if length == 2: - if tokens.tokens[1].type.parser_name == "cast": - return Call("cast", list(tokens), {}) - # UNARY OPERATOR - op = tokens.tokens[0].type.parser_name - if op == "neg" and is_number(tokens[1]): - return -tokens[1] - return Call(op, [tokens[1]], {}) - elif length == 5: - # TRINARY OPERATOR - return Call( - tokens.tokens[1].type.parser_name, [tokens[0], tokens[2], tokens[4]], {} - ) - - op = tokens[1] - if not isinstance(op, text): - op = op.type.parser_name - op = 
binary_ops.get(op, op) - if op == "eq": - if tokens[2] is SQL_NULL: - return Call("missing", tokens[0], {}) - elif tokens[0] is SQL_NULL: - return Call("missing", tokens[2], {}) - elif op == "neq": - if tokens[2] is SQL_NULL: - return Call("exists", tokens[0], {}) - elif tokens[0] is SQL_NULL: - return Call("exists", tokens[2], {}) - elif op == "eq!": - if tokens[2] is SQL_NULL: - return Call("missing", tokens[0], {}) - elif tokens[0] is SQL_NULL: - return Call("missing", tokens[2], {}) - elif op == "ne!": - if tokens[2] is SQL_NULL: - return Call("exists", tokens[0], {}) - elif tokens[0] is SQL_NULL: - return Call("exists", tokens[2], {}) - elif op == "is": - if tokens[2] is SQL_NULL: - return Call("missing", tokens[0], {}) - else: - return Call("exists", tokens[0], {}) - elif op == "is_not": - if tokens[2] is SQL_NULL: - return Call("exists", tokens[0], {}) - else: - return Call("missing", tokens[0], {}) - - operands = [tokens[0], tokens[2]] - binary_op = Call(op, operands, {}) - - if op in {"add", "mul", "and", "or"}: - # ASSOCIATIVE OPERATORS - acc = [] - for operand in operands: - while isinstance(operand, ParseResults) and isinstance(operand.type, Group): - # PARENTHESES CAUSE EXTRA GROUP LAYERS - operand = operand[0] - if isinstance(operand, ParseResults) and isinstance( - operand.type, Forward - ): - operand = operand[0] - - if isinstance(operand, Call) and operand.op == op: - acc.extend(operand.args) - elif isinstance(operand, list): - acc.append(operand) - elif isinstance(operand, dict) and operand.get(op): - acc.extend(operand.get(op)) - else: - acc.append(operand) - binary_op = Call(op, acc, {}) - return binary_op - - -def to_offset(tokens): - expr, offset = tokens.tokens - return Call("get", [expr, offset], {}) - - -def to_window_mod(tokens): - expr, window = tokens.tokens - return Call("value", [expr], {**window}) - - -def to_tuple_call(tokens): - # IS THIS ONE VALUE IN (), OR MANY? 
- tokens = list(tokens) - if len(tokens) == 1: - return [tokens[0]] - if all(isinstance(r, number_types) for r in tokens): - return [tokens] - if all( - isinstance(r, number_types) or (is_data(r) and "literal" in r.keys()) - for r in tokens - ): - candidate = {"literal": [r["literal"] if is_data(r) else r for r in tokens]} - return candidate - - return [tokens] - - -binary_ops = { - "::": "cast", - "COLLATE": "collate", - "||": "concat", - "*": "mul", - "/": "div", - "%": "mod", - "+": "add", - "-": "sub", - "&": "binary_and", - "|": "binary_or", - "<": "lt", - "<=": "lte", - ">": "gt", - ">=": "gte", - "=": "eq", - "==": "eq", - "is distinct from": "eq!", # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ - "is_distinct_from": "eq!", - "is not distinct from": "ne!", - "is_not_distinct_from": "ne!", - "<=>": "eq!", # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ - "!=": "neq", - "<>": "neq", - "not in": "nin", - "in": "in", - "is_not": "neq", - "is": "eq", - "similar_to": "similar_to", - "like": "like", - "rlike": "rlike", - "not like": "not_like", - "not_like": "not_like", - "not rlike": "not_rlike", - "not_rlike": "not_rlike", - "not_simlilar_to": "not_similar_to", - "or": "or", - "and": "and", - "->": "lambda", - "union": "union", - "union_all": "union_all", - "union all": "union_all", - "except": "except", - "minus": "minus", - "intersect": "intersect", -} - -is_set_op = ("union", "union_all", "except", "minus", "intersect") - - -def to_trim_call(tokens): - frum = tokens["from"] - if not frum: - return Call("trim", [tokens["chars"]], {"direction": tokens["direction"]}) - return Call( - "trim", - [frum], - {"characters": tokens["chars"], "direction": tokens["direction"]}, - ) - - -def to_json_call(tokens): - # ARRANGE INTO {op: params} FORMAT - op = tokens["op"].lower() - op = binary_ops.get(op, op) - params = tokens["params"] - if isinstance(params, (dict, str, int, Call)): - args = [params] - else: - args = list(params) - - kwargs = {k: v for k, v in tokens.items() if k not in ("op", "params")} - - return ParseResults( - tokens.type, - tokens.start, - tokens.end, - [Call(op, args, kwargs)], - tokens.failures, - ) - - -def to_interval_call(tokens): - # ARRANGE INTO {interval: [amount, type]} FORMAT - params = tokens["params"] - if not params: - params = {} - if params.length() == 2: - return Call("interval", params, {}) - - return Call("add", [Call("interval", p, {}) for p in _chunk(params, size=2)], {}) - - -def to_case_call(tokens): - cases = list(tokens["case"]) - elze = tokens["else"] - if elze != None: - cases.append(elze) - return Call("case", cases, {}) - - -def to_switch_call(tokens): - # CONVERT TO CLASSIC CASE STATEMENT - value = tokens["value"] - acc = [] - for c in list(tokens["case"]): - acc.append(Call("when", [Call("eq", [value] + c.args, {})], c.kwargs)) - elze = tokens["else"] - if elze != None: - acc.append(elze) - return Call("case", acc, {}) - - -def to_when_call(tokens): - tok = tokens - return Call("when", [tok["when"]], {"then": tok["then"]}) - - -def to_join_call(tokens): - op = " ".join(tokens["op"]) - if tokens["join"]["name"]: - output = {op: { - "name": tokens["join"]["name"], - "value": tokens["join"]["value"], - }} - else: - output = {op: tokens["join"]} - - output["on"] = tokens["on"] - output["using"] = tokens["using"] - return output - - -def to_expression_call(tokens): - if set(tokens.keys()) & {"over", "within", "filter"}: - return - - return ParseResults( - tokens.type, - 
tokens.start, - tokens.end, - listwrap(tokens["value"]), - tokens.failures, - ) - - -def to_over(tokens): - if not tokens: - return {} - - -def to_alias(tokens): - cols = tokens["col"] - name = tokens["name"] - if cols: - return {name: cols} - return name - - -def to_top_clause(tokens): - value = tokens["value"] - if not value: - return None - - value = value.value() - if tokens["ties"]: - output = {} - output["ties"] = True - if tokens["percent"]: - output["percent"] = value - else: - output["value"] = value - return output - elif tokens["percent"]: - return {"percent": value} - else: - return [value] - - -def to_row(tokens): - columns = list(tokens) - if len(columns) > 1: - return {"select": [{"value": v[0]} for v in columns]} - else: - return {"select": {"value": columns[0]}} - - -def get_literal(value): - if isinstance(value, (int, float)): - return value - elif isinstance(value, Call): - return - elif value is SQL_NULL: - return value - elif "literal" in value: - return value["literal"] - - -def to_values(tokens): - rows = list(tokens) - if len(rows) > 1: - values = [ - [get_literal(s["value"]) for s in listwrap(row["select"])] for row in rows - ] - if all(flatten(values)): - return {"from": {"literal": values}} - return {"union_all": list(tokens)} - else: - return rows - - -def to_stack(tokens): - width = tokens["width"] - args = listwrap(tokens["args"]) - return Call("stack", args, {"width": width}) - - -def to_array(tokens): - types = list(tokens["type"]) - args = list(tokens["args"]) - output = Call("create_array", args, {}) - if types: - output = Call("cast", [output, Call("array", types, {})], {}) - return output - - -def to_map(tokens): - keys = tokens["keys"] - values = tokens["values"] - return Call("create_map", [keys, values], {}) - - -def to_struct(tokens): - types = list(tokens["types"]) - args = list(d for a in tokens["args"] for d in [a if a["name"] else a["value"]]) - - output = Call("create_struct", args, {}) - if types: - output = Call("cast", [output, Call("struct", types, {})], {}) - return output - - -def to_select_call(tokens): - expr = tokens["value"] - if expr == "*": - return ["*"] - try: - call = expr[0][0] - if call.op == "value": - return {"name": tokens["name"], "value": call.args, **call.kwargs} - except: - pass - - -def to_union_call(tokens): - unions = tokens["union"] - if isinstance(unions, dict): - return unions - elif unions.type.parser_name == "unordered sql": - output = {k: v for k, v in unions.items()} # REMOVE THE Group() - else: - unions = list(unions) - sources = [unions[i] for i in range(0, len(unions), 2)] - operators = ["_".join(unions[i]) for i in range(1, len(unions), 2)] - acc = sources[0] - last_union = None - for op, so in list(zip(operators, sources[1:])): - if op == last_union and "union" in op: - acc[op] = acc[op] + [so] - else: - acc = {op: [acc, so]} - last_union = op - - if not tokens["orderby"] and not tokens["offset"] and not tokens["limit"]: - return acc - else: - output = {"from": acc} - - output["orderby"] = tokens["orderby"] - output["limit"] = tokens["limit"] - output["offset"] = tokens["offset"] - output["fetch"] = tokens["fetch"] - output["outfile"] = tokens["outfile"] - return output - - -def to_insert_call(tokens): - options = { - k: v for k, v in tokens.items() if k not in ["columns", "table", "query"] - } - query = tokens["query"] - columns = tokens["columns"] - try: - values = query["from"]["literal"] - if values: - if columns: - data = [dict(zip(columns, row)) for row in values] - return Call("insert", 
[tokens["table"]], {"values": data, **options}) - else: - return Call("insert", [tokens["table"]], {"values": values, **options}) - except Exception: - pass - - return Call( - "insert", [tokens["table"]], {"columns": columns, "query": query, **options} - ) - - -def to_query(tokens): - output = tokens["query"][0] - try: - output["with"] = tokens["with"] - output["with_recursive"] = tokens["with_recursive"] - - return output - except Exception as cause: - return - - -def to_table(tokens): - output = dict(tokens) - if len(list(output.keys())) > 1: - return output - else: - return output["value"] - - -def unquote(tokens): - val = tokens[0] - if val.startswith("'") and val.endswith("'"): - val = "'" + val[1:-1].replace("''", "\\'") + "'" - elif val.startswith('"') and val.endswith('"'): - val = '"' + val[1:-1].replace('""', '\\"') + '"' - elif val.startswith("`") and val.endswith("`"): - val = '"' + val[1:-1].replace("``", "`").replace('"', '\\"') + '"' - elif val.startswith("[") and val.endswith("]"): - val = '"' + val[1:-1].replace("]]", "]").replace('"', '\\"') + '"' - elif val.startswith("+"): - val = val[1:] - un = ast.literal_eval(val).replace(".", "\\.") - return un - - -def to_string(tokens): - val = tokens[0] - val = "'" + val[1:-1].replace("''", "\\'") + "'" - return {"literal": ast.literal_eval(val)} - - -# NUMBERS -real_num = ( - Regex(r"[+-]?(\d+\.\d*|\.\d+)([eE][+-]?\d+)?").set_parser_name("float") - / (lambda t: float(t[0])) -) - - -def parse_int(tokens): - if "e" in tokens[0].lower(): - return int(float(tokens[0])) - else: - return int(tokens[0]) - - -int_num = Regex(r"[+-]?\d+([eE]\+?\d+)?").set_parser_name("int") / parse_int -hex_num = ( - Regex(r"0x[0-9a-fA-F]+").set_parser_name("hex") / (lambda t: {"hex": t[0][2:]}) -) - -# STRINGS -ansi_string = Regex(r"\'(\'\'|[^'])*\'") / to_string -mysql_doublequote_string = Regex(r'\"(\"\"|[^"])*\"') / to_string - -# BASIC IDENTIFIERS -ansi_ident = Regex(r'\"(\"\"|[^"])*\"') / unquote -mysql_backtick_ident = Regex(r"\`(\`\`|[^`])*\`") / unquote -sqlserver_ident = Regex(r"\[(\]\]|[^\]])*\]") / unquote +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) +# + +import ast + +from mo_dots import is_data, is_null, Data, from_data +from mo_future import text, number_types, binary_type, flatten +from mo_imports import expect +from mo_parsing import * +from mo_parsing.utils import is_number, listwrap + +unary_ops = expect("unary_ops") + + +class Call(object): + __slots__ = ["op", "args", "kwargs"] + + def __init__(self, op, args, kwargs): + self.op = op + self.args = args + self.kwargs = kwargs + + +IDENT_CHAR = Regex("[@_$0-9A-Za-zÀ-ÖØ-öø-ƿ]").expr.parser_config.include +FIRST_IDENT_CHAR = "".join(set(IDENT_CHAR) - set("0123456789")) +SQL_NULL = Call("null", [], {}) + +null_locations = [] + + +def keyword(keywords): + return And([ + Keyword(k, caseless=True) for k in keywords.split(" ") + ]).set_parser_name(keywords) / (lambda: keywords.replace(" ", "_")) + + +def flag(keywords): + """ + RETURN {keywords: True} + """ + return (keyword(keywords) / (lambda: True))(keywords.replace(" ", "_")) + + +def assign(key: str, value: ParserElement): + return keyword(key).suppress() + value(key.replace(" ", "_")) + + +def simple_op(op, args, kwargs): + if args is None: + kwargs[op] = {} + else: + kwargs[op] = args + return kwargs + + +def normal_op(op, args, kwargs): + output = Data(op=op) + args = listwrap(args) + if args and (not isinstance(args[0], dict) or args[0]): + output.args = args + if kwargs: + output.kwargs = kwargs + return from_data(output) + + +scrub_op = simple_op + + +def scrub(result): + if result is SQL_NULL: + return SQL_NULL + elif result == None: + return None + elif isinstance(result, text): + return result + elif isinstance(result, binary_type): + return result.decode("utf8") + elif isinstance(result, number_types): + return result + elif isinstance(result, Call): + kwargs = scrub(result.kwargs) + args = scrub(result.args) + if args is SQL_NULL: + null_locations.append((kwargs, result.op)) + return scrub_op(result.op, args, kwargs) + elif isinstance(result, dict) and not result: + return result + elif isinstance(result, list): + output = [rr for r in result for rr in [scrub(r)]] + + if not output: + return None + elif len(output) == 1: + return output[0] + else: + for i, v in enumerate(output): + if v is SQL_NULL: + null_locations.append((output, i)) + return output + else: + # ATTEMPT A DICT INTERPRETATION + try: + kv_pairs = list(result.items()) + except Exception as c: + print(c) + output = {k: vv for k, v in kv_pairs for vv in [scrub(v)] if not is_null(vv)} + if isinstance(result, dict) or output: + for k, v in output.items(): + if v is SQL_NULL: + null_locations.append((output, k)) + return output + return scrub(list(result)) + + +def _chunk(values, size): + acc = [] + for v in values: + acc.append(v) + if len(acc) == size: + yield acc + acc = [] + if acc: + yield acc + + +def to_lambda(tokens): + params, op, expr = list(tokens) + return Call("lambda", [expr], {"params": list(params)}) + + +def to_json_operator(tokens): + # ARRANGE INTO {op: params} FORMAT + length = len(tokens.tokens) + if length == 2: + if tokens.tokens[1].type.parser_name == "cast": + return Call("cast", list(tokens), {}) + # UNARY OPERATOR + op = tokens.tokens[0].type.parser_name + if op == "neg" and is_number(tokens[1]): + return -tokens[1] + return Call(op, [tokens[1]], {}) + elif length == 5: + # TRINARY OPERATOR + return Call( + tokens.tokens[1].type.parser_name, [tokens[0], tokens[2], tokens[4]], {} + ) + + op = tokens[1] + if not isinstance(op, text): + op = op.type.parser_name + op = 
binary_ops.get(op, op) + if op == "eq": + if tokens[2] is SQL_NULL: + return Call("missing", tokens[0], {}) + elif tokens[0] is SQL_NULL: + return Call("missing", tokens[2], {}) + elif op == "neq": + if tokens[2] is SQL_NULL: + return Call("exists", tokens[0], {}) + elif tokens[0] is SQL_NULL: + return Call("exists", tokens[2], {}) + elif op == "eq!": + if tokens[2] is SQL_NULL: + return Call("missing", tokens[0], {}) + elif tokens[0] is SQL_NULL: + return Call("missing", tokens[2], {}) + elif op == "ne!": + if tokens[2] is SQL_NULL: + return Call("exists", tokens[0], {}) + elif tokens[0] is SQL_NULL: + return Call("exists", tokens[2], {}) + elif op == "is": + if tokens[2] is SQL_NULL: + return Call("missing", tokens[0], {}) + else: + return Call("exists", tokens[0], {}) + elif op == "is_not": + if tokens[2] is SQL_NULL: + return Call("exists", tokens[0], {}) + else: + return Call("missing", tokens[0], {}) + + operands = [tokens[0], tokens[2]] + binary_op = Call(op, operands, {}) + + if op in {"add", "mul", "and", "or"}: + # ASSOCIATIVE OPERATORS + acc = [] + for operand in operands: + while isinstance(operand, ParseResults) and isinstance(operand.type, Group): + # PARENTHESES CAUSE EXTRA GROUP LAYERS + operand = operand[0] + if isinstance(operand, ParseResults) and isinstance( + operand.type, Forward + ): + operand = operand[0] + + if isinstance(operand, Call) and operand.op == op: + acc.extend(operand.args) + elif isinstance(operand, list): + acc.append(operand) + elif isinstance(operand, dict) and operand.get(op): + acc.extend(operand.get(op)) + else: + acc.append(operand) + binary_op = Call(op, acc, {}) + return binary_op + + +def to_offset(tokens): + expr, offset = tokens.tokens + return Call("get", [expr, offset], {}) + + +def to_window_mod(tokens): + expr, window = tokens.tokens + return Call("value", [expr], {**window}) + + +def to_tuple_call(tokens): + # IS THIS ONE VALUE IN (), OR MANY? 
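+    # e.g. `x IN (1, 2, 3)` must keep all three values as one literal list,
+    # while `(x + 1)` is just a parenthesized expression; the checks below
+    # distinguish the two cases.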
+ tokens = list(tokens) + if len(tokens) == 1: + return [tokens[0]] + if all(isinstance(r, number_types) for r in tokens): + return [tokens] + if all( + isinstance(r, number_types) or (is_data(r) and "literal" in r.keys()) + for r in tokens + ): + candidate = {"literal": [r["literal"] if is_data(r) else r for r in tokens]} + return candidate + + return [tokens] + + +binary_ops = { + "::": "cast", + "COLLATE": "collate", + "||": "concat", + "*": "mul", + "/": "div", + "%": "mod", + "+": "add", + "-": "sub", + "&": "binary_and", + "|": "binary_or", + "<": "lt", + "<=": "lte", + ">": "gt", + ">=": "gte", + "=": "eq", + "==": "eq", + "is distinct from": "eq!", # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ + "is_distinct_from": "eq!", + "is not distinct from": "ne!", + "is_not_distinct_from": "ne!", + "<=>": "eq!", # https://sparkbyexamples.com/apache-hive/hive-relational-arithmetic-logical-operators/ + "!=": "neq", + "<>": "neq", + "not in": "nin", + "in": "in", + "is_not": "neq", + "is": "eq", + "similar_to": "similar_to", + "like": "like", + "rlike": "rlike", + "not like": "not_like", + "not_like": "not_like", + "not rlike": "not_rlike", + "not_rlike": "not_rlike", + "not_simlilar_to": "not_similar_to", + "or": "or", + "and": "and", + "->": "lambda", + "union": "union", + "union_all": "union_all", + "union all": "union_all", + "except": "except", + "minus": "minus", + "intersect": "intersect", +} + +is_set_op = ("union", "union_all", "except", "minus", "intersect") + + +def to_trim_call(tokens): + frum = tokens["from"] + if not frum: + return Call("trim", [tokens["chars"]], {"direction": tokens["direction"]}) + return Call( + "trim", + [frum], + {"characters": tokens["chars"], "direction": tokens["direction"]}, + ) + + +def to_json_call(tokens): + # ARRANGE INTO {op: params} FORMAT + op = tokens["op"].lower() + op = binary_ops.get(op, op) + params = tokens["params"] + if isinstance(params, (dict, str, int, Call)): + args = [params] + else: + args = list(params) + + kwargs = {k: v for k, v in tokens.items() if k not in ("op", "params")} + + return ParseResults( + tokens.type, + tokens.start, + tokens.end, + [Call(op, args, kwargs)], + tokens.failures, + ) + + +def to_interval_call(tokens): + # ARRANGE INTO {interval: [amount, type]} FORMAT + params = tokens["params"] + if not params: + params = {} + if params.length() == 2: + return Call("interval", params, {}) + + return Call("add", [Call("interval", p, {}) for p in _chunk(params, size=2)], {}) + + +def to_case_call(tokens): + cases = list(tokens["case"]) + elze = tokens["else"] + if elze != None: + cases.append(elze) + return Call("case", cases, {}) + + +def to_switch_call(tokens): + # CONVERT TO CLASSIC CASE STATEMENT + value = tokens["value"] + acc = [] + for c in list(tokens["case"]): + acc.append(Call("when", [Call("eq", [value] + c.args, {})], c.kwargs)) + elze = tokens["else"] + if elze != None: + acc.append(elze) + return Call("case", acc, {}) + + +def to_when_call(tokens): + tok = tokens + return Call("when", [tok["when"]], {"then": tok["then"]}) + + +def to_join_call(tokens): + op = " ".join(tokens["op"]) + if tokens["join"]["name"]: + output = {op: { + "name": tokens["join"]["name"], + "value": tokens["join"]["value"], + }} + else: + output = {op: tokens["join"]} + + output["on"] = tokens["on"] + output["using"] = tokens["using"] + return output + + +def to_expression_call(tokens): + if set(tokens.keys()) & {"over", "within", "filter"}: + return + + return ParseResults( + tokens.type, + 
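+        # the token's start/end positions are forwarded unchanged, so
+        # downstream consumers keep the original source offsets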
tokens.start, + tokens.end, + listwrap(tokens["value"]), + tokens.failures, + ) + + +def to_over(tokens): + if not tokens: + return {} + + +def to_alias(tokens): + cols = tokens["col"] + name = tokens["name"] + if cols: + return {name: cols} + return name + + +def to_top_clause(tokens): + value = tokens["value"] + if not value: + return None + + value = value.value() + if tokens["ties"]: + output = {} + output["ties"] = True + if tokens["percent"]: + output["percent"] = value + else: + output["value"] = value + return output + elif tokens["percent"]: + return {"percent": value} + else: + return [value] + + +def to_row(tokens): + columns = list(tokens) + if len(columns) > 1: + return {"select": [{"value": v[0]} for v in columns]} + else: + return {"select": {"value": columns[0]}} + + +def get_literal(value): + if isinstance(value, (int, float)): + return value + elif isinstance(value, Call): + return + elif value is SQL_NULL: + return value + elif "literal" in value: + return value["literal"] + + +def to_values(tokens): + rows = list(tokens) + if len(rows) > 1: + values = [ + [get_literal(s["value"]) for s in listwrap(row["select"])] for row in rows + ] + if all(flatten(values)): + return {"from": {"literal": values}} + return {"union_all": list(tokens)} + else: + return rows + + +def to_stack(tokens): + width = tokens["width"] + args = listwrap(tokens["args"]) + return Call("stack", args, {"width": width}) + + +def to_array(tokens): + types = list(tokens["type"]) + args = list(tokens["args"]) + output = Call("create_array", args, {}) + if types: + output = Call("cast", [output, Call("array", types, {})], {}) + return output + + +def to_map(tokens): + keys = tokens["keys"] + values = tokens["values"] + return Call("create_map", [keys, values], {}) + + +def to_struct(tokens): + types = list(tokens["types"]) + args = list(d for a in tokens["args"] for d in [a if a["name"] else a["value"]]) + + output = Call("create_struct", args, {}) + if types: + output = Call("cast", [output, Call("struct", types, {})], {}) + return output + + +def to_select_call(tokens): + expr = tokens["value"] + if expr == "*": + return ["*"] + try: + call = expr[0][0] + if call.op == "value": + return {"name": tokens["name"], "value": call.args, **call.kwargs} + except: + pass + + +def to_union_call(tokens): + unions = tokens["union"] + if isinstance(unions, dict): + return unions + elif unions.type.parser_name == "unordered sql": + output = {k: v for k, v in unions.items()} # REMOVE THE Group() + else: + unions = list(unions) + sources = [unions[i] for i in range(0, len(unions), 2)] + operators = ["_".join(unions[i]) for i in range(1, len(unions), 2)] + acc = sources[0] + last_union = None + for op, so in list(zip(operators, sources[1:])): + if op == last_union and "union" in op: + acc[op] = acc[op] + [so] + else: + acc = {op: [acc, so]} + last_union = op + + if not tokens["orderby"] and not tokens["offset"] and not tokens["limit"]: + return acc + else: + output = {"from": acc} + + output["orderby"] = tokens["orderby"] + output["limit"] = tokens["limit"] + output["offset"] = tokens["offset"] + output["fetch"] = tokens["fetch"] + output["outfile"] = tokens["outfile"] + return output + + +def to_insert_call(tokens): + options = { + k: v for k, v in tokens.items() if k not in ["columns", "table", "query"] + } + query = tokens["query"] + columns = tokens["columns"] + try: + values = query["from"]["literal"] + if values: + if columns: + data = [dict(zip(columns, row)) for row in values] + return Call("insert", 
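+                # literal VALUES rows plus explicit column names become one
+                # {column: value} dict per row (built as `data` above)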
[tokens["table"]], {"values": data, **options}) + else: + return Call("insert", [tokens["table"]], {"values": values, **options}) + except Exception: + pass + + return Call( + "insert", [tokens["table"]], {"columns": columns, "query": query, **options} + ) + + +def to_query(tokens): + output = tokens["query"][0] + try: + output["with"] = tokens["with"] + output["with_recursive"] = tokens["with_recursive"] + + return output + except Exception as cause: + return + + +def to_table(tokens): + output = dict(tokens) + if len(list(output.keys())) > 1: + return output + else: + return output["value"] + + +def unquote(tokens): + val = tokens[0] + if val.startswith("'") and val.endswith("'"): + val = "'" + val[1:-1].replace("''", "\\'") + "'" + elif val.startswith('"') and val.endswith('"'): + val = '"' + val[1:-1].replace('""', '\\"') + '"' + elif val.startswith("`") and val.endswith("`"): + val = '"' + val[1:-1].replace("``", "`").replace('"', '\\"') + '"' + elif val.startswith("[") and val.endswith("]"): + val = '"' + val[1:-1].replace("]]", "]").replace('"', '\\"') + '"' + elif val.startswith("+"): + val = val[1:] + un = ast.literal_eval(val).replace(".", "\\.") + return un + + +def to_string(tokens): + val = tokens[0] + val = "'" + val[1:-1].replace("''", "\\'") + "'" + return {"literal": ast.literal_eval(val)} + + +# NUMBERS +real_num = ( + Regex(r"[+-]?(\d+\.\d*|\.\d+)([eE][+-]?\d+)?").set_parser_name("float") + / (lambda t: float(t[0])) +) + + +def parse_int(tokens): + if "e" in tokens[0].lower(): + return int(float(tokens[0])) + else: + return int(tokens[0]) + + +int_num = Regex(r"[+-]?\d+([eE]\+?\d+)?").set_parser_name("int") / parse_int +hex_num = ( + Regex(r"0x[0-9a-fA-F]+").set_parser_name("hex") / (lambda t: {"hex": t[0][2:]}) +) + +# STRINGS +ansi_string = Regex(r"\'(\'\'|[^'])*\'") / to_string +mysql_doublequote_string = Regex(r'\"(\"\"|[^"])*\"') / to_string + +# BASIC IDENTIFIERS +ansi_ident = Regex(r'\"(\"\"|[^"])*\"') / unquote +mysql_backtick_ident = Regex(r"\`(\`\`|[^`])*\`") / unquote +sqlserver_ident = Regex(r"\[(\]\]|[^\]])*\]") / unquote diff --git a/aquery_parser/windows.py b/aquery_parser/windows.py index defc2c4..0c0e450 100644 --- a/aquery_parser/windows.py +++ b/aquery_parser/windows.py @@ -1,107 +1,107 @@ -# encoding: utf-8 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this file, -# You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) -# - -from __future__ import absolute_import, division, unicode_literals - -from mo_parsing.infix import delimited_list - -from aquery_parser.keywords import * -from aquery_parser.utils import * - - -# https://docs.microsoft.com/en-us/sql/t-sql/queries/select-over-clause-transact-sql?view=sql-server-ver15 - - -def _to_bound_call(tokens): - zero = tokens["zero"] - if zero: - return {"min": 0, "max": 0} - - direction = scrub(tokens["direction"]) - limit = scrub(tokens["limit"]) - if direction == "preceding": - if limit == "unbounded": - return {"max": 0} - elif is_data(limit): - return {"min": {"neg": limit}, "max": 0} - else: - return {"min": -limit, "max": 0} - else: # following - if limit == "unbounded": - return {"min": 0} - elif is_data(limit): - return {"min": {"neg": limit}, "max": 0} - else: - return {"min": 0, "max": limit} - - -def _to_between_call(tokens): - minn = scrub(tokens["min"]) - maxx = scrub(tokens["max"]) - - if maxx.get("max") == 0: - # following - return { - "min": minn.get("min"), - "max": maxx.get("min"), - } - elif minn.get("min") == 0: - # preceding - return {"min": minn.get("max"), "max": maxx.get("max")} - else: - return { - "min": minn.get("min"), - "max": maxx.get("max"), - } - - -UNBOUNDED = keyword("unbounded") -PRECEDING = keyword("preceding") -FOLLOWING = keyword("following") -CURRENT_ROW = keyword("current row") -ROWS = keyword("rows") -RANGE = keyword("range") - - -def window(expr, var_name, sort_column): - bound_row = ( - CURRENT_ROW("zero") - | (UNBOUNDED | int_num)("limit") + (PRECEDING | FOLLOWING)("direction") - ) / _to_bound_call - bound_expr = ( - CURRENT_ROW("zero") - | (UNBOUNDED | expr)("limit") + (PRECEDING | FOLLOWING)("direction") - ) / _to_bound_call - between_row = ( - BETWEEN + bound_row("min") + AND + bound_row("max") - ) / _to_between_call - between_expr = ( - BETWEEN + bound_expr("min") + AND + bound_expr("max") - ) / _to_between_call - - row_clause = (ROWS.suppress() + (between_row | bound_row)) | ( - RANGE.suppress() + (between_expr | bound_expr) - ) - - over_clause = ( - LB - + Optional(PARTITION_BY + delimited_list(Group(expr))("partitionby")) - + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) - + Optional(row_clause("range")) - + RB - ) - - window_clause = Optional(( - WITHIN_GROUP - + LB - + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) - + RB - )("within")) + ((OVER + (over_clause | var_name) / to_over)("over")) - - return window_clause, over_clause +# encoding: utf-8 +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
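The frame-bound helpers below encode window bounds as offsets relative to the current row. A small sketch of the mapping `_to_bound_call` implements for the common cases (the `bound` helper is hypothetical, and the expression-valued limits are omitted):

```python
def bound(direction=None, limit=None, current_row=False):
    # CURRENT ROW pins both ends of the bound at offset 0
    if current_row:
        return {"min": 0, "max": 0}
    if direction == "preceding":
        # "N PRECEDING" reaches back N rows; UNBOUNDED drops the min entirely
        return {"max": 0} if limit == "unbounded" else {"min": -limit, "max": 0}
    # following: "N FOLLOWING" reaches forward N rows
    return {"min": 0} if limit == "unbounded" else {"min": 0, "max": limit}

# ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
assert bound("preceding", 2) == {"min": -2, "max": 0}
assert bound(current_row=True) == {"min": 0, "max": 0}
assert bound("following", "unbounded") == {"min": 0}
```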
+# +# Contact: Kyle Lahnakoski (kyle@lahnakoski.com) +# + +from __future__ import absolute_import, division, unicode_literals + +from mo_parsing.infix import delimited_list + +from aquery_parser.keywords import * +from aquery_parser.utils import * + + +# https://docs.microsoft.com/en-us/sql/t-sql/queries/select-over-clause-transact-sql?view=sql-server-ver15 + + +def _to_bound_call(tokens): + zero = tokens["zero"] + if zero: + return {"min": 0, "max": 0} + + direction = scrub(tokens["direction"]) + limit = scrub(tokens["limit"]) + if direction == "preceding": + if limit == "unbounded": + return {"max": 0} + elif is_data(limit): + return {"min": {"neg": limit}, "max": 0} + else: + return {"min": -limit, "max": 0} + else: # following + if limit == "unbounded": + return {"min": 0} + elif is_data(limit): + return {"min": {"neg": limit}, "max": 0} + else: + return {"min": 0, "max": limit} + + +def _to_between_call(tokens): + minn = scrub(tokens["min"]) + maxx = scrub(tokens["max"]) + + if maxx.get("max") == 0: + # following + return { + "min": minn.get("min"), + "max": maxx.get("min"), + } + elif minn.get("min") == 0: + # preceding + return {"min": minn.get("max"), "max": maxx.get("max")} + else: + return { + "min": minn.get("min"), + "max": maxx.get("max"), + } + + +UNBOUNDED = keyword("unbounded") +PRECEDING = keyword("preceding") +FOLLOWING = keyword("following") +CURRENT_ROW = keyword("current row") +ROWS = keyword("rows") +RANGE = keyword("range") + + +def window(expr, var_name, sort_column): + bound_row = ( + CURRENT_ROW("zero") + | (UNBOUNDED | int_num)("limit") + (PRECEDING | FOLLOWING)("direction") + ) / _to_bound_call + bound_expr = ( + CURRENT_ROW("zero") + | (UNBOUNDED | expr)("limit") + (PRECEDING | FOLLOWING)("direction") + ) / _to_bound_call + between_row = ( + BETWEEN + bound_row("min") + AND + bound_row("max") + ) / _to_between_call + between_expr = ( + BETWEEN + bound_expr("min") + AND + bound_expr("max") + ) / _to_between_call + + row_clause = (ROWS.suppress() + (between_row | bound_row)) | ( + RANGE.suppress() + (between_expr | bound_expr) + ) + + over_clause = ( + LB + + Optional(PARTITION_BY + delimited_list(Group(expr))("partitionby")) + + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) + + Optional(row_clause("range")) + + RB + ) + + window_clause = Optional(( + WITHIN_GROUP + + LB + + Optional(ORDER_BY + delimited_list(Group(sort_column))("orderby")) + + RB + )("within")) + ((OVER + (over_clause | var_name) / to_over)("over")) + + return window_clause, over_clause diff --git a/build_instructions.txt b/build_instructions.txt index d5d8297..65e0b99 100644 --- a/build_instructions.txt +++ b/build_instructions.txt @@ -1,14 +1,14 @@ -## Windows -- clang-msvc: - - "%CXX%" -D_CRT_SECURE_NO_WARNINGS -shared server/server.cpp server/winhelper.cpp server/monetdb_conn.cpp -Imonetdb/msvc -Lmonetdb/msvc -lmonetdbe.lib --std=c++2a -o server.so - - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) -- gcc-mingw (link w/ msvc monetdb): - - "%CXX%" -shared -fPIC server/server.cpp server/winhelper.cpp server/monetdb_conn.cpp -Imonetdb/msvc msc-plugin/monetdbe.dll --std=c++2a -o server.so - - os.add_dll_directory('c:/msys64/usr/bin') - - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) -- gcc-mingw (link w/ mingw monetdb, can only load under mingw python): - - $(CXX) server/server.cpp server/monetdb_conn.cpp -fPIC -shared $(OS_SUPPORT) --std=c++1z -O3 -march=native -o server.so -I./monetdb/msys64 -L./lib -lmonetdbe - - add_dll_dir(os.path.abspath('./lib')) -- 
msvc: - - D:\gg\vs22\MSBuild\Current\Bin\msbuild "d:\gg\AQuery++\server\server.vcxproj" /p:configuration=Release /p:platform=x64 - - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) +## Windows +- clang-msvc: + - "%CXX%" -D_CRT_SECURE_NO_WARNINGS -shared server/server.cpp server/winhelper.cpp server/monetdb_conn.cpp -Imonetdb/msvc -Lmonetdb/msvc -lmonetdbe.lib --std=c++2a -o server.so + - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) +- gcc-mingw (link w/ msvc monetdb): + - "%CXX%" -shared -fPIC server/server.cpp server/winhelper.cpp server/monetdb_conn.cpp -Imonetdb/msvc msc-plugin/monetdbe.dll --std=c++2a -o server.so + - os.add_dll_directory('c:/msys64/usr/bin') + - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) +- gcc-mingw (link w/ mingw monetdb, can only load under mingw python): + - $(CXX) server/server.cpp server/monetdb_conn.cpp -fPIC -shared $(OS_SUPPORT) --std=c++1z -O3 -march=native -o server.so -I./monetdb/msys64 -L./lib -lmonetdbe + - add_dll_dir(os.path.abspath('./lib')) +- msvc: + - D:\gg\vs22\MSBuild\Current\Bin\msbuild "d:\gg\AQuery++\server\server.vcxproj" /p:configuration=Release /p:platform=x64 + - os.add_dll_directory(os.path.abspath('./monetdb/msvc')) diff --git a/data/test.csv b/data/test.csv index 5eb9e8f..443a93e 100644 --- a/data/test.csv +++ b/data/test.csv @@ -1,11 +1,11 @@ -a, b, c, d -1,1,2,2 -1,2,2,2 -1,2,3,4 -4,2,1,4 -2,1,3,4 -1,2,3,4 -1,2,3,3 -3,2,1,2 -2,1,2,2 -1,2,3,1 +a, b, c, d +1,1,2,2 +1,2,2,2 +1,2,3,4 +4,2,1,4 +2,1,3,4 +1,2,3,4 +1,2,3,3 +3,2,1,2 +2,1,2,2 +1,2,3,1 diff --git a/dbconn.py b/dbconn.py index 443fd23..d6a283a 100644 --- a/dbconn.py +++ b/dbconn.py @@ -1,45 +1,45 @@ -import mariadb - -class dbconn: - def __init__(self) -> None: - self.db = None - self.cur = None - def clear(self): - drop_all = f''' - SET FOREIGN_KEY_CHECKS = 0; - - SET @tables = NULL; - - SELECT GROUP_CONCAT('`', table_schema, '`.`', table_name, '`') INTO @tables - FROM information_schema.tables - WHERE table_schema = '{self.db.database}'; - - SET @tables = CONCAT('DROP TABLE ', @tables); - PREPARE stmt FROM @tables; - EXECUTE stmt; - DEALLOCATE PREPARE stmt; - SET FOREIGN_KEY_CHECKS = 1; - ''' - if self.db: - if not self.cur: - self.cur = self.db.cursor() - self.cur.execute(drop_all) - - def connect(self, ip, password = '0508', user = 'root', db = 'db', port = 3306): - try: - self.db = mariadb.connect( - user = user, - password = password, - host = ip, - port = port, - database = db - ) - self.cur = self.db.cursor() - - except mariadb.Error as e: - print(e) - self.db = None - self.cur = None - - def exec(self, sql, params = None): +import mariadb + +class dbconn: + def __init__(self) -> None: + self.db = None + self.cur = None + def clear(self): + drop_all = f''' + SET FOREIGN_KEY_CHECKS = 0; + + SET @tables = NULL; + + SELECT GROUP_CONCAT('`', table_schema, '`.`', table_name, '`') INTO @tables + FROM information_schema.tables + WHERE table_schema = '{self.db.database}'; + + SET @tables = CONCAT('DROP TABLE ', @tables); + PREPARE stmt FROM @tables; + EXECUTE stmt; + DEALLOCATE PREPARE stmt; + SET FOREIGN_KEY_CHECKS = 1; + ''' + if self.db: + if not self.cur: + self.cur = self.db.cursor() + self.cur.execute(drop_all) + + def connect(self, ip, password = '0508', user = 'root', db = 'db', port = 3306): + try: + self.db = mariadb.connect( + user = user, + password = password, + host = ip, + port = port, + database = db + ) + self.cur = self.db.cursor() + + except mariadb.Error as e: + print(e) + self.db = None + self.cur = None + + def exec(self, sql, 
params = None):
+        if params is None:
+            self.cur.execute(sql)
+        else:
+            self.cur.execute(sql, params)
\ No newline at end of file
diff --git a/engine/__init__.py b/engine/__init__.py
index f3cfdbb..e3d3498 100644
--- a/engine/__init__.py
+++ b/engine/__init__.py
@@ -1,37 +1,37 @@
-from engine.ast import Context, ast_node
-saved_cxt = None
-
-def initialize(cxt = None, keep = False):
-    global saved_cxt
-    if cxt is None or not keep or type(cxt) is not Context:
-        if saved_cxt is None or not keep:
-            cxt = Context()
-            saved_cxt = cxt
-        else:
-            cxt = saved_cxt
-            cxt.new()
-
-    return cxt
-
-def generate(ast, cxt):
-    for k in ast.keys():
-        if k in ast_node.types.keys():
-            root = ast_node.types[k](None, ast, cxt)
-
-def exec(stmts, cxt = None, keep = None):
-    cxt = initialize(cxt, keep)
-    stmts_stmts = stmts['stmts']
-    if type(stmts_stmts) is list:
-        for s in stmts_stmts:
-            generate(s, cxt)
-    else:
-        generate(stmts_stmts, cxt)
-
-    cxt.Info(cxt.ccode)
-    with open('out.cpp', 'wb') as outfile:
-        outfile.write((cxt.finalize()).encode('utf-8'))
-
-    return cxt
-
-
-__all__ = ["initialize", "generate", "exec", "saved_cxt"]
+from engine.ast import Context, ast_node
+saved_cxt = None
+
+def initialize(cxt = None, keep = False):
+    global saved_cxt
+    if cxt is None or not keep or type(cxt) is not Context:
+        if saved_cxt is None or not keep:
+            cxt = Context()
+            saved_cxt = cxt
+        else:
+            cxt = saved_cxt
+            cxt.new()
+
+    return cxt
+
+def generate(ast, cxt):
+    for k in ast.keys():
+        if k in ast_node.types.keys():
+            root = ast_node.types[k](None, ast, cxt)
+
+def exec(stmts, cxt = None, keep = None):
+    cxt = initialize(cxt, keep)
+    stmts_stmts = stmts['stmts']
+    if type(stmts_stmts) is list:
+        for s in stmts_stmts:
+            generate(s, cxt)
+    else:
+        generate(stmts_stmts, cxt)
+
+    cxt.Info(cxt.ccode)
+    with open('out.cpp', 'wb') as outfile:
+        outfile.write((cxt.finalize()).encode('utf-8'))
+
+    return cxt
+
+
+__all__ = ["initialize", "generate", "exec", "saved_cxt"]
diff --git a/engine/ast.py b/engine/ast.py
index f53f46b..f0a9ef1 100644
--- a/engine/ast.py
+++ b/engine/ast.py
@@ -1,373 +1,373 @@
-from engine.utils import base62uuid
-from copy import copy
-from typing import *
-# replace column info with this later.
-class ColRef:
-    def __init__(self, cname, _ty, cobj, cnt, table:'TableInfo', name, id, compound = False):
-        self.cname = cname # column object location
-        self.cxt_name = None # column object in context
-        self.type = _ty
-        self.cobj = cobj
-        self.cnt = cnt
-        self.table = table
-        self.name = name
-        self.id = id # position in table
-        self.order_pending = None # order_pending
-        self.compound = compound # compound field (list as a field)
-        self.views = []
-        self.aux_columns = [] # columns for temperary calculations
-        # e.g.
order by, group by, filter by expressions - - self.__arr__ = (cname, _ty, cobj, cnt, table, name, id) - - def reference(self): - cxt = self.table.cxt - self.table.reference() - if self not in cxt.columns_in_context: - counter = 0 - base_name = self.table.table_name + '_' + self.name - if base_name in cxt.columns_in_context.values(): - while (f'{base_name}_{counter}') in cxt.columns_in_context.values(): - counter += 1 - base_name = f'{base_name}_{counter}' - self.cxt_name = base_name - cxt.columns_in_context[self] = base_name - # TODO: change this to cname; - cxt.emit(f'auto& {base_name} = *(ColRef<{self.type}> *)(&{self.table.cxt_name}->colrefs[{self.id}]);') - elif self.cxt_name is None: - self.cxt_name = cxt.columns_in_context[self] - - return self.cxt_name - - def __getitem__(self, key): - if type(key) is str: - return getattr(self, key) - else: - return self.__arr__[key] - - def __setitem__(self, key, value): - self.__arr__[key] = value - - def __str__(self): - return self.reference() - def __repr__(self): - return self.reference() - -class TableInfo: - - def __init__(self, table_name, cols, cxt:'Context'): - # statics - self.table_name = table_name - self.alias = set([table_name]) - self.columns_byname = dict() # column_name, type - self.columns = [] - self.cxt = cxt - self.cxt_name = None - self.views = set() - #keep track of temp vars - self.local_vars = dict() - self.rec = None # a hook on get_col_d to record tables being referenced in the process - self.groupinfo = None - self.add_cols(cols) - # runtime - self.n_rows = 0 # number of cols - self.order = [] # assumptions - - cxt.tables_byname[self.table_name] = self # construct reverse map - def reference(self): - if self not in self.cxt.tables_in_context: - counter = 0 - base_name = self.table_name - if base_name in self.cxt.tables_in_context.values(): - while (f'{base_name}_{counter}') in self.cxt.tables_in_context.values(): - counter += 1 - base_name = f'{base_name}_{counter}' - self.cxt_name = base_name - self.cxt.tables_in_context[self] = base_name - - type_tags = '<' - for c in self.columns: - type_tags += c.type + ',' - if type_tags.endswith(','): - type_tags = type_tags[:-1] - type_tags += '>' - - self.cxt.emit(f'auto& {base_name} = *(TableInfo{type_tags} *)(cxt->tables["{self.table_name}"]);') - return self.cxt_name - def refer_all(self): - self.reference() - for c in self.columns: - c.reference() - def add_cols(self, cols, new = True): - for i, c in enumerate(cols): - self.add_col(c, new, i) - def add_col(self, c, new = True, i = 0): - _ty = c['type'] - if new: - cname =f'get<{i}>({self.table_name})' - _ty = _ty if type(c) is ColRef else list(_ty.keys())[0] - col_object = ColRef(cname, _ty, c, 1, self,c['name'], len(self.columns)) - else: - col_object = c - cname = c.cname - c.table = self - self.cxt.ccols_byname[cname] = col_object - self.columns_byname[c['name']] = col_object - self.columns.append(col_object) - def get_size(self): - size_tmp = 'tmp_sz_'+base62uuid(6) - self.cxt.emit(f'const auto& {size_tmp} = {self.columns[0].reference()}.size;') - return size_tmp - @property - def n_cols(self): - return len(self.columns) - - def materialize_orderbys(self): - view_stack = '' - stack_name = '' - for o in self.order: - o.materialize() - if len(view_stack) == 0: - view_stack = o.view.name - stack_name = view_stack - else: - view_stack = view_stack+'['+ o.view.name +']' - # TODO: Optimize by doing everything in a stmt - if len(view_stack) > 0: - if len(self.order) > 1: - self.cxt.emit(f'{stack_name}:{view_stack}') - for c in 
self.columns: - c.order_pending = stack_name - self.order[0].node.view = stack_name - self.order.clear() - - def get_col_d(self, col_name): - col = self.columns_byname[col_name] - if type(self.rec) is set: - self.rec.add(col) - return col - - def get_ccolname_d(self, col_name): - return self.get_col_d(col_name).cname - - def get_col(self, col_name): - self.materialize_orderbys() - col = self.get_col_d(col_name) - if type(col.order_pending) is str: - self.cxt.emit_no_flush(f'{col.cname}:{col.cname}[{col.order_pending}]') - col.order_pending = None - return col - def get_ccolname(self, col_name): - return self.get_col(col_name).cname - - def add_alias(self, alias): - # TODO: Scoping of alias should be constrainted in the query. - if alias in self.cxt.tables_byname.keys(): - print("Error: table alias already exists") - return - self.cxt.tables_byname[alias] = self - self.alias.add(alias) - - def parse_col_names(self, colExpr, materialize = True, raw = False): - # get_col = self.get_col if materialize else self.get_col_d - - parsedColExpr = colExpr.split('.') - ret = None - if len(parsedColExpr) <= 1: - ret = self.get_col_d(colExpr) - else: - datasource = self.cxt.tables_byname[parsedColExpr[0]] - if datasource is None: - raise ValueError(f'Table name/alias not defined{parsedColExpr[0]}') - else: - ret = datasource.parse_col_names(parsedColExpr[1], raw) - from engine.expr import index_expr - string = ret.reference() + index_expr - if self.groupinfo is not None and ret and ret in self.groupinfo.raw_groups: - string = f'get<{self.groupinfo.raw_groups.index(ret)}>({{y}})' - return string, ret if raw else string - -class View: - def __init__(self, context, table = None, tmp = True): - self.table: TableInfo = table - self.name = 'v'+base62uuid(7) - if type(table) is TableInfo: - table.views.add(self) - self.context = context - - def construct(self): - self.context.emit(f'{self.name}:()') - -class Context: - function_head = ''' - extern "C" int __DLLEXPORT__ dllmain(Context* cxt) { - using namespace std; - using namespace types; - - ''' - LOG_INFO = 'INFO' - LOG_ERROR = 'ERROR' - LOG_SILENT = 'SILENT' - from engine.types import Types - type_table : Dict[str, Types] = dict() - - def new(self): - self.tmp_names = set() - self.udf_map = dict() - self.headers = set(['\"./server/libaquery.h\"']) - self.finalized = False - # read header - self.ccode = str() - self.ccodelet = str() - with open('header.cxx', 'r') as outfile: - self.ccode = outfile.read() - # datasource will be availible after `from' clause is parsed - # and will be deactivated when the `from' is out of scope - self.datasource = None - self.ds_stack = [] - self.scans = [] - self.removing_scan = False - - def __init__(self): - self.tables:list[TableInfo] = [] - self.tables_byname = dict() - self.ccols_byname = dict() - self.gc_name = 'gc_' + base62uuid(4) - self.tmp_names = set() - self.udf_map = dict() - self.headers = set(['\"./server/libaquery.h\"']) - self.finalized = False - self.log_level = Context.LOG_SILENT - self.print = print - # read header - self.ccode = str() - self.ccodelet = str() - self.columns_in_context = dict() - self.tables_in_context = dict() - with open('header.cxx', 'r') as outfile: - self.ccode = outfile.read() - # datasource will be availible after `from' clause is parsed - # and will be deactivated when the `from' is out of scope - self.datasource = None - self.ds_stack = [] - self.scans = [] - self.removing_scan = False - def add_table(self, table_name, cols): - tbl = TableInfo(table_name, cols, self) - 
self.tables.append(tbl) - return tbl - - def gen_tmptable(self): - from engine.utils import base62uuid - return f't{base62uuid(7)}' - def reg_tmp(self, name, f): - self.tmp_names.add(name) - self.emit(f"{self.gc_name}.reg({{{name}, 0,0{'' if f is None else ',{f}'}}});") - - def define_tmp(self, typename, isPtr = True, f = None): - name = 'tmp_' + base62uuid() - if isPtr: - self.emit(f'auto* {name} = new {typename};') - self.reg_tmp(name, f) - else: - self.emit(f'auto {name} = {typename};') - return name - def emit(self, codelet): - self.ccode += self.ccodelet + codelet + '\n' - self.ccodelet = '' - def emit_no_flush(self, codelet): - self.ccode += codelet + '\n' - def emit_flush(self): - self.ccode += self.ccodelet + '\n' - self.ccodelet = '' - def emit_nonewline(self, codelet): - self.ccodelet += codelet - - def datsource_top(self): - if len(self.ds_stack) > 0: - return self.ds_stack[-1] - else: - return None - def datasource_pop(self): - if len(self.ds_stack) > 0: - self.ds_stack.pop() - return self.ds_stack[-1] - else: - return None - def datasource_push(self, ds): - if type(ds) is TableInfo: - self.ds_stack.append(ds) - return ds - else: - return None - def remove_scan(self, scan, str_scan): - self.emit(str_scan) - self.scans.remove(scan) - - def Info(self, msg): - if self.log_level.upper() == Context.LOG_INFO: - self.print(msg) - def Error(self, msg): - if self.log_level.upper() == Context.LOG_ERROR: - self.print(msg) - else: - self.Info(self, msg) - - - def finalize(self): - if not self.finalized: - headers = '' - for h in self.headers: - if h[0] != '"': - headers += '#include <' + h + '>\n' - else: - headers += '#include ' + h + '\n' - self.ccode = headers + self.function_head + self.ccode + 'return 0;\n}' - self.headers = set() - return self.ccode - def __str__(self): - self.finalize() - return self.ccode - def __repr__(self) -> str: - return self.__str__() - - -class ast_node: - types = dict() - header = [] - def __init__(self, parent:"ast_node", node, context:Context = None): - self.context = parent.context if context is None else context - self.parent = parent - self.datasource = None - self.init(node) - self.produce(node) - self.spawn(node) - self.consume(node) - - def emit(self, code): - self.context.emit(code) - def emit_no_ln(self, code): - self.context.emit_nonewline(code) - - name = 'null' - - # each ast node has 3 stages. - # `produce' generates info for child nodes - # `spawn' populates child nodes - # `consume' consumes info from child nodes and finalizes codegen - # For simple operators, there may not be need for some of these stages - def init(self, _): - pass - def produce(self, _): - pass - def spawn(self, _): - pass - def consume(self, _): - pass - -# include classes in module as first order operators -def include(objs): - import inspect - for _, cls in inspect.getmembers(objs): - if inspect.isclass(cls) and issubclass(cls, ast_node) and not cls.name.startswith('_'): +from engine.utils import base62uuid +from copy import copy +from typing import * +# replace column info with this later. 
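Both `ColRef.reference()` and `TableInfo.reference()` in the classes below deduplicate emitted C++ identifiers with the same counter loop. Extracted as a standalone sketch for clarity (`unique_name` is an illustrative helper, not part of engine/ast.py):

```python
def unique_name(base, taken):
    # mirror of the reference() logic: keep the base name if it is free,
    # otherwise append the first counter suffix that does not collide
    if base not in taken:
        return base
    counter = 0
    while f"{base}_{counter}" in taken:
        counter += 1
    return f"{base}_{counter}"

taken = {"t", "t_0"}
assert unique_name("u", taken) == "u"     # no collision, kept as-is
assert unique_name("t", taken) == "t_1"   # t and t_0 taken, so t_1
```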
+class ColRef:
+    def __init__(self, cname, _ty, cobj, cnt, table:'TableInfo', name, id, compound = False):
+        self.cname = cname # column object location
+        self.cxt_name = None # column object in context
+        self.type = _ty
+        self.cobj = cobj
+        self.cnt = cnt
+        self.table = table
+        self.name = name
+        self.id = id # position in table
+        self.order_pending = None # pending order-by view to apply on next access
+        self.compound = compound # compound field (list as a field)
+        self.views = []
+        self.aux_columns = [] # columns for temporary calculations
+        # e.g. order by, group by, filter by expressions
+
+        # a list rather than a tuple, so that __setitem__ below is valid
+        self.__arr__ = [cname, _ty, cobj, cnt, table, name, id]
+
+    def reference(self):
+        cxt = self.table.cxt
+        self.table.reference()
+        if self not in cxt.columns_in_context:
+            counter = 0
+            base_name = self.table.table_name + '_' + self.name
+            if base_name in cxt.columns_in_context.values():
+                while (f'{base_name}_{counter}') in cxt.columns_in_context.values():
+                    counter += 1
+                base_name = f'{base_name}_{counter}'
+            self.cxt_name = base_name
+            cxt.columns_in_context[self] = base_name
+            # TODO: change this to cname;
+            cxt.emit(f'auto& {base_name} = *(ColRef<{self.type}> *)(&{self.table.cxt_name}->colrefs[{self.id}]);')
+        elif self.cxt_name is None:
+            self.cxt_name = cxt.columns_in_context[self]
+
+        return self.cxt_name
+
+    def __getitem__(self, key):
+        if type(key) is str:
+            return getattr(self, key)
+        else:
+            return self.__arr__[key]
+
+    def __setitem__(self, key, value):
+        self.__arr__[key] = value
+
+    def __str__(self):
+        return self.reference()
+    def __repr__(self):
+        return self.reference()
+
+class TableInfo:
+
+    def __init__(self, table_name, cols, cxt:'Context'):
+        # statics
+        self.table_name = table_name
+        self.alias = set([table_name])
+        self.columns_byname = dict() # column_name -> column object
+        self.columns = []
+        self.cxt = cxt
+        self.cxt_name = None
+        self.views = set()
+        # keep track of temp vars
+        self.local_vars = dict()
+        self.rec = None # a hook on get_col_d to record columns being referenced in the process
+        self.groupinfo = None
+        self.add_cols(cols)
+        # runtime
+        self.n_rows = 0 # number of rows
+        self.order = [] # assumptions
+
+        cxt.tables_byname[self.table_name] = self # construct reverse map
+    def reference(self):
+        if self not in self.cxt.tables_in_context:
+            counter = 0
+            base_name = self.table_name
+            if base_name in self.cxt.tables_in_context.values():
+                while (f'{base_name}_{counter}') in self.cxt.tables_in_context.values():
+                    counter += 1
+                base_name = f'{base_name}_{counter}'
+            self.cxt_name = base_name
+            self.cxt.tables_in_context[self] = base_name
+
+            type_tags = '<'
+            for c in self.columns:
+                type_tags += c.type + ','
+            if type_tags.endswith(','):
+                type_tags = type_tags[:-1]
+            type_tags += '>'
+
+            self.cxt.emit(f'auto& {base_name} = *(TableInfo{type_tags} *)(cxt->tables["{self.table_name}"]);')
+        return self.cxt_name
+    def refer_all(self):
+        self.reference()
+        for c in self.columns:
+            c.reference()
+    def add_cols(self, cols, new = True):
+        for i, c in enumerate(cols):
+            self.add_col(c, new, i)
+    def add_col(self, c, new = True, i = 0):
+        _ty = c['type']
+        if new:
+            cname = f'get<{i}>({self.table_name})'
+            _ty = _ty if type(c) is ColRef else list(_ty.keys())[0]
+            col_object = ColRef(cname, _ty, c, 1, self, c['name'], len(self.columns))
+        else:
+            col_object = c
+            cname = c.cname
+            c.table = self
+        self.cxt.ccols_byname[cname] = col_object
+        self.columns_byname[c['name']] = col_object
+        self.columns.append(col_object)
+    def get_size(self):
+        size_tmp = 'tmp_sz_' + base62uuid(6)
+        self.cxt.emit(f'const auto& {size_tmp} = {self.columns[0].reference()}.size;')
+        return size_tmp
+    @property
+    def n_cols(self):
+        return len(self.columns)
+
+    def materialize_orderbys(self):
+        view_stack = ''
+        stack_name = ''
+        for o in self.order:
+            o.materialize()
+            if len(view_stack) == 0:
+                view_stack = o.view.name
+                stack_name = view_stack
+            else:
+                view_stack = view_stack + '[' + o.view.name + ']'
+        # TODO: optimize by doing everything in one stmt
+        if len(view_stack) > 0:
+            if len(self.order) > 1:
+                self.cxt.emit(f'{stack_name}:{view_stack}')
+            for c in self.columns:
+                c.order_pending = stack_name
+            self.order[0].node.view = stack_name
+            self.order.clear()
+
+    def get_col_d(self, col_name):
+        col = self.columns_byname[col_name]
+        if type(self.rec) is set:
+            self.rec.add(col)
+        return col
+
+    def get_ccolname_d(self, col_name):
+        return self.get_col_d(col_name).cname
+
+    def get_col(self, col_name):
+        self.materialize_orderbys()
+        col = self.get_col_d(col_name)
+        if type(col.order_pending) is str:
+            self.cxt.emit_no_flush(f'{col.cname}:{col.cname}[{col.order_pending}]')
+            col.order_pending = None
+        return col
+    def get_ccolname(self, col_name):
+        return self.get_col(col_name).cname
+
+    def add_alias(self, alias):
+        # TODO: scoping of the alias should be constrained to the query.
+        if alias in self.cxt.tables_byname.keys():
+            print("Error: table alias already exists")
+            return
+        self.cxt.tables_byname[alias] = self
+        self.alias.add(alias)
+
+    def parse_col_names(self, colExpr, materialize = True, raw = False):
+        # get_col = self.get_col if materialize else self.get_col_d
+
+        parsedColExpr = colExpr.split('.')
+        if len(parsedColExpr) > 1:
+            # qualified name: delegate the remainder of the path to the named table/alias
+            datasource = self.cxt.tables_byname.get(parsedColExpr[0])
+            if datasource is None:
+                raise ValueError(f'Table name/alias not defined: {parsedColExpr[0]}')
+            return datasource.parse_col_names('.'.join(parsedColExpr[1:]), materialize, raw)
+        ret = self.get_col_d(colExpr)
+        from engine.expr import index_expr
+        string = ret.reference() + index_expr
+        if self.groupinfo is not None and ret and ret in self.groupinfo.raw_groups:
+            string = f'get<{self.groupinfo.raw_groups.index(ret)}>({{y}})'
+        return (string, ret) if raw else string
+
+class View:
+    def __init__(self, context, table = None, tmp = True):
+        self.table: TableInfo = table
+        self.name = 'v' + base62uuid(7)
+        if type(table) is TableInfo:
+            table.views.add(self)
+        self.context = context
+
+    def construct(self):
+        self.context.emit(f'{self.name}:()')
+
+class Context:
+    function_head = '''
+    extern "C" int __DLLEXPORT__ dllmain(Context* cxt) {
+        using namespace std;
+        using namespace types;
+
+    '''
+    LOG_INFO = 'INFO'
+    LOG_ERROR = 'ERROR'
+    LOG_SILENT = 'SILENT'
+    from engine.types import Types
+    type_table : Dict[str, Types] = dict()
+
+    def new(self):
+        self.tmp_names = set()
+        self.udf_map = dict()
+        self.headers = set(['"./server/libaquery.h"'])
+        self.finalized = False
+        # read header
+        self.ccode = str()
+        self.ccodelet = str()
+        with open('header.cxx', 'r') as outfile:
+            self.ccode = outfile.read()
+        # datasource will be available after the `from' clause is parsed
+        # and will be deactivated when the `from' clause goes out of scope
+        self.datasource = None
+        self.ds_stack = []
+        self.scans = []
+        self.removing_scan = False
+
+    def __init__(self):
+        self.tables:list[TableInfo] = []
+        self.tables_byname = dict()
+        self.ccols_byname = dict()
+        self.gc_name = 'gc_' + base62uuid(4)
+        self.tmp_names = set()
+        self.udf_map = dict()
+        self.headers = set(['"./server/libaquery.h"'])
+        self.finalized = False
+        self.log_level = Context.LOG_SILENT
+        self.print = print
+        # read header
+        self.ccode = str()
+        self.ccodelet = str()
+        self.columns_in_context = dict()
+        self.tables_in_context = dict()
+        with open('header.cxx', 'r') as outfile:
+            self.ccode = outfile.read()
+        # datasource will be available after the `from' clause is parsed
+        # and will be deactivated when the `from' clause goes out of scope
+        self.datasource = None
+        self.ds_stack = []
+        self.scans = []
+        self.removing_scan = False
+    def add_table(self, table_name, cols):
+        tbl = TableInfo(table_name, cols, self)
+        self.tables.append(tbl)
+        return tbl
+
+    def gen_tmptable(self):
+        from engine.utils import base62uuid
+        return f't{base62uuid(7)}'
+    def reg_tmp(self, name, f):
+        self.tmp_names.add(name)
+        # note: the destructor clause must itself be an f-string, or {f} never interpolates
+        self.emit(f"{self.gc_name}.reg({{{name}, 0,0{'' if f is None else f',{f}'}}});")
+
+    def define_tmp(self, typename, isPtr = True, f = None):
+        name = 'tmp_' + base62uuid()
+        if isPtr:
+            self.emit(f'auto* {name} = new {typename};')
+            self.reg_tmp(name, f)
+        else:
+            self.emit(f'auto {name} = {typename};')
+        return name
+    def emit(self, codelet):
+        self.ccode += self.ccodelet + codelet + '\n'
+        self.ccodelet = ''
+    def emit_no_flush(self, codelet):
+        self.ccode += codelet + '\n'
+    def emit_flush(self):
+        self.ccode += self.ccodelet + '\n'
+        self.ccodelet = ''
+    def emit_nonewline(self, codelet):
+        self.ccodelet += codelet
+
+    def datasource_top(self):
+        if len(self.ds_stack) > 0:
+            return self.ds_stack[-1]
+        else:
+            return None
+    def datasource_pop(self):
+        if len(self.ds_stack) > 0:
+            self.ds_stack.pop()
+            # return the new top, or None if the last datasource was just popped
+            return self.ds_stack[-1] if self.ds_stack else None
+        else:
+            return None
+    def datasource_push(self, ds):
+        if type(ds) is TableInfo:
+            self.ds_stack.append(ds)
+            return ds
+        else:
+            return None
+    def remove_scan(self, scan, str_scan):
+        self.emit(str_scan)
+        self.scans.remove(scan)
+
+    def Info(self, msg):
+        if self.log_level.upper() == Context.LOG_INFO:
+            self.print(msg)
+    def Error(self, msg):
+        if self.log_level.upper() == Context.LOG_ERROR:
+            self.print(msg)
+        else:
+            self.Info(msg)
+
+
+    def finalize(self):
+        if not self.finalized:
+            headers = ''
+            for h in self.headers:
+                if h[0] != '"':
+                    headers += '#include <' + h + '>\n'
+                else:
+                    headers += '#include ' + h + '\n'
+            self.ccode = headers + self.function_head + self.ccode + 'return 0;\n}'
+            self.headers = set()
+            self.finalized = True # prevent double-wrapping on repeated str() calls
+        return self.ccode
+    def __str__(self):
+        self.finalize()
+        return self.ccode
+    def __repr__(self) -> str:
+        return self.__str__()
+
+
+class ast_node:
+    types = dict()
+    header = []
+    def __init__(self, parent:"ast_node", node, context:Context = None):
+        self.context = parent.context if context is None else context
+        self.parent = parent
+        self.datasource = None
+        self.init(node)
+        self.produce(node)
+        self.spawn(node)
+        self.consume(node)
+
+    def emit(self, code):
+        self.context.emit(code)
+    def emit_no_ln(self, code):
+        self.context.emit_nonewline(code)
+
+    name = 'null'
+
+    # each ast node has 3 codegen stages:
+ # `produce' generates info for child nodes + # `spawn' populates child nodes + # `consume' consumes info from child nodes and finalizes codegen + # For simple operators, there may not be need for some of these stages + def init(self, _): + pass + def produce(self, _): + pass + def spawn(self, _): + pass + def consume(self, _): + pass + +# include classes in module as first order operators +def include(objs): + import inspect + for _, cls in inspect.getmembers(objs): + if inspect.isclass(cls) and issubclass(cls, ast_node) and not cls.name.startswith('_'): ast_node.types[cls.name] = cls \ No newline at end of file diff --git a/engine/ddl.py b/engine/ddl.py index 1d5a6da..2a92ef2 100644 --- a/engine/ddl.py +++ b/engine/ddl.py @@ -1,128 +1,128 @@ -# code-gen for data decl languages - -from engine.orderby import orderby -from engine.ast import ColRef, TableInfo, ast_node, Context, include -from engine.scan import scan -from engine.utils import base62uuid - -class create_table(ast_node): - name = 'create_table' - def __init__(self, parent: "ast_node", node, context: Context = None, cexprs = None, lineage = False): - self.cexprs = cexprs - self.lineage = lineage - super().__init__(parent, node, context) - def produce(self, node): - if type(node) is not TableInfo: - ct = node[self.name] - tbl = self.context.add_table(ct['name'], ct['columns']) - else: - tbl = node - - col_type_str = ','.join([c.type for c in tbl.columns]) - # create tables in c - self.emit(f"auto {tbl.table_name} = new TableInfo<{col_type_str}>(\"{tbl.table_name}\", {tbl.n_cols});") - self.emit("cxt->tables.insert({\"" + tbl.table_name + f"\", {tbl.table_name}"+"});") - self.context.tables_in_context[tbl] = tbl.table_name - tbl.cxt_name = tbl.table_name - tbl.refer_all() - # create an empty new table - if self.cexprs is None: - for c in tbl.columns: - self.emit(f'{c.cxt_name}.init("{c.name}");') - # create an output table - else: - # 1 to 1 lineage. 
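# ---- editor's note (annotation, not part of the patch) --------------------
# Two codegen paths follow. With no active scan, every output column is
# assigned wholesale from its compiled expression (cexprs[i]); if lineage is
# requested, an order permutation over the source table is emitted first and
# passed into each expression. With an active scan, the same work is instead
# appended to the innermost scanner so columns are built row by row. The
# no-scan path emits C++ of roughly this shape (names are uuid-suffixed):
#
#     auto order_xxxxxx = source->order_by<...>();   // lineage case only
#     out_col.init("c1");
#     out_col = <compiled expression>;
# ----------------------------------------------------------------------------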
- if len(self.context.scans) == 0: - if self.lineage: - order = 'order_' + base62uuid(6) - self.emit(f'auto {order} = {self.parent.datasource.cxt_name}->order_by<{orderby(self.parent, self.parent.assumptions).result()}>();') - self.lineage = '*' + order - else: - self.lineage = None - for i, c in enumerate(tbl.columns): - self.emit(f'{c.cxt_name}.init("{c.name}");') - self.emit(f"{c.cxt_name} = {self.cexprs[i](self.lineage)};") - self.lineage = None - self.parent.assumptions = None - else: - scanner:scan = self.context.scans[-1] - if self.lineage: - lineage_var = 'lineage_' + base62uuid(6) - counter_var = 'counter_' + base62uuid(6) - scanner.add(f'auto {lineage_var} = {self.datasource.cxt_name}->bind({tbl.cxt_name});', "init") - scanner.add(f'auto {counter_var} = 0;', "init") - scanner.add(f"{lineage_var}.emplace_back({counter_var}++);", "front") - self.lineage = f"{lineage_var}.rid" - for i, c in enumerate(tbl.columns): - scanner.add(f'{c.cxt_name}.init("{c.name}");', "init") - scanner.add(f"{c.cxt_name} = {self.cexprs[i](scanner.it_ver)};") - -class insert(ast_node): - name = 'insert' - def produce(self, node): - ct = node[self.name] - table:TableInfo = self.context.tables_byname[ct] - - values = node['query']['select'] - if len(values) != table.n_cols: - raise ValueError("Column Mismatch") - table.refer_all() - for i, s in enumerate(values): - if 'value' in s: - cname = table.columns[i].cxt_name - self.emit(f"{cname}.emplace_back({s['value']});") - else: - # subquery, dispatch to select astnode - pass - -class c(ast_node): - name='c' - def produce(self, node): - self.emit(node[self.name]) - -class load(ast_node): - name="load" - def produce(self, node): - self.context.headers.add('"csv.h"') - node = node[self.name] - table:TableInfo = self.context.tables_byname[node['table']] - table.refer_all() - csv_reader_name = 'csv_reader_' + base62uuid(6) - col_types = [c.type for c in table.columns] - col_tmp_names = ['tmp_'+base62uuid(8) for _ in range(len(table.columns))] - # col_type_str = ",".join(col_types) - col_names = ','.join([f'"{c.name}"' for c in table.columns]) - - self.emit(f'io::CSVReader<{len(col_types)}> {csv_reader_name}("{node["file"]["literal"]}");') - self.emit(f'{csv_reader_name}.read_header(io::ignore_extra_column, {col_names});') - for t, n in zip(col_types, col_tmp_names): - self.emit(f'{t} {n};') - self.emit(f'while({csv_reader_name}.read_row({",".join(col_tmp_names)})) {{ \n') - for i, c in enumerate(table.columns): - self.emit(f'{c.cxt_name}.emplace_back({col_tmp_names[i]});') - self.emit('}') - - -class outfile(ast_node): - name="_outfile" - def produce(self, node): - out_table:TableInfo = self.parent.out_table - filename = node['loc']['literal'] if 'loc' in node else node['literal'] - sep = ',' if 'term' not in node else node['term']['literal'] - file_pointer = 'fp_' + base62uuid(6) - self.emit(f'FILE* {file_pointer} = fopen("{filename}", "w");') - self.emit(f'{out_table.cxt_name}->printall("{sep}", "\\n", nullptr, {file_pointer});') - self.emit(f'fclose({file_pointer});') - # self.context.headers.add('fstream') - # cout_backup_buffer = 'stdout_' + base62uuid(4) - # ofstream = 'ofstream_' + base62uuid(6) - # self.emit(f'auto {cout_backup_buffer} = cout.rdbuf();') - # self.emit(f'auto {ofstream} = ofstream("{filename}");') - # self.emit(f'cout.rdbuf({ofstream}.rdbuf());') - # TODO: ADD STMTS. 
-    # self.emit(f'cout.rdbuf({cout_backup_buffer});')
-    # self.emit(f'{ofstream}.close();')
-
-
-import sys
+# code-gen for data decl languages
+
+from engine.orderby import orderby
+from engine.ast import ColRef, TableInfo, ast_node, Context, include
+from engine.scan import scan
+from engine.utils import base62uuid
+
+class create_table(ast_node):
+    name = 'create_table'
+    def __init__(self, parent: "ast_node", node, context: Context = None, cexprs = None, lineage = False):
+        self.cexprs = cexprs
+        self.lineage = lineage
+        super().__init__(parent, node, context)
+    def produce(self, node):
+        if type(node) is not TableInfo:
+            ct = node[self.name]
+            tbl = self.context.add_table(ct['name'], ct['columns'])
+        else:
+            tbl = node
+
+        col_type_str = ','.join([c.type for c in tbl.columns])
+        # create tables in c
+        self.emit(f"auto {tbl.table_name} = new TableInfo<{col_type_str}>(\"{tbl.table_name}\", {tbl.n_cols});")
+        self.emit("cxt->tables.insert({\"" + tbl.table_name + f"\", {tbl.table_name}" + "});")
+        self.context.tables_in_context[tbl] = tbl.table_name
+        tbl.cxt_name = tbl.table_name
+        tbl.refer_all()
+        # create an empty new table
+        if self.cexprs is None:
+            for c in tbl.columns:
+                self.emit(f'{c.cxt_name}.init("{c.name}");')
+        # create an output table
+        else:
+            # 1 to 1 lineage.
+            if len(self.context.scans) == 0:
+                if self.lineage:
+                    order = 'order_' + base62uuid(6)
+                    self.emit(f'auto {order} = {self.parent.datasource.cxt_name}->order_by<{orderby(self.parent, self.parent.assumptions).result()}>();')
+                    self.lineage = '*' + order
+                else:
+                    self.lineage = None
+                for i, c in enumerate(tbl.columns):
+                    self.emit(f'{c.cxt_name}.init("{c.name}");')
+                    self.emit(f"{c.cxt_name} = {self.cexprs[i](self.lineage)};")
+                self.lineage = None
+                self.parent.assumptions = None
+            else:
+                scanner:scan = self.context.scans[-1]
+                if self.lineage:
+                    lineage_var = 'lineage_' + base62uuid(6)
+                    counter_var = 'counter_' + base62uuid(6)
+                    # ast_node leaves self.datasource as None here; the source table
+                    # lives on the parent node, as in the no-scan branch above
+                    scanner.add(f'auto {lineage_var} = {self.parent.datasource.cxt_name}->bind({tbl.cxt_name});', "init")
+                    scanner.add(f'auto {counter_var} = 0;', "init")
+                    scanner.add(f"{lineage_var}.emplace_back({counter_var}++);", "front")
+                    self.lineage = f"{lineage_var}.rid"
+                for i, c in enumerate(tbl.columns):
+                    scanner.add(f'{c.cxt_name}.init("{c.name}");', "init")
+                    scanner.add(f"{c.cxt_name} = {self.cexprs[i](scanner.it_ver)};")
+
+class insert(ast_node):
+    name = 'insert'
+    def produce(self, node):
+        ct = node[self.name]
+        table:TableInfo = self.context.tables_byname[ct]
+
+        values = node['query']['select']
+        if len(values) != table.n_cols:
+            raise ValueError("Column count mismatch between INSERT values and target table")
+        table.refer_all()
+        for i, s in enumerate(values):
+            if 'value' in s:
+                cname = table.columns[i].cxt_name
+                self.emit(f"{cname}.emplace_back({s['value']});")
+            else:
+                # subquery, dispatch to select astnode
+                pass
+
+class c(ast_node):
+    name='c'
+    def produce(self, node):
+        self.emit(node[self.name])
+
+class load(ast_node):
+    name="load"
+    def produce(self, node):
+        self.context.headers.add('"csv.h"')
+        node = node[self.name]
+        table:TableInfo = self.context.tables_byname[node['table']]
+        table.refer_all()
+        csv_reader_name = 'csv_reader_' + base62uuid(6)
+        col_types = [c.type for c in table.columns]
+        col_tmp_names = ['tmp_' + base62uuid(8) for _ in range(len(table.columns))]
+        # col_type_str = ",".join(col_types)
+        col_names = ','.join([f'"{c.name}"' for c in table.columns])
+
+        # hoist the filename so double quotes need not nest inside the f-string
+        filename = node['file']['literal']
+        self.emit(f'io::CSVReader<{len(col_types)}> {csv_reader_name}("{filename}");')
+
self.emit(f'{csv_reader_name}.read_header(io::ignore_extra_column, {col_names});') + for t, n in zip(col_types, col_tmp_names): + self.emit(f'{t} {n};') + self.emit(f'while({csv_reader_name}.read_row({",".join(col_tmp_names)})) {{ \n') + for i, c in enumerate(table.columns): + self.emit(f'{c.cxt_name}.emplace_back({col_tmp_names[i]});') + self.emit('}') + + +class outfile(ast_node): + name="_outfile" + def produce(self, node): + out_table:TableInfo = self.parent.out_table + filename = node['loc']['literal'] if 'loc' in node else node['literal'] + sep = ',' if 'term' not in node else node['term']['literal'] + file_pointer = 'fp_' + base62uuid(6) + self.emit(f'FILE* {file_pointer} = fopen("{filename}", "w");') + self.emit(f'{out_table.cxt_name}->printall("{sep}", "\\n", nullptr, {file_pointer});') + self.emit(f'fclose({file_pointer});') + # self.context.headers.add('fstream') + # cout_backup_buffer = 'stdout_' + base62uuid(4) + # ofstream = 'ofstream_' + base62uuid(6) + # self.emit(f'auto {cout_backup_buffer} = cout.rdbuf();') + # self.emit(f'auto {ofstream} = ofstream("{filename}");') + # self.emit(f'cout.rdbuf({ofstream}.rdbuf());') + # TODO: ADD STMTS. + # self.emit(f'cout.rdbuf({cout_backup_buffer});') + # self.emit(f'{ofstream}.close();') + + +import sys include(sys.modules[__name__]) \ No newline at end of file diff --git a/engine/expr.py b/engine/expr.py index 8cddb05..7d17808 100644 --- a/engine/expr.py +++ b/engine/expr.py @@ -1,135 +1,135 @@ -from engine.ast import ast_node, ColRef -start_expr = 'f"' -index_expr = '{\'\' if x is None and y is None else f\'[{x}]\'}' -end_expr = '"' - -class expr(ast_node): - name='expr' - builtin_func_maps = { - 'max': 'max', - 'min': 'min', - 'avg': 'avg', - 'sum': 'sum', - 'count' : 'count', - 'mins': ['mins', 'minw'], - 'maxs': ['maxs', 'maxw'], - 'avgs': ['avgs', 'avgw'], - 'sums': ['sums', 'sumw'], - } - - binary_ops = { - 'sub':'-', - 'add':'+', - 'mul':'*', - 'div':'/', - 'mod':'%', - 'and':'&&', - 'or':'||', - 'xor' : '^', - 'gt':'>', - 'lt':'<', - 'lte':'<=', - 'gte':'>=', - 'neq':'!=', - 'eq':'==' - } - - compound_ops = { - 'missing' : ['missing', lambda x: f'{x[0]} == nullval>'], - } - - unary_ops = { - 'neg' : '-', - 'not' : '!' - } - - coumpound_generating_ops = ['avgs', 'mins', 'maxs', 'sums'] + \ - list( binary_ops.keys()) + list(compound_ops.keys()) + list(unary_ops.keys() ) - - def __init__(self, parent, node, materialize_cols = True, abs_col = False): - self.materialize_cols = materialize_cols - self.raw_col = None - self.__abs = abs_col - self.inside_agg = False - if(type(parent) is expr): - self.inside_agg = parent.inside_agg - self.__abs = parent.__abs - ast_node.__init__(self, parent, node, None) - - def init(self, _): - from engine.projection import projection - parent = self.parent - self.isvector = parent.isvector if type(parent) is expr else False - self.is_compound = parent.is_compound if type(parent) is expr else False - if type(parent) in [projection, expr]: - self.datasource = parent.datasource - else: - self.datasource = self.context.datasource - self.udf_map = parent.context.udf_map - self._expr = '' - self.cexpr = None - self.func_maps = {**self.udf_map, **self.builtin_func_maps} - - def produce(self, node): - if type(node) is dict: - for key, val in node.items(): - if key in self.func_maps: - # TODO: distinguish between UDF agg functions and other UDF functions. 
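# ---- editor's note (annotation, not part of the patch) --------------------
# Dispatch sketch for the list-valued branch below. The input AST shape is an
# assumption based on mo_sql_parsing-style output, not verified here:
#
#     node = {'avgs': [3, 'x']}          # moving average, window size 3
#     key, val = 'avgs', [3, 'x']
#     cfunc = self.func_maps[key]        # ['avgs', 'avgw']
#     cfunc = cfunc[len(val) - 1]        # two args -> 'avgw' (windowed variant)
#     # emitted C++ then reads roughly: avgw(3, x)
# ----------------------------------------------------------------------------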
- self.inside_agg = True - self.context.headers.add('"./server/aggregations.h"') - if type(val) is list and len(val) > 1: - cfunc = self.func_maps[key] - cfunc = cfunc[len(val) - 1] if type(cfunc) is list else cfunc - self._expr += f"{cfunc}(" - for i, p in enumerate(val): - self._expr += expr(self, p)._expr + (','if i', + 'lt':'<', + 'lte':'<=', + 'gte':'>=', + 'neq':'!=', + 'eq':'==' + } + + compound_ops = { + 'missing' : ['missing', lambda x: f'{x[0]} == nullval>'], + } + + unary_ops = { + 'neg' : '-', + 'not' : '!' + } + + coumpound_generating_ops = ['avgs', 'mins', 'maxs', 'sums'] + \ + list( binary_ops.keys()) + list(compound_ops.keys()) + list(unary_ops.keys() ) + + def __init__(self, parent, node, materialize_cols = True, abs_col = False): + self.materialize_cols = materialize_cols + self.raw_col = None + self.__abs = abs_col + self.inside_agg = False + if(type(parent) is expr): + self.inside_agg = parent.inside_agg + self.__abs = parent.__abs + ast_node.__init__(self, parent, node, None) + + def init(self, _): + from engine.projection import projection + parent = self.parent + self.isvector = parent.isvector if type(parent) is expr else False + self.is_compound = parent.is_compound if type(parent) is expr else False + if type(parent) in [projection, expr]: + self.datasource = parent.datasource + else: + self.datasource = self.context.datasource + self.udf_map = parent.context.udf_map + self._expr = '' + self.cexpr = None + self.func_maps = {**self.udf_map, **self.builtin_func_maps} + + def produce(self, node): + if type(node) is dict: + for key, val in node.items(): + if key in self.func_maps: + # TODO: distinguish between UDF agg functions and other UDF functions. + self.inside_agg = True + self.context.headers.add('"./server/aggregations.h"') + if type(val) is list and len(val) > 1: + cfunc = self.func_maps[key] + cfunc = cfunc[len(val) - 1] if type(cfunc) is list else cfunc + self._expr += f"{cfunc}(" + for i, p in enumerate(val): + self._expr += expr(self, p)._expr + (','if i {self.group_type};') - self.emit(f'unordered_map<{self.group_type}, vector_type, ' - f'transTypes<{self.group_type}, hasher>> {self.group};') - self.n_grps = len(node) - self.scanner = scan(self, self.datasource, expr.toCExpr(first_col)()+'.size') - self.scanner.add(f'{self.group}[forward_as_tuple({g_contents(self.scanner.it_ver)})].emplace_back({self.scanner.it_ver});') - - - def consume(self, _): - self.referenced = self.datasource.rec - self.datasource.rec = None - self.scanner.finalize() - - def deal_with_assumptions(self, assumption:assumption, out:TableInfo): - gscanner = scan(self, self.group) - val_var = 'val_'+base62uuid(7) - gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;') - gscanner.add(f'{self.datasource.cxt_name}->order_by<{assumption.result()}>(&{val_var});') - gscanner.finalize() - - def finalize(self, cexprs, out:TableInfo): - gscanner = scan(self, self.group) - key_var = 'key_'+base62uuid(7) - val_var = 'val_'+base62uuid(7) - - gscanner.add(f'auto &{key_var} = {gscanner.it_ver}.first;') - gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;') - gscanner.add(';\n'.join([f'{out.columns[i].reference()}.emplace_back({ce(x=val_var, y=key_var)})' for i, ce in enumerate(cexprs)])+';') - - gscanner.finalize() - +from engine.ast import ColRef, TableInfo, ast_node +from engine.orderby import assumption +from engine.scan import scan +from engine.utils import base62uuid +from engine.expr import expr + +class groupby(ast_node): + name = '_groupby' + def init(self, _): + 
self.context.headers.add('"./server/hasher.h"') + self.context.headers.add('unordered_map') + self.group = 'g' + base62uuid(7) + self.group_type = 'record_type' + base62uuid(7) + self.datasource = self.parent.datasource + self.scanner = None + self.datasource.rec = set() + self.raw_groups = [] + def produce(self, node): + + if type(node) is not list: + node = [node] + g_contents = '' + g_contents_list = [] + first_col = '' + for i, g in enumerate(node): + v = g['value'] + e = expr(self, v) + if type(e.raw_col) is ColRef: + self.raw_groups.append(e.raw_col) + e = e._expr + # if v is compound expr, create tmp cols + if type(v) is not str: + tmpcol = 't' + base62uuid(7) + self.emit(f'auto {tmpcol} = {e};') + e = tmpcol + if i == 0: + first_col = e + g_contents_list.append(e) + g_contents_decltype = [f'decltype({c})' for c in g_contents_list] + g_contents = expr.toCExpr(','.join(g_contents_list)) + self.emit(f'typedef record<{expr.toCExpr(",".join(g_contents_decltype))(0)}> {self.group_type};') + self.emit(f'unordered_map<{self.group_type}, vector_type, ' + f'transTypes<{self.group_type}, hasher>> {self.group};') + self.n_grps = len(node) + self.scanner = scan(self, self.datasource, expr.toCExpr(first_col)()+'.size') + self.scanner.add(f'{self.group}[forward_as_tuple({g_contents(self.scanner.it_ver)})].emplace_back({self.scanner.it_ver});') + + + def consume(self, _): + self.referenced = self.datasource.rec + self.datasource.rec = None + self.scanner.finalize() + + def deal_with_assumptions(self, assumption:assumption, out:TableInfo): + gscanner = scan(self, self.group) + val_var = 'val_'+base62uuid(7) + gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;') + gscanner.add(f'{self.datasource.cxt_name}->order_by<{assumption.result()}>(&{val_var});') + gscanner.finalize() + + def finalize(self, cexprs, out:TableInfo): + gscanner = scan(self, self.group) + key_var = 'key_'+base62uuid(7) + val_var = 'val_'+base62uuid(7) + + gscanner.add(f'auto &{key_var} = {gscanner.it_ver}.first;') + gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;') + gscanner.add(';\n'.join([f'{out.columns[i].reference()}.emplace_back({ce(x=val_var, y=key_var)})' for i, ce in enumerate(cexprs)])+';') + + gscanner.finalize() + self.datasource.groupinfo = None \ No newline at end of file diff --git a/engine/join.py b/engine/join.py index 9aa462f..b693e71 100644 --- a/engine/join.py +++ b/engine/join.py @@ -1,6 +1,6 @@ -from engine.ast import ast_node - - -class join(ast_node): - name='join' +from engine.ast import ast_node + + +class join(ast_node): + name='join' \ No newline at end of file diff --git a/engine/projection.py b/engine/projection.py index fa199ed..f95aed0 100644 --- a/engine/projection.py +++ b/engine/projection.py @@ -1,180 +1,180 @@ -from engine.ast import ColRef, TableInfo, ast_node, Context, include -from engine.groupby import groupby -from engine.join import join -from engine.expr import expr -from engine.orderby import assumption, orderby -from engine.scan import filter -from engine.utils import base62uuid, enlist, base62alp, has_other -from engine.ddl import create_table, outfile -import copy - -class projection(ast_node): - name='select' - def __init__(self, parent:ast_node, node, context:Context = None, outname = None, disp = True): - self.disp = disp - self.outname = outname - self.group_node = None - self.assumptions = None - self.where = None - ast_node.__init__(self, parent, node, context) - def init(self, _): - if self.outname is None: - self.outname = self.context.gen_tmptable() - - 
def produce(self, node): - p = node['select'] - self.projections = p if type(p) is list else [p] - self.context.Info(node) - - def spawn(self, node): - self.datasource = None - if 'from' in node: - from_clause = node['from'] - if type(from_clause) is list: - # from joins - join(self, from_clause) - elif type(from_clause) is dict: - if 'value' in from_clause: - value = from_clause['value'] - if type(value) is dict: - if 'select' in value: - # from subquery - projection(self, from_clause, disp = False) - else: - # TODO: from func over table - print(f'from func over table{node}') - elif type(value) is str: - self.datasource = self.context.tables_byname[value] - if 'name' in value: - self.datasource.add_alias(value['name']) - if 'assumptions' in from_clause: - self.assumptions = enlist(from_clause['assumptions']) - - elif type(from_clause) is str: - self.datasource = self.context.tables_byname[from_clause] - - if self.datasource is None: - raise ValueError('spawn error: from clause') - - if self.datasource is not None: - self.datasource_changed = True - self.prev_datasource = self.context.datasource - self.context.datasource = self.datasource - if 'where' in node: - self.where = filter(self, node['where'], True) - # self.datasource = filter(self, node['where'], True).output - # self.context.datasource = self.datasource - - if 'groupby' in node: - self.group_node = groupby(self, node['groupby']) - self.datasource = copy.copy(self.datasource) # shallow copy - self.datasource.groupinfo = self.group_node - else: - self.group_node = None - - def consume(self, node): - self.inv = True - disp_varname = 'd'+base62uuid(7) - has_groupby = self.group_node is not None - cexprs = [] - flatten = False - cols = [] - self.out_table = TableInfo('out_'+base62uuid(4), [], self.context) - if 'outfile' in node: - flatten = True - - new_names = [] - proj_raw_cols = [] - for i, proj in enumerate(self.projections): - cname = '' - compound = False - self.datasource.rec = set() - if type(proj) is dict: - if 'value' in proj: - e = proj['value'] - sname = expr(self, e) - if type(sname.raw_col) is ColRef: - proj_raw_cols.append(sname.raw_col) - sname = sname._expr - fname = expr.toCExpr(sname) # fastest access method at innermost context - absname = expr(self, e, abs_col=True)._expr # absolute name at function scope - # TODO: Make it single pass here. - compound = True # compound column - cexprs.append(fname) - cname = e if type(e) is str else ''.join([a if a in base62alp else '' for a in expr.toCExpr(absname)()]) - if 'name' in proj: # renaming column by AS keyword - cname = proj['name'] - new_names.append(cname) - elif type(proj) is str: - col = self.datasource.get_col_d(proj) - if type(col) is ColRef: - col.reference() - - compound = compound and has_groupby and has_other(self.datasource.rec, self.group_node.referenced) - self.datasource.rec = None - - typename = f'decays' - if not compound: - typename = f'value_type<{typename}>' - - cols.append(ColRef(cname, expr.toCExpr(typename)(), self.out_table, 0, None, cname, i, compound=compound)) - - self.out_table.add_cols(cols, False) - - lineage = None - - if has_groupby: - create_table(self, self.out_table) # creates empty out_table. 
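# ---- editor's note (annotation, not part of the patch) --------------------
# In the grouped path, out_table starts empty; group_node.finalize (see
# engine/groupby.py in this patch) then scans the group hash map and, for
# each group, emplaces one value per output column. The generated C++ is
# roughly (names are uuid-suffixed):
#
#     for (auto& it : g_xxxxxxx) {
#         auto &key_xxxxxxx = it.first;
#         auto &val_xxxxxxx = it.second;
#         out_col.emplace_back(/* cexpr evaluated with x=val, y=key */);
#     }
# ----------------------------------------------------------------------------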
- if self.assumptions is not None: - self.assumptions = assumption(self, self.assumptions, exclude=self.group_node.raw_groups) - if not self.assumptions.empty(): - self.group_node.deal_with_assumptions(self.assumptions, self.out_table) - self.assumptions = None - self.group_node.finalize(cexprs, self.out_table) - else: - # if all assumptions in projections, treat as orderby - lineage = self.assumptions is not None and has_other(self.assumptions, proj_raw_cols) - spawn = create_table(self, self.out_table, cexprs = cexprs, lineage = lineage) # create and populate out_table. - if lineage and type(spawn.lineage) is str: - lineage = spawn.lineage - self.assumptions = orderby(self, self.assumptions) # do not exclude proj_raw_cols - else: - lineage = None - if self.where is not None: - self.where.finalize() - - if type(lineage) is str: - order = 'order_' + base62uuid(6) - self.emit(f'auto {order} = {self.datasource.cxt_name}->order_by<{self.assumptions.result()}>({lineage});') - self.emit(f'{self.out_table.cxt_name}->materialize(*{order});') - self.assumptions = None - - if self.assumptions is not None: - orderby_node = orderby(self, self.assumptions) - else: - orderby_node = None - - if 'orderby' in node: - self.datasource = self.out_table - self.context.datasource = self.out_table # discard current ds - orderbys = node['orderby'] - orderby_node = orderby(self, orderbys) if orderby_node is None else orderby_node.merge(orderbys) - - if orderby_node is not None: - self.emit(f'auto {disp_varname} = {self.out_table.reference()}->order_by_view<{orderby_node.result()}>();') - else: - disp_varname = f'*{self.out_table.cxt_name}' - - if self.disp: - self.emit(f'print({disp_varname});') - - - if flatten: - outfile(self, node['outfile']) - - if self.datasource_changed: - self.context.datasource = self.prev_datasource - - -import sys +from engine.ast import ColRef, TableInfo, ast_node, Context, include +from engine.groupby import groupby +from engine.join import join +from engine.expr import expr +from engine.orderby import assumption, orderby +from engine.scan import filter +from engine.utils import base62uuid, enlist, base62alp, has_other +from engine.ddl import create_table, outfile +import copy + +class projection(ast_node): + name='select' + def __init__(self, parent:ast_node, node, context:Context = None, outname = None, disp = True): + self.disp = disp + self.outname = outname + self.group_node = None + self.assumptions = None + self.where = None + ast_node.__init__(self, parent, node, context) + def init(self, _): + if self.outname is None: + self.outname = self.context.gen_tmptable() + + def produce(self, node): + p = node['select'] + self.projections = p if type(p) is list else [p] + self.context.Info(node) + + def spawn(self, node): + self.datasource = None + if 'from' in node: + from_clause = node['from'] + if type(from_clause) is list: + # from joins + join(self, from_clause) + elif type(from_clause) is dict: + if 'value' in from_clause: + value = from_clause['value'] + if type(value) is dict: + if 'select' in value: + # from subquery + projection(self, from_clause, disp = False) + else: + # TODO: from func over table + print(f'from func over table{node}') + elif type(value) is str: + self.datasource = self.context.tables_byname[value] + if 'name' in value: + self.datasource.add_alias(value['name']) + if 'assumptions' in from_clause: + self.assumptions = enlist(from_clause['assumptions']) + + elif type(from_clause) is str: + self.datasource = self.context.tables_byname[from_clause] + + if 
self.datasource is None: + raise ValueError('spawn error: from clause') + + if self.datasource is not None: + self.datasource_changed = True + self.prev_datasource = self.context.datasource + self.context.datasource = self.datasource + if 'where' in node: + self.where = filter(self, node['where'], True) + # self.datasource = filter(self, node['where'], True).output + # self.context.datasource = self.datasource + + if 'groupby' in node: + self.group_node = groupby(self, node['groupby']) + self.datasource = copy.copy(self.datasource) # shallow copy + self.datasource.groupinfo = self.group_node + else: + self.group_node = None + + def consume(self, node): + self.inv = True + disp_varname = 'd'+base62uuid(7) + has_groupby = self.group_node is not None + cexprs = [] + flatten = False + cols = [] + self.out_table = TableInfo('out_'+base62uuid(4), [], self.context) + if 'outfile' in node: + flatten = True + + new_names = [] + proj_raw_cols = [] + for i, proj in enumerate(self.projections): + cname = '' + compound = False + self.datasource.rec = set() + if type(proj) is dict: + if 'value' in proj: + e = proj['value'] + sname = expr(self, e) + if type(sname.raw_col) is ColRef: + proj_raw_cols.append(sname.raw_col) + sname = sname._expr + fname = expr.toCExpr(sname) # fastest access method at innermost context + absname = expr(self, e, abs_col=True)._expr # absolute name at function scope + # TODO: Make it single pass here. + compound = True # compound column + cexprs.append(fname) + cname = e if type(e) is str else ''.join([a if a in base62alp else '' for a in expr.toCExpr(absname)()]) + if 'name' in proj: # renaming column by AS keyword + cname = proj['name'] + new_names.append(cname) + elif type(proj) is str: + col = self.datasource.get_col_d(proj) + if type(col) is ColRef: + col.reference() + + compound = compound and has_groupby and has_other(self.datasource.rec, self.group_node.referenced) + self.datasource.rec = None + + typename = f'decays' + if not compound: + typename = f'value_type<{typename}>' + + cols.append(ColRef(cname, expr.toCExpr(typename)(), self.out_table, 0, None, cname, i, compound=compound)) + + self.out_table.add_cols(cols, False) + + lineage = None + + if has_groupby: + create_table(self, self.out_table) # creates empty out_table. + if self.assumptions is not None: + self.assumptions = assumption(self, self.assumptions, exclude=self.group_node.raw_groups) + if not self.assumptions.empty(): + self.group_node.deal_with_assumptions(self.assumptions, self.out_table) + self.assumptions = None + self.group_node.finalize(cexprs, self.out_table) + else: + # if all assumptions in projections, treat as orderby + lineage = self.assumptions is not None and has_other(self.assumptions, proj_raw_cols) + spawn = create_table(self, self.out_table, cexprs = cexprs, lineage = lineage) # create and populate out_table. 
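# ---- editor's note (annotation, not part of the patch) --------------------
# Handshake with create_table: when the assumptions are not fully covered by
# the projected columns, create_table leaves a lineage expression in
# spawn.lineage; the branch below then emits an order_by over the *source*
# table and materializes the output table through that permutation, roughly
# (names are uuid-suffixed):
#
#     auto order_xxxxxx = source->order_by<...>(lineage);
#     out_table->materialize(*order_xxxxxx);
# ----------------------------------------------------------------------------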
+ if lineage and type(spawn.lineage) is str: + lineage = spawn.lineage + self.assumptions = orderby(self, self.assumptions) # do not exclude proj_raw_cols + else: + lineage = None + if self.where is not None: + self.where.finalize() + + if type(lineage) is str: + order = 'order_' + base62uuid(6) + self.emit(f'auto {order} = {self.datasource.cxt_name}->order_by<{self.assumptions.result()}>({lineage});') + self.emit(f'{self.out_table.cxt_name}->materialize(*{order});') + self.assumptions = None + + if self.assumptions is not None: + orderby_node = orderby(self, self.assumptions) + else: + orderby_node = None + + if 'orderby' in node: + self.datasource = self.out_table + self.context.datasource = self.out_table # discard current ds + orderbys = node['orderby'] + orderby_node = orderby(self, orderbys) if orderby_node is None else orderby_node.merge(orderbys) + + if orderby_node is not None: + self.emit(f'auto {disp_varname} = {self.out_table.reference()}->order_by_view<{orderby_node.result()}>();') + else: + disp_varname = f'*{self.out_table.cxt_name}' + + if self.disp: + self.emit(f'print({disp_varname});') + + + if flatten: + outfile(self, node['outfile']) + + if self.datasource_changed: + self.context.datasource = self.prev_datasource + + +import sys include(sys.modules[__name__]) \ No newline at end of file diff --git a/engine/scan.py b/engine/scan.py index 812165e..eb8bb96 100644 --- a/engine/scan.py +++ b/engine/scan.py @@ -1,99 +1,99 @@ -from xmlrpc.client import Boolean -from engine.ast import ColRef, TableInfo, View, ast_node, Context -from engine.utils import base62uuid -from engine.expr import expr - -class scan(ast_node): - name = 'scan' - def __init__(self, parent: "ast_node", node, size = None, context: Context = None, const = False): - self.type = type - self.size = size - self.const = "const " if const else "" - super().__init__(parent, node, context) - def init(self, _): - self.datasource = self.context.datasource - self.initializers = '' - self.start = '' - self.front = '' - self.body = '' - self.end = '}' - self.mode = None - self.filters = [] - scan_vars = set(s.it_var for s in self.context.scans) - self.it_ver = 'i' + base62uuid(2) - while(self.it_ver in scan_vars): - self.it_ver = 'i' + base62uuid(6) - self.parent.context.scans.append(self) - def produce(self, node): - if type(node) is ColRef: - self.colref = node - if self.size is None: - self.mode = ["col", node.table] - self.start += f'for ({self.const}auto& {self.it_ver} : {node.reference()}) {{\n' - else: - self.mode = ["idx", node.table] - self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {node.reference()}.size; ++{self.it_ver}){{\\n" - elif type(node) is str: - self.mode = ["idx", None] - self.start+= f'for({self.const}auto& {self.it_ver} : {node}) {{\n' - else: - self.mode = ["idx", node] # Node is the TableInfo - self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {self.size}; ++{self.it_ver}){{\n" - - def add(self, stmt, position = "body"): - if position == "body": - self.body += stmt + '\n' - elif position == "init": - self.initializers += stmt + '\n' - else: - self.front += stmt + '\n' - - def finalize(self): - for f in self.filters: - self.start += f - self.end += '}' - self.context.remove_scan(self, self.initializers + self.start + self.front + self.body + self.end) - -class filter(ast_node): - name = 'filter' - def __init__(self, parent: "ast_node", node, materialize = False, context = None): - self.materialize = materialize - super().__init__(parent, node, context) - def 
init(self, _):
-        self.datasource = self.context.datasource
-        self.view = View(self.context, self.datasource)
-        self.value = None
-
-    def spawn(self, node):
-        # TODO: deal with subqueries
-        self.modified_node = node
-        return super().spawn(node)
-    def __materialize__(self):
-        if self.materialize:
-            cols = [] if self.datasource is None else self.datasource.columns
-            self.output = TableInfo('tn'+base62uuid(6), cols, self.context)
-            self.output.construct()
-            if type(self.value) is View: # cond filtered on tables.
-                self.emit(f'{self.value.name}:&{self.value.name}')
-                for o, c in zip(self.output.columns,self.value.table.columns):
-                    self.emit(f'{o.cname}:{c.cname}[{self.value.name}]')
-            elif self.value is not None: # cond is scalar
-                tmpVar = 't'+base62uuid(7)
-                self.emit(f'{tmpVar}:{self.value}')
-                for o, c in zip(self.output.columns, self.datasource.columns):
-                    self.emit(f'{o.cname}:$[{tmpVar};{c.cname};()]')
-
-    def finalize(self):
-        self.scanner.finalize()
-    def consume(self, _):
-        # TODO: optimizations after converting expr to cnf
-        self.scanner = None
-        for s in self.context.scans:
-            if self.datasource == s.mode[1]:
-                self.scanner = s
-                break
-        if self.scanner is None:
-            self.scanner = scan(self, self.datasource, self.datasource.get_size())
-        self.expr = expr(self, self.modified_node)
-        self.scanner.filters.append(f'if ({self.expr.cexpr(self.scanner.it_ver)}) {{\n')
+from engine.ast import ColRef, TableInfo, View, ast_node, Context
+from engine.utils import base62uuid
+from engine.expr import expr
+
+class scan(ast_node):
+    name = 'scan'
+    def __init__(self, parent: "ast_node", node, size = None, context: Context = None, const = False):
+        self.type = type
+        self.size = size
+        self.const = "const " if const else ""
+        super().__init__(parent, node, context)
+    def init(self, _):
+        self.datasource = self.context.datasource
+        self.initializers = ''
+        self.start = ''
+        self.front = ''
+        self.body = ''
+        self.end = '}'
+        self.mode = None
+        self.filters = []
+        # every scan stores its iterator name in `it_ver'; collect the existing
+        # ones so a fresh, non-colliding name can be drawn
+        scan_vars = set(s.it_ver for s in self.context.scans)
+        self.it_ver = 'i' + base62uuid(2)
+        while(self.it_ver in scan_vars):
+            self.it_ver = 'i' + base62uuid(6)
+        self.parent.context.scans.append(self)
+    def produce(self, node):
+        if type(node) is ColRef:
+            self.colref = node
+            if self.size is None:
+                self.mode = ["col", node.table]
+                self.start += f'for ({self.const}auto& {self.it_ver} : {node.reference()}) {{\n'
+            else:
+                self.mode = ["idx", node.table]
+                self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {node.reference()}.size; ++{self.it_ver}){{\n"
+        elif type(node) is str:
+            self.mode = ["idx", None]
+            self.start += f'for ({self.const}auto& {self.it_ver} : {node}) {{\n'
+        else:
+            self.mode = ["idx", node] # Node is the TableInfo
+            self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {self.size}; ++{self.it_ver}){{\n"
+
+    def add(self, stmt, position = "body"):
+        if position == "body":
+            self.body += stmt + '\n'
+        elif position == "init":
+            self.initializers += stmt + '\n'
+        else:
+            self.front += stmt + '\n'
+
+    def finalize(self):
+        for f in self.filters:
+            self.start += f
+            self.end += '}'
+        self.context.remove_scan(self, self.initializers + self.start + self.front + self.body + self.end)
+
+class filter(ast_node):
+    name = 'filter'
+    def __init__(self, parent: "ast_node", node, materialize = False, context = None):
+        self.materialize = materialize
+        super().__init__(parent, node, context)
+    def init(self, _):
+        self.datasource = self.context.datasource
+        self.view = 
View(self.context, self.datasource) + self.value = None + + def spawn(self, node): + # TODO: deal with subqueries + self.modified_node = node + return super().spawn(node) + def __materialize__(self): + if self.materialize: + cols = [] if self.datasource is None else self.datasource.columns + self.output = TableInfo('tn'+base62uuid(6), cols, self.context) + self.output.construct() + if type(self.value) is View: # cond filtered on tables. + self.emit(f'{self.value.name}:&{self.value.name}') + for o, c in zip(self.output.columns,self.value.table.columns): + self.emit(f'{o.cname}:{c.cname}[{self.value.name}]') + elif self.value is not None: # cond is scalar + tmpVar = 't'+base62uuid(7) + self.emit(f'{tmpVar}:{self.value}') + for o, c in zip(self.output.columns, self.datasource.columns): + self.emit(f'{o.cname}:$[{tmpVar};{c.cname};()]') + + def finalize(self): + self.scanner.finalize() + def consume(self, _): + # TODO: optimizations after converting expr to cnf + self.scanner = None + for s in self.context.scans: + if self.datasource == s.mode[1]: + self.scanner = s + break + if self.scanner is None: + self.scanner = scan(self, self.datasource, self.datasource.get_size()) + self.expr = expr(self, self.modified_node) + self.scanner.filters.append(f'if ({self.expr.cexpr(self.scanner.it_ver)}) {{\n') \ No newline at end of file diff --git a/engine/utils.py b/engine/utils.py index ac1ce24..9940cd4 100644 --- a/engine/utils.py +++ b/engine/utils.py @@ -1,93 +1,93 @@ -import uuid - -lower_alp = 'abcdefghijklmnopqrstuvwxyz' -upper_alp = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' -nums = '0123456789' -base62alp = nums + lower_alp + upper_alp - -reserved_monet = ['month'] - -def base62uuid(crop=8): - _id = uuid.uuid4().int - ret = '' - - while _id: - ret = base62alp[_id % 62] + ret - _id //= 62 - - return ret[:crop] if len(ret) else '0' - -def get_legal_name(name, lower = True): - if name is not None: - if lower: - name = name.lower() - name = ''.join([n for n in name if n in base62alp or n == '_']) - - if name is None or len(name) == 0 or set(name) == set('_'): - name = base62uuid(8) - if(name[0] in nums): - name = '_' + name - - return name - -def check_legal_name(name): - all_underscores = True - for c in name: - if c not in base62alp and c != '_': - return False - if c != '_': - all_underscores = False - if all_underscores: - return False - if name[0] in nums: - return False - - return True - -def enlist(l): - return l if type(l) is list else [l] - -def seps(s, i, l): - return s if i < len(l) - 1 else '' - -def has_other(a, b): - for ai in a: - if ai not in b: - return True - return False - -def defval(val, default): - return default if val is None else val - -# escape must be readonly -from typing import Set -def remove_last(pattern : str, string : str, escape : Set[str] = set()) -> str: - idx = string.rfind(pattern) - if idx == -1: - return string - else: - if set(string[idx:]).difference(escape): - return string - else: - return string[:idx] + string[idx+1:] - -class _Counter: - def __init__(self, cnt): - self.cnt = cnt - def inc(self, cnt = 1): - self.cnt += cnt - cnt = self.cnt - cnt - return cnt - -import re -ws = re.compile(r'\s+') -import os - -def add_dll_dir(dll: str): - import sys - if sys.version_info.major >= 3 and sys.version_info.minor >7 and os.name == 'nt': - os.add_dll_directory(dll) - else: - os.environ['PATH'] = os.path.abspath(dll) + os.pathsep + os.environ['PATH'] - -nullstream = open(os.devnull, 'w') +import uuid + +lower_alp = 'abcdefghijklmnopqrstuvwxyz' +upper_alp = 
'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +nums = '0123456789' +base62alp = nums + lower_alp + upper_alp + +reserved_monet = ['month'] + +def base62uuid(crop=8): + _id = uuid.uuid4().int + ret = '' + + while _id: + ret = base62alp[_id % 62] + ret + _id //= 62 + + return ret[:crop] if len(ret) else '0' + +def get_legal_name(name, lower = True): + if name is not None: + if lower: + name = name.lower() + name = ''.join([n for n in name if n in base62alp or n == '_']) + + if name is None or len(name) == 0 or set(name) == set('_'): + name = base62uuid(8) + if(name[0] in nums): + name = '_' + name + + return name + +def check_legal_name(name): + all_underscores = True + for c in name: + if c not in base62alp and c != '_': + return False + if c != '_': + all_underscores = False + if all_underscores: + return False + if name[0] in nums: + return False + + return True + +def enlist(l): + return l if type(l) is list else [l] + +def seps(s, i, l): + return s if i < len(l) - 1 else '' + +def has_other(a, b): + for ai in a: + if ai not in b: + return True + return False + +def defval(val, default): + return default if val is None else val + +# escape must be readonly +from typing import Set +def remove_last(pattern : str, string : str, escape : Set[str] = set()) -> str: + idx = string.rfind(pattern) + if idx == -1: + return string + else: + if set(string[idx:]).difference(escape): + return string + else: + return string[:idx] + string[idx+1:] + +class _Counter: + def __init__(self, cnt): + self.cnt = cnt + def inc(self, cnt = 1): + self.cnt += cnt + cnt = self.cnt - cnt + return cnt + +import re +ws = re.compile(r'\s+') +import os + +def add_dll_dir(dll: str): + import sys + if sys.version_info.major >= 3 and sys.version_info.minor >7 and os.name == 'nt': + os.add_dll_directory(dll) + else: + os.environ['PATH'] = os.path.abspath(dll) + os.pathsep + os.environ['PATH'] + +nullstream = open(os.devnull, 'w') diff --git a/monetdb/msvc/monetdb_config.h b/monetdb/msvc/monetdb_config.h index d47fef2..1e7d12b 100644 --- a/monetdb/msvc/monetdb_config.h +++ b/monetdb/msvc/monetdb_config.h @@ -1,466 +1,466 @@ -/* - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - * - * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. - */ -/* monetdb_config.h.in. Generated from CMakeLists.txt */ - -#ifndef MT_SEEN_MONETDB_CONFIG_H -#define MT_SEEN_MONETDB_CONFIG_H 1 - -#ifdef _MSC_VER - -#if _MSC_VER < 1900 -#error Versions below Visual Studio 2015 are no longer supported -#endif - -/* Prevent pollution through excessive inclusion of include files by Windows.h. */ -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN 1 -#endif - -/* Visual Studio 8 has deprecated lots of stuff: suppress warnings */ -#ifndef _CRT_SECURE_NO_DEPRECATE -#define _CRT_SECURE_NO_DEPRECATE 1 -#endif - -#define _CRT_RAND_S /* for Windows rand_s, before stdlib.h */ -#define HAVE_RAND_S 1 - -#endif - -#if !defined(_XOPEN_SOURCE) && defined(__CYGWIN__) -#define _XOPEN_SOURCE 700 -#endif - -#include -#if defined(_MSC_VER) && defined(_DEBUG) && defined(_CRTDBG_MAP_ALLOC) -/* In this case, malloc and friends are redefined in crtdbg.h to debug - * versions. We need to include stdlib.h first or else we get - * conflicting declarations. 
*/ -#include -#endif - -#define HAVE_SYS_TYPES_H 1 -#ifdef HAVE_SYS_TYPES_H -# include -#endif - -/* standard C-99 include files */ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef _MSC_VER - -/* Windows include files */ -#include -#include -#include - -/* indicate to sqltypes.h that windows.h has already been included and - that it doesn't have to define Windows constants */ -#define ALREADY_HAVE_WINDOWS_TYPE 1 - -#define NATIVE_WIN32 1 - -#endif /* _MSC_VER */ - -#if !defined(WIN32) && (defined(__CYGWIN__) || defined(__MINGW32__)) -#define WIN32 1 -#endif - -// Section: monetdb configure defines -/* #undef HAVE_DISPATCH_DISPATCH_H */ -/* #undef HAVE_DLFCN_H */ -#define HAVE_FCNTL_H 1 -#define HAVE_IO_H 1 -/* #undef HAVE_KVM_H */ -/* #undef HAVE_LIBGEN_H */ -/* #undef HAVE_LIBINTL_H */ -/* #undef HAVE_MACH_MACH_INIT_H */ -/* #undef HAVE_MACH_TASK_H */ -/* #undef HAVE_MACH_O_DYLD_H */ -/* #undef HAVE_NETDB_H */ -/* #undef HAVE_NETINET_IN_H */ -/* #undef HAVE_POLL_H */ -/* #undef HAVE_PROCFS_H */ -/* #undef HAVE_PWD_H */ -/* #undef HAVE_STRINGS_H */ -/* #undef HAVE_STROPTS_H */ -/* #undef HAVE_SYS_FILE_H */ -/* #undef HAVE_SYS_IOCTL_H */ -/* #undef HAVE_SYS_SYSCTL_H */ -/* #undef HAVE_SYS_MMAN_H */ -/* #undef HAVE_SYS_PARAM_H */ -/* #undef HAVE_SYS_RANDOM_H */ -/* #undef HAVE_SYS_RESOURCE_H */ -/* #undef HAVE_SYS_TIME_H */ -/* #undef HAVE_SYS_TIMES_H */ -/* #undef HAVE_SYS_UIO_H */ -/* #undef HAVE_SYS_UN_H */ -/* #undef HAVE_SYS_WAIT_H */ -/* #undef HAVE_TERMIOS_H */ -/* #undef HAVE_UNISTD_H */ -/* #undef HAVE_UUID_UUID_H */ -#define HAVE_WINSOCK_H 1 -/* #undef HAVE_SEMAPHORE_H */ -#define HAVE_GETOPT_H 1 - -/* #undef HAVE_STDATOMIC_H */ - -/* #undef HAVE_DIRENT_H */ -/* #undef HAVE_SYS_SOCKET_H */ -/* #undef HAVE_GETTIMEOFDAY */ -#define HAVE_SYS_STAT_H 1 -/* #undef HAVE_FDATASYNC */ -/* #undef HAVE_ACCEPT4 */ -/* #undef HAVE_ASCTIME_R */ -/* #undef HAVE_CLOCK_GETTIME */ -/* #undef HAVE_CTIME_R */ -/* #undef HAVE_DISPATCH_SEMAPHORE_CREATE */ -/* #undef HAVE_FALLOCATE */ -/* #undef HAVE_FCNTL */ -/* #undef HAVE_FORK */ -/* #undef HAVE_FSYNC */ -#define HAVE_FTIME 1 -/* #undef HAVE_GETENTROPY */ -/* #undef HAVE_GETEXECNAME */ -/* #undef HAVE_GETLOGIN */ -#define HAVE_GETOPT_LONG 1 -/* #undef HAVE_GETRLIMIT */ -/* #undef HAVE_GETTIMEOFDAY */ -/* #undef HAVE_GETUID */ -/* #undef HAVE_GMTIME_R */ -/* #undef HAVE_LOCALTIME_R */ -/* #undef HAVE_STRERROR_R */ -/* #undef HAVE_LOCKF */ -/* #undef HAVE_MADVISE */ -/* #undef HAVE_MREMAP */ -/* #undef HAVE_NANOSLEEP */ -/* #undef HAVE_NL_LANGINFO */ -/* #undef HAVE__NSGETEXECUTABLEPATH */ -/* #undef HAVE_PIPE2 */ -/* #undef HAVE_POLL */ -/* #undef HAVE_POPEN */ -/* #undef HAVE_POSIX_FADVISE */ -/* #undef HAVE_POSIX_FALLOCATE */ -/* #undef HAVE_POSIX_MADVISE */ -#define HAVE_PUTENV 1 -/* #undef HAVE_SETSID */ -#define HAVE_SHUTDOWN 1 -/* #undef HAVE_SIGACTION */ -/* #undef HAVE_STPCPY */ -/* #undef HAVE_STRCASESTR */ -/* #undef HAVE_STRNCASECMP */ -/* #undef HAVE_STRPTIME */ -/* #undef HAVE_STRSIGNAL */ -/* #undef HAVE_SYSCONF */ -/* #undef HAVE_TASK_INFO */ -/* #undef HAVE_TIMES */ -/* #undef HAVE_UNAME */ -/* #undef HAVE_SEMTIMEDOP */ -/* #undef HAVE_PTHREAD_KILL */ -/* #undef HAVE_PTHREAD_SIGMASK */ -#define HAVE_GETOPT 1 - -#define ICONV_CONST -#define FLEXIBLE_ARRAY_MEMBER -#define ENABLE_MAPI 1 -#define HAVE_MAPI 1 -// End Section: monetdb configure defines - -// Section: monetdb macro variables -#define HAVE_ICONV 1 -/* #undef HAVE_PTHREAD_H */ -#define HAVE_LIBPCRE 1 -#define HAVE_LIBBZ2 1 -/* #undef 
HAVE_CURL */ -#define HAVE_LIBLZMA 1 -#define HAVE_LIBXML 1 -#define HAVE_LIBZ 1 -#define HAVE_LIBLZ4 1 -/* #undef HAVE_PROJ */ -/* #undef HAVE_SNAPPY */ -/* #undef HAVE_FITS */ -/* #undef HAVE_UUID */ -/* #undef HAVE_VALGRIND */ -/* #undef HAVE_NETCDF */ -/* #undef HAVE_READLINE */ -/* #undef HAVE_LIBR */ -#define RHOME "/registry" -#define HAVE_GEOM 1 -/* #undef HAVE_SHP */ -#define HAVE_LIBPY3 1 - -// #define SOCKET_LIBRARIES -#define HAVE_GETADDRINFO 1 -/* #undef HAVE_CUDF */ - -#define MAPI_PORT 50000 -#define MAPI_PORT_STR "50000" - -#ifdef _MSC_VER -#define DIR_SEP '\\' -#define PATH_SEP ';' -#define DIR_SEP_STR "\\" -#define SO_PREFIX "" -#else -#define DIR_SEP '/' -#define PATH_SEP ':' -#define DIR_SEP_STR "/" -/* #undef SO_PREFIX */ -#endif -#define SO_EXT ".dll" - -#define BINDIR "C:/cygwin64/home/monet/x86_64/install/bin" -#define LIBDIR "C:/cygwin64/home/monet/x86_64/install/lib" -#define LOCALSTATEDIR "C:/cygwin64/home/monet/x86_64/install/var" - -// End Section: monetdb macro variables - -// Section: monetdb configure misc -#define MONETDB_RELEASE "Jan2022-SP3" - -#define MONETDB_VERSION "11.43.15" -#define MONETDB_VERSION_MAJOR 11 -#define MONETDB_VERSION_MINOR 43 -#define MONETDB_VERSION_PATCH 15 - -#define GDK_VERSION "25.1.0" -#define GDK_VERSION_MAJOR 25 -#define GDK_VERSION_MINOR 1 -#define GDK_VERSION_PATCH 0 -#define MAPI_VERSION "14.0.2" -#define MAPI_VERSION_MAJOR 14 -#define MAPI_VERSION_MINOR 0 -#define MAPI_VERSION_PATCH 2 -#define MONETDB5_VERSION "32.0.6" -#define MONETDB5_VERSION_MAJOR 32 -#define MONETDB5_VERSION_MINOR 0 -#define MONETDB5_VERSION_PATCH 6 -#define MONETDBE_VERSION "3.0.2" -#define MONETDBE_VERSION_MAJOR 3 -#define MONETDBE_VERSION_MINOR 0 -#define MONETDBE_VERSION_PATCH 2 -#define STREAM_VERSION "16.0.1" -#define STREAM_VERSION_MAJOR 16 -#define STREAM_VERSION_MINOR 0 -#define STREAM_VERSION_PATCH 1 -#define SQL_VERSION "12.0.5" -#define SQL_VERSION_MAJOR 12 -#define SQL_VERSION_MINOR 0 -#define SQL_VERSION_PATCH 5 - -/* Host identifier */ -#define HOST "amd64-pc-windows-msvc" - -/* The used password hash algorithm */ -#define MONETDB5_PASSWDHASH "SHA512" - -/* The used password hash algorithm */ -#define MONETDB5_PASSWDHASH_TOKEN SHA512 - -#ifndef _Noreturn -#define _Noreturn __declspec(noreturn) -#endif -#ifndef __cplusplus -/* Does your compiler support `inline' keyword? (C99 feature) */ -#ifndef inline -#define inline __inline -#endif -/* Does your compiler support `__restrict__' keyword? (C99 feature) */ -#ifndef __restrict__ -#define __restrict__ restrict -#endif -#endif - -#ifdef _MSC_VER -#ifndef __restrict__ -#define __restrict__ __restrict -#endif -#endif - -// End Section: monetdb configure misc - -// Section: monetdb configure sizes -#define SIZEOF_SIZE_T 8 - -/* The size of `void *', as computed by sizeof. */ -#define SIZEOF_VOID_P 8 - -#define SIZEOF_CHAR 1 -#define SIZEOF_SHORT 2 -#define SIZEOF_INT 4 -#define SIZEOF_LONG 4 -#define SIZEOF_LONG_LONG 8 -#define SIZEOF_DOUBLE 8 -#define SIZEOF_WCHAR_T 2 -#define HAVE_LONG_LONG 1 /* for ODBC include files */ - -#ifdef _MSC_VER -#ifdef _WIN64 -#define LENP_OR_POINTER_T SQLLEN * -#else -#define LENP_OR_POINTER_T SQLPOINTER -#endif -#else -#define LENP_OR_POINTER_T SQLLEN * -#endif -#define SIZEOF_SQLWCHAR 2 - -/* #undef WORDS_BIGENDIAN */ - -/* Does your compiler support `ssize_t' type? (Posix type) */ -#ifndef ssize_t -#define ssize_t int64_t -#endif - -/* The size of `__int128', as computed by sizeof. 
*/ -/* #undef SIZEOF___INT128 */ - -/* The size of `__int128_t', as computed by sizeof. */ -/* #undef SIZEOF___INT128_T */ - -/* The size of `__uint128_t', as computed by sizeof. */ -/* #undef SIZEOF___UINT128_T */ - -#ifdef SIZEOF___INT128 -typedef __int128 hge; -typedef unsigned __int128 uhge; -#define HAVE_HGE 1 -#define SIZEOF_HGE SIZEOF___INT128 -#elif defined(SIZEOF___INT128_T) && defined(SIZEOF___UINT128_T) -typedef __int128_t hge; -typedef __uint128_t uhge; -#define HAVE_HGE 1 -#define SIZEOF_HGE SIZEOF___INT128_T -#endif - -// End Section: monetdb configure sizes - -/* Does your compiler support `__attribute__' extension? */ -#if !defined(__GNUC__) && !defined(__clang__) && !defined(__attribute__) -#define __attribute__(a) -#endif - -#if !defined(__cplusplus) || (__cplusplus < 201103L&&(!defined(_MSC_VER)||_MSC_VER<1600)) -#ifndef static_assert -/* static_assert is a C11/C++11 feature, defined in assert.h which also exists - * in many other compilers we ignore it if the compiler doesn't support it - * However in C11 static_assert is a macro, while on C++11 is a keyword */ -#define static_assert(expr, mesg) ((void) 0) -#endif -#endif - -#ifdef HAVE_STRINGS_H -#include /* strcasecmp */ -#endif - -#ifdef _MSC_VER - -#define strdup(s) _strdup(s) - -#ifndef strcasecmp -#define strcasecmp(x,y) _stricmp(x,y) -#endif - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 -#ifndef strncasecmp -#define strncasecmp(x,y,z) _strnicmp(x,y,z) -#endif - -#include -#ifdef lstat -#undef lstat -#endif -#define lstat _stat64 -#ifdef stat -#undef stat -#endif -#define stat _stat64 -#ifdef fstat -#undef fstat -#endif -#define fstat _fstat64 - -static inline char * -stpcpy(char *__restrict__ dst, const char *__restrict__ src) -{ - size_t i; - for (i = 0; src[i]; i++) - dst[i] = src[i]; - dst[i] = 0; - return dst + i; -} - -/* Define to 1 if the system has the type `socklen_t'. */ -#define HAVE_SOCKLEN_T 1 -/* type used by connect */ -#define socklen_t int -#define strtok_r(t,d,c) strtok_s(t,d,c) - -#define HAVE_GETOPT_LONG 1 - -/* there is something very similar to localtime_r on Windows: */ -#include -#define HAVE_LOCALTIME_R 1 -static inline struct tm * -localtime_r(const time_t *__restrict__ timep, struct tm *__restrict__ result) -{ - return localtime_s(result, timep) == 0 ? result : NULL; -} -#define HAVE_GMTIME_R 1 -static inline struct tm * -gmtime_r(const time_t *__restrict__ timep, struct tm *__restrict__ result) -{ - return gmtime_s(result, timep) == 0 ? result : NULL; -} - -/* Define if you have ctime_r(time_t*,char *buf,size_t s) */ -#define HAVE_CTIME_R 1 -#define HAVE_CTIME_R3 1 -/* there is something very similar to ctime_r on Windows: */ -#define ctime_r(t,b,s) (ctime_s(b,s,t) ? 
NULL : (b)) - -#endif /* _MSC_VER */ - -#define HAVE_SOCKLEN_T 1 -#ifndef _MSC_VER -#define SOCKET int -#define closesocket close -#endif - -#ifndef _In_z_ -#define _In_z_ -#endif -#ifndef _Printf_format_string_ -#define _Printf_format_string_ -#endif - -#ifdef _MSC_VER -#define _LIB_STARTUP_FUNC_(f,q) \ - static void f(void); \ - __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ - __pragma(comment(linker,"/include:" q #f "_")) \ - static void f(void) -#ifdef _WIN64 - #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"") -#else - #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"_") -#endif -#else -#define LIB_STARTUP_FUNC(f) \ - static void f(void) __attribute__((__constructor__)); \ - static void f(void) -#endif - -#endif /* MT_SEEN_MONETDB_CONFIG_H */ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. + */ +/* monetdb_config.h.in. Generated from CMakeLists.txt */ + +#ifndef MT_SEEN_MONETDB_CONFIG_H +#define MT_SEEN_MONETDB_CONFIG_H 1 + +#ifdef _MSC_VER + +#if _MSC_VER < 1900 +#error Versions below Visual Studio 2015 are no longer supported +#endif + +/* Prevent pollution through excessive inclusion of include files by Windows.h. */ +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif + +/* Visual Studio 8 has deprecated lots of stuff: suppress warnings */ +#ifndef _CRT_SECURE_NO_DEPRECATE +#define _CRT_SECURE_NO_DEPRECATE 1 +#endif + +#define _CRT_RAND_S /* for Windows rand_s, before stdlib.h */ +#define HAVE_RAND_S 1 + +#endif + +#if !defined(_XOPEN_SOURCE) && defined(__CYGWIN__) +#define _XOPEN_SOURCE 700 +#endif + +#include +#if defined(_MSC_VER) && defined(_DEBUG) && defined(_CRTDBG_MAP_ALLOC) +/* In this case, malloc and friends are redefined in crtdbg.h to debug + * versions. We need to include stdlib.h first or else we get + * conflicting declarations. 
*/ +#include +#endif + +#define HAVE_SYS_TYPES_H 1 +#ifdef HAVE_SYS_TYPES_H +# include +#endif + +/* standard C-99 include files */ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + +/* Windows include files */ +#include +#include +#include + +/* indicate to sqltypes.h that windows.h has already been included and + that it doesn't have to define Windows constants */ +#define ALREADY_HAVE_WINDOWS_TYPE 1 + +#define NATIVE_WIN32 1 + +#endif /* _MSC_VER */ + +#if !defined(WIN32) && (defined(__CYGWIN__) || defined(__MINGW32__)) +#define WIN32 1 +#endif + +// Section: monetdb configure defines +/* #undef HAVE_DISPATCH_DISPATCH_H */ +/* #undef HAVE_DLFCN_H */ +#define HAVE_FCNTL_H 1 +#define HAVE_IO_H 1 +/* #undef HAVE_KVM_H */ +/* #undef HAVE_LIBGEN_H */ +/* #undef HAVE_LIBINTL_H */ +/* #undef HAVE_MACH_MACH_INIT_H */ +/* #undef HAVE_MACH_TASK_H */ +/* #undef HAVE_MACH_O_DYLD_H */ +/* #undef HAVE_NETDB_H */ +/* #undef HAVE_NETINET_IN_H */ +/* #undef HAVE_POLL_H */ +/* #undef HAVE_PROCFS_H */ +/* #undef HAVE_PWD_H */ +/* #undef HAVE_STRINGS_H */ +/* #undef HAVE_STROPTS_H */ +/* #undef HAVE_SYS_FILE_H */ +/* #undef HAVE_SYS_IOCTL_H */ +/* #undef HAVE_SYS_SYSCTL_H */ +/* #undef HAVE_SYS_MMAN_H */ +/* #undef HAVE_SYS_PARAM_H */ +/* #undef HAVE_SYS_RANDOM_H */ +/* #undef HAVE_SYS_RESOURCE_H */ +/* #undef HAVE_SYS_TIME_H */ +/* #undef HAVE_SYS_TIMES_H */ +/* #undef HAVE_SYS_UIO_H */ +/* #undef HAVE_SYS_UN_H */ +/* #undef HAVE_SYS_WAIT_H */ +/* #undef HAVE_TERMIOS_H */ +/* #undef HAVE_UNISTD_H */ +/* #undef HAVE_UUID_UUID_H */ +#define HAVE_WINSOCK_H 1 +/* #undef HAVE_SEMAPHORE_H */ +#define HAVE_GETOPT_H 1 + +/* #undef HAVE_STDATOMIC_H */ + +/* #undef HAVE_DIRENT_H */ +/* #undef HAVE_SYS_SOCKET_H */ +/* #undef HAVE_GETTIMEOFDAY */ +#define HAVE_SYS_STAT_H 1 +/* #undef HAVE_FDATASYNC */ +/* #undef HAVE_ACCEPT4 */ +/* #undef HAVE_ASCTIME_R */ +/* #undef HAVE_CLOCK_GETTIME */ +/* #undef HAVE_CTIME_R */ +/* #undef HAVE_DISPATCH_SEMAPHORE_CREATE */ +/* #undef HAVE_FALLOCATE */ +/* #undef HAVE_FCNTL */ +/* #undef HAVE_FORK */ +/* #undef HAVE_FSYNC */ +#define HAVE_FTIME 1 +/* #undef HAVE_GETENTROPY */ +/* #undef HAVE_GETEXECNAME */ +/* #undef HAVE_GETLOGIN */ +#define HAVE_GETOPT_LONG 1 +/* #undef HAVE_GETRLIMIT */ +/* #undef HAVE_GETTIMEOFDAY */ +/* #undef HAVE_GETUID */ +/* #undef HAVE_GMTIME_R */ +/* #undef HAVE_LOCALTIME_R */ +/* #undef HAVE_STRERROR_R */ +/* #undef HAVE_LOCKF */ +/* #undef HAVE_MADVISE */ +/* #undef HAVE_MREMAP */ +/* #undef HAVE_NANOSLEEP */ +/* #undef HAVE_NL_LANGINFO */ +/* #undef HAVE__NSGETEXECUTABLEPATH */ +/* #undef HAVE_PIPE2 */ +/* #undef HAVE_POLL */ +/* #undef HAVE_POPEN */ +/* #undef HAVE_POSIX_FADVISE */ +/* #undef HAVE_POSIX_FALLOCATE */ +/* #undef HAVE_POSIX_MADVISE */ +#define HAVE_PUTENV 1 +/* #undef HAVE_SETSID */ +#define HAVE_SHUTDOWN 1 +/* #undef HAVE_SIGACTION */ +/* #undef HAVE_STPCPY */ +/* #undef HAVE_STRCASESTR */ +/* #undef HAVE_STRNCASECMP */ +/* #undef HAVE_STRPTIME */ +/* #undef HAVE_STRSIGNAL */ +/* #undef HAVE_SYSCONF */ +/* #undef HAVE_TASK_INFO */ +/* #undef HAVE_TIMES */ +/* #undef HAVE_UNAME */ +/* #undef HAVE_SEMTIMEDOP */ +/* #undef HAVE_PTHREAD_KILL */ +/* #undef HAVE_PTHREAD_SIGMASK */ +#define HAVE_GETOPT 1 + +#define ICONV_CONST +#define FLEXIBLE_ARRAY_MEMBER +#define ENABLE_MAPI 1 +#define HAVE_MAPI 1 +// End Section: monetdb configure defines + +// Section: monetdb macro variables +#define HAVE_ICONV 1 +/* #undef HAVE_PTHREAD_H */ +#define HAVE_LIBPCRE 1 +#define HAVE_LIBBZ2 1 +/* #undef 
HAVE_CURL */ +#define HAVE_LIBLZMA 1 +#define HAVE_LIBXML 1 +#define HAVE_LIBZ 1 +#define HAVE_LIBLZ4 1 +/* #undef HAVE_PROJ */ +/* #undef HAVE_SNAPPY */ +/* #undef HAVE_FITS */ +/* #undef HAVE_UUID */ +/* #undef HAVE_VALGRIND */ +/* #undef HAVE_NETCDF */ +/* #undef HAVE_READLINE */ +/* #undef HAVE_LIBR */ +#define RHOME "/registry" +#define HAVE_GEOM 1 +/* #undef HAVE_SHP */ +#define HAVE_LIBPY3 1 + +// #define SOCKET_LIBRARIES +#define HAVE_GETADDRINFO 1 +/* #undef HAVE_CUDF */ + +#define MAPI_PORT 50000 +#define MAPI_PORT_STR "50000" + +#ifdef _MSC_VER +#define DIR_SEP '\\' +#define PATH_SEP ';' +#define DIR_SEP_STR "\\" +#define SO_PREFIX "" +#else +#define DIR_SEP '/' +#define PATH_SEP ':' +#define DIR_SEP_STR "/" +/* #undef SO_PREFIX */ +#endif +#define SO_EXT ".dll" + +#define BINDIR "C:/cygwin64/home/monet/x86_64/install/bin" +#define LIBDIR "C:/cygwin64/home/monet/x86_64/install/lib" +#define LOCALSTATEDIR "C:/cygwin64/home/monet/x86_64/install/var" + +// End Section: monetdb macro variables + +// Section: monetdb configure misc +#define MONETDB_RELEASE "Jan2022-SP3" + +#define MONETDB_VERSION "11.43.15" +#define MONETDB_VERSION_MAJOR 11 +#define MONETDB_VERSION_MINOR 43 +#define MONETDB_VERSION_PATCH 15 + +#define GDK_VERSION "25.1.0" +#define GDK_VERSION_MAJOR 25 +#define GDK_VERSION_MINOR 1 +#define GDK_VERSION_PATCH 0 +#define MAPI_VERSION "14.0.2" +#define MAPI_VERSION_MAJOR 14 +#define MAPI_VERSION_MINOR 0 +#define MAPI_VERSION_PATCH 2 +#define MONETDB5_VERSION "32.0.6" +#define MONETDB5_VERSION_MAJOR 32 +#define MONETDB5_VERSION_MINOR 0 +#define MONETDB5_VERSION_PATCH 6 +#define MONETDBE_VERSION "3.0.2" +#define MONETDBE_VERSION_MAJOR 3 +#define MONETDBE_VERSION_MINOR 0 +#define MONETDBE_VERSION_PATCH 2 +#define STREAM_VERSION "16.0.1" +#define STREAM_VERSION_MAJOR 16 +#define STREAM_VERSION_MINOR 0 +#define STREAM_VERSION_PATCH 1 +#define SQL_VERSION "12.0.5" +#define SQL_VERSION_MAJOR 12 +#define SQL_VERSION_MINOR 0 +#define SQL_VERSION_PATCH 5 + +/* Host identifier */ +#define HOST "amd64-pc-windows-msvc" + +/* The used password hash algorithm */ +#define MONETDB5_PASSWDHASH "SHA512" + +/* The used password hash algorithm */ +#define MONETDB5_PASSWDHASH_TOKEN SHA512 + +#ifndef _Noreturn +#define _Noreturn __declspec(noreturn) +#endif +#ifndef __cplusplus +/* Does your compiler support `inline' keyword? (C99 feature) */ +#ifndef inline +#define inline __inline +#endif +/* Does your compiler support `__restrict__' keyword? (C99 feature) */ +#ifndef __restrict__ +#define __restrict__ restrict +#endif +#endif + +#ifdef _MSC_VER +#ifndef __restrict__ +#define __restrict__ __restrict +#endif +#endif + +// End Section: monetdb configure misc + +// Section: monetdb configure sizes +#define SIZEOF_SIZE_T 8 + +/* The size of `void *', as computed by sizeof. */ +#define SIZEOF_VOID_P 8 + +#define SIZEOF_CHAR 1 +#define SIZEOF_SHORT 2 +#define SIZEOF_INT 4 +#define SIZEOF_LONG 4 +#define SIZEOF_LONG_LONG 8 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_WCHAR_T 2 +#define HAVE_LONG_LONG 1 /* for ODBC include files */ + +#ifdef _MSC_VER +#ifdef _WIN64 +#define LENP_OR_POINTER_T SQLLEN * +#else +#define LENP_OR_POINTER_T SQLPOINTER +#endif +#else +#define LENP_OR_POINTER_T SQLLEN * +#endif +#define SIZEOF_SQLWCHAR 2 + +/* #undef WORDS_BIGENDIAN */ + +/* Does your compiler support `ssize_t' type? (Posix type) */ +#ifndef ssize_t +#define ssize_t int64_t +#endif + +/* The size of `__int128', as computed by sizeof. 
*/ +/* #undef SIZEOF___INT128 */ + +/* The size of `__int128_t', as computed by sizeof. */ +/* #undef SIZEOF___INT128_T */ + +/* The size of `__uint128_t', as computed by sizeof. */ +/* #undef SIZEOF___UINT128_T */ + +#ifdef SIZEOF___INT128 +typedef __int128 hge; +typedef unsigned __int128 uhge; +#define HAVE_HGE 1 +#define SIZEOF_HGE SIZEOF___INT128 +#elif defined(SIZEOF___INT128_T) && defined(SIZEOF___UINT128_T) +typedef __int128_t hge; +typedef __uint128_t uhge; +#define HAVE_HGE 1 +#define SIZEOF_HGE SIZEOF___INT128_T +#endif + +// End Section: monetdb configure sizes + +/* Does your compiler support `__attribute__' extension? */ +#if !defined(__GNUC__) && !defined(__clang__) && !defined(__attribute__) +#define __attribute__(a) +#endif + +#if !defined(__cplusplus) || (__cplusplus < 201103L&&(!defined(_MSC_VER)||_MSC_VER<1600)) +#ifndef static_assert +/* static_assert is a C11/C++11 feature, defined in assert.h which also exists + * in many other compilers we ignore it if the compiler doesn't support it + * However in C11 static_assert is a macro, while on C++11 is a keyword */ +#define static_assert(expr, mesg) ((void) 0) +#endif +#endif + +#ifdef HAVE_STRINGS_H +#include /* strcasecmp */ +#endif + +#ifdef _MSC_VER + +#define strdup(s) _strdup(s) + +#ifndef strcasecmp +#define strcasecmp(x,y) _stricmp(x,y) +#endif + +/* Define to 1 if you have the `strncasecmp' function. */ +#define HAVE_STRNCASECMP 1 +#ifndef strncasecmp +#define strncasecmp(x,y,z) _strnicmp(x,y,z) +#endif + +#include +#ifdef lstat +#undef lstat +#endif +#define lstat _stat64 +#ifdef stat +#undef stat +#endif +#define stat _stat64 +#ifdef fstat +#undef fstat +#endif +#define fstat _fstat64 + +static inline char * +stpcpy(char *__restrict__ dst, const char *__restrict__ src) +{ + size_t i; + for (i = 0; src[i]; i++) + dst[i] = src[i]; + dst[i] = 0; + return dst + i; +} + +/* Define to 1 if the system has the type `socklen_t'. */ +#define HAVE_SOCKLEN_T 1 +/* type used by connect */ +#define socklen_t int +#define strtok_r(t,d,c) strtok_s(t,d,c) + +#define HAVE_GETOPT_LONG 1 + +/* there is something very similar to localtime_r on Windows: */ +#include +#define HAVE_LOCALTIME_R 1 +static inline struct tm * +localtime_r(const time_t *__restrict__ timep, struct tm *__restrict__ result) +{ + return localtime_s(result, timep) == 0 ? result : NULL; +} +#define HAVE_GMTIME_R 1 +static inline struct tm * +gmtime_r(const time_t *__restrict__ timep, struct tm *__restrict__ result) +{ + return gmtime_s(result, timep) == 0 ? result : NULL; +} + +/* Define if you have ctime_r(time_t*,char *buf,size_t s) */ +#define HAVE_CTIME_R 1 +#define HAVE_CTIME_R3 1 +/* there is something very similar to ctime_r on Windows: */ +#define ctime_r(t,b,s) (ctime_s(b,s,t) ? 
NULL : (b)) + +#endif /* _MSC_VER */ + +#define HAVE_SOCKLEN_T 1 +#ifndef _MSC_VER +#define SOCKET int +#define closesocket close +#endif + +#ifndef _In_z_ +#define _In_z_ +#endif +#ifndef _Printf_format_string_ +#define _Printf_format_string_ +#endif + +#ifdef _MSC_VER +#define _LIB_STARTUP_FUNC_(f,q) \ + static void f(void); \ + __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ + __pragma(comment(linker,"/include:" q #f "_")) \ + static void f(void) +#ifdef _WIN64 + #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"") +#else + #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"_") +#endif +#else +#define LIB_STARTUP_FUNC(f) \ + static void f(void) __attribute__((__constructor__)); \ + static void f(void) +#endif + +#endif /* MT_SEEN_MONETDB_CONFIG_H */ diff --git a/monetdb/msys64/monetdb_config.h b/monetdb/msys64/monetdb_config.h index 3304251..2692546 100644 --- a/monetdb/msys64/monetdb_config.h +++ b/monetdb/msys64/monetdb_config.h @@ -1,473 +1,473 @@ -/* - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - * - * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. - */ -/* monetdb_config.h.in. Generated from CMakeLists.txt */ - -#ifndef MT_SEEN_MONETDB_CONFIG_H -#define MT_SEEN_MONETDB_CONFIG_H 1 - -#ifdef _MSC_VER - -#if _MSC_VER < 1900 -#error Versions below Visual Studio 2015 are no longer supported -#endif - -/* Prevent pollution through excessive inclusion of include files by Windows.h. */ -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN 1 -#endif - -/* Visual Studio 8 has deprecated lots of stuff: suppress warnings */ -#ifndef _CRT_SECURE_NO_DEPRECATE -#define _CRT_SECURE_NO_DEPRECATE 1 -#endif - -#define _CRT_RAND_S /* for Windows rand_s, before stdlib.h */ -#define HAVE_RAND_S 1 - -#endif - -#if !defined(_XOPEN_SOURCE) && defined(__CYGWIN__) -#define _XOPEN_SOURCE 700 -#endif - -#include -#if defined(_MSC_VER) && defined(_DEBUG) && defined(_CRTDBG_MAP_ALLOC) -/* In this case, malloc and friends are redefined in crtdbg.h to debug - * versions. We need to include stdlib.h first or else we get - * conflicting declarations. 
*/ -#include -#endif - -#define HAVE_SYS_TYPES_H 1 -#ifdef HAVE_SYS_TYPES_H -# include -#endif - -/* standard C-99 include files */ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef _MSC_VER - -/* Windows include files */ -#include -#include -#include - -/* indicate to sqltypes.h that windows.h has already been included and - that it doesn't have to define Windows constants */ -#define ALREADY_HAVE_WINDOWS_TYPE 1 - -#define NATIVE_WIN32 1 - -#endif /* _MSC_VER */ - -#if !defined(WIN32) && (defined(__CYGWIN__)||defined(__MINGW32__)) -#define WIN32 1 -#endif - -// Section: monetdb configure defines -/* #undef HAVE_DISPATCH_DISPATCH_H */ -#define HAVE_DLFCN_H 1 -#define HAVE_FCNTL_H 1 -#define HAVE_IO_H 1 -/* #undef HAVE_KVM_H */ -#define HAVE_LIBGEN_H 1 -/* #undef HAVE_LIBINTL_H */ -/* #undef HAVE_MACH_MACH_INIT_H */ -/* #undef HAVE_MACH_TASK_H */ -/* #undef HAVE_MACH_O_DYLD_H */ -#define HAVE_NETDB_H 1 -#define HAVE_NETINET_IN_H 1 -#define HAVE_POLL_H 1 -/* #undef HAVE_PROCFS_H */ -#define HAVE_PWD_H 1 -#define HAVE_STRINGS_H 1 -/* #undef HAVE_STROPTS_H */ -#define HAVE_SYS_FILE_H 1 -#define HAVE_SYS_IOCTL_H 1 -/* #undef HAVE_SYS_SYSCTL_H */ -#define HAVE_SYS_MMAN_H 1 -#define HAVE_SYS_PARAM_H 1 -#define HAVE_SYS_RANDOM_H 1 -#define HAVE_SYS_RESOURCE_H 1 -#define HAVE_SYS_TIME_H 1 -#define HAVE_SYS_TIMES_H 1 -#define HAVE_SYS_UIO_H 1 -#define HAVE_SYS_UN_H 1 -#define HAVE_SYS_WAIT_H 1 -#define HAVE_TERMIOS_H 1 -#define HAVE_UNISTD_H 1 -#define HAVE_WINSOCK_H 1 -#define HAVE_SEMAPHORE_H 1 -#define HAVE_GETOPT_H 1 - -#define HAVE_STDATOMIC_H 1 - -#define HAVE_DIRENT_H 1 -#define HAVE_SYS_SOCKET_H 1 -#define HAVE_GETTIMEOFDAY 1 -#define HAVE_SYS_STAT_H 1 -#define HAVE_FDATASYNC 1 -#define HAVE_ACCEPT4 1 -#define HAVE_ASCTIME_R 1 -#define HAVE_CLOCK_GETTIME 1 -#define HAVE_CTIME_R 1 -/* #undef HAVE_DISPATCH_SEMAPHORE_CREATE */ -/* #undef HAVE_FALLOCATE */ -#define HAVE_FCNTL 1 -#define HAVE_FORK 1 -#define HAVE_FSYNC 1 -#define HAVE_FTIME 1 -#define HAVE_GETENTROPY 1 -/* #undef HAVE_GETEXECNAME */ -#define HAVE_GETLOGIN 1 -#define HAVE_GETOPT_LONG 1 -#define HAVE_GETRLIMIT 1 -#define HAVE_GETTIMEOFDAY 1 -#define HAVE_GETUID 1 -#define HAVE_GMTIME_R 1 -#define HAVE_LOCALTIME_R 1 -#define HAVE_STRERROR_R 1 -#define HAVE_LOCKF 1 -#define HAVE_MADVISE 1 -/* #undef HAVE_MREMAP */ -#define HAVE_NANOSLEEP 1 -#define HAVE_NL_LANGINFO 1 -/* #undef HAVE__NSGETEXECUTABLEPATH */ -/* #undef HAVE_PIPE2 */ -#define HAVE_POLL 1 -#define HAVE_POPEN 1 -#define HAVE_POSIX_FADVISE 1 -#define HAVE_POSIX_FALLOCATE 1 -#define HAVE_POSIX_MADVISE 1 -#define HAVE_PUTENV 1 -#define HAVE_SETSID 1 -#define HAVE_SHUTDOWN 1 -#define HAVE_SIGACTION 1 -#define HAVE_STPCPY 1 -#define HAVE_STRCASESTR 1 -#define HAVE_STRNCASECMP 1 -#define HAVE_STRPTIME 1 -#define HAVE_STRSIGNAL 1 -#define HAVE_SYSCONF 1 -/* #undef HAVE_TASK_INFO */ -#define HAVE_TIMES 1 -#define HAVE_UNAME 1 -/* #undef HAVE_SEMTIMEDOP */ -#define HAVE_PTHREAD_KILL 1 -#define HAVE_PTHREAD_SIGMASK 1 -#define HAVE_GETOPT 1 - -#define ICONV_CONST -#define FLEXIBLE_ARRAY_MEMBER -#define ENABLE_MAPI 1 -#define HAVE_MAPI 1 -// End Section: monetdb configure defines - -// Section: monetdb macro variables -/* #undef HAVE_ICONV */ -#define HAVE_PTHREAD_H 1 -/* #undef HAVE_LIBPCRE */ -/* #undef HAVE_LIBBZ2 */ -/* #undef HAVE_CURL */ -/* #undef HAVE_LIBLZMA */ -/* #undef HAVE_LIBXML */ -/* #undef HAVE_LIBZ */ -/* #undef HAVE_LIBLZ4 */ -/* #undef HAVE_PROJ */ -/* #undef HAVE_SNAPPY */ -/* #undef HAVE_FITS */ -/* #undef HAVE_VALGRIND */ 
-/* #undef HAVE_NETCDF */ -/* #undef HAVE_READLINE */ -/* #undef HAVE_LIBR */ -#define RHOME "/registry" -/* #undef HAVE_GEOM */ -/* #undef HAVE_SHP */ -/* #undef HAVE_LIBPY3 */ - -// #define SOCKET_LIBRARIES -#define HAVE_GETADDRINFO 1 -/* #undef HAVE_CUDF */ - -#define MAPI_PORT 50000 -#define MAPI_PORT_STR "50000" - -#ifdef _MSC_VER -#define DIR_SEP '\\' -#define PATH_SEP ';' -#define DIR_SEP_STR "\\" -#define SO_PREFIX "" -#else -#define DIR_SEP '/' -#define PATH_SEP ':' -#define DIR_SEP_STR "/" -#define SO_PREFIX "lib" -#endif -#define SO_EXT ".dll" - -#define BINDIR "C:/Program Files (x86)/MonetDB/bin" -#define LIBDIR "C:/Program Files (x86)/MonetDB/lib" -#define LOCALSTATEDIR "C:/Program Files (x86)/MonetDB/var" - -// End Section: monetdb macro variables - -// Section: monetdb configure misc -#define MONETDB_RELEASE "unreleased" - -#define MONETDB_VERSION "11.44.0" -#define MONETDB_VERSION_MAJOR 11 -#define MONETDB_VERSION_MINOR 44 -#define MONETDB_VERSION_PATCH 0 - -#define GDK_VERSION "25.1.0" -#define GDK_VERSION_MAJOR 25 -#define GDK_VERSION_MINOR 1 -#define GDK_VERSION_PATCH 0 -#define MAPI_VERSION "14.0.2" -#define MAPI_VERSION_MAJOR 14 -#define MAPI_VERSION_MINOR 0 -#define MAPI_VERSION_PATCH 2 -#define MONETDB5_VERSION "32.0.6" -#define MONETDB5_VERSION_MAJOR 32 -#define MONETDB5_VERSION_MINOR 0 -#define MONETDB5_VERSION_PATCH 6 -#define MONETDBE_VERSION "3.0.2" -#define MONETDBE_VERSION_MAJOR 3 -#define MONETDBE_VERSION_MINOR 0 -#define MONETDBE_VERSION_PATCH 2 -#define STREAM_VERSION "16.0.1" -#define STREAM_VERSION_MAJOR 16 -#define STREAM_VERSION_MINOR 0 -#define STREAM_VERSION_PATCH 1 -#define SQL_VERSION "12.0.5" -#define SQL_VERSION_MAJOR 12 -#define SQL_VERSION_MINOR 0 -#define SQL_VERSION_PATCH 5 - -/* Host identifier */ -#define HOST "amd64-pc-windows-gnu" - -/* The used password hash algorithm */ -#define MONETDB5_PASSWDHASH "SHA512" - -/* The used password hash algorithm */ -#define MONETDB5_PASSWDHASH_TOKEN SHA512 - -#ifndef _Noreturn -#ifdef __cplusplus -#define _Noreturn -#else -/* #undef _Noreturn */ -#endif -#endif -/* Does your compiler support `inline' keyword? (C99 feature) */ -#ifndef inline -#ifdef __cplusplus -#define inline -#else -/* #undef inline */ -#endif -#endif -/* Does your compiler support `restrict' keyword? (C99 feature) */ -#ifndef restrict -#ifdef __cplusplus -#define restrict -#else -/* #undef restrict */ -#endif -#endif - -// End Section: monetdb configure misc - -// Section: monetdb configure sizes -#define SIZEOF_SIZE_T 8 - -/* The size of `void *', as computed by sizeof. */ -#define SIZEOF_VOID_P 8 - -#define SIZEOF_CHAR 1 -#define SIZEOF_SHORT 2 -#define SIZEOF_INT 4 -#define SIZEOF_LONG 8 -#define SIZEOF_LONG_LONG 8 -#define SIZEOF_DOUBLE 8 -#define SIZEOF_WCHAR_T 2 -#define HAVE_LONG_LONG 1 /* for ODBC include files */ - -#ifdef _MSC_VER -#ifdef _WIN64 -#define LENP_OR_POINTER_T SQLLEN * -#else -#define LENP_OR_POINTER_T SQLPOINTER -#endif -#else -/* #undef LENP_OR_POINTER_T */ -#endif -/* #undef SIZEOF_SQLWCHAR */ - -/* #undef WORDS_BIGENDIAN */ - -/* Does your compiler support `ssize_t' type? (Posix type) */ -#ifndef ssize_t -/* #undef ssize_t */ -#endif - -/* The size of `__int128', as computed by sizeof. */ -#define SIZEOF___INT128 16 - -/* The size of `__int128_t', as computed by sizeof. */ -#define SIZEOF___INT128_T 16 - -/* The size of `__uint128_t', as computed by sizeof. 
*/ -#define SIZEOF___UINT128_T 16 - -#define HAVE___INT128 1 -#define HAVE___INT128_T 1 -#define HAVE___UINT128_T 1 -/* #undef HAVE_HGE */ - -#ifdef HAVE_HGE -#ifdef HAVE___INT128 -typedef __int128 hge; -typedef unsigned __int128 uhge; -#define SIZEOF_HGE SIZEOF___INT128 -#elif defined(HAVE___INT128_T) && defined(HAVE___UINT128_T) -typedef __int128_t hge; -typedef __uint128_t uhge; -#define SIZEOF_HGE SIZEOF___INT128_T -#endif -#endif - -// End Section: monetdb configure sizes - -/* Does your compiler support `__attribute__' extension? */ -#if !defined(__GNUC__) && !defined(__clang__) && !defined(__attribute__) -#define __attribute__(a) -#endif - -#if !defined(__cplusplus) || __cplusplus < 201103L -#ifndef static_assert -/* static_assert is a C11/C++11 feature, defined in assert.h which also exists - * in many other compilers we ignore it if the compiler doesn't support it - * However in C11 static_assert is a macro, while on C++11 is a keyword */ -#define static_assert(expr, mesg) ((void) 0) -#endif -#endif - -#ifdef HAVE_STRINGS_H -#include /* strcasecmp */ -#endif - -#ifdef _MSC_VER - -#define strdup(s) _strdup(s) - -#ifndef strcasecmp -#define strcasecmp(x,y) _stricmp(x,y) -#endif - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 -#ifndef strncasecmp -#define strncasecmp(x,y,z) _strnicmp(x,y,z) -#endif - -#include -#ifdef lstat -#undef lstat -#endif -#define lstat _stat64 -#ifdef stat -#undef stat -#endif -#define stat _stat64 -#ifdef fstat -#undef fstat -#endif -#define fstat _fstat64 - -static inline char * -stpcpy(char *restrict dst, const char *restrict src) -{ - size_t i; - for (i = 0; src[i]; i++) - dst[i] = src[i]; - dst[i] = 0; - return dst + i; -} - -/* Define to 1 if the system has the type `socklen_t'. */ -#define HAVE_SOCKLEN_T 1 -/* type used by connect */ -#define socklen_t int -#define strtok_r(t,d,c) strtok_s(t,d,c) - -#define HAVE_GETOPT_LONG 1 - -/* there is something very similar to localtime_r on Windows: */ -#include -#define HAVE_LOCALTIME_R 1 -static inline struct tm * -localtime_r(const time_t *restrict timep, struct tm *restrict result) -{ - return localtime_s(result, timep) == 0 ? result : NULL; -} -#define HAVE_GMTIME_R 1 -static inline struct tm * -gmtime_r(const time_t *restrict timep, struct tm *restrict result) -{ - return gmtime_s(result, timep) == 0 ? result : NULL; -} - -/* Define if you have ctime_r(time_t*,char *buf,size_t s) */ -#define HAVE_CTIME_R 1 -#define HAVE_CTIME_R3 1 -/* there is something very similar to ctime_r on Windows: */ -#define ctime_r(t,b,s) (ctime_s(b,s,t) ? NULL : (b)) - -#endif /* _MSC_VER */ - -/* #undef HAVE_SOCKLEN_T */ -#ifndef _MSC_VER -#define SOCKET int -#define closesocket close -#endif - -#ifndef _In_z_ -#define _In_z_ -#endif -#ifndef _Printf_format_string_ -#define _Printf_format_string_ -#endif - -#ifdef _MSC_VER -#define _LIB_STARTUP_FUNC_(f,q) \ - static void f(void); \ - __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ - __pragma(comment(linker,"/include:" q #f "_")) \ - static void f(void) -#ifdef _WIN64 - #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"") -#else - #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"_") -#endif -#else -#define LIB_STARTUP_FUNC(f) \ - static void f(void) __attribute__((__constructor__)); \ - static void f(void) -#endif - -#endif /* MT_SEEN_MONETDB_CONFIG_H */ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. + */ +/* monetdb_config.h.in. Generated from CMakeLists.txt */ + +#ifndef MT_SEEN_MONETDB_CONFIG_H +#define MT_SEEN_MONETDB_CONFIG_H 1 + +#ifdef _MSC_VER + +#if _MSC_VER < 1900 +#error Versions below Visual Studio 2015 are no longer supported +#endif + +/* Prevent pollution through excessive inclusion of include files by Windows.h. */ +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif + +/* Visual Studio 8 has deprecated lots of stuff: suppress warnings */ +#ifndef _CRT_SECURE_NO_DEPRECATE +#define _CRT_SECURE_NO_DEPRECATE 1 +#endif + +#define _CRT_RAND_S /* for Windows rand_s, before stdlib.h */ +#define HAVE_RAND_S 1 + +#endif + +#if !defined(_XOPEN_SOURCE) && defined(__CYGWIN__) +#define _XOPEN_SOURCE 700 +#endif + +#include +#if defined(_MSC_VER) && defined(_DEBUG) && defined(_CRTDBG_MAP_ALLOC) +/* In this case, malloc and friends are redefined in crtdbg.h to debug + * versions. We need to include stdlib.h first or else we get + * conflicting declarations. */ +#include +#endif + +#define HAVE_SYS_TYPES_H 1 +#ifdef HAVE_SYS_TYPES_H +# include +#endif + +/* standard C-99 include files */ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + +/* Windows include files */ +#include +#include +#include + +/* indicate to sqltypes.h that windows.h has already been included and + that it doesn't have to define Windows constants */ +#define ALREADY_HAVE_WINDOWS_TYPE 1 + +#define NATIVE_WIN32 1 + +#endif /* _MSC_VER */ + +#if !defined(WIN32) && (defined(__CYGWIN__)||defined(__MINGW32__)) +#define WIN32 1 +#endif + +// Section: monetdb configure defines +/* #undef HAVE_DISPATCH_DISPATCH_H */ +#define HAVE_DLFCN_H 1 +#define HAVE_FCNTL_H 1 +#define HAVE_IO_H 1 +/* #undef HAVE_KVM_H */ +#define HAVE_LIBGEN_H 1 +/* #undef HAVE_LIBINTL_H */ +/* #undef HAVE_MACH_MACH_INIT_H */ +/* #undef HAVE_MACH_TASK_H */ +/* #undef HAVE_MACH_O_DYLD_H */ +#define HAVE_NETDB_H 1 +#define HAVE_NETINET_IN_H 1 +#define HAVE_POLL_H 1 +/* #undef HAVE_PROCFS_H */ +#define HAVE_PWD_H 1 +#define HAVE_STRINGS_H 1 +/* #undef HAVE_STROPTS_H */ +#define HAVE_SYS_FILE_H 1 +#define HAVE_SYS_IOCTL_H 1 +/* #undef HAVE_SYS_SYSCTL_H */ +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_RANDOM_H 1 +#define HAVE_SYS_RESOURCE_H 1 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_TIMES_H 1 +#define HAVE_SYS_UIO_H 1 +#define HAVE_SYS_UN_H 1 +#define HAVE_SYS_WAIT_H 1 +#define HAVE_TERMIOS_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_WINSOCK_H 1 +#define HAVE_SEMAPHORE_H 1 +#define HAVE_GETOPT_H 1 + +#define HAVE_STDATOMIC_H 1 + +#define HAVE_DIRENT_H 1 +#define HAVE_SYS_SOCKET_H 1 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_SYS_STAT_H 1 +#define HAVE_FDATASYNC 1 +#define HAVE_ACCEPT4 1 +#define HAVE_ASCTIME_R 1 +#define HAVE_CLOCK_GETTIME 1 +#define HAVE_CTIME_R 1 +/* #undef HAVE_DISPATCH_SEMAPHORE_CREATE */ +/* #undef HAVE_FALLOCATE */ +#define HAVE_FCNTL 1 +#define HAVE_FORK 1 +#define HAVE_FSYNC 1 +#define HAVE_FTIME 1 +#define HAVE_GETENTROPY 1 +/* #undef HAVE_GETEXECNAME */ +#define HAVE_GETLOGIN 1 +#define HAVE_GETOPT_LONG 1 +#define HAVE_GETRLIMIT 1 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_GETUID 1 +#define HAVE_GMTIME_R 1 +#define HAVE_LOCALTIME_R 1 +#define HAVE_STRERROR_R 1 +#define HAVE_LOCKF 1 +#define HAVE_MADVISE 1 +/* #undef HAVE_MREMAP */ +#define HAVE_NANOSLEEP 1 
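The HAVE_* probes in this generated header gate platform-specific code paths throughout the tree; the remaining probes continue below. As a purely illustrative sketch (a hypothetical consumer, not part of this diff), the nanosleep probe just defined would typically be used like this:

#include "monetdb_config.h"

#ifdef HAVE_NANOSLEEP
#include <time.h>
static void msleep50(void)
{
	struct timespec ts = { 0, 50 * 1000000L };	/* 50 ms */
	nanosleep(&ts, NULL);	/* POSIX path, available per HAVE_NANOSLEEP above */
}
#else
#include <windows.h>
static void msleep50(void)
{
	Sleep(50);	/* Win32 fallback when the probe failed */
}
#endif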
+#define HAVE_NL_LANGINFO 1 +/* #undef HAVE__NSGETEXECUTABLEPATH */ +/* #undef HAVE_PIPE2 */ +#define HAVE_POLL 1 +#define HAVE_POPEN 1 +#define HAVE_POSIX_FADVISE 1 +#define HAVE_POSIX_FALLOCATE 1 +#define HAVE_POSIX_MADVISE 1 +#define HAVE_PUTENV 1 +#define HAVE_SETSID 1 +#define HAVE_SHUTDOWN 1 +#define HAVE_SIGACTION 1 +#define HAVE_STPCPY 1 +#define HAVE_STRCASESTR 1 +#define HAVE_STRNCASECMP 1 +#define HAVE_STRPTIME 1 +#define HAVE_STRSIGNAL 1 +#define HAVE_SYSCONF 1 +/* #undef HAVE_TASK_INFO */ +#define HAVE_TIMES 1 +#define HAVE_UNAME 1 +/* #undef HAVE_SEMTIMEDOP */ +#define HAVE_PTHREAD_KILL 1 +#define HAVE_PTHREAD_SIGMASK 1 +#define HAVE_GETOPT 1 + +#define ICONV_CONST +#define FLEXIBLE_ARRAY_MEMBER +#define ENABLE_MAPI 1 +#define HAVE_MAPI 1 +// End Section: monetdb configure defines + +// Section: monetdb macro variables +/* #undef HAVE_ICONV */ +#define HAVE_PTHREAD_H 1 +/* #undef HAVE_LIBPCRE */ +/* #undef HAVE_LIBBZ2 */ +/* #undef HAVE_CURL */ +/* #undef HAVE_LIBLZMA */ +/* #undef HAVE_LIBXML */ +/* #undef HAVE_LIBZ */ +/* #undef HAVE_LIBLZ4 */ +/* #undef HAVE_PROJ */ +/* #undef HAVE_SNAPPY */ +/* #undef HAVE_FITS */ +/* #undef HAVE_VALGRIND */ +/* #undef HAVE_NETCDF */ +/* #undef HAVE_READLINE */ +/* #undef HAVE_LIBR */ +#define RHOME "/registry" +/* #undef HAVE_GEOM */ +/* #undef HAVE_SHP */ +/* #undef HAVE_LIBPY3 */ + +// #define SOCKET_LIBRARIES +#define HAVE_GETADDRINFO 1 +/* #undef HAVE_CUDF */ + +#define MAPI_PORT 50000 +#define MAPI_PORT_STR "50000" + +#ifdef _MSC_VER +#define DIR_SEP '\\' +#define PATH_SEP ';' +#define DIR_SEP_STR "\\" +#define SO_PREFIX "" +#else +#define DIR_SEP '/' +#define PATH_SEP ':' +#define DIR_SEP_STR "/" +#define SO_PREFIX "lib" +#endif +#define SO_EXT ".dll" + +#define BINDIR "C:/Program Files (x86)/MonetDB/bin" +#define LIBDIR "C:/Program Files (x86)/MonetDB/lib" +#define LOCALSTATEDIR "C:/Program Files (x86)/MonetDB/var" + +// End Section: monetdb macro variables + +// Section: monetdb configure misc +#define MONETDB_RELEASE "unreleased" + +#define MONETDB_VERSION "11.44.0" +#define MONETDB_VERSION_MAJOR 11 +#define MONETDB_VERSION_MINOR 44 +#define MONETDB_VERSION_PATCH 0 + +#define GDK_VERSION "25.1.0" +#define GDK_VERSION_MAJOR 25 +#define GDK_VERSION_MINOR 1 +#define GDK_VERSION_PATCH 0 +#define MAPI_VERSION "14.0.2" +#define MAPI_VERSION_MAJOR 14 +#define MAPI_VERSION_MINOR 0 +#define MAPI_VERSION_PATCH 2 +#define MONETDB5_VERSION "32.0.6" +#define MONETDB5_VERSION_MAJOR 32 +#define MONETDB5_VERSION_MINOR 0 +#define MONETDB5_VERSION_PATCH 6 +#define MONETDBE_VERSION "3.0.2" +#define MONETDBE_VERSION_MAJOR 3 +#define MONETDBE_VERSION_MINOR 0 +#define MONETDBE_VERSION_PATCH 2 +#define STREAM_VERSION "16.0.1" +#define STREAM_VERSION_MAJOR 16 +#define STREAM_VERSION_MINOR 0 +#define STREAM_VERSION_PATCH 1 +#define SQL_VERSION "12.0.5" +#define SQL_VERSION_MAJOR 12 +#define SQL_VERSION_MINOR 0 +#define SQL_VERSION_PATCH 5 + +/* Host identifier */ +#define HOST "amd64-pc-windows-gnu" + +/* The used password hash algorithm */ +#define MONETDB5_PASSWDHASH "SHA512" + +/* The used password hash algorithm */ +#define MONETDB5_PASSWDHASH_TOKEN SHA512 + +#ifndef _Noreturn +#ifdef __cplusplus +#define _Noreturn +#else +/* #undef _Noreturn */ +#endif +#endif +/* Does your compiler support `inline' keyword? (C99 feature) */ +#ifndef inline +#ifdef __cplusplus +#define inline +#else +/* #undef inline */ +#endif +#endif +/* Does your compiler support `restrict' keyword? 
(C99 feature) */ +#ifndef restrict +#ifdef __cplusplus +#define restrict +#else +/* #undef restrict */ +#endif +#endif + +// End Section: monetdb configure misc + +// Section: monetdb configure sizes +#define SIZEOF_SIZE_T 8 + +/* The size of `void *', as computed by sizeof. */ +#define SIZEOF_VOID_P 8 + +#define SIZEOF_CHAR 1 +#define SIZEOF_SHORT 2 +#define SIZEOF_INT 4 +#define SIZEOF_LONG 8 +#define SIZEOF_LONG_LONG 8 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_WCHAR_T 2 +#define HAVE_LONG_LONG 1 /* for ODBC include files */ + +#ifdef _MSC_VER +#ifdef _WIN64 +#define LENP_OR_POINTER_T SQLLEN * +#else +#define LENP_OR_POINTER_T SQLPOINTER +#endif +#else +/* #undef LENP_OR_POINTER_T */ +#endif +/* #undef SIZEOF_SQLWCHAR */ + +/* #undef WORDS_BIGENDIAN */ + +/* Does your compiler support `ssize_t' type? (Posix type) */ +#ifndef ssize_t +/* #undef ssize_t */ +#endif + +/* The size of `__int128', as computed by sizeof. */ +#define SIZEOF___INT128 16 + +/* The size of `__int128_t', as computed by sizeof. */ +#define SIZEOF___INT128_T 16 + +/* The size of `__uint128_t', as computed by sizeof. */ +#define SIZEOF___UINT128_T 16 + +#define HAVE___INT128 1 +#define HAVE___INT128_T 1 +#define HAVE___UINT128_T 1 +/* #undef HAVE_HGE */ + +#ifdef HAVE_HGE +#ifdef HAVE___INT128 +typedef __int128 hge; +typedef unsigned __int128 uhge; +#define SIZEOF_HGE SIZEOF___INT128 +#elif defined(HAVE___INT128_T) && defined(HAVE___UINT128_T) +typedef __int128_t hge; +typedef __uint128_t uhge; +#define SIZEOF_HGE SIZEOF___INT128_T +#endif +#endif + +// End Section: monetdb configure sizes + +/* Does your compiler support `__attribute__' extension? */ +#if !defined(__GNUC__) && !defined(__clang__) && !defined(__attribute__) +#define __attribute__(a) +#endif + +#if !defined(__cplusplus) || __cplusplus < 201103L +#ifndef static_assert +/* static_assert is a C11/C++11 feature, defined in assert.h which also exists + * in many other compilers we ignore it if the compiler doesn't support it + * However in C11 static_assert is a macro, while on C++11 is a keyword */ +#define static_assert(expr, mesg) ((void) 0) +#endif +#endif + +#ifdef HAVE_STRINGS_H +#include /* strcasecmp */ +#endif + +#ifdef _MSC_VER + +#define strdup(s) _strdup(s) + +#ifndef strcasecmp +#define strcasecmp(x,y) _stricmp(x,y) +#endif + +/* Define to 1 if you have the `strncasecmp' function. */ +#define HAVE_STRNCASECMP 1 +#ifndef strncasecmp +#define strncasecmp(x,y,z) _strnicmp(x,y,z) +#endif + +#include +#ifdef lstat +#undef lstat +#endif +#define lstat _stat64 +#ifdef stat +#undef stat +#endif +#define stat _stat64 +#ifdef fstat +#undef fstat +#endif +#define fstat _fstat64 + +static inline char * +stpcpy(char *restrict dst, const char *restrict src) +{ + size_t i; + for (i = 0; src[i]; i++) + dst[i] = src[i]; + dst[i] = 0; + return dst + i; +} + +/* Define to 1 if the system has the type `socklen_t'. */ +#define HAVE_SOCKLEN_T 1 +/* type used by connect */ +#define socklen_t int +#define strtok_r(t,d,c) strtok_s(t,d,c) + +#define HAVE_GETOPT_LONG 1 + +/* there is something very similar to localtime_r on Windows: */ +#include +#define HAVE_LOCALTIME_R 1 +static inline struct tm * +localtime_r(const time_t *restrict timep, struct tm *restrict result) +{ + return localtime_s(result, timep) == 0 ? result : NULL; +} +#define HAVE_GMTIME_R 1 +static inline struct tm * +gmtime_r(const time_t *restrict timep, struct tm *restrict result) +{ + return gmtime_s(result, timep) == 0 ? 
result : NULL; +} + +/* Define if you have ctime_r(time_t*,char *buf,size_t s) */ +#define HAVE_CTIME_R 1 +#define HAVE_CTIME_R3 1 +/* there is something very similar to ctime_r on Windows: */ +#define ctime_r(t,b,s) (ctime_s(b,s,t) ? NULL : (b)) + +#endif /* _MSC_VER */ + +/* #undef HAVE_SOCKLEN_T */ +#ifndef _MSC_VER +#define SOCKET int +#define closesocket close +#endif + +#ifndef _In_z_ +#define _In_z_ +#endif +#ifndef _Printf_format_string_ +#define _Printf_format_string_ +#endif + +#ifdef _MSC_VER +#define _LIB_STARTUP_FUNC_(f,q) \ + static void f(void); \ + __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ + __pragma(comment(linker,"/include:" q #f "_")) \ + static void f(void) +#ifdef _WIN64 + #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"") +#else + #define LIB_STARTUP_FUNC(f) _LIB_STARTUP_FUNC_(f,"_") +#endif +#else +#define LIB_STARTUP_FUNC(f) \ + static void f(void) __attribute__((__constructor__)); \ + static void f(void) +#endif + +#endif /* MT_SEEN_MONETDB_CONFIG_H */ diff --git a/monetdb/msys64/monetdbe.h b/monetdb/msys64/monetdbe.h index 614a540..5c6df57 100644 --- a/monetdb/msys64/monetdbe.h +++ b/monetdb/msys64/monetdbe.h @@ -1,190 +1,190 @@ -/* - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - * - * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. - */ - -#ifndef _MONETDBE_LIB_ -#define _MONETDBE_LIB_ - -#include "monetdb_config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#ifdef WIN32 -#ifndef LIBMONETDBE -#define monetdbe_export extern __declspec(dllimport) -#else -#define monetdbe_export extern __declspec(dllexport) -#endif -#else -#define monetdbe_export extern -#endif - -typedef int64_t monetdbe_cnt; - -typedef struct { - unsigned char day; - unsigned char month; - short year; -} monetdbe_data_date; - -typedef struct { - unsigned int ms; - unsigned char seconds; - unsigned char minutes; - unsigned char hours; -} monetdbe_data_time; - -typedef struct { - monetdbe_data_date date; - monetdbe_data_time time; -} monetdbe_data_timestamp; - -typedef struct { - size_t size; - char* data; -} monetdbe_data_blob; - -typedef enum { - monetdbe_bool, monetdbe_int8_t, monetdbe_int16_t, monetdbe_int32_t, monetdbe_int64_t, -#ifdef HAVE_HGE - monetdbe_int128_t, -#endif - monetdbe_size_t, monetdbe_float, monetdbe_double, - monetdbe_str, monetdbe_blob, - monetdbe_date, monetdbe_time, monetdbe_timestamp, - - // should be last: - monetdbe_type_unknown -} monetdbe_types; - -typedef struct { - char* name; - unsigned int scale; - unsigned int digits; -} monetdbe_sql_type; - -typedef struct { - monetdbe_types type; - monetdbe_sql_type sql_type; - void *data; - size_t count; - char* name; -} monetdbe_column; - -typedef struct { - size_t nparam; - monetdbe_types *type; -} monetdbe_statement; - -typedef struct { - monetdbe_cnt nrows; - size_t ncols; - char *name; - monetdbe_cnt last_id; /* last auto incremented id */ -} monetdbe_result; - -typedef void* monetdbe_database; - -typedef struct { - const char *host; - int port; - const char *database; - const char *username; - const char *password; - const char *lang; -} monetdbe_remote; - -typedef struct { - const char *host; - const char* port; - const char* usock; -} monetdbe_mapi_server; - -typedef struct { - int memorylimit; // top off the amount of RAM to be used, in MB - int querytimeout; // graceful terminate query after a few seconds - int 
sessiontimeout; // graceful terminate the session after a few seconds - int nr_threads; // maximum number of worker treads, limits level of parallelism - monetdbe_remote* remote; - monetdbe_mapi_server* mapi_server; - const char *trace_file; // file to which log output should be written -} monetdbe_options; - -#define DEFAULT_STRUCT_DEFINITION(ctype, typename) \ - typedef struct \ - { \ - monetdbe_types type; \ - monetdbe_sql_type sql_type; \ - ctype *data; \ - size_t count; \ - char *name; \ - ctype null_value; \ - double scale; \ - int (*is_null)(ctype *value); \ - } monetdbe_column_##typename - -DEFAULT_STRUCT_DEFINITION(int8_t, bool); -DEFAULT_STRUCT_DEFINITION(int8_t, int8_t); -DEFAULT_STRUCT_DEFINITION(int16_t, int16_t); -DEFAULT_STRUCT_DEFINITION(int32_t, int32_t); -DEFAULT_STRUCT_DEFINITION(int64_t, int64_t); -#ifdef HAVE_HGE -DEFAULT_STRUCT_DEFINITION(__int128, int128_t); -#endif -DEFAULT_STRUCT_DEFINITION(size_t, size_t); - -DEFAULT_STRUCT_DEFINITION(float, float); -DEFAULT_STRUCT_DEFINITION(double, double); - -DEFAULT_STRUCT_DEFINITION(char *, str); -DEFAULT_STRUCT_DEFINITION(monetdbe_data_blob, blob); - -DEFAULT_STRUCT_DEFINITION(monetdbe_data_date, date); -DEFAULT_STRUCT_DEFINITION(monetdbe_data_time, time); -DEFAULT_STRUCT_DEFINITION(monetdbe_data_timestamp, timestamp); -// UUID, INET, XML ? - -monetdbe_export const char *monetdbe_version(void); - -monetdbe_export int monetdbe_open(monetdbe_database *db, char *url, monetdbe_options *opts); -/* 0 ok, -1 (allocation failed), -2 error in db */ -monetdbe_export int monetdbe_close(monetdbe_database db); - -monetdbe_export char* monetdbe_error(monetdbe_database db); - -monetdbe_export char* monetdbe_get_autocommit(monetdbe_database dbhdl, int* result); -monetdbe_export char* monetdbe_set_autocommit(monetdbe_database dbhdl, int value); -monetdbe_export int monetdbe_in_transaction(monetdbe_database dbhdl); - -monetdbe_export char* monetdbe_query(monetdbe_database dbhdl, char* query, monetdbe_result** result, monetdbe_cnt* affected_rows); -monetdbe_export char* monetdbe_result_fetch(monetdbe_result *mres, monetdbe_column** res, size_t column_index); -monetdbe_export char* monetdbe_cleanup_result(monetdbe_database dbhdl, monetdbe_result* result); - -monetdbe_export char* monetdbe_prepare(monetdbe_database dbhdl, char *query, monetdbe_statement **stmt, monetdbe_result** result); -monetdbe_export char* monetdbe_bind(monetdbe_statement *stmt, void *data, size_t parameter_nr); -monetdbe_export char* monetdbe_execute(monetdbe_statement *stmt, monetdbe_result **result, monetdbe_cnt* affected_rows); -monetdbe_export char* monetdbe_cleanup_statement(monetdbe_database dbhdl, monetdbe_statement *stmt); - -monetdbe_export char* monetdbe_append(monetdbe_database dbhdl, const char* schema, const char* table, monetdbe_column **input, size_t column_count); -monetdbe_export const void* monetdbe_null(monetdbe_database dbhdl, monetdbe_types t); - -monetdbe_export char* monetdbe_get_columns(monetdbe_database dbhdl, const char* schema_name, const char *table_name, size_t *column_count, monetdbe_column **columns); - -monetdbe_export char* monetdbe_dump_database(monetdbe_database dbhdl, const char *backupfile); -monetdbe_export char* monetdbe_dump_table(monetdbe_database dbhdl, const char *schema_name, const char *table_name, const char *backupfile); -monetdbe_export const char* monetdbe_get_mapi_port(void); - -#ifdef __cplusplus -} -#endif - - -#endif +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Copyright 1997 - July 2008 CWI, August 2008 - 2022 MonetDB B.V. + */ + +#ifndef _MONETDBE_LIB_ +#define _MONETDBE_LIB_ + +#include "monetdb_config.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef WIN32 +#ifndef LIBMONETDBE +#define monetdbe_export extern __declspec(dllimport) +#else +#define monetdbe_export extern __declspec(dllexport) +#endif +#else +#define monetdbe_export extern +#endif + +typedef int64_t monetdbe_cnt; + +typedef struct { + unsigned char day; + unsigned char month; + short year; +} monetdbe_data_date; + +typedef struct { + unsigned int ms; + unsigned char seconds; + unsigned char minutes; + unsigned char hours; +} monetdbe_data_time; + +typedef struct { + monetdbe_data_date date; + monetdbe_data_time time; +} monetdbe_data_timestamp; + +typedef struct { + size_t size; + char* data; +} monetdbe_data_blob; + +typedef enum { + monetdbe_bool, monetdbe_int8_t, monetdbe_int16_t, monetdbe_int32_t, monetdbe_int64_t, +#ifdef HAVE_HGE + monetdbe_int128_t, +#endif + monetdbe_size_t, monetdbe_float, monetdbe_double, + monetdbe_str, monetdbe_blob, + monetdbe_date, monetdbe_time, monetdbe_timestamp, + + // should be last: + monetdbe_type_unknown +} monetdbe_types; + +typedef struct { + char* name; + unsigned int scale; + unsigned int digits; +} monetdbe_sql_type; + +typedef struct { + monetdbe_types type; + monetdbe_sql_type sql_type; + void *data; + size_t count; + char* name; +} monetdbe_column; + +typedef struct { + size_t nparam; + monetdbe_types *type; +} monetdbe_statement; + +typedef struct { + monetdbe_cnt nrows; + size_t ncols; + char *name; + monetdbe_cnt last_id; /* last auto incremented id */ +} monetdbe_result; + +typedef void* monetdbe_database; + +typedef struct { + const char *host; + int port; + const char *database; + const char *username; + const char *password; + const char *lang; +} monetdbe_remote; + +typedef struct { + const char *host; + const char* port; + const char* usock; +} monetdbe_mapi_server; + +typedef struct { + int memorylimit; // top off the amount of RAM to be used, in MB + int querytimeout; // graceful terminate query after a few seconds + int sessiontimeout; // graceful terminate the session after a few seconds + int nr_threads; // maximum number of worker treads, limits level of parallelism + monetdbe_remote* remote; + monetdbe_mapi_server* mapi_server; + const char *trace_file; // file to which log output should be written +} monetdbe_options; + +#define DEFAULT_STRUCT_DEFINITION(ctype, typename) \ + typedef struct \ + { \ + monetdbe_types type; \ + monetdbe_sql_type sql_type; \ + ctype *data; \ + size_t count; \ + char *name; \ + ctype null_value; \ + double scale; \ + int (*is_null)(ctype *value); \ + } monetdbe_column_##typename + +DEFAULT_STRUCT_DEFINITION(int8_t, bool); +DEFAULT_STRUCT_DEFINITION(int8_t, int8_t); +DEFAULT_STRUCT_DEFINITION(int16_t, int16_t); +DEFAULT_STRUCT_DEFINITION(int32_t, int32_t); +DEFAULT_STRUCT_DEFINITION(int64_t, int64_t); +#ifdef HAVE_HGE +DEFAULT_STRUCT_DEFINITION(__int128, int128_t); +#endif +DEFAULT_STRUCT_DEFINITION(size_t, size_t); + +DEFAULT_STRUCT_DEFINITION(float, float); +DEFAULT_STRUCT_DEFINITION(double, double); + +DEFAULT_STRUCT_DEFINITION(char *, str); +DEFAULT_STRUCT_DEFINITION(monetdbe_data_blob, blob); + +DEFAULT_STRUCT_DEFINITION(monetdbe_data_date, date); +DEFAULT_STRUCT_DEFINITION(monetdbe_data_time, time); 
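Each DEFAULT_STRUCT_DEFINITION instance expands to a typed view of one result column (the final instance, for timestamp, follows below). As a reading aid only, since the generated struct is never spelled out in the header, the int32_t case expands to roughly:

typedef struct {
	monetdbe_types type;	/* type tag, monetdbe_int32_t here */
	monetdbe_sql_type sql_type;	/* SQL-side type name, scale and digits */
	int32_t *data;	/* column values, `count` entries */
	size_t count;
	char *name;	/* column name */
	int32_t null_value;	/* in-band sentinel standing for SQL NULL */
	double scale;
	int (*is_null)(int32_t *value);	/* per-value NULL test */
} monetdbe_column_int32_t;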
+DEFAULT_STRUCT_DEFINITION(monetdbe_data_timestamp, timestamp); +// UUID, INET, XML ? + +monetdbe_export const char *monetdbe_version(void); + +monetdbe_export int monetdbe_open(monetdbe_database *db, char *url, monetdbe_options *opts); +/* 0 ok, -1 (allocation failed), -2 error in db */ +monetdbe_export int monetdbe_close(monetdbe_database db); + +monetdbe_export char* monetdbe_error(monetdbe_database db); + +monetdbe_export char* monetdbe_get_autocommit(monetdbe_database dbhdl, int* result); +monetdbe_export char* monetdbe_set_autocommit(monetdbe_database dbhdl, int value); +monetdbe_export int monetdbe_in_transaction(monetdbe_database dbhdl); + +monetdbe_export char* monetdbe_query(monetdbe_database dbhdl, char* query, monetdbe_result** result, monetdbe_cnt* affected_rows); +monetdbe_export char* monetdbe_result_fetch(monetdbe_result *mres, monetdbe_column** res, size_t column_index); +monetdbe_export char* monetdbe_cleanup_result(monetdbe_database dbhdl, monetdbe_result* result); + +monetdbe_export char* monetdbe_prepare(monetdbe_database dbhdl, char *query, monetdbe_statement **stmt, monetdbe_result** result); +monetdbe_export char* monetdbe_bind(monetdbe_statement *stmt, void *data, size_t parameter_nr); +monetdbe_export char* monetdbe_execute(monetdbe_statement *stmt, monetdbe_result **result, monetdbe_cnt* affected_rows); +monetdbe_export char* monetdbe_cleanup_statement(monetdbe_database dbhdl, monetdbe_statement *stmt); + +monetdbe_export char* monetdbe_append(monetdbe_database dbhdl, const char* schema, const char* table, monetdbe_column **input, size_t column_count); +monetdbe_export const void* monetdbe_null(monetdbe_database dbhdl, monetdbe_types t); + +monetdbe_export char* monetdbe_get_columns(monetdbe_database dbhdl, const char* schema_name, const char *table_name, size_t *column_count, monetdbe_column **columns); + +monetdbe_export char* monetdbe_dump_database(monetdbe_database dbhdl, const char *backupfile); +monetdbe_export char* monetdbe_dump_table(monetdbe_database dbhdl, const char *schema_name, const char *table_name, const char *backupfile); +monetdbe_export const char* monetdbe_get_mapi_port(void); + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/msc-plugin/launcher.vcxproj b/msc-plugin/launcher.vcxproj index d107496..ae7116e 100644 --- a/msc-plugin/launcher.vcxproj +++ b/msc-plugin/launcher.vcxproj @@ -1,281 +1,281 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - sharedlib - Win32 - - - sharedlib - x64 - - - - - - - 16.0 - Win32Proj - {C8E25628-0B46-4CBE-90DF-5228F79A5A64} - server - 10.0 - - - - Application - true - v143 - Unicode - false - - - Application - false - v143 - true - Unicode - false - - - Application - false - v143 - true - Unicode - true - - - Application - true - v143 - Unicode - false - - - Application - false - v143 - true - Unicode - false - - - Application - false - v143 - true - Unicode - false - - - - - - - - - - - - - - - - - - - - - - - - - - - true - .exe - $(SolutionDir)..\ - - - false - .exe - $(SolutionDir)..\ - - - false - .exe - $(SolutionDir)..\ - - - true - .exe - $(SolutionDir)..\ - - - false - .exe - $(SolutionDir)..\ - - - false - .exe - $(SolutionDir)..\ - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib 
%(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - None - - - Console - true - true - false - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - false - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - true - true - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - copy $(OutDir)$(TargetName)$(TargetExt) $(ProjectDir)\..\server.so /y - - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - None - - - Console - true - true - false - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - false - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - true - true - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - copy "$(OutDir)$(TargetName)$(TargetExt)" "$(ProjectDir)\..\server.so" /y - - - + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + sharedlib + Win32 + + + sharedlib + x64 + + + + + + + 16.0 + Win32Proj + {C8E25628-0B46-4CBE-90DF-5228F79A5A64} + server + 10.0 + + + + Application + true + v143 + Unicode + false + + + Application + false + v143 + true + Unicode + false + + + Application + false + v143 + true + Unicode + true + + + Application + true + v143 + Unicode + false + + + Application + false + v143 + true + Unicode + false + + + Application + false + v143 + true + Unicode + false + + + + + + + + + + + + + + + + + + + + + + + + + + + true + .exe + $(SolutionDir)..\ + + + false + .exe + $(SolutionDir)..\ + + + false + .exe + $(SolutionDir)..\ + + + true + .exe + $(SolutionDir)..\ + + + false + .exe + $(SolutionDir)..\ + + + false + .exe + $(SolutionDir)..\ + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + None + + + Console + true + true + false + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + false + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + 
_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + true + true + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + copy $(OutDir)$(TargetName)$(TargetExt) $(ProjectDir)\..\server.so /y + + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + None + + + Console + true + true + false + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + false + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + true + true + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + copy "$(OutDir)$(TargetName)$(TargetExt)" "$(ProjectDir)\..\server.so" /y + + + \ No newline at end of file diff --git a/msc-plugin/libaquery.vcxproj b/msc-plugin/libaquery.vcxproj index 7da895f..146c583 100644 --- a/msc-plugin/libaquery.vcxproj +++ b/msc-plugin/libaquery.vcxproj @@ -1,239 +1,239 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 16.0 - Win32Proj - {b52aacf7-16a6-4fca-90ad-867d367bda4f} - libaquery - 10.0 - - - - StaticLibrary - true - v143 - Unicode - - - StaticLibrary - false - v143 - true - Unicode - - - StaticLibrary - true - v143 - Unicode - - - StaticLibrary - false - v143 - true - Unicode - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)..\ - - - false - .lib - $(SolutionDir)..\ - - - true - $(SolutionDir)..\ - - - false - .lib - $(SolutionDir)..\ - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - stdc17 - Create - pch.hpp - $(ProjectDir)\..\monetdb\msvc - ../libaquery.pch - - - Console - true - - - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; - - - - - Level3 - true - true - false - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - stdc17 - Create - pch.hpp - None - Full - AnySuitable - Speed - true - true - AdvancedVectorExtensions2 - true - true - false - false - false - true - false - $(ProjectDir)\..\monetdb\msvc - ../libaquery.pch - - - Console - true - true - false - false - - - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; - - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - stdc17 - Create - pch.hpp - $(ProjectDir)\..\monetdb\msvc - ../libaquery.pch - - - Console - true - - - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; - - - - - Level3 - true - true - false - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - stdc17 - Create - pch.hpp - None - Full - AnySuitable - Speed - true - true - AdvancedVectorExtensions2 - true - true - false - false - false - true - false - $(ProjectDir)\..\monetdb\msvc - 
../libaquery.pch - - - Console - true - true - false - false - - - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 16.0 + Win32Proj + {b52aacf7-16a6-4fca-90ad-867d367bda4f} + libaquery + 10.0 + + + + StaticLibrary + true + v143 + Unicode + + + StaticLibrary + false + v143 + true + Unicode + + + StaticLibrary + true + v143 + Unicode + + + StaticLibrary + false + v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(SolutionDir)..\ + + + false + .lib + $(SolutionDir)..\ + + + true + $(SolutionDir)..\ + + + false + .lib + $(SolutionDir)..\ + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + stdc17 + Create + pch.hpp + $(ProjectDir)\..\monetdb\msvc + ../libaquery.pch + + + Console + true + + + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; + + + + + Level3 + true + true + false + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + stdc17 + Create + pch.hpp + None + Full + AnySuitable + Speed + true + true + AdvancedVectorExtensions2 + true + true + false + false + false + true + false + $(ProjectDir)\..\monetdb\msvc + ../libaquery.pch + + + Console + true + true + false + false + + + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; + + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + stdc17 + Create + pch.hpp + $(ProjectDir)\..\monetdb\msvc + ../libaquery.pch + + + Console + true + + + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; + + + + + Level3 + true + true + false + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + stdc17 + Create + pch.hpp + None + Full + AnySuitable + Speed + true + true + AdvancedVectorExtensions2 + true + true + false + false + false + true + false + $(ProjectDir)\..\monetdb\msvc + ../libaquery.pch + + + Console + true + true + false + false + + + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib; + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/msc-plugin/msc-plugin.vcxproj b/msc-plugin/msc-plugin.vcxproj index ac70b3a..4d50611 100644 --- a/msc-plugin/msc-plugin.vcxproj +++ b/msc-plugin/msc-plugin.vcxproj @@ -1,236 +1,236 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 16.0 - Win32Proj - {8081fdaa-4d13-4b7a-adb2-8224af7f1c81} - Project1 - 10.0 - msc-plugin - - - - DynamicLibrary - true - v143 - Unicode - false - - - DynamicLibrary - false - v143 - true - Unicode - false - - - DynamicLibrary - true - v143 - Unicode - false - false - - - DynamicLibrary - false - v143 - true - Unicode - false - - - - - - - - - - - - - - - - - - - - - - dll.so - - - - dll.so - - - - dll.so - - - - dll.so - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - Guard - false - $(ProjectDir)\..\monetdb\msvc - /WL %(AdditionalOptions) - stdc17 - Create - ./server/pch.hpp - ../libaquery.pch - true - - - Console - DebugFull - $(ProjectDir)\..\dll.so - true - $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) - - - - - Level3 - true - true - false - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - $(ProjectDir)\..\monetdb\msvc - stdc17 - Create - ./server/pch.hpp - None - Full - 
AnySuitable - Speed - true - true - AdvancedVectorExtensions2 - true - true - false - false - false - true - false - ../libaquery.pch - false - - - Console - true - true - false - $(ProjectDir)\..\dll.so - $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) - false - - - - - Level3 - true - _ALLOW_RTCc_IN_STL;_CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - Guard - false - ProgramDatabase - Disabled - false - true - false - $(ProjectDir)\..\monetdb\msvc - /WL %(AdditionalOptions) - stdc17 - Create - ./server/pch.hpp - ../libaquery.pch - true - - - Console - DebugFull - $(ProjectDir)\..\dll.so - true - $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) - - - - - Level3 - true - true - false - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpp17 - $(ProjectDir)\..\monetdb\msvc - stdc17 - Create - ./server/pch.hpp - None - Full - AnySuitable - Speed - true - true - AdvancedVectorExtensions2 - true - true - false - false - false - true - false - ../libaquery.pch - false - - - Console - true - true - false - $(ProjectDir)\..\dll.so - $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) - false - - - - - - - - + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 16.0 + Win32Proj + {8081fdaa-4d13-4b7a-adb2-8224af7f1c81} + Project1 + 10.0 + msc-plugin + + + + DynamicLibrary + true + v143 + Unicode + false + + + DynamicLibrary + false + v143 + true + Unicode + false + + + DynamicLibrary + true + v143 + Unicode + false + false + + + DynamicLibrary + false + v143 + true + Unicode + false + + + + + + + + + + + + + + + + + + + + + + dll.so + + + + dll.so + + + + dll.so + + + + dll.so + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + Guard + false + $(ProjectDir)\..\monetdb\msvc + /WL %(AdditionalOptions) + stdc17 + Create + ./server/pch.hpp + ../libaquery.pch + true + + + Console + DebugFull + $(ProjectDir)\..\dll.so + true + $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) + + + + + Level3 + true + true + false + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + $(ProjectDir)\..\monetdb\msvc + stdc17 + Create + ./server/pch.hpp + None + Full + AnySuitable + Speed + true + true + AdvancedVectorExtensions2 + true + true + false + false + false + true + false + ../libaquery.pch + false + + + Console + true + true + false + $(ProjectDir)\..\dll.so + $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) + false + + + + + Level3 + true + _ALLOW_RTCc_IN_STL;_CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + Guard + false + ProgramDatabase + Disabled + false + true + false + $(ProjectDir)\..\monetdb\msvc + /WL %(AdditionalOptions) + stdc17 + Create + ./server/pch.hpp + ../libaquery.pch + true + + + Console + DebugFull + $(ProjectDir)\..\dll.so + true + $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) + + + + + Level3 + true + true + false + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true 
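
The msc-plugin project above compiles the codegen output (the out*.cpp files ignored in .gitignore) into dll.so, linking the static libaquery.lib with /WHOLEARCHIVE so none of its objects are dropped, plus MonetDB's embedded monetdbe.lib. A rough command-line equivalent of those Release settings, sketched in Python for illustration only (file names and paths are assumptions; the real build goes through MSBuild and this .vcxproj):

```python
# Hedged sketch: what the Release compile/link settings above amount to when
# driving MSVC's cl.exe directly. 'out.cpp' stands in for the generated source;
# all paths are illustrative, not taken from this diff.
import subprocess

subprocess.run([
    'cl', '/std:c++17', '/O2', '/DNDEBUG', '/D_CRT_SECURE_NO_WARNINGS',
    '/I', r'monetdb\msvc',          # additional include directory
    '/LD', 'out.cpp',               # build a DLL from the generated code
    '/link',
    '/WHOLEARCHIVE:libaquery.lib',  # keep every libaquery object alive
    r'monetdb\msvc\monetdbe.lib',
    '/OUT:dll.so',                  # output is named dll.so, not *.dll
], check=True)
```
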
+ stdcpp17 + $(ProjectDir)\..\monetdb\msvc + stdc17 + Create + ./server/pch.hpp + None + Full + AnySuitable + Speed + true + true + AdvancedVectorExtensions2 + true + true + false + false + false + true + false + ../libaquery.pch + false + + + Console + true + true + false + $(ProjectDir)\..\dll.so + $(ProjectDir)\..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;$(CoreLibraryDependencies);%(AdditionalDependencies) + false + + + + + + + + \ No newline at end of file diff --git a/msc-plugin/sdk_example.vcxproj b/msc-plugin/sdk_example.vcxproj index 09ddc80..861801e 100644 --- a/msc-plugin/sdk_example.vcxproj +++ b/msc-plugin/sdk_example.vcxproj @@ -1,167 +1,167 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 16.0 - Win32Proj - {f954797b-c148-4cbf-9fb4-a9a450efec38} - Project1 - 10.0 - - - - DynamicLibrary - true - v143 - Unicode - - - DynamicLibrary - false - v143 - true - Unicode - - - DynamicLibrary - true - v143 - Unicode - - - DynamicLibrary - false - v143 - true - Unicode - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\ - test - .so - - - $(SolutionDir)..\ - test - .so - - - $(SolutionDir)..\ - test - .so - - - $(SolutionDir)..\ - test - .so - - - - Level3 - true - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpplatest - stdc17 - - - Console - true - - - - - Level3 - true - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpplatest - stdc17 - - - Console - true - true - true - - - - - Level3 - true - _DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpplatest - stdc17 - - - Console - true - - - - - Level3 - true - true - true - NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - stdcpplatest - stdc17 - - - Console - true - true - true - - - - - - - - - - - - + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 16.0 + Win32Proj + {f954797b-c148-4cbf-9fb4-a9a450efec38} + Project1 + 10.0 + + + + DynamicLibrary + true + v143 + Unicode + + + DynamicLibrary + false + v143 + true + Unicode + + + DynamicLibrary + true + v143 + Unicode + + + DynamicLibrary + false + v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\ + test + .so + + + $(SolutionDir)..\ + test + .so + + + $(SolutionDir)..\ + test + .so + + + $(SolutionDir)..\ + test + .so + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpplatest + stdc17 + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpplatest + stdc17 + + + Console + true + true + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpplatest + stdc17 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpplatest + stdc17 + + + Console + true + true + true + + + + + + + + + + + + \ No newline at end of file diff --git a/msc-plugin/server.sln b/msc-plugin/server.sln index 34f000e..cde1590 100644 --- a/msc-plugin/server.sln +++ b/msc-plugin/server.sln @@ -1,126 +1,126 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.4.32804.182 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "server", "server.vcxproj", "{031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}" - ProjectSection(ProjectDependencies) = postProject - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F} = {B52AACF7-16A6-4FCA-90AD-867D367BDA4F} - 
EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "msc-plugin", "msc-plugin.vcxproj", "{8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}" -EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "msvs-py", "..\msvs-py\msvs-py.pyproj", "{CCC243F5-663E-45B7-A6DE-B2468C58B3A7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libaquery", "libaquery.vcxproj", "{B52AACF7-16A6-4FCA-90AD-867D367BDA4F}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "launcher", "launcher.vcxproj", "{C8E25628-0B46-4CBE-90DF-5228F79A5A64}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sdk_example", "sdk_example.vcxproj", "{F954797B-C148-4CBF-9FB4-A9A450EFEC38}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - shared|x64 = shared|x64 - shared|x86 = shared|x86 - sharedlib|x64 = sharedlib|x64 - sharedlib|x86 = sharedlib|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x64.ActiveCfg = Debug|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x64.Build.0 = Debug|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x86.ActiveCfg = Debug|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x86.Build.0 = Debug|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x64.ActiveCfg = Release|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x64.Build.0 = Release|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x86.ActiveCfg = Release|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x86.Build.0 = Release|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.shared|x64.ActiveCfg = sharedlib|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.shared|x86.ActiveCfg = sharedlib|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x64.ActiveCfg = sharedlib|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x64.Build.0 = sharedlib|x64 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x86.ActiveCfg = sharedlib|Win32 - {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x86.Build.0 = sharedlib|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x64.ActiveCfg = Debug|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x64.Build.0 = Debug|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x86.ActiveCfg = Debug|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x86.Build.0 = Debug|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x64.ActiveCfg = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x64.Build.0 = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x86.ActiveCfg = Release|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x86.Build.0 = Release|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x64.ActiveCfg = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x64.Build.0 = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x86.ActiveCfg = Release|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x86.Build.0 = Release|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x64.ActiveCfg = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x64.Build.0 = Release|x64 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x86.ActiveCfg = Release|Win32 - {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x86.Build.0 = Release|Win32 - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Debug|x64.ActiveCfg = Debug|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Debug|x86.ActiveCfg = 
Debug|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Release|x64.ActiveCfg = Release|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Release|x86.ActiveCfg = Release|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.shared|x64.ActiveCfg = Release|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.shared|x86.ActiveCfg = Release|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.sharedlib|x64.ActiveCfg = Release|Any CPU - {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.sharedlib|x86.ActiveCfg = Release|Any CPU - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x64.ActiveCfg = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x64.Build.0 = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x86.ActiveCfg = Debug|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x86.Build.0 = Debug|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x64.ActiveCfg = Release|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x64.Build.0 = Release|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x86.ActiveCfg = Release|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x86.Build.0 = Release|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x64.ActiveCfg = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x64.Build.0 = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x86.ActiveCfg = Debug|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x86.Build.0 = Debug|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x64.ActiveCfg = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x64.Build.0 = Debug|x64 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x86.ActiveCfg = Debug|Win32 - {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x86.Build.0 = Debug|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x64.ActiveCfg = Debug|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x64.Build.0 = Debug|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x86.ActiveCfg = Debug|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x86.Build.0 = Debug|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x64.ActiveCfg = Release|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x64.Build.0 = Release|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x86.ActiveCfg = Release|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x86.Build.0 = Release|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x64.ActiveCfg = sharedlib|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x64.Build.0 = sharedlib|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x86.ActiveCfg = sharedlib|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x86.Build.0 = sharedlib|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x64.ActiveCfg = sharedlib|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x64.Build.0 = sharedlib|x64 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x86.ActiveCfg = sharedlib|Win32 - {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x86.Build.0 = sharedlib|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x64.ActiveCfg = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x64.Build.0 = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x86.ActiveCfg = Debug|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x86.Build.0 = Debug|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x64.ActiveCfg = Release|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x64.Build.0 = Release|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x86.ActiveCfg = Release|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x86.Build.0 = Release|Win32 - 
{F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x64.ActiveCfg = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x64.Build.0 = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x86.ActiveCfg = Debug|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x86.Build.0 = Debug|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x64.ActiveCfg = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x64.Build.0 = Debug|x64 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x86.ActiveCfg = Debug|Win32 - {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x86.Build.0 = Debug|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {572EA821-8162-4161-9AC2-464C79F08B47} - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.4.32804.182 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "server", "server.vcxproj", "{031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}" + ProjectSection(ProjectDependencies) = postProject + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F} = {B52AACF7-16A6-4FCA-90AD-867D367BDA4F} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "msc-plugin", "msc-plugin.vcxproj", "{8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}" +EndProject +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "msvs-py", "..\msvs-py\msvs-py.pyproj", "{CCC243F5-663E-45B7-A6DE-B2468C58B3A7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libaquery", "libaquery.vcxproj", "{B52AACF7-16A6-4FCA-90AD-867D367BDA4F}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "launcher", "launcher.vcxproj", "{C8E25628-0B46-4CBE-90DF-5228F79A5A64}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sdk_example", "sdk_example.vcxproj", "{F954797B-C148-4CBF-9FB4-A9A450EFEC38}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + shared|x64 = shared|x64 + shared|x86 = shared|x86 + sharedlib|x64 = sharedlib|x64 + sharedlib|x86 = sharedlib|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x64.ActiveCfg = Debug|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x64.Build.0 = Debug|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x86.ActiveCfg = Debug|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Debug|x86.Build.0 = Debug|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x64.ActiveCfg = Release|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x64.Build.0 = Release|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x86.ActiveCfg = Release|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.Release|x86.Build.0 = Release|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.shared|x64.ActiveCfg = sharedlib|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.shared|x86.ActiveCfg = sharedlib|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x64.ActiveCfg = sharedlib|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x64.Build.0 = sharedlib|x64 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x86.ActiveCfg = sharedlib|Win32 + {031352C2-AFBB-45AA-9518-DBC1F9EF2AF3}.sharedlib|x86.Build.0 = sharedlib|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x64.ActiveCfg = Debug|x64 + 
{8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x64.Build.0 = Debug|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x86.ActiveCfg = Debug|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Debug|x86.Build.0 = Debug|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x64.ActiveCfg = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x64.Build.0 = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x86.ActiveCfg = Release|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.Release|x86.Build.0 = Release|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x64.ActiveCfg = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x64.Build.0 = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x86.ActiveCfg = Release|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.shared|x86.Build.0 = Release|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x64.ActiveCfg = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x64.Build.0 = Release|x64 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x86.ActiveCfg = Release|Win32 + {8081FDAA-4D13-4B7A-ADB2-8224AF7F1C81}.sharedlib|x86.Build.0 = Release|Win32 + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Debug|x64.ActiveCfg = Debug|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Debug|x86.ActiveCfg = Debug|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Release|x64.ActiveCfg = Release|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.Release|x86.ActiveCfg = Release|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.shared|x64.ActiveCfg = Release|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.shared|x86.ActiveCfg = Release|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.sharedlib|x64.ActiveCfg = Release|Any CPU + {CCC243F5-663E-45B7-A6DE-B2468C58B3A7}.sharedlib|x86.ActiveCfg = Release|Any CPU + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x64.ActiveCfg = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x64.Build.0 = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x86.ActiveCfg = Debug|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Debug|x86.Build.0 = Debug|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x64.ActiveCfg = Release|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x64.Build.0 = Release|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x86.ActiveCfg = Release|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.Release|x86.Build.0 = Release|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x64.ActiveCfg = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x64.Build.0 = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x86.ActiveCfg = Debug|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.shared|x86.Build.0 = Debug|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x64.ActiveCfg = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x64.Build.0 = Debug|x64 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x86.ActiveCfg = Debug|Win32 + {B52AACF7-16A6-4FCA-90AD-867D367BDA4F}.sharedlib|x86.Build.0 = Debug|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x64.ActiveCfg = Debug|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x64.Build.0 = Debug|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x86.ActiveCfg = Debug|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Debug|x86.Build.0 = Debug|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x64.ActiveCfg = Release|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x64.Build.0 = Release|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x86.ActiveCfg = Release|Win32 + 
{C8E25628-0B46-4CBE-90DF-5228F79A5A64}.Release|x86.Build.0 = Release|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x64.ActiveCfg = sharedlib|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x64.Build.0 = sharedlib|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x86.ActiveCfg = sharedlib|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.shared|x86.Build.0 = sharedlib|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x64.ActiveCfg = sharedlib|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x64.Build.0 = sharedlib|x64 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x86.ActiveCfg = sharedlib|Win32 + {C8E25628-0B46-4CBE-90DF-5228F79A5A64}.sharedlib|x86.Build.0 = sharedlib|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x64.ActiveCfg = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x64.Build.0 = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x86.ActiveCfg = Debug|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Debug|x86.Build.0 = Debug|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x64.ActiveCfg = Release|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x64.Build.0 = Release|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x86.ActiveCfg = Release|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.Release|x86.Build.0 = Release|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x64.ActiveCfg = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x64.Build.0 = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x86.ActiveCfg = Debug|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.shared|x86.Build.0 = Debug|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x64.ActiveCfg = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x64.Build.0 = Debug|x64 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x86.ActiveCfg = Debug|Win32 + {F954797B-C148-4CBF-9FB4-A9A450EFEC38}.sharedlib|x86.Build.0 = Debug|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {572EA821-8162-4161-9AC2-464C79F08B47} + EndGlobalSection +EndGlobal diff --git a/msc-plugin/server.vcxproj b/msc-plugin/server.vcxproj index cf517b3..5cff020 100644 --- a/msc-plugin/server.vcxproj +++ b/msc-plugin/server.vcxproj @@ -1,281 +1,281 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - sharedlib - Win32 - - - sharedlib - x64 - - - - - - - 16.0 - Win32Proj - {031352c2-afbb-45aa-9518-dbc1f9ef2af3} - server - 10.0 - - - - DynamicLibrary - true - v143 - Unicode - false - - - DynamicLibrary - false - v143 - true - Unicode - false - - - DynamicLibrary - false - v143 - true - Unicode - true - - - DynamicLibrary - true - v143 - Unicode - false - - - DynamicLibrary - false - v143 - true - Unicode - false - - - DynamicLibrary - false - v143 - true - Unicode - false - - - - - - - - - - - - - - - - - - - - - - - - - - - true - .so - $(SolutionDir)..\ - - - false - .so - $(SolutionDir)..\ - - - false - .so - $(SolutionDir)..\ - - - true - .so - $(SolutionDir)..\ - - - false - .so - $(SolutionDir)..\ - - - false - .so - $(SolutionDir)..\ - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true 
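
Throughout server.vcxproj the target is a DynamicLibrary whose extension is forced to .so, and the sharedlib configurations add a post-build `copy ... server.so` step, so the Windows build lands at ..\server.so under the same name the non-Windows builds use and a loader can use a single path everywhere. A minimal loading sketch (the loader side is not part of this diff; this only illustrates that a .so-named DLL loads fine on Windows):

```python
# Hedged sketch: loading the engine built by this project. On Windows the file
# is a PE DLL despite the .so suffix; ctypes does not care about the extension.
import ctypes

server = ctypes.CDLL('./server.so')
```
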
- true - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - None - - - Console - true - true - false - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - false - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - true - true - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - copy $(OutDir)$(TargetName)$(TargetExt) $(ProjectDir)\..\server.so /y - - - - - Level3 - true - _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - None - - - Console - true - true - false - ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - false - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - - - Level3 - true - true - true - _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - true - stdcpp17 - stdc17 - $(ProjectDir)\..\monetdb\msvc - - - Console - true - true - true - $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) - /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) - - - copy "$(OutDir)$(TargetName)$(TargetExt)" "$(ProjectDir)\..\server.so" /y - - - + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + sharedlib + Win32 + + + sharedlib + x64 + + + + + + + 16.0 + Win32Proj + {031352c2-afbb-45aa-9518-dbc1f9ef2af3} + server + 10.0 + + + + DynamicLibrary + true + v143 + Unicode + false + + + DynamicLibrary + false + v143 + true + Unicode + false + + + DynamicLibrary + false + v143 + true + Unicode + true + + + DynamicLibrary + true + v143 + Unicode + false + + + DynamicLibrary + false + v143 + true + Unicode + false + + + DynamicLibrary + false + v143 + true + Unicode + false + + + + + + + + + + + + + + + + + + + + + + + + + + + true + .so + $(SolutionDir)..\ + + + false + .so + $(SolutionDir)..\ + + + false + .so + $(SolutionDir)..\ + + + true + .so + $(SolutionDir)..\ + + + false + .so + $(SolutionDir)..\ + + + false + .so + $(SolutionDir)..\ + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + None + + + Console + true + true + false + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + false + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + 
true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + true + true + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + copy $(OutDir)$(TargetName)$(TargetExt) $(ProjectDir)\..\server.so /y + + + + + Level3 + true + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + None + + + Console + true + true + false + ..\libaquery.lib;$(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + false + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + + + Level3 + true + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + true + true + stdcpp17 + stdc17 + $(ProjectDir)\..\monetdb\msvc + + + Console + true + true + true + $(ProjectDir)\..\monetdb\msvc\monetdbe.lib;%(AdditionalDependencies) + /WHOLEARCHIVE:libaquery.lib %(AdditionalOptions) + + + copy "$(OutDir)$(TargetName)$(TargetExt)" "$(ProjectDir)\..\server.so" /y + + + \ No newline at end of file diff --git a/msvs-py/msvs-py.pyproj b/msvs-py/msvs-py.pyproj index 58b0511..b127807 100644 --- a/msvs-py/msvs-py.pyproj +++ b/msvs-py/msvs-py.pyproj @@ -1,63 +1,63 @@ - - - Debug - 2.0 - ccc243f5-663e-45b7-a6de-b2468c58b3a7 - . - ..\prompt.py - ..\msvs-py - .. - . - msvs-py - msvs-py - False - - - true - false - - - true - false - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + Debug + 2.0 + ccc243f5-663e-45b7-a6de-b2468c58b3a7 + . + ..\prompt.py + ..\msvs-py + .. + . + msvs-py + msvs-py + False + + + true + false + + + true + false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/reconstruct/TODO.md b/reconstruct/TODO.md index f0c16b4..7714208 100644 --- a/reconstruct/TODO.md +++ b/reconstruct/TODO.md @@ -1,11 +1,11 @@ -# TODO: - -## 1. double scans in projections - - first for special aggrigations and singular columns - - Then in group by node decide if we have special group by aggregations - - If sp_gb_agg exists, the entire groupby aggregation is done in C plugin - - If not, group by is done in SQL - -## 2. ColRef supports multiple objects - - A.a = B.b then in projection A.a B.b will refer to same projection +# TODO: + +## 1. double scans in projections + - first for special aggrigations and singular columns + - Then in group by node decide if we have special group by aggregations + - If sp_gb_agg exists, the entire groupby aggregation is done in C plugin + - If not, group by is done in SQL + +## 2. 
ColRef supports multiple objects + - A.a = B.b then in projection A.a B.b will refer to same projection - Colref::ProjEq(ColRef v) => this == v or v in this.proj_eqs \ No newline at end of file diff --git a/reconstruct/__init__.py b/reconstruct/__init__.py index c27a9da..b8417c9 100644 --- a/reconstruct/__init__.py +++ b/reconstruct/__init__.py @@ -1,32 +1,32 @@ -from reconstruct.ast import Context, ast_node -saved_cxt = None - -def initialize(cxt = None, keep = False): - global saved_cxt - if cxt is None or not keep or type(cxt) is not Context: - if saved_cxt is None or not keep: - cxt = Context() - saved_cxt = cxt - else: - cxt = saved_cxt - cxt.new() - return cxt - -def generate(ast, cxt): - for k in ast.keys(): - if k in ast_node.types.keys(): - ast_node.types[k](None, ast, cxt) - -def exec(stmts, cxt = None, keep = False): - cxt = initialize(cxt, keep) - stmts_stmts = stmts['stmts'] - if type(stmts_stmts) is list: - for s in stmts_stmts: - generate(s, cxt) - else: - generate(stmts_stmts, cxt) - for q in cxt.queries: - cxt.print(q.strip()) - return cxt - -__all__ = ["initialize", "generate", "exec", "saved_cxt"] +from reconstruct.ast import Context, ast_node +saved_cxt = None + +def initialize(cxt = None, keep = False): + global saved_cxt + if cxt is None or not keep or type(cxt) is not Context: + if saved_cxt is None or not keep: + cxt = Context() + saved_cxt = cxt + else: + cxt = saved_cxt + cxt.new() + return cxt + +def generate(ast, cxt): + for k in ast.keys(): + if k in ast_node.types.keys(): + ast_node.types[k](None, ast, cxt) + +def exec(stmts, cxt = None, keep = False): + cxt = initialize(cxt, keep) + stmts_stmts = stmts['stmts'] + if type(stmts_stmts) is list: + for s in stmts_stmts: + generate(s, cxt) + else: + generate(stmts_stmts, cxt) + for q in cxt.queries: + cxt.print(q.strip()) + return cxt + +__all__ = ["initialize", "generate", "exec", "saved_cxt"] diff --git a/reconstruct/ast.py b/reconstruct/ast.py index 9442ce8..80218bf 100644 --- a/reconstruct/ast.py +++ b/reconstruct/ast.py @@ -1,1048 +1,1048 @@ -from copy import deepcopy -from dataclasses import dataclass -from enum import Enum, auto -from typing import Set, Tuple, Dict, Union, List, Optional -from engine.types import * -from engine.utils import enlist, base62uuid, base62alp, get_legal_name -from reconstruct.storage import Context, TableInfo, ColRef - -class ast_node: - header = [] - types = dict() - first_order = False - - def __init__(self, parent:Optional["ast_node"], node, context:Optional[Context] = None): - self.context = parent.context if context is None else context - self.parent = parent - self.sql = '' - self.ccode = '' - if hasattr(parent, 'datasource'): - self.datasource = parent.datasource - else: - self.datasource = None - self.init(node) - self.produce(node) - self.spawn(node) - self.consume(node) - - def emit(self, code): - self.context.emit(code) - def add(self, code): - self.sql += code + ' ' - def addc(self, code): - self.ccode += code + '\n' - - name = 'null' - - def init(self, _): - if self.parent is None: - self.context.sql_begin() - self.add(self.__class__.name.upper()) - - def produce(self, _): - pass - def spawn(self, _): - pass - - def consume(self, _): - if self.parent is None: - self.emit(self.sql+';\n') - self.context.sql_end() - -from reconstruct.expr import expr, fastscan - - -class projection(ast_node): - name = 'projection' - first_order = 'select' - - def init(self, _): - # skip default init - pass - - def produce(self, node): - p = node['select'] - self.projections = p if 
type(p) is list else [p] - self.add('SELECT') - if self.parent is None: - self.context.sql_begin() - self.postproc_fname = 'dll_' + base62uuid(6) - self.context.postproc_begin(self.postproc_fname) - - def spawn(self, node): - self.datasource = join(self, [], self.context) # datasource is Join instead of TableInfo - self.assumptions = [] - if 'from' in node: - from_clause = node['from'] - self.datasource = join(self, from_clause) - if 'assumptions' in from_clause: - self.assumptions = enlist(from_clause['assumptions']) - - if self.datasource is not None: - self.datasource_changed = True - self.prev_datasource = self.context.datasource - self.context.datasource = self.datasource - - if 'where' in node: - self.where = filter(self, node['where']) - else: - self.where = None - - - def consume(self, node): - # deal with projections - out_table_varname = 'out_'+base62uuid(6) - if 'into' in node: - out_table_name = node['into'] - else: - out_table_name = out_table_varname - - self.out_table : TableInfo = TableInfo(out_table_name, [], self.context) - self.out_table.contextname_cpp = out_table_varname - - cols = [] - self.col_ext : Set[ColRef]= set() - col_exprs : List[Tuple[str, Types]] = [] - - proj_map : Dict[int, List[Union[Types, int, str, expr]]]= dict() - self.var_table = dict() - # self.sp_refs = set() - for i, proj in enumerate(self.projections): - compound = False - self.datasource.rec = set() - name = '' - this_type = AnyT - if type(proj) is dict: - if 'value' in proj: - e = proj['value'] - proj_expr = expr(self, e) - this_type = proj_expr.type - name = proj_expr.sql - compound = True # compound column - proj_expr.cols_mentioned = self.datasource.rec - alias = '' - if 'name' in proj: # renaming column by AS keyword - alias = proj['name'] - - if not proj_expr.is_special: - y = lambda x:x - name = eval('f\'' + name + '\'') - if name not in self.var_table: - self.var_table[name] = len(col_exprs) - proj_map[i] = [this_type, len(col_exprs), proj_expr] - col_expr = name + ' AS ' + alias if alias else name - if alias: - self.var_table[alias] = len(col_exprs) - col_exprs.append((col_expr, proj_expr.type)) - else: - self.context.headers.add('"./server/aggregations.h"') - if self.datasource.rec is not None: - self.col_ext = self.col_ext.union(self.datasource.rec) - proj_map[i] = [this_type, proj_expr.sql, proj_expr] - - disp_name = get_legal_name(alias if alias else name) - - elif type(proj) is str: - col = self.datasource.get_col(proj) - this_type = col.type - # name = col.name - self.datasource.rec = None - # TODO: Type deduction in Python - cols.append(ColRef(this_type, self.out_table, None, disp_name, i, compound=compound)) - - self.out_table.add_cols(cols, new = False) - - if 'groupby' in node: - self.group_node = groupby(self, node['groupby']) - else: - self.group_node = None - - self.col_ext = [c for c in self.col_ext if c.name not in self.var_table] # remove duplicates in self.var_table - col_ext_names = [c.name for c in self.col_ext] - self.add(', '.join([c[0] for c in col_exprs] + col_ext_names)) - - _base_offset = len(col_exprs) - for i, col in enumerate(col_ext_names): - if col not in self.var_table: - self.var_table[col] = i + _base_offset - - - def finialize(astnode:ast_node): - if(astnode is not None): - self.add(astnode.sql) - finialize(self.datasource) - finialize(self.where) - if self.group_node and not self.group_node.use_sp_gb: - self.add(self.group_node.sql) - - if self.col_ext or self.group_node and self.group_node.use_sp_gb: - self.use_postproc = True - - o = self.assumptions 
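
reconstruct/TODO.md above sketches "ColRef supports multiple objects": after a join predicate such as A.a = B.b, both column references should resolve to the same projection slot, tested via Colref::ProjEq. A minimal Python sketch of that idea, using a hypothetical proj_eqs set whose names follow the TODO wording rather than any existing code:

```python
# Hedged sketch of the ColRef equivalence test proposed in reconstruct/TODO.md.
class ColRef:
    def __init__(self, name):
        self.name = name
        self.proj_eqs = set()  # ColRefs proven equal to this one (hypothetical)

    def add_eq(self, other):
        # A.a = B.b: both sides now refer to the same projection slot
        self.proj_eqs.add(other)
        other.proj_eqs.add(self)

    def proj_eq(self, v):
        # TODO.md: this == v or v in this.proj_eqs
        return self is v or v in self.proj_eqs
```
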
- if 'orderby' in node: - o.extend(enlist(node['orderby'])) - if o: - self.add(orderby(self, o).sql) - - if 'outfile' in node: - self.outfile = outfile(self, node['outfile'], sql = self.sql) - if not self.use_postproc: - self.sql += self.outfile.sql - else: - self.outfile = None - - if self.parent is None: - self.emit(self.sql+';\n') - else: - # TODO: subquery, name create tmp-table from subquery w/ alias as name - pass - - - # cpp module codegen - self.context.has_dll = True - # extract typed-columns from result-set - vid2cname = [0]*len(self.var_table) - self.pyname2cname = dict() - typenames = [c[1] for c in col_exprs] + [c.type for c in self.col_ext] - length_name = 'len_' + base62uuid(6) - self.context.emitc(f'auto {length_name} = server->cnt;') - - for v, idx in self.var_table.items(): - vname = get_legal_name(v) + '_' + base62uuid(3) - self.pyname2cname[v] = vname - self.context.emitc(f'auto {vname} = ColRef<{typenames[idx].cname}>({length_name}, server->getCol({idx}));') - vid2cname[idx] = vname - # Create table into context - out_typenames = [None] * len(proj_map) - - for key, val in proj_map.items(): - if type(val[1]) is str: - x = True - y = lambda t: self.pyname2cname[t] - val[1] = val[2].eval(x, y, gettype=True) - if callable(val[1]): - val[1] = val[1](True) - decltypestring = val[1] - - if val[0] == LazyT: - decltypestring = f'value_type>' - out_typenames[key] = decltypestring - else: - out_typenames[key] = val[0].cname - if (type(val[2].udf_called) is udf and - val[2].udf_called.return_pattern == udf.ReturnPattern.elemental_return - or - self.group_node and self.group_node.use_sp_gb and - val[2].cols_mentioned.intersection( - self.datasource.all_cols.difference(self.group_node.refs)) - ): - out_typenames[key] = f'ColRef<{out_typenames[key]}>' - - outtable_col_nameslist = ', '.join([f'"{c.name}"' for c in self.out_table.columns]) - self.outtable_col_names = 'names_' + base62uuid(4) - self.context.emitc(f'const char* {self.outtable_col_names}[] = {{{outtable_col_nameslist}}};') - # out_typenames = [v[0].cname for v in proj_map.values()] - self.context.emitc(f'auto {self.out_table.contextname_cpp} = new TableInfo<{",".join(out_typenames)}>("{self.out_table.table_name}", {self.outtable_col_names});') - # TODO: Inject custom group by code here and flag them in proj_map - # Type of UDFs? Complex UDFs, ones with static vars? - if self.group_node is not None and self.group_node.use_sp_gb: - gb_vartable : Dict[str, Union[str, int]] = deepcopy(self.pyname2cname) - gb_cexprs : List[str] = [] - - for key, val in proj_map.items(): - col_name = 'col_' + base62uuid(6) - self.context.emitc(f'decltype(auto) {col_name} = {self.out_table.contextname_cpp}->get_col<{key}>();') - gb_cexprs.append((col_name, val[2])) - self.group_node.finalize(gb_cexprs, gb_vartable) - else: - for i, (key, val) in enumerate(proj_map.items()): - if type(val[1]) is int: - self.context.emitc( - f'{self.out_table.contextname_cpp}->get_col<{key}>().initfrom({vid2cname[val[1]]}, "{cols[i].name}");' - ) - else: - # for funcs evaluate f_i(x, ...) 
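
The entry points in reconstruct/__init__.py above tie these nodes together: initialize() builds or reuses a Context, generate() dispatches each statement dict to the matching ast_node subclass (projection for 'select', plus create_table, insert, load, udf, ...), and exec() prints the queries collected on the context. A minimal driver-side usage sketch, assuming the vendored mo-sql-parsing produces the AST dicts these classes consume and that the driver supplies the {'stmts': ...} wrapper exec() expects (both assumptions, not shown verbatim in this diff):

```python
# Hedged usage sketch for reconstruct.exec().
from mo_sql_parsing import parse
import reconstruct

ast = {'stmts': parse('SELECT a, avg(b) FROM t GROUP BY a')}
cxt = reconstruct.exec(ast)   # prints each generated query on the Context
```
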
- self.context.emitc(f'{self.out_table.contextname_cpp}->get_col<{key}>() = {val[1]};') - # print out col_is - self.context.emitc(f'print(*{self.out_table.contextname_cpp});') - - if self.outfile: - self.outfile.finalize() - - if 'into' in node: - self.context.emitc(select_into(self, node['into']).ccode) - - self.context.emitc(f'puts("done.");') - - if self.parent is None: - self.context.sql_end() - self.context.postproc_end(self.postproc_fname) - - - -class select_into(ast_node): - def init(self, node): - if type(self.parent) is projection: - if self.context.has_dll: - # has postproc put back to monetdb - self.produce = self.produce_cpp - else: - self.produce = self.produce_sql - else: - raise ValueError('parent must be projection') - def produce_cpp(self, node): - assert(type(self.parent) is projection) - if not hasattr(self.parent, 'out_table'): - raise Exception('No out_table found.') - else: - self.context.headers.add('"./server/table_ext_monetdb.hpp"') - self.ccode = f'{self.parent.out_table.contextname_cpp}->monetdb_append_table(cxt->alt_server, \"{node}\");' - - def produce_sql(self, node): - self.sql = f' INTO {node}' - - -class orderby(ast_node): - name = 'order by' - def produce(self, node): - if node is None: - self.sql = '' - return - - node = enlist(node) - o_list = [] - - for o in node: - o_str = expr(self, o['value']).sql - if 'sort' in o and f'{o["sort"]}'.lower() == 'desc': - o_str += ' ' + 'DESC' - o_list.append(o_str) - self.add(', '.join(o_list)) - - -class scan(ast_node): - class Position(Enum): - init = auto() - front = auto() - body = auto() - back = auto() - fin = auto() - # TODO: use this for positions for scanner - class LoopStyle(Enum): - forloop = auto() - foreach = auto() - - name = 'scan' - def __init__(self, parent: "ast_node", node, loop_style = 'for', context: Context = None, const = False): - self.const = "const " if const else "" - self.loop_style = loop_style - super().__init__(parent, node, context) - - def init(self, _): - self.datasource = self.context.datasource - self.initializers = '' - self.start = '' - self.front = '' - self.body = '' - self.end = '}' - scan_vars = set(s.it_var for s in self.context.scans) - self.it_ver = 'i' + base62uuid(2) - while(self.it_ver in scan_vars): - self.it_ver = 'i' + base62uuid(6) - self.parent.context.scans.append(self) - - def produce(self, node): - if self.loop_style == 'for_each': - self.colref = node - self.start += f'for ({self.const}auto& {self.it_ver} : {node}) {{\n' - else: - self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {node}; ++{self.it_ver}){{\n" - - def add(self, stmt, position = "body"): - if position == "body": - self.body += stmt + '\n' - elif position == "init": - self.initializers += stmt + '\n' - else: - self.front += stmt + '\n' - - def finalize(self): - self.context.remove_scan(self, self.initializers + self.start + self.front + self.body + self.end) - -class groupby_c(ast_node): - name = '_groupby' - def init(self, node : List[Tuple[expr, Set[ColRef]]]): - self.proj : projection = self.parent - self.glist : List[Tuple[expr, Set[ColRef]]] = node - return super().init(node) - def produce(self, node : List[Tuple[expr, Set[ColRef]]]): - self.context.headers.add('"./server/hasher.h"') - self.context.headers.add('unordered_map') - self.group = 'g' + base62uuid(7) - self.group_type = 'record_type' + base62uuid(7) - self.datasource = self.proj.datasource - self.scanner = None - self.datasource.rec = set() - - g_contents = '' - g_contents_list = [] - first_col = '' - - for g in 
self.glist: - e = expr(self, g[0].node, c_code=True) - g_str = e.eval(c_code = True, y = lambda c: self.proj.pyname2cname[c]) - # if v is compound expr, create tmp cols - if e.is_ColExpr: - tmpcol = 't' + base62uuid(7) - self.context.emitc(f'auto {tmpcol} = {g_str};') - e = tmpcol - g_contents_list.append(e) - first_col = g_contents_list[0] - g_contents_decltype = [f'decays' for c in g_contents_list] - g_contents = ','.join(g_contents_list) - self.context.emitc(f'typedef record<{",".join(g_contents_decltype)}> {self.group_type};') - self.context.emitc(f'unordered_map<{self.group_type}, vector_type, ' - f'transTypes<{self.group_type}, hasher>> {self.group};') - self.n_grps = len(self.glist) - self.scanner = scan(self, first_col + '.size') - self.scanner.add(f'{self.group}[forward_as_tuple({g_contents}[{self.scanner.it_ver}])].emplace_back({self.scanner.it_ver});') - - def consume(self, _): - self.scanner.finalize() - - # def deal_with_assumptions(self, assumption:assumption, out:TableInfo): - # gscanner = scan(self, self.group) - # val_var = 'val_'+base62uuid(7) - # gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;') - # gscanner.add(f'{self.datasource.cxt_name}->order_by<{assumption.result()}>(&{val_var});') - # gscanner.finalize() - - def finalize(self, cexprs : List[Tuple[str, expr]], var_table : Dict[str, Union[str, int]]): - gscanner = scan(self, self.group, loop_style = 'for_each') - key_var = 'key_'+base62uuid(7) - val_var = 'val_'+base62uuid(7) - - gscanner.add(f'auto &{key_var} = {gscanner.it_ver}.first;', position = 'front') - gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;', position = 'front') - len_var = None - def define_len_var(): - nonlocal len_var - if len_var is None: - len_var = 'len_'+base62uuid(7) - gscanner.add(f'auto &{len_var} = {val_var}.size;', position = 'front') - - def get_key_idx (varname : str): - for i, g in enumerate(self.glist): - if varname == g[0].eval(): - return i - return var_table[varname] - - def get_var_names (varname : str): - var = get_key_idx(varname) - if type(var) is str: - return f'{var}[{val_var}]' - else: - return f'get<{var}>({key_var})' - - for ce in cexprs: - ex = ce[1] - materialize_builtin = {} - if type(ex.udf_called) is udf: - if '_builtin_len' in ex.udf_called.builtin_used: - define_len_var() - materialize_builtin['_builtin_len'] = len_var - if '_builtin_ret' in ex.udf_called.builtin_used: - define_len_var() - gscanner.add(f'{ce[0]}.emplace_back({{{len_var}}});\n') - materialize_builtin['_builtin_ret'] = f'{ce[0]}.back()' - gscanner.add(f'{ex.eval(c_code = True, y=get_var_names, materialize_builtin = materialize_builtin)};\n') - continue - gscanner.add(f'{ce[0]}.emplace_back({ex.eval(c_code = True, y=get_var_names, materialize_builtin = materialize_builtin)});\n') - - gscanner.finalize() - - self.datasource.groupinfo = None - - -class groupby(ast_node): - name = 'group by' - def produce(self, node): - if type(self.parent) is not projection: - raise ValueError('groupby can only be used in projection') - - node = enlist(node) - o_list = [] - self.refs = set() - self.dedicated_glist : List[Tuple[expr, Set[ColRef]]] = [] - self.use_sp_gb = False - for g in node: - self.datasource.rec = set() - g_expr = expr(self, g['value']) - refs : Set[ColRef] = self.datasource.rec - self.datasource.rec = None - if self.parent.col_ext: - this_sp_ref = refs.difference(self.parent.col_ext) - self.use_sp_gb = self.use_sp_gb or len(this_sp_ref) > 0 - self.refs.update(refs) - self.dedicated_glist.append((g_expr, refs)) - g_str = 
g_expr.eval(c_code = False) - if 'sort' in g and f'{g["sort"]}'.lower() == 'desc': - g_str = g_str + ' ' + 'DESC' - o_list.append(g_str) - - if not self.use_sp_gb: - self.dedicated_gb = None - self.add(', '.join(o_list)) - else: - for l in self.dedicated_glist: - # l_exist = l[1].difference(self.parent.col_ext) - # for l in l_exist: - # self.parent.var_table. - self.parent.col_ext.update(l[1]) - - def finalize(self, cexprs : List[Tuple[str, expr]], var_table : Dict[str, Union[str, int]]): - if self.use_sp_gb: - self.dedicated_gb = groupby_c(self.parent, self.dedicated_glist) - self.dedicated_gb.finalize(cexprs, var_table) - -class join(ast_node): - name = 'join' - def init(self, _): - self.joins:list = [] - self.tables : List[TableInfo] = [] - self.tables_dir = dict() - self.rec = None - self.top_level = self.parent and type(self.parent) is projection - # self.tmp_name = 'join_' + base62uuid(4) - # self.datasource = TableInfo(self.tmp_name, [], self.context) - def append(self, tbls, __alias = ''): - alias = lambda t : '(' + t + ') ' + __alias if len(__alias) else t - if type(tbls) is join: - self.joins.append(alias(tbls.__str__())) - self.tables += tbls.tables - self.tables_dir = {**self.tables_dir, **tbls.tables_dir} - - elif type(tbls) is TableInfo: - self.joins.append(alias(tbls.table_name)) - self.tables.append(tbls) - self.tables_dir[tbls.table_name] = tbls - for a in tbls.alias: - self.tables_dir[a] = tbls - - elif type(tbls) is projection: - self.joins.append(alias(tbls.finalize())) - - def produce(self, node): - if type(node) is list: - for d in node: - self.append(join(self, d)) - - elif type(node) is dict: - alias = '' - if 'value' in node: - table_name = node['value'] - tbl = None - if 'name' in node: - alias = node['name'] - if type(table_name) is dict: - if 'select' in table_name: - # TODO: subquery, create and register TableInfo in projection - tbl = projection(self, table_name).finalize() - else: - tbl = self.context.tables_byname[table_name] - if 'name' in node: - tbl.add_alias(node['name']) - self.append(tbl, alias) - else: - keys = node.keys() - if keys[0].lower().endswith('join'): - j = join(self, node[keys[0]]) - tablename = f' {keys[0]} {j}' - if keys[1].lower() == 'on': - tablename += f' on {expr(self, node[keys[1]])}' - self.joins.append(tablename) - self.tables += j.tables - self.tables_dir = {**self.tables_dir, **j.tables_dir} - - elif type(node) is str: - if node in self.context.tables_byname: - self.append(self.context.tables_byname[node]) - else: - print(f'Error: table {node} not found.') - - def get_cols(self, colExpr: str) -> ColRef: - for t in self.tables: - if colExpr in t.columns_byname: - col = t.columns_byname[colExpr] - if type(self.rec) is set: - self.rec.add(col) - return col - - def parse_col_names(self, colExpr:str) -> ColRef: - parsedColExpr = colExpr.split('.') - if len(parsedColExpr) <= 1: - return self.get_cols(colExpr) - else: - datasource = self.tables_dir[parsedColExpr[0]] - if datasource is None: - raise ValueError(f'Table name/alias not defined{parsedColExpr[0]}') - else: - return datasource.parse_col_names(parsedColExpr[1]) - @property - def all_cols(self): - return set([c for t in self.tables for c in t.columns]) - def consume(self, node): - self.sql = ', '.join(self.joins) - if node and self.sql and self.top_level: - self.sql = ' FROM ' + self.sql - return super().consume(node) - - def __str__(self): - return ', '.join(self.joins) - def __repr__(self): - return self.__str__() - - -class filter(ast_node): - name = 'where' - def 
produce(self, node): - self.add(expr(self, node).sql) - - -class create_table(ast_node): - name = 'create_table' - first_order = name - def init(self, node): - if self.parent is None: - self.context.sql_begin() - self.sql = 'CREATE TABLE ' - - def produce(self, node): - ct = node[self.name] - tbl = self.context.add_table(ct['name'], ct['columns']) - self.sql = f'CREATE TABLE {tbl.table_name}(' - columns = [] - for c in tbl.columns: - columns.append(f'{c.name} {c.type.sqlname}') - self.sql += ', '.join(columns) - self.sql += ')' - if self.context.use_columnstore: - self.sql += ' engine=ColumnStore' - -class insert(ast_node): - name = 'insert' - first_order = name - - def produce(self, node): - values = node['query']['select'] - tbl = node['insert'] - self.sql = f'INSERT INTO {tbl} VALUES(' - # if len(values) != table.n_cols: - # raise ValueError("Column Mismatch") - list_values = [] - for i, s in enumerate(values): - if 'value' in s: - list_values.append(f"{s['value']}") - else: - # subquery, dispatch to select astnode - pass - self.sql += ', '.join(list_values) + ')' - - -class load(ast_node): - name="load" - first_order = name - def init(self, node): - self.module = False - if node['load']['file_type'] == 'module': - self.produce = self.produce_module - self.module = True - elif self.context.dialect == 'MonetDB': - self.produce = self.produce_monetdb - else: - self.produce = self.produce_aq - if self.parent is None: - self.context.sql_begin() - - def produce_module(self, node): - # create command for exec engine -> done - # create c++ stub - # create dummy udf obj for parsing - # def decode_type(ty : str) -> str: - # ret = '' - # back = '' - # while(ty.startswith('vec')): - # ret += 'ColRef<' - # back += '>' - # ty = ty[3:] - # ret += ty - # return ret + back - node = node['load'] - file = node['file']['literal'] - self.context.queries.append(f'M{file}') - self.module_name = file - self.functions = {} - if 'funcs' in node: - for f in enlist(node['funcs']): - fname = f['fname'] - self.context.queries.append(f'F{fname}') - ret_type = VoidT - if 'ret_type' in f: - ret_type = Types.decode(f['ret_type']) - nargs = 0 - arglist = '' - if 'vars' in f: - arglist = [] - for v in enlist(f['vars']): - arglist.append(f'{Types.decode(v["type"]).cname} {v["arg"]}') - nargs = len(arglist) - arglist = ', '.join(arglist) - # create c++ stub - cpp_stub = f'{ret_type.cname} (*{fname})({arglist}) = nullptr;' - self.context.module_stubs += cpp_stub + '\n' - self.context.module_map[fname] = cpp_stub - #registration for parser - self.functions[fname] = user_module_function(fname, nargs, ret_type, self.context) - self.context.module_init_loc = len(self.context.queries) - - def produce_aq(self, node): - node = node['load'] - s1 = 'LOAD DATA INFILE ' - s2 = 'INTO TABLE ' - s3 = 'FIELDS TERMINATED BY ' - self.sql = f'{s1} \"{node["file"]["literal"]}\" {s2} {node["table"]}' - if 'term' in node: - self.sql += f' {s3} \"{node["term"]["literal"]}\"' - - def produce_monetdb(self, node): - node = node['load'] - s1 = f'COPY OFFSET 2 INTO {node["table"]} FROM ' - s2 = ' ON SERVER ' - s3 = ' USING DELIMITERS ' - import os - p = os.path.abspath(node['file']['literal']).replace('\\', '/') - - self.sql = f'{s1} \'{p}\' {s2} ' - if 'term' in node: - self.sql += f' {s3} \'{node["term"]["literal"]}\'' - -class outfile(ast_node): - name="_outfile" - def __init__(self, parent, node, context = None, *, sql = None): - self.node = node - super().__init__(parent, node, context) - self.sql = sql if sql else '' - - def init(self, _): - 
assert(type(self.parent) is projection) - if not self.parent.use_postproc: - if self.context.dialect == 'MonetDB': - self.produce = self.produce_monetdb - else: - self.produce = self.produce_aq - - return super().init(_) - def produce_aq(self, node): - filename = node['loc']['literal'] if 'loc' in node else node['literal'] - self.sql += f'INTO OUTFILE "{filename}"' - if 'term' in node: - self.sql += f' FIELDS TERMINATED BY \"{node["term"]["literal"]}\"' - - def produce_monetdb(self, node): - filename = node['loc']['literal'] if 'loc' in node else node['literal'] - import os - p = os.path.abspath('.').replace('\\', '/') + '/' + filename - self.sql = f'COPY {self.sql} INTO "{p}"' - d = '\t' - e = '\n' - if 'term' in node: - d = node['term']['literal'] - self.sql += f' delimiters \'{d}\', \'{e}\'' - - def finalize(self): - filename = self.node['loc']['literal'] if 'loc' in self.node else self.node['literal'] - sep = ',' if 'term' not in self.node else self.node['term']['literal'] - file_pointer = 'fp_' + base62uuid(6) - self.addc(f'FILE* {file_pointer} = fopen("{filename}", "w");') - self.addc(f'{self.parent.out_table.contextname_cpp}->printall("{sep}", "\\n", nullptr, {file_pointer});') - self.addc(f'fclose({file_pointer});') - self.context.ccode += self.ccode - -class udf(ast_node): - name = 'udf' - first_order = name - @staticmethod - def try_init_udf(context : Context): - if context.udf is None: - context.udf = '/*UDF Start*/\n' - context.headers.add('\"./udf.hpp\"') - - @dataclass - class builtin_var: - enabled : bool = False - _type : Types = AnyT - all = ('_builtin_len', '_builtin_ret') - - def decltypecall(self, c_code = False, *args): - from engine.types import fn_behavior - class dummy: - def __init__(self, name): - self.cname = name + '_gettype' - self.sqlname = self.cname - return fn_behavior(dummy(self.cname), c_code, *args) - - def __call__(self, c_code = False, *args): - from engine.types import fn_behavior - builtin_args = [f'{{{n}()}}' for n, v in self.builtin.items() if v.enabled] - return fn_behavior(self, c_code, *args, *builtin_args) - - def return_type(self, *_ : Types): - return LazyT - - def init(self, _): - self.builtin : Dict[str, udf.builtin_var] = { - '_builtin_len' : udf.builtin_var(False, UIntT), - '_builtin_ret' : udf.builtin_var(False, Types( - 255, name = 'generic_ref', cname = 'auto&' - )) - } - self.var_table = {} - self.args = [] - udf.try_init_udf(self.context) - self.vecs = set() - self.code_list = [] - self.builtin_used = None - - def add(self, *code): - ccode = '' - for c in code: - if type(c) is str: - ccode += c - else: - self.code_list.append(ccode) - self.code_list.append(c) - ccode = '' - if ccode: - self.code_list.append(ccode) - - - def produce(self, node): - from engine.utils import get_legal_name, check_legal_name - node = node[self.name] - # register udf - self.agg = 'Agg' in node - self.cname = get_legal_name(node['fname']) - self.sqlname = self.cname - self.context.udf_map[self.cname] = self - if self.agg: - self.context.udf_agg_map[self.cname] = self - self.add(f'auto {self.cname} = [](') - - def get_block(self, ind, node): - if 'stmt' in node: - old_ind = ind - ind += '\t' - next_stmt = enlist(node['stmt']) - if len(next_stmt) > 1: - self.add(f' {{\n') - self.get_stmt(ind ,next_stmt) - self.add(f'{old_ind}}}\n') - else: - self.get_stmt(ind, next_stmt) - - def get_cname(self, x:str): - return self.var_table[x] - - def get_assignment(self, ind, node, *, types = 'auto', sep = ';\n'): - var_ex = expr(self, node['var'], c_code=True, 
supress_undefined = True) - ex = expr(self, node['expr'], c_code=True) - var = var_ex.eval(y=self.get_cname) - if var in self.var_table or hasattr(var_ex, 'builtin_var'): - op = '=' - if 'op' in node and node['op'] != ':=': - op = node['op'] - e = ex.eval(y=self.get_cname) - def assign_behavior(decltypestr = False): - nonlocal ind, var, op, e, sep - v = var(decltypestr) if callable(var) else var - _e = e(decltypestr) if callable(e) else e - if v == '_builtin_ret': - return f'{ind}return {_e}{sep}' - elif '_builtin_ret' not in _e: - return f'{ind}{v} {op} {_e}{sep}' - else: - return '' - self.add(assign_behavior) - else: - cvar = get_legal_name(var) - self.var_table[var] = cvar - self.add(f'{ind}{types} {cvar} = ', ex.eval(y=self.get_cname), sep) - - def get_stmt(self, ind, node): - node = enlist(node) - for n in node: - if 'if' in n: - _ifnode = n['if'] - self.add(f'{ind}if(', expr(self, _ifnode["cond"]).eval(y=self.get_cname), ')') - if 'stmt' in _ifnode: - self.get_block(ind, _ifnode) - else: - self.add('\n') - self.get_stmt(ind + '\t', _ifnode) - if 'elif' in _ifnode: - for e in n['elif']: - self.add(f'{ind}else if(', expr(self, e["cond"]).eval(y=self.get_cname), ')') - self.get_block(ind, e) - if 'else' in _ifnode: - self.add(f'{ind}else ') - self.get_block(ind, _ifnode['else']) - - elif 'for' in n: - _fornode = n['for'] - defs = _fornode['defs'] - self.add(f'{ind}for({"auto " if len(enlist(defs["op"])) != 0 else ";"}') - def get_inline_assignments(node, end = '; '): - var = enlist(node['var']) - op = enlist(node['op']) - expr = enlist(node['expr']) - len_node = len(enlist(op)) - for i, (v, o, e) in enumerate(zip(var, op, expr)): - self.get_assignment('', {'var' : v, 'op' : o, 'expr' : e}, types = '', sep = ', ' if i != len_node - 1 else end) - get_inline_assignments(defs) - self.add(expr(self, _fornode["cond"]).eval(y=self.get_cname), '; ') - get_inline_assignments(_fornode['tail'], ') ') - if 'stmt' in _fornode: - self.get_block(ind, _fornode) - else: - self.add('\n') - self.get_stmt(ind + '\t', _fornode) - elif 'assignment' in n: - assign = n['assignment'] - self.get_assignment(ind, assign) - - - def consume(self, node): - from engine.utils import get_legal_name, check_legal_name - node = node[self.name] - - if 'params' in node: - for args in node['params']: - cname = get_legal_name(args) - self.var_table[args] = cname - self.args.append(cname) - front = [*self.code_list, ', '.join([f'const auto& {a}' for a in self.args])] - self.code_list = [] - - self.with_storage = False - self.with_statics = False - self.static_decl : Optional[List[str]] = None - ind = '\t' - if 'static_decl' in node: - self.add(') {\n') - curr = node['static_decl'] - self.with_statics = True - if 'var' in curr and 'expr' in curr: - if len(curr['var']) != len(curr['expr']): - print("Error: every static variable must be initialized.") - self.static_decl = [] - for v, e in zip(curr['var'], curr['expr']): - cname = get_legal_name(v) - self.var_table[v] = cname - self.static_decl.append(f'{cname} = ', expr(self, e, c_code=True).eval(self.get_cname)) - self.add(f'{ind}static auto {"; static auto ".join(self.static_decl)};\n') - self.add(f'{ind}auto reset = [=]() {{ {"; ".join(self.static_decl)}; }};\n') - self.add(f'{ind}auto call = []({", ".join([f"decltype({a}) {a}" for a in self.args])}') - ind = '\t\t' - front = [*front, *self.code_list] - self.code_list = [] - if 'stmt' in node: - self.get_stmt(ind, node['stmt']) - # first scan to determine vec types - # if self.agg: - # for assign in node['assignment']: - # var 
= fastscan(assign['var']) - # ex = fastscan(assign['expr']) - # self.vecs.union(var.vec_vars) - # self.vecs.union(var.requested_lens) - # self.vecs.union(ex.vec_vars) - # self.vecs.union(ex.requested_lens) - # if len(self.vecs) != 0: - # self.idx_var = 'idx_' + base62uuid(5) - # self.ccode += f'{ind}auto {self.idx_var} = 0;\n' - - ret = node['ret'] - def return_call(decltypestr = False): - if (decltypestr): - return '' - ret = '' - for r in self.return_call: - if callable(r): - ret += r(False) - else: - ret += r - return ret - self.return_call = (f'{ind}return ', expr(self, ret, c_code=True).eval(self.get_cname), ';\n') - self.add(return_call) - if self.with_statics: - self.add('\t};\n') - self.add('\treturn std::make_pair(reset, call);\n') - self.add('};\n') - - #print(self.ccode) - self.builtin_args = [(name, var._type.cname) for name, var in self.builtin.items() if var.enabled] - # self.context.udf += front + builtin_argstr + self.ccode + '\n' - self.finalize(front) - - def finalize(self, front): - builtin_argstr = ', ' if len(self.builtin_args) and len(self.args) else '' - builtin_argstr += ', '.join([f'{t} {n}' for (n, t) in self.builtin_args]) - self.builtin_used = [b for b, v in self.builtin.items() if v.enabled] - ccode = '' - def process_recursion(l, decltypestr = False): - nonlocal ccode - for c in l: - if type(c) is str: - ccode += c - elif callable(c): - ccode += c(decltypestr) # a callback function - else: - raise ValueError(f'Illegal operation in udf code generation: {c}') - process_recursion(front) - ccode += builtin_argstr + ') {\n' - process_recursion(self.code_list) - self.context.udf += ccode + '\n' - ccode = '' - if self.return_pattern == udf.ReturnPattern.elemental_return: - ccode += f'auto {self.cname}_gettype = [](' - process_recursion(front[1:], True) - ccode += ') {\n\tuint32_t _builtin_len = 0;\n' - process_recursion(self.code_list, True) - self.context.udf += ccode + '\n' - - class ReturnPattern(Enum): - bulk_return = auto() - elemental_return = auto() - - @property - def return_pattern(self): - if '_builtin_ret' in self.builtin_used: - return udf.ReturnPattern.elemental_return - else: - return udf.ReturnPattern.bulk_return - -class user_module_function(OperatorBase): - def __init__(self, name, nargs, ret_type, context : Context): - super().__init__(name, nargs, lambda *_: ret_type, call=fn_behavior) - user_module_func[name] = self - # builtin_operators[name] = self - udf.try_init_udf(context) - -def include(objs): - import inspect - for _, cls in inspect.getmembers(objs): - if inspect.isclass(cls) and issubclass(cls, ast_node) and type(cls.first_order) is str: - ast_node.types[cls.first_order] = cls - - -import sys -include(sys.modules[__name__]) +from copy import deepcopy +from dataclasses import dataclass +from enum import Enum, auto +from typing import Set, Tuple, Dict, Union, List, Optional +from engine.types import * +from engine.utils import enlist, base62uuid, base62alp, get_legal_name +from reconstruct.storage import Context, TableInfo, ColRef + +class ast_node: + header = [] + types = dict() + first_order = False + + def __init__(self, parent:Optional["ast_node"], node, context:Optional[Context] = None): + self.context = parent.context if context is None else context + self.parent = parent + self.sql = '' + self.ccode = '' + if hasattr(parent, 'datasource'): + self.datasource = parent.datasource + else: + self.datasource = None + self.init(node) + self.produce(node) + self.spawn(node) + self.consume(node) + + def emit(self, code): + 
self.context.emit(code) + def add(self, code): + self.sql += code + ' ' + def addc(self, code): + self.ccode += code + '\n' + + name = 'null' + + def init(self, _): + if self.parent is None: + self.context.sql_begin() + self.add(self.__class__.name.upper()) + + def produce(self, _): + pass + def spawn(self, _): + pass + + def consume(self, _): + if self.parent is None: + self.emit(self.sql+';\n') + self.context.sql_end() + +from reconstruct.expr import expr, fastscan + + +class projection(ast_node): + name = 'projection' + first_order = 'select' + + def init(self, _): + # skip default init + pass + + def produce(self, node): + p = node['select'] + self.projections = p if type(p) is list else [p] + self.add('SELECT') + if self.parent is None: + self.context.sql_begin() + self.postproc_fname = 'dll_' + base62uuid(6) + self.context.postproc_begin(self.postproc_fname) + + def spawn(self, node): + self.datasource = join(self, [], self.context) # datasource is Join instead of TableInfo + self.assumptions = [] + if 'from' in node: + from_clause = node['from'] + self.datasource = join(self, from_clause) + if 'assumptions' in from_clause: + self.assumptions = enlist(from_clause['assumptions']) + + if self.datasource is not None: + self.datasource_changed = True + self.prev_datasource = self.context.datasource + self.context.datasource = self.datasource + + if 'where' in node: + self.where = filter(self, node['where']) + else: + self.where = None + + + def consume(self, node): + # deal with projections + out_table_varname = 'out_'+base62uuid(6) + if 'into' in node: + out_table_name = node['into'] + else: + out_table_name = out_table_varname + + self.out_table : TableInfo = TableInfo(out_table_name, [], self.context) + self.out_table.contextname_cpp = out_table_varname + + cols = [] + self.col_ext : Set[ColRef]= set() + col_exprs : List[Tuple[str, Types]] = [] + + proj_map : Dict[int, List[Union[Types, int, str, expr]]]= dict() + self.var_table = dict() + # self.sp_refs = set() + for i, proj in enumerate(self.projections): + compound = False + self.datasource.rec = set() + name = '' + this_type = AnyT + if type(proj) is dict: + if 'value' in proj: + e = proj['value'] + proj_expr = expr(self, e) + this_type = proj_expr.type + name = proj_expr.sql + compound = True # compound column + proj_expr.cols_mentioned = self.datasource.rec + alias = '' + if 'name' in proj: # renaming column by AS keyword + alias = proj['name'] + + if not proj_expr.is_special: + y = lambda x:x + name = eval('f\'' + name + '\'') + if name not in self.var_table: + self.var_table[name] = len(col_exprs) + proj_map[i] = [this_type, len(col_exprs), proj_expr] + col_expr = name + ' AS ' + alias if alias else name + if alias: + self.var_table[alias] = len(col_exprs) + col_exprs.append((col_expr, proj_expr.type)) + else: + self.context.headers.add('"./server/aggregations.h"') + if self.datasource.rec is not None: + self.col_ext = self.col_ext.union(self.datasource.rec) + proj_map[i] = [this_type, proj_expr.sql, proj_expr] + + disp_name = get_legal_name(alias if alias else name) + + elif type(proj) is str: + col = self.datasource.get_col(proj) + this_type = col.type + # name = col.name + self.datasource.rec = None + # TODO: Type deduction in Python + cols.append(ColRef(this_type, self.out_table, None, disp_name, i, compound=compound)) + + self.out_table.add_cols(cols, new = False) + + if 'groupby' in node: + self.group_node = groupby(self, node['groupby']) + else: + self.group_node = None + + self.col_ext = [c for c in self.col_ext if 
c.name not in self.var_table] # remove duplicates in self.var_table
+        col_ext_names = [c.name for c in self.col_ext]
+        self.add(', '.join([c[0] for c in col_exprs] + col_ext_names))
+
+        _base_offset = len(col_exprs)
+        for i, col in enumerate(col_ext_names):
+            if col not in self.var_table:
+                self.var_table[col] = i + _base_offset
+
+
+        def finalize(astnode:ast_node):
+            if(astnode is not None):
+                self.add(astnode.sql)
+        finalize(self.datasource)
+        finalize(self.where)
+        if self.group_node and not self.group_node.use_sp_gb:
+            self.add(self.group_node.sql)
+
+        self.use_postproc = False
+        if self.col_ext or self.group_node and self.group_node.use_sp_gb:
+            self.use_postproc = True
+
+        o = self.assumptions
+        if 'orderby' in node:
+            o.extend(enlist(node['orderby']))
+        if o:
+            self.add(orderby(self, o).sql)
+
+        if 'outfile' in node:
+            self.outfile = outfile(self, node['outfile'], sql = self.sql)
+            if not self.use_postproc:
+                self.sql += self.outfile.sql
+        else:
+            self.outfile = None
+
+        if self.parent is None:
+            self.emit(self.sql+';\n')
+        else:
+            # TODO: subquery, name create tmp-table from subquery w/ alias as name
+            pass
+
+
+        # cpp module codegen
+        self.context.has_dll = True
+        # extract typed-columns from result-set
+        vid2cname = [0]*len(self.var_table)
+        self.pyname2cname = dict()
+        typenames = [c[1] for c in col_exprs] + [c.type for c in self.col_ext]
+        length_name = 'len_' + base62uuid(6)
+        self.context.emitc(f'auto {length_name} = server->cnt;')
+
+        for v, idx in self.var_table.items():
+            vname = get_legal_name(v) + '_' + base62uuid(3)
+            self.pyname2cname[v] = vname
+            self.context.emitc(f'auto {vname} = ColRef<{typenames[idx].cname}>({length_name}, server->getCol({idx}));')
+            vid2cname[idx] = vname
+        # Create table into context
+        out_typenames = [None] * len(proj_map)
+
+        for key, val in proj_map.items():
+            if type(val[1]) is str:
+                x = True
+                y = lambda t: self.pyname2cname[t]
+                val[1] = val[2].eval(x, y, gettype=True)
+                if callable(val[1]):
+                    val[1] = val[1](True)
+            decltypestring = val[1]
+
+            if val[0] == LazyT:
+                decltypestring = f'value_type<decays<decltype({decltypestring})>>'
+                out_typenames[key] = decltypestring
+            else:
+                out_typenames[key] = val[0].cname
+            if (type(val[2].udf_called) is udf and
+                    val[2].udf_called.return_pattern == udf.ReturnPattern.elemental_return
+                or
+                    self.group_node and self.group_node.use_sp_gb and
+                    val[2].cols_mentioned.intersection(
+                        self.datasource.all_cols.difference(self.group_node.refs))
+                ):
+                out_typenames[key] = f'ColRef<{out_typenames[key]}>'
+
+        outtable_col_nameslist = ', '.join([f'"{c.name}"' for c in self.out_table.columns])
+        self.outtable_col_names = 'names_' + base62uuid(4)
+        self.context.emitc(f'const char* {self.outtable_col_names}[] = {{{outtable_col_nameslist}}};')
+        # out_typenames = [v[0].cname for v in proj_map.values()]
+        self.context.emitc(f'auto {self.out_table.contextname_cpp} = new TableInfo<{",".join(out_typenames)}>("{self.out_table.table_name}", {self.outtable_col_names});')
+        # TODO: Inject custom group by code here and flag them in proj_map
+        # Type of UDFs? Complex UDFs, ones with static vars?
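+        # A minimal sketch of what the two materialization paths below emit,
+        # assuming a hypothetical output table `out_tbl` and column `a_col`
+        # (illustrative names; real ones carry base62uuid suffixes):
+        #   out_tbl->get_col<0>().initfrom(a_col, "a");  // val[1] is an int: plain column, wrapped in place
+        #   out_tbl->get_col<1>() = sums(a_col);         // val[1] is a string: computed expression, assigned
+        # A special (post-processed) GROUP BY instead hands every output column
+        # to group_node.finalize(), which fills it group by group.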
+        if self.group_node is not None and self.group_node.use_sp_gb:
+            gb_vartable : Dict[str, Union[str, int]] = deepcopy(self.pyname2cname)
+            gb_cexprs : List[Tuple[str, expr]] = []
+
+            for key, val in proj_map.items():
+                col_name = 'col_' + base62uuid(6)
+                self.context.emitc(f'decltype(auto) {col_name} = {self.out_table.contextname_cpp}->get_col<{key}>();')
+                gb_cexprs.append((col_name, val[2]))
+            self.group_node.finalize(gb_cexprs, gb_vartable)
+        else:
+            for i, (key, val) in enumerate(proj_map.items()):
+                if type(val[1]) is int:
+                    self.context.emitc(
+                        f'{self.out_table.contextname_cpp}->get_col<{key}>().initfrom({vid2cname[val[1]]}, "{cols[i].name}");'
+                    )
+                else:
+                    # for funcs evaluate f_i(x, ...)
+                    self.context.emitc(f'{self.out_table.contextname_cpp}->get_col<{key}>() = {val[1]};')
+        # print out cols
+        self.context.emitc(f'print(*{self.out_table.contextname_cpp});')
+
+        if self.outfile:
+            self.outfile.finalize()
+
+        if 'into' in node:
+            self.context.emitc(select_into(self, node['into']).ccode)
+
+        self.context.emitc(f'puts("done.");')
+
+        if self.parent is None:
+            self.context.sql_end()
+            self.context.postproc_end(self.postproc_fname)
+
+
+
+class select_into(ast_node):
+    def init(self, node):
+        if type(self.parent) is projection:
+            if self.context.has_dll:
+                # has postproc put back to monetdb
+                self.produce = self.produce_cpp
+            else:
+                self.produce = self.produce_sql
+        else:
+            raise ValueError('parent must be projection')
+    def produce_cpp(self, node):
+        assert(type(self.parent) is projection)
+        if not hasattr(self.parent, 'out_table'):
+            raise Exception('No out_table found.')
+        else:
+            self.context.headers.add('"./server/table_ext_monetdb.hpp"')
+            self.ccode = f'{self.parent.out_table.contextname_cpp}->monetdb_append_table(cxt->alt_server, \"{node}\");'
+
+    def produce_sql(self, node):
+        self.sql = f' INTO {node}'
+
+
+class orderby(ast_node):
+    name = 'order by'
+    def produce(self, node):
+        if node is None:
+            self.sql = ''
+            return
+
+        node = enlist(node)
+        o_list = []
+
+        for o in node:
+            o_str = expr(self, o['value']).sql
+            if 'sort' in o and f'{o["sort"]}'.lower() == 'desc':
+                o_str += ' ' + 'DESC'
+            o_list.append(o_str)
+        self.add(', '.join(o_list))
+
+
+class scan(ast_node):
+    class Position(Enum):
+        init = auto()
+        front = auto()
+        body = auto()
+        back = auto()
+        fin = auto()
+        # TODO: use this for positions for scanner
+    class LoopStyle(Enum):
+        forloop = auto()
+        foreach = auto()
+
+    name = 'scan'
+    def __init__(self, parent: "ast_node", node, loop_style = 'for', context: Context = None, const = False):
+        self.const = "const " if const else ""
+        self.loop_style = loop_style
+        super().__init__(parent, node, context)
+
+    def init(self, _):
+        self.datasource = self.context.datasource
+        self.initializers = ''
+        self.start = ''
+        self.front = ''
+        self.body = ''
+        self.end = '}'
+        scan_vars = set(s.it_ver for s in self.context.scans)
+        self.it_ver = 'i' + base62uuid(2)
+        while(self.it_ver in scan_vars):
+            self.it_ver = 'i' + base62uuid(6)
+        self.parent.context.scans.append(self)
+
+    def produce(self, node):
+        if self.loop_style == 'for_each':
+            self.colref = node
+            self.start += f'for ({self.const}auto& {self.it_ver} : {node}) {{\n'
+        else:
+            self.start += f"for (uint32_t {self.it_ver} = 0; {self.it_ver} < {node}; ++{self.it_ver}){{\n"
+
+    def add(self, stmt, position = "body"):
+        if position == "body":
+            self.body += stmt + '\n'
+        elif position == "init":
+            self.initializers += stmt + '\n'
+        else:
+            self.front += stmt + '\n'
+
+    def finalize(self):
+        self.context.remove_scan(self, self.initializers + self.start + self.front + self.body + self.end)
+
+class groupby_c(ast_node):
+    name = '_groupby'
+    def init(self, node : List[Tuple[expr, Set[ColRef]]]):
+        self.proj : projection = self.parent
+        self.glist : List[Tuple[expr, Set[ColRef]]] = node
+        return super().init(node)
+    def produce(self, node : List[Tuple[expr, Set[ColRef]]]):
+        self.context.headers.add('"./server/hasher.h"')
+        self.context.headers.add('unordered_map')
+        self.group = 'g' + base62uuid(7)
+        self.group_type = 'record_type' + base62uuid(7)
+        self.datasource = self.proj.datasource
+        self.scanner = None
+        self.datasource.rec = set()
+
+        g_contents = ''
+        g_contents_list = []
+        first_col = ''
+
+        for g in self.glist:
+            e = expr(self, g[0].node, c_code=True)
+            g_str = e.eval(c_code = True, y = lambda c: self.proj.pyname2cname[c])
+            # if v is compound expr, create tmp cols
+            if e.is_ColExpr:
+                tmpcol = 't' + base62uuid(7)
+                self.context.emitc(f'auto {tmpcol} = {g_str};')
+                e = tmpcol
+            g_contents_list.append(e)
+        first_col = g_contents_list[0]
+        g_contents_decltype = [f'decays<decltype({c})>' for c in g_contents_list]
+        g_contents = ','.join(g_contents_list)
+        self.context.emitc(f'typedef record<{",".join(g_contents_decltype)}> {self.group_type};')
+        self.context.emitc(f'unordered_map<{self.group_type}, vector_type<uint32_t>, '
+                           f'transTypes<{self.group_type}, hasher>> {self.group};')
+        self.n_grps = len(self.glist)
+        self.scanner = scan(self, first_col + '.size')
+        self.scanner.add(f'{self.group}[forward_as_tuple({g_contents}[{self.scanner.it_ver}])].emplace_back({self.scanner.it_ver});')
+
+    def consume(self, _):
+        self.scanner.finalize()
+
+    # def deal_with_assumptions(self, assumption:assumption, out:TableInfo):
+    #     gscanner = scan(self, self.group)
+    #     val_var = 'val_'+base62uuid(7)
+    #     gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;')
+    #     gscanner.add(f'{self.datasource.cxt_name}->order_by<{assumption.result()}>(&{val_var});')
+    #     gscanner.finalize()
+
+    def finalize(self, cexprs : List[Tuple[str, expr]], var_table : Dict[str, Union[str, int]]):
+        gscanner = scan(self, self.group, loop_style = 'for_each')
+        key_var = 'key_'+base62uuid(7)
+        val_var = 'val_'+base62uuid(7)
+
+        gscanner.add(f'auto &{key_var} = {gscanner.it_ver}.first;', position = 'front')
+        gscanner.add(f'auto &{val_var} = {gscanner.it_ver}.second;', position = 'front')
+        len_var = None
+        def define_len_var():
+            nonlocal len_var
+            if len_var is None:
+                len_var = 'len_'+base62uuid(7)
+                gscanner.add(f'auto &{len_var} = {val_var}.size;', position = 'front')
+
+        def get_key_idx (varname : str):
+            for i, g in enumerate(self.glist):
+                if varname == g[0].eval():
+                    return i
+            return var_table[varname]
+
+        def get_var_names (varname : str):
+            var = get_key_idx(varname)
+            if type(var) is str:
+                return f'{var}[{val_var}]'
+            else:
+                return f'get<{var}>({key_var})'
+
+        for ce in cexprs:
+            ex = ce[1]
+            materialize_builtin = {}
+            if type(ex.udf_called) is udf:
+                if '_builtin_len' in ex.udf_called.builtin_used:
+                    define_len_var()
+                    materialize_builtin['_builtin_len'] = len_var
+                if '_builtin_ret' in ex.udf_called.builtin_used:
+                    define_len_var()
+                    gscanner.add(f'{ce[0]}.emplace_back({{{len_var}}});\n')
+                    materialize_builtin['_builtin_ret'] = f'{ce[0]}.back()'
+                    gscanner.add(f'{ex.eval(c_code = True, y=get_var_names, materialize_builtin = materialize_builtin)};\n')
+                    continue
+            gscanner.add(f'{ce[0]}.emplace_back({ex.eval(c_code = True, y=get_var_names, materialize_builtin = materialize_builtin)});\n')
+
+        gscanner.finalize()
+
+        self.datasource.groupinfo = None
+
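+# A rough sketch of the C++ that groupby_c above emits for a single GROUP BY
+# key column (illustrative names; real ones carry base62uuid suffixes, and the
+# vector_type payload is assumed to hold uint32_t row indices):
+#   typedef record<decays<decltype(keycol)>> record_type_x;
+#   unordered_map<record_type_x, vector_type<uint32_t>,
+#                 transTypes<record_type_x, hasher>> g_x;
+#   for (uint32_t i0 = 0; i0 < keycol.size; ++i0)
+#       g_x[forward_as_tuple(keycol[i0])].emplace_back(i0);
+# finalize() then iterates over g_x with a for-each scan and evaluates each
+# projection expression once per group, materializing into the output columns.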
+class groupby(ast_node):
+    name = 'group by'
+    def produce(self, node):
+        if type(self.parent) is not projection:
+            raise ValueError('groupby can only be used in projection')
+
+        node = enlist(node)
+        o_list = []
+        self.refs = set()
+        self.dedicated_glist : List[Tuple[expr, Set[ColRef]]] = []
+        self.use_sp_gb = False
+        for g in node:
+            self.datasource.rec = set()
+            g_expr = expr(self, g['value'])
+            refs : Set[ColRef] = self.datasource.rec
+            self.datasource.rec = None
+            if self.parent.col_ext:
+                this_sp_ref = refs.difference(self.parent.col_ext)
+                self.use_sp_gb = self.use_sp_gb or len(this_sp_ref) > 0
+            self.refs.update(refs)
+            self.dedicated_glist.append((g_expr, refs))
+            g_str = g_expr.eval(c_code = False)
+            if 'sort' in g and f'{g["sort"]}'.lower() == 'desc':
+                g_str = g_str + ' ' + 'DESC'
+            o_list.append(g_str)
+
+        if not self.use_sp_gb:
+            self.dedicated_gb = None
+            self.add(', '.join(o_list))
+        else:
+            for l in self.dedicated_glist:
+                # l_exist = l[1].difference(self.parent.col_ext)
+                # for l in l_exist:
+                #     self.parent.var_table.
+                self.parent.col_ext.update(l[1])
+
+    def finalize(self, cexprs : List[Tuple[str, expr]], var_table : Dict[str, Union[str, int]]):
+        if self.use_sp_gb:
+            self.dedicated_gb = groupby_c(self.parent, self.dedicated_glist)
+            self.dedicated_gb.finalize(cexprs, var_table)
+
+class join(ast_node):
+    name = 'join'
+    def init(self, _):
+        self.joins:list = []
+        self.tables : List[TableInfo] = []
+        self.tables_dir = dict()
+        self.rec = None
+        self.top_level = self.parent and type(self.parent) is projection
+        # self.tmp_name = 'join_' + base62uuid(4)
+        # self.datasource = TableInfo(self.tmp_name, [], self.context)
+    def append(self, tbls, __alias = ''):
+        alias = lambda t : '(' + t + ') ' + __alias if len(__alias) else t
+        if type(tbls) is join:
+            self.joins.append(alias(tbls.__str__()))
+            self.tables += tbls.tables
+            self.tables_dir = {**self.tables_dir, **tbls.tables_dir}
+
+        elif type(tbls) is TableInfo:
+            self.joins.append(alias(tbls.table_name))
+            self.tables.append(tbls)
+            self.tables_dir[tbls.table_name] = tbls
+            for a in tbls.alias:
+                self.tables_dir[a] = tbls
+
+        elif type(tbls) is projection:
+            self.joins.append(alias(tbls.finalize()))
+
+    def produce(self, node):
+        if type(node) is list:
+            for d in node:
+                self.append(join(self, d))
+
+        elif type(node) is dict:
+            alias = ''
+            if 'value' in node:
+                table_name = node['value']
+                tbl = None
+                if 'name' in node:
+                    alias = node['name']
+                if type(table_name) is dict:
+                    if 'select' in table_name:
+                        # TODO: subquery, create and register TableInfo in projection
+                        tbl = projection(self, table_name).finalize()
+                else:
+                    tbl = self.context.tables_byname[table_name]
+                    if 'name' in node:
+                        tbl.add_alias(node['name'])
+                self.append(tbl, alias)
+            else:
+                keys = list(node.keys())
+                if keys[0].lower().endswith('join'):
+                    j = join(self, node[keys[0]])
+                    tablename = f' {keys[0]} {j}'
+                    if keys[1].lower() == 'on':
+                        tablename += f' on {expr(self, node[keys[1]])}'
+                    self.joins.append(tablename)
+                    self.tables += j.tables
+                    self.tables_dir = {**self.tables_dir, **j.tables_dir}
+
+        elif type(node) is str:
+            if node in self.context.tables_byname:
+                self.append(self.context.tables_byname[node])
+            else:
+                print(f'Error: table {node} not found.')
+
+    def get_cols(self, colExpr: str) -> ColRef:
+        for t in self.tables:
+            if colExpr in t.columns_byname:
+                col = t.columns_byname[colExpr]
+                if type(self.rec) is set:
+                    self.rec.add(col)
+                return col
+
+    def parse_col_names(self, colExpr:str) -> ColRef:
+        parsedColExpr = colExpr.split('.')
+        if
len(parsedColExpr) <= 1: + return self.get_cols(colExpr) + else: + datasource = self.tables_dir[parsedColExpr[0]] + if datasource is None: + raise ValueError(f'Table name/alias not defined{parsedColExpr[0]}') + else: + return datasource.parse_col_names(parsedColExpr[1]) + @property + def all_cols(self): + return set([c for t in self.tables for c in t.columns]) + def consume(self, node): + self.sql = ', '.join(self.joins) + if node and self.sql and self.top_level: + self.sql = ' FROM ' + self.sql + return super().consume(node) + + def __str__(self): + return ', '.join(self.joins) + def __repr__(self): + return self.__str__() + + +class filter(ast_node): + name = 'where' + def produce(self, node): + self.add(expr(self, node).sql) + + +class create_table(ast_node): + name = 'create_table' + first_order = name + def init(self, node): + if self.parent is None: + self.context.sql_begin() + self.sql = 'CREATE TABLE ' + + def produce(self, node): + ct = node[self.name] + tbl = self.context.add_table(ct['name'], ct['columns']) + self.sql = f'CREATE TABLE {tbl.table_name}(' + columns = [] + for c in tbl.columns: + columns.append(f'{c.name} {c.type.sqlname}') + self.sql += ', '.join(columns) + self.sql += ')' + if self.context.use_columnstore: + self.sql += ' engine=ColumnStore' + +class insert(ast_node): + name = 'insert' + first_order = name + + def produce(self, node): + values = node['query']['select'] + tbl = node['insert'] + self.sql = f'INSERT INTO {tbl} VALUES(' + # if len(values) != table.n_cols: + # raise ValueError("Column Mismatch") + list_values = [] + for i, s in enumerate(values): + if 'value' in s: + list_values.append(f"{s['value']}") + else: + # subquery, dispatch to select astnode + pass + self.sql += ', '.join(list_values) + ')' + + +class load(ast_node): + name="load" + first_order = name + def init(self, node): + self.module = False + if node['load']['file_type'] == 'module': + self.produce = self.produce_module + self.module = True + elif self.context.dialect == 'MonetDB': + self.produce = self.produce_monetdb + else: + self.produce = self.produce_aq + if self.parent is None: + self.context.sql_begin() + + def produce_module(self, node): + # create command for exec engine -> done + # create c++ stub + # create dummy udf obj for parsing + # def decode_type(ty : str) -> str: + # ret = '' + # back = '' + # while(ty.startswith('vec')): + # ret += 'ColRef<' + # back += '>' + # ty = ty[3:] + # ret += ty + # return ret + back + node = node['load'] + file = node['file']['literal'] + self.context.queries.append(f'M{file}') + self.module_name = file + self.functions = {} + if 'funcs' in node: + for f in enlist(node['funcs']): + fname = f['fname'] + self.context.queries.append(f'F{fname}') + ret_type = VoidT + if 'ret_type' in f: + ret_type = Types.decode(f['ret_type']) + nargs = 0 + arglist = '' + if 'vars' in f: + arglist = [] + for v in enlist(f['vars']): + arglist.append(f'{Types.decode(v["type"]).cname} {v["arg"]}') + nargs = len(arglist) + arglist = ', '.join(arglist) + # create c++ stub + cpp_stub = f'{ret_type.cname} (*{fname})({arglist}) = nullptr;' + self.context.module_stubs += cpp_stub + '\n' + self.context.module_map[fname] = cpp_stub + #registration for parser + self.functions[fname] = user_module_function(fname, nargs, ret_type, self.context) + self.context.module_init_loc = len(self.context.queries) + + def produce_aq(self, node): + node = node['load'] + s1 = 'LOAD DATA INFILE ' + s2 = 'INTO TABLE ' + s3 = 'FIELDS TERMINATED BY ' + self.sql = f'{s1} 
\"{node["file"]["literal"]}\" {s2} {node["table"]}' + if 'term' in node: + self.sql += f' {s3} \"{node["term"]["literal"]}\"' + + def produce_monetdb(self, node): + node = node['load'] + s1 = f'COPY OFFSET 2 INTO {node["table"]} FROM ' + s2 = ' ON SERVER ' + s3 = ' USING DELIMITERS ' + import os + p = os.path.abspath(node['file']['literal']).replace('\\', '/') + + self.sql = f'{s1} \'{p}\' {s2} ' + if 'term' in node: + self.sql += f' {s3} \'{node["term"]["literal"]}\'' + +class outfile(ast_node): + name="_outfile" + def __init__(self, parent, node, context = None, *, sql = None): + self.node = node + super().__init__(parent, node, context) + self.sql = sql if sql else '' + + def init(self, _): + assert(type(self.parent) is projection) + if not self.parent.use_postproc: + if self.context.dialect == 'MonetDB': + self.produce = self.produce_monetdb + else: + self.produce = self.produce_aq + + return super().init(_) + def produce_aq(self, node): + filename = node['loc']['literal'] if 'loc' in node else node['literal'] + self.sql += f'INTO OUTFILE "{filename}"' + if 'term' in node: + self.sql += f' FIELDS TERMINATED BY \"{node["term"]["literal"]}\"' + + def produce_monetdb(self, node): + filename = node['loc']['literal'] if 'loc' in node else node['literal'] + import os + p = os.path.abspath('.').replace('\\', '/') + '/' + filename + self.sql = f'COPY {self.sql} INTO "{p}"' + d = '\t' + e = '\n' + if 'term' in node: + d = node['term']['literal'] + self.sql += f' delimiters \'{d}\', \'{e}\'' + + def finalize(self): + filename = self.node['loc']['literal'] if 'loc' in self.node else self.node['literal'] + sep = ',' if 'term' not in self.node else self.node['term']['literal'] + file_pointer = 'fp_' + base62uuid(6) + self.addc(f'FILE* {file_pointer} = fopen("{filename}", "w");') + self.addc(f'{self.parent.out_table.contextname_cpp}->printall("{sep}", "\\n", nullptr, {file_pointer});') + self.addc(f'fclose({file_pointer});') + self.context.ccode += self.ccode + +class udf(ast_node): + name = 'udf' + first_order = name + @staticmethod + def try_init_udf(context : Context): + if context.udf is None: + context.udf = '/*UDF Start*/\n' + context.headers.add('\"./udf.hpp\"') + + @dataclass + class builtin_var: + enabled : bool = False + _type : Types = AnyT + all = ('_builtin_len', '_builtin_ret') + + def decltypecall(self, c_code = False, *args): + from engine.types import fn_behavior + class dummy: + def __init__(self, name): + self.cname = name + '_gettype' + self.sqlname = self.cname + return fn_behavior(dummy(self.cname), c_code, *args) + + def __call__(self, c_code = False, *args): + from engine.types import fn_behavior + builtin_args = [f'{{{n}()}}' for n, v in self.builtin.items() if v.enabled] + return fn_behavior(self, c_code, *args, *builtin_args) + + def return_type(self, *_ : Types): + return LazyT + + def init(self, _): + self.builtin : Dict[str, udf.builtin_var] = { + '_builtin_len' : udf.builtin_var(False, UIntT), + '_builtin_ret' : udf.builtin_var(False, Types( + 255, name = 'generic_ref', cname = 'auto&' + )) + } + self.var_table = {} + self.args = [] + udf.try_init_udf(self.context) + self.vecs = set() + self.code_list = [] + self.builtin_used = None + + def add(self, *code): + ccode = '' + for c in code: + if type(c) is str: + ccode += c + else: + self.code_list.append(ccode) + self.code_list.append(c) + ccode = '' + if ccode: + self.code_list.append(ccode) + + + def produce(self, node): + from engine.utils import get_legal_name, check_legal_name + node = node[self.name] + # register 
udf + self.agg = 'Agg' in node + self.cname = get_legal_name(node['fname']) + self.sqlname = self.cname + self.context.udf_map[self.cname] = self + if self.agg: + self.context.udf_agg_map[self.cname] = self + self.add(f'auto {self.cname} = [](') + + def get_block(self, ind, node): + if 'stmt' in node: + old_ind = ind + ind += '\t' + next_stmt = enlist(node['stmt']) + if len(next_stmt) > 1: + self.add(f' {{\n') + self.get_stmt(ind ,next_stmt) + self.add(f'{old_ind}}}\n') + else: + self.get_stmt(ind, next_stmt) + + def get_cname(self, x:str): + return self.var_table[x] + + def get_assignment(self, ind, node, *, types = 'auto', sep = ';\n'): + var_ex = expr(self, node['var'], c_code=True, supress_undefined = True) + ex = expr(self, node['expr'], c_code=True) + var = var_ex.eval(y=self.get_cname) + if var in self.var_table or hasattr(var_ex, 'builtin_var'): + op = '=' + if 'op' in node and node['op'] != ':=': + op = node['op'] + e = ex.eval(y=self.get_cname) + def assign_behavior(decltypestr = False): + nonlocal ind, var, op, e, sep + v = var(decltypestr) if callable(var) else var + _e = e(decltypestr) if callable(e) else e + if v == '_builtin_ret': + return f'{ind}return {_e}{sep}' + elif '_builtin_ret' not in _e: + return f'{ind}{v} {op} {_e}{sep}' + else: + return '' + self.add(assign_behavior) + else: + cvar = get_legal_name(var) + self.var_table[var] = cvar + self.add(f'{ind}{types} {cvar} = ', ex.eval(y=self.get_cname), sep) + + def get_stmt(self, ind, node): + node = enlist(node) + for n in node: + if 'if' in n: + _ifnode = n['if'] + self.add(f'{ind}if(', expr(self, _ifnode["cond"]).eval(y=self.get_cname), ')') + if 'stmt' in _ifnode: + self.get_block(ind, _ifnode) + else: + self.add('\n') + self.get_stmt(ind + '\t', _ifnode) + if 'elif' in _ifnode: + for e in n['elif']: + self.add(f'{ind}else if(', expr(self, e["cond"]).eval(y=self.get_cname), ')') + self.get_block(ind, e) + if 'else' in _ifnode: + self.add(f'{ind}else ') + self.get_block(ind, _ifnode['else']) + + elif 'for' in n: + _fornode = n['for'] + defs = _fornode['defs'] + self.add(f'{ind}for({"auto " if len(enlist(defs["op"])) != 0 else ";"}') + def get_inline_assignments(node, end = '; '): + var = enlist(node['var']) + op = enlist(node['op']) + expr = enlist(node['expr']) + len_node = len(enlist(op)) + for i, (v, o, e) in enumerate(zip(var, op, expr)): + self.get_assignment('', {'var' : v, 'op' : o, 'expr' : e}, types = '', sep = ', ' if i != len_node - 1 else end) + get_inline_assignments(defs) + self.add(expr(self, _fornode["cond"]).eval(y=self.get_cname), '; ') + get_inline_assignments(_fornode['tail'], ') ') + if 'stmt' in _fornode: + self.get_block(ind, _fornode) + else: + self.add('\n') + self.get_stmt(ind + '\t', _fornode) + elif 'assignment' in n: + assign = n['assignment'] + self.get_assignment(ind, assign) + + + def consume(self, node): + from engine.utils import get_legal_name, check_legal_name + node = node[self.name] + + if 'params' in node: + for args in node['params']: + cname = get_legal_name(args) + self.var_table[args] = cname + self.args.append(cname) + front = [*self.code_list, ', '.join([f'const auto& {a}' for a in self.args])] + self.code_list = [] + + self.with_storage = False + self.with_statics = False + self.static_decl : Optional[List[str]] = None + ind = '\t' + if 'static_decl' in node: + self.add(') {\n') + curr = node['static_decl'] + self.with_statics = True + if 'var' in curr and 'expr' in curr: + if len(curr['var']) != len(curr['expr']): + print("Error: every static variable must be 
initialized.") + self.static_decl = [] + for v, e in zip(curr['var'], curr['expr']): + cname = get_legal_name(v) + self.var_table[v] = cname + self.static_decl.append(f'{cname} = ', expr(self, e, c_code=True).eval(self.get_cname)) + self.add(f'{ind}static auto {"; static auto ".join(self.static_decl)};\n') + self.add(f'{ind}auto reset = [=]() {{ {"; ".join(self.static_decl)}; }};\n') + self.add(f'{ind}auto call = []({", ".join([f"decltype({a}) {a}" for a in self.args])}') + ind = '\t\t' + front = [*front, *self.code_list] + self.code_list = [] + if 'stmt' in node: + self.get_stmt(ind, node['stmt']) + # first scan to determine vec types + # if self.agg: + # for assign in node['assignment']: + # var = fastscan(assign['var']) + # ex = fastscan(assign['expr']) + # self.vecs.union(var.vec_vars) + # self.vecs.union(var.requested_lens) + # self.vecs.union(ex.vec_vars) + # self.vecs.union(ex.requested_lens) + # if len(self.vecs) != 0: + # self.idx_var = 'idx_' + base62uuid(5) + # self.ccode += f'{ind}auto {self.idx_var} = 0;\n' + + ret = node['ret'] + def return_call(decltypestr = False): + if (decltypestr): + return '' + ret = '' + for r in self.return_call: + if callable(r): + ret += r(False) + else: + ret += r + return ret + self.return_call = (f'{ind}return ', expr(self, ret, c_code=True).eval(self.get_cname), ';\n') + self.add(return_call) + if self.with_statics: + self.add('\t};\n') + self.add('\treturn std::make_pair(reset, call);\n') + self.add('};\n') + + #print(self.ccode) + self.builtin_args = [(name, var._type.cname) for name, var in self.builtin.items() if var.enabled] + # self.context.udf += front + builtin_argstr + self.ccode + '\n' + self.finalize(front) + + def finalize(self, front): + builtin_argstr = ', ' if len(self.builtin_args) and len(self.args) else '' + builtin_argstr += ', '.join([f'{t} {n}' for (n, t) in self.builtin_args]) + self.builtin_used = [b for b, v in self.builtin.items() if v.enabled] + ccode = '' + def process_recursion(l, decltypestr = False): + nonlocal ccode + for c in l: + if type(c) is str: + ccode += c + elif callable(c): + ccode += c(decltypestr) # a callback function + else: + raise ValueError(f'Illegal operation in udf code generation: {c}') + process_recursion(front) + ccode += builtin_argstr + ') {\n' + process_recursion(self.code_list) + self.context.udf += ccode + '\n' + ccode = '' + if self.return_pattern == udf.ReturnPattern.elemental_return: + ccode += f'auto {self.cname}_gettype = [](' + process_recursion(front[1:], True) + ccode += ') {\n\tuint32_t _builtin_len = 0;\n' + process_recursion(self.code_list, True) + self.context.udf += ccode + '\n' + + class ReturnPattern(Enum): + bulk_return = auto() + elemental_return = auto() + + @property + def return_pattern(self): + if '_builtin_ret' in self.builtin_used: + return udf.ReturnPattern.elemental_return + else: + return udf.ReturnPattern.bulk_return + +class user_module_function(OperatorBase): + def __init__(self, name, nargs, ret_type, context : Context): + super().__init__(name, nargs, lambda *_: ret_type, call=fn_behavior) + user_module_func[name] = self + # builtin_operators[name] = self + udf.try_init_udf(context) + +def include(objs): + import inspect + for _, cls in inspect.getmembers(objs): + if inspect.isclass(cls) and issubclass(cls, ast_node) and type(cls.first_order) is str: + ast_node.types[cls.first_order] = cls + + +import sys +include(sys.modules[__name__]) diff --git a/reconstruct/expr.py b/reconstruct/expr.py index 65513ac..4f6085f 100644 --- a/reconstruct/expr.py +++ 
b/reconstruct/expr.py @@ -1,338 +1,338 @@ -from typing import Optional -from reconstruct.ast import ast_node -from reconstruct.storage import ColRef, Context -from engine.types import * - -# TODO: Decouple expr and upgrade architecture -# C_CODE : get ccode/sql code? -# projections : C/SQL/decltype string -# orderby/joins/where : SQL only -# assumption/groupby : C/sql -# is_udfexpr: C only - -class expr(ast_node): - name='expr' - @property - def udf_decltypecall(self): - return self._udf_decltypecall if self._udf_decltypecall else self.sql - - @udf_decltypecall.setter - def udf_decltypecall(self, val): - self._udf_decltypecall = val - - @property - def need_decltypestr(self): - return self._udf_decltypecall is not None - - def __init__(self, parent, node, *, c_code = None, supress_undefined = False): - from reconstruct.ast import projection, udf - - self.type = None - self.raw_col = None - self.udf : Optional[udf] = None - self.inside_agg = False - self.is_special = False - self.is_ColExpr = False - self.is_recursive_call_inudf = False - self.codlets : list = [] - self.codebuf : Optional[str] = None - self._udf_decltypecall = None - self.node = node - self.supress_undefined = supress_undefined - if(type(parent) is expr): - self.inside_agg = parent.inside_agg - self.is_udfexpr = parent.is_udfexpr - self.is_agg_func = parent.is_agg_func - self.root : expr = parent.root - self.c_code = parent.c_code - self.builtin_vars = parent.builtin_vars - else: - self.is_agg_func = False - self.is_udfexpr = type(parent) is udf - self.root : expr = self - self.c_code = self.is_udfexpr or type(parent) is projection - if self.is_udfexpr: - self.udf : udf = parent - self.builtin_vars = self.udf.builtin.keys() - else: - self.builtin_vars = [] - if type(c_code) is bool: - self.c_code = c_code - - self.udf_called = None - self.cols_mentioned : Optional[set[ColRef]] = None - ast_node.__init__(self, parent, node, None) - - def init(self, _): - from reconstruct.ast import projection - parent = self.parent - self.isvector = parent.isvector if type(parent) is expr else False - self.is_compound = parent.is_compound if type(parent) is expr else False - if type(parent) in [projection, expr]: - self.datasource = parent.datasource - else: - self.datasource = self.context.datasource - self.udf_map = parent.context.udf_map - self.func_maps = {**builtin_func, **self.udf_map, **user_module_func} - self.operators = {**builtin_operators, **self.udf_map, **user_module_func} - - def produce(self, node): - from engine.utils import enlist - from reconstruct.ast import udf - - if type(node) is dict: - for key, val in node.items(): - if key in self.operators: - if key in builtin_func: - if self.is_agg_func: - self.root.is_special = True # Nested Aggregation - else: - self.is_agg_func = True - - op = self.operators[key] - - val = enlist(val) - exp_vals = [expr(self, v, c_code = self.c_code) for v in val] - str_vals = [e.sql for e in exp_vals] - type_vals = [e.type for e in exp_vals] - try: - self.type = op.return_type(*type_vals) - except AttributeError as e: - if type(self.root) is not udf: - # TODO: do something when this is not an error - # print(f'alert: {e}') - pass - self.type = AnyT - - self.sql = op(self.c_code, *str_vals) - special_func = [*self.context.udf_map.keys(), *self.context.module_map.keys(), "maxs", "mins", "avgs", "sums"] - if key in special_func and not self.is_special: - self.is_special = True - if key in self.context.udf_map: - self.root.udf_called = self.context.udf_map[key] - if self.is_udfexpr and key == 
self.root.udf.name: - self.root.is_recursive_call_inudf = True - elif key in user_module_func.keys(): - udf.try_init_udf(self.context) - # TODO: make udf_called a set! - p = self.parent - while type(p) is expr and not p.udf_called: - p.udf_called = self.udf_called - p = p.parent - p = self.parent - while type(p) is expr and not p.is_special: - p.is_special = True - p = p.parent - - need_decltypestr = any([e.need_decltypestr for e in exp_vals]) - if need_decltypestr or (self.udf_called and type(op) is udf): - decltypestr_vals = [e.udf_decltypecall for e in exp_vals] - self.udf_decltypecall = op(self.c_code, *decltypestr_vals) - - if self.udf_called and type(op) is udf: - self.udf_decltypecall = op.decltypecall(self.c_code, *decltypestr_vals) - - elif self.is_udfexpr: - var_table = self.root.udf.var_table - vec = key.split('.') - _vars = [*var_table, *self.builtin_vars] - def get_vname (node): - if node in self.builtin_vars: - self.root.udf.builtin[node].enabled = True - self.builtin_var = node - return node - else: - return var_table[node] - if vec[0] not in _vars: - # print(f'Use of undefined variable {vec[0]}') - # TODO: do something when this is not an error - pass - else: - vname = get_vname(vec[0]) - val = enlist(val) - if(len(val) > 2): - print('Warning: more than 2 indexes found for subvec operator.') - ex = [expr(self, v, c_code = self.c_code) for v in val] - idxs = ', '.join([e.sql for e in ex]) - self.sql = f'{vname}.subvec({idxs})' - if any([e.need_decltypestr for e in ex]): - self.udf_decltypecall = f'{vname}.subvec({[", ".join([e.udf_decltypecall for e in ex])]})' - if key == 'get' and len(val) > 1: - ex_vname = expr(self, val[0], c_code=self.c_code) - self.sql = f'{ex_vname.sql}[{expr(self, val[1], c_code=self.c_code).sql}]' - if hasattr(ex_vname, 'builtin_var'): - if not hasattr(self, 'builtin_var'): - self.builtin_var = [] - self.builtin_var = [*self.builtin_var, *ex_vname.builtin_var] - self.udf_decltypecall = ex_vname.sql - else: - print(f'Undefined expr: {key}{val}') - - elif type(node) is str: - if self.is_udfexpr: - curr_udf : udf = self.root.udf - var_table = curr_udf.var_table - split = node.split('.') - if split[0] in var_table: - varname = var_table[split[0]] - if curr_udf.agg and varname in curr_udf.vecs: - if len(split) > 1: - if split[1] == 'vec': - self.sql += varname - elif split[1] == 'len': - self.sql += f'{varname}.size' - else: - print(f'no member {split[1]} in object {varname}') - else: - self.sql += f'{varname}[{curr_udf.idx_var}]' - else: - self.sql += varname - elif self.supress_undefined or split[0] in self.builtin_vars: - self.sql += node - if split[0] in self.builtin_vars: - curr_udf.builtin[split[0]].enabled = True - self.builtin_var = split[0] - else: - print(f'Undefined varname: {split[0]}') - - - # get the column from the datasource in SQL context - else: - p = self.parent - while type(p) is expr and not p.isvector: - p.isvector = True - p = p.parent - if self.datasource is not None: - self.raw_col = self.datasource.parse_col_names(node) - self.raw_col = self.raw_col if type(self.raw_col) is ColRef else None - if self.raw_col is not None: - self.is_ColExpr = True - self.sql = self.raw_col.name - self.type = self.raw_col.type - else: - self.sql = node - self.type = StrT - if self.c_code and self.datasource is not None: - self.sql = f'{{y(\"{self.sql}\")}}' - elif type(node) is bool: - self.type = BoolT - if self.c_code: - self.sql = '1' if node else '0' - else: - self.sql = 'TRUE' if node else 'FALSE' - else: - self.sql = f'{node}' - if type(node) 
is int: - if (node >= 2**63 - 1 or node <= -2**63): - self.type = LongT - else: - self.type = IntT - elif type(node) is float: - self.type = DoubleT - - def finalize(self, override = False): - from reconstruct.ast import udf - if self.codebuf is None or override: - self.codebuf = '' - for c in self.codlets: - if type(c) is str: - self.codebuf += c - elif type(c) is udf: - self.codebuf += c() - elif type(c) is expr: - self.codebuf += c.finalize(override=override) - return self.codebuf - - def __str__(self): - return self.sql - def __repr__(self): - return self.__str__() - - # builtins is readonly, so it's okay to set default value as an object - # eval is only called at root expr. - def eval(self, c_code = None, y = lambda t: t, materialize_builtin = False, _decltypestr = False, *, gettype = False): - assert(self.is_root) - def call(decltypestr = False) -> str: - nonlocal c_code, y, materialize_builtin - if self.udf_called is not None: - loc = locals() - builtin_vars = self.udf_called.builtin_used - for b in self.udf_called.builtin_var.all: - exec(f'loc["{b}"] = lambda: "{{{b}()}}"') - if builtin_vars: - if type(materialize_builtin) is dict: - for b in builtin_vars: - exec(f'loc["{b}"] = lambda: "{materialize_builtin[b]}"') - elif self.is_recursive_call_inudf: - for b in builtin_vars: - exec(f'loc["{b}"] = lambda : "{b}"') - - x = self.c_code if c_code is None else c_code - if decltypestr: - return eval('f\'' + self.udf_decltypecall + '\'') - return eval('f\'' + self.sql + '\'') - if self.is_recursive_call_inudf or (self.need_decltypestr and self.is_udfexpr) or gettype: - return call - else: - return call(_decltypestr) - - @property - def is_root(self): - return self.root == self - - -# For UDFs: first check if agg variable is used as vector -# if not, then check if its length is used -class fastscan(expr): - name = 'fastscan' - - def init(self, _): - self.vec_vars = set() - self.requested_lens = set() - super().init(self, _) - - def process(self, key : str): - segs = key.split('.') - var_table = self.root.udf.var_table - if segs[0] in var_table and len(segs) > 1: - if segs[1] == 'vec': - self.vec_vars.add(segs[0]) - elif segs[1] == 'len': - self.requested_lens.add(segs[0]) - - def produce(self, node): - from engine.utils import enlist - if type(node) is dict: - for key, val in node.items(): - if key in self.operators: - val = enlist(val) - elif self.is_udfexpr: - self.process(key) - [fastscan(self, v, c_code = self.c_code) for v in val] - - elif type(node) is str: - self.process(node) - - -class getrefs(expr): - name = 'getrefs' - - def init(self, _): - self.datasource.rec = set() - self.rec = None - - def produce(self, node): - from engine.utils import enlist - if type(node) is dict: - for key, val in node.items(): - if key in self.operators: - val = enlist(val) - [getrefs(self, v, c_code = self.c_code) for v in val] - - elif type(node) is str: - self.datasource.parse_col_names(node) - - def consume(self, _): - if self.root == self: - self.rec = self.datasource.rec +from typing import Optional +from reconstruct.ast import ast_node +from reconstruct.storage import ColRef, Context +from engine.types import * + +# TODO: Decouple expr and upgrade architecture +# C_CODE : get ccode/sql code? 
+# projections : C/SQL/decltype string +# orderby/joins/where : SQL only +# assumption/groupby : C/sql +# is_udfexpr: C only + +class expr(ast_node): + name='expr' + @property + def udf_decltypecall(self): + return self._udf_decltypecall if self._udf_decltypecall else self.sql + + @udf_decltypecall.setter + def udf_decltypecall(self, val): + self._udf_decltypecall = val + + @property + def need_decltypestr(self): + return self._udf_decltypecall is not None + + def __init__(self, parent, node, *, c_code = None, supress_undefined = False): + from reconstruct.ast import projection, udf + + self.type = None + self.raw_col = None + self.udf : Optional[udf] = None + self.inside_agg = False + self.is_special = False + self.is_ColExpr = False + self.is_recursive_call_inudf = False + self.codlets : list = [] + self.codebuf : Optional[str] = None + self._udf_decltypecall = None + self.node = node + self.supress_undefined = supress_undefined + if(type(parent) is expr): + self.inside_agg = parent.inside_agg + self.is_udfexpr = parent.is_udfexpr + self.is_agg_func = parent.is_agg_func + self.root : expr = parent.root + self.c_code = parent.c_code + self.builtin_vars = parent.builtin_vars + else: + self.is_agg_func = False + self.is_udfexpr = type(parent) is udf + self.root : expr = self + self.c_code = self.is_udfexpr or type(parent) is projection + if self.is_udfexpr: + self.udf : udf = parent + self.builtin_vars = self.udf.builtin.keys() + else: + self.builtin_vars = [] + if type(c_code) is bool: + self.c_code = c_code + + self.udf_called = None + self.cols_mentioned : Optional[set[ColRef]] = None + ast_node.__init__(self, parent, node, None) + + def init(self, _): + from reconstruct.ast import projection + parent = self.parent + self.isvector = parent.isvector if type(parent) is expr else False + self.is_compound = parent.is_compound if type(parent) is expr else False + if type(parent) in [projection, expr]: + self.datasource = parent.datasource + else: + self.datasource = self.context.datasource + self.udf_map = parent.context.udf_map + self.func_maps = {**builtin_func, **self.udf_map, **user_module_func} + self.operators = {**builtin_operators, **self.udf_map, **user_module_func} + + def produce(self, node): + from engine.utils import enlist + from reconstruct.ast import udf + + if type(node) is dict: + for key, val in node.items(): + if key in self.operators: + if key in builtin_func: + if self.is_agg_func: + self.root.is_special = True # Nested Aggregation + else: + self.is_agg_func = True + + op = self.operators[key] + + val = enlist(val) + exp_vals = [expr(self, v, c_code = self.c_code) for v in val] + str_vals = [e.sql for e in exp_vals] + type_vals = [e.type for e in exp_vals] + try: + self.type = op.return_type(*type_vals) + except AttributeError as e: + if type(self.root) is not udf: + # TODO: do something when this is not an error + # print(f'alert: {e}') + pass + self.type = AnyT + + self.sql = op(self.c_code, *str_vals) + special_func = [*self.context.udf_map.keys(), *self.context.module_map.keys(), "maxs", "mins", "avgs", "sums"] + if key in special_func and not self.is_special: + self.is_special = True + if key in self.context.udf_map: + self.root.udf_called = self.context.udf_map[key] + if self.is_udfexpr and key == self.root.udf.name: + self.root.is_recursive_call_inudf = True + elif key in user_module_func.keys(): + udf.try_init_udf(self.context) + # TODO: make udf_called a set! 
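+                    # udf_called and is_special are bubbled up through the chain of
+                    # enclosing expr nodes so the root expression records that a UDF or
+                    # special function appears somewhere in its subtree;
+                    # projection.consume later branches on these root-level flags.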
+ p = self.parent + while type(p) is expr and not p.udf_called: + p.udf_called = self.udf_called + p = p.parent + p = self.parent + while type(p) is expr and not p.is_special: + p.is_special = True + p = p.parent + + need_decltypestr = any([e.need_decltypestr for e in exp_vals]) + if need_decltypestr or (self.udf_called and type(op) is udf): + decltypestr_vals = [e.udf_decltypecall for e in exp_vals] + self.udf_decltypecall = op(self.c_code, *decltypestr_vals) + + if self.udf_called and type(op) is udf: + self.udf_decltypecall = op.decltypecall(self.c_code, *decltypestr_vals) + + elif self.is_udfexpr: + var_table = self.root.udf.var_table + vec = key.split('.') + _vars = [*var_table, *self.builtin_vars] + def get_vname (node): + if node in self.builtin_vars: + self.root.udf.builtin[node].enabled = True + self.builtin_var = node + return node + else: + return var_table[node] + if vec[0] not in _vars: + # print(f'Use of undefined variable {vec[0]}') + # TODO: do something when this is not an error + pass + else: + vname = get_vname(vec[0]) + val = enlist(val) + if(len(val) > 2): + print('Warning: more than 2 indexes found for subvec operator.') + ex = [expr(self, v, c_code = self.c_code) for v in val] + idxs = ', '.join([e.sql for e in ex]) + self.sql = f'{vname}.subvec({idxs})' + if any([e.need_decltypestr for e in ex]): + self.udf_decltypecall = f'{vname}.subvec({[", ".join([e.udf_decltypecall for e in ex])]})' + if key == 'get' and len(val) > 1: + ex_vname = expr(self, val[0], c_code=self.c_code) + self.sql = f'{ex_vname.sql}[{expr(self, val[1], c_code=self.c_code).sql}]' + if hasattr(ex_vname, 'builtin_var'): + if not hasattr(self, 'builtin_var'): + self.builtin_var = [] + self.builtin_var = [*self.builtin_var, *ex_vname.builtin_var] + self.udf_decltypecall = ex_vname.sql + else: + print(f'Undefined expr: {key}{val}') + + elif type(node) is str: + if self.is_udfexpr: + curr_udf : udf = self.root.udf + var_table = curr_udf.var_table + split = node.split('.') + if split[0] in var_table: + varname = var_table[split[0]] + if curr_udf.agg and varname in curr_udf.vecs: + if len(split) > 1: + if split[1] == 'vec': + self.sql += varname + elif split[1] == 'len': + self.sql += f'{varname}.size' + else: + print(f'no member {split[1]} in object {varname}') + else: + self.sql += f'{varname}[{curr_udf.idx_var}]' + else: + self.sql += varname + elif self.supress_undefined or split[0] in self.builtin_vars: + self.sql += node + if split[0] in self.builtin_vars: + curr_udf.builtin[split[0]].enabled = True + self.builtin_var = split[0] + else: + print(f'Undefined varname: {split[0]}') + + + # get the column from the datasource in SQL context + else: + p = self.parent + while type(p) is expr and not p.isvector: + p.isvector = True + p = p.parent + if self.datasource is not None: + self.raw_col = self.datasource.parse_col_names(node) + self.raw_col = self.raw_col if type(self.raw_col) is ColRef else None + if self.raw_col is not None: + self.is_ColExpr = True + self.sql = self.raw_col.name + self.type = self.raw_col.type + else: + self.sql = node + self.type = StrT + if self.c_code and self.datasource is not None: + self.sql = f'{{y(\"{self.sql}\")}}' + elif type(node) is bool: + self.type = BoolT + if self.c_code: + self.sql = '1' if node else '0' + else: + self.sql = 'TRUE' if node else 'FALSE' + else: + self.sql = f'{node}' + if type(node) is int: + if (node >= 2**63 - 1 or node <= -2**63): + self.type = LongT + else: + self.type = IntT + elif type(node) is float: + self.type = DoubleT + + def 
finalize(self, override = False): + from reconstruct.ast import udf + if self.codebuf is None or override: + self.codebuf = '' + for c in self.codlets: + if type(c) is str: + self.codebuf += c + elif type(c) is udf: + self.codebuf += c() + elif type(c) is expr: + self.codebuf += c.finalize(override=override) + return self.codebuf + + def __str__(self): + return self.sql + def __repr__(self): + return self.__str__() + + # builtins is readonly, so it's okay to set default value as an object + # eval is only called at root expr. + def eval(self, c_code = None, y = lambda t: t, materialize_builtin = False, _decltypestr = False, *, gettype = False): + assert(self.is_root) + def call(decltypestr = False) -> str: + nonlocal c_code, y, materialize_builtin + if self.udf_called is not None: + loc = locals() + builtin_vars = self.udf_called.builtin_used + for b in self.udf_called.builtin_var.all: + exec(f'loc["{b}"] = lambda: "{{{b}()}}"') + if builtin_vars: + if type(materialize_builtin) is dict: + for b in builtin_vars: + exec(f'loc["{b}"] = lambda: "{materialize_builtin[b]}"') + elif self.is_recursive_call_inudf: + for b in builtin_vars: + exec(f'loc["{b}"] = lambda : "{b}"') + + x = self.c_code if c_code is None else c_code + if decltypestr: + return eval('f\'' + self.udf_decltypecall + '\'') + return eval('f\'' + self.sql + '\'') + if self.is_recursive_call_inudf or (self.need_decltypestr and self.is_udfexpr) or gettype: + return call + else: + return call(_decltypestr) + + @property + def is_root(self): + return self.root == self + + +# For UDFs: first check if agg variable is used as vector +# if not, then check if its length is used +class fastscan(expr): + name = 'fastscan' + + def init(self, _): + self.vec_vars = set() + self.requested_lens = set() + super().init(self, _) + + def process(self, key : str): + segs = key.split('.') + var_table = self.root.udf.var_table + if segs[0] in var_table and len(segs) > 1: + if segs[1] == 'vec': + self.vec_vars.add(segs[0]) + elif segs[1] == 'len': + self.requested_lens.add(segs[0]) + + def produce(self, node): + from engine.utils import enlist + if type(node) is dict: + for key, val in node.items(): + if key in self.operators: + val = enlist(val) + elif self.is_udfexpr: + self.process(key) + [fastscan(self, v, c_code = self.c_code) for v in val] + + elif type(node) is str: + self.process(node) + + +class getrefs(expr): + name = 'getrefs' + + def init(self, _): + self.datasource.rec = set() + self.rec = None + + def produce(self, node): + from engine.utils import enlist + if type(node) is dict: + for key, val in node.items(): + if key in self.operators: + val = enlist(val) + [getrefs(self, v, c_code = self.c_code) for v in val] + + elif type(node) is str: + self.datasource.parse_col_names(node) + + def consume(self, _): + if self.root == self: + self.rec = self.datasource.rec self.datasource.rec = None \ No newline at end of file diff --git a/reconstruct/storage.py b/reconstruct/storage.py index 6c02db0..6ca95be 100644 --- a/reconstruct/storage.py +++ b/reconstruct/storage.py @@ -1,203 +1,203 @@ -from engine.types import * -from engine.utils import enlist -from typing import List, Dict, Set - -class ColRef: - def __init__(self, _ty, cobj, table:'TableInfo', name, id, compound = False, _ty_args = None): - self.type : Types = AnyT - if type(_ty) is str: - self.type = builtin_types[_ty.lower()] - if _ty_args: - self.type = self.type(enlist(_ty_args)) - elif type(_ty) is Types: - self.type = _ty - self.cobj = cobj - self.table = table - self.name = name - 
self.alias = set() - self.id = id # position in table - self.compound = compound # compound field (list as a field) - # e.g. order by, group by, filter by expressions - - self.__arr__ = (_ty, cobj, table, name, id) - def __getitem__(self, key): - if type(key) is str: - return getattr(self, key) - else: - return self.__arr__[key] - - def __setitem__(self, key, value): - self.__arr__[key] = value - -class TableInfo: - def __init__(self, table_name, cols, cxt:'Context'): - # statics - self.table_name : str = table_name - self.contextname_cpp : str = '' - self.alias : Set[str] = set([table_name]) - self.columns_byname : Dict[str, ColRef] = dict() # column_name, type - self.columns : List[ColRef] = [] - self.cxt = cxt - # keep track of temp vars - self.rec = None - self.add_cols(cols) - # runtime - self.order = [] # assumptions - - cxt.tables_byname[self.table_name] = self # construct reverse map - - def add_cols(self, cols, new = True): - for i, c in enumerate(cols): - self.add_col(c, new, i) - - def add_col(self, c, new = True, i = 0): - _ty = c['type'] - _ty_args = None - if type(_ty) is dict: - _ty_val = list(_ty.keys())[0] - _ty_args = _ty[_ty_val] - _ty = _ty_val - if new: - col_object = ColRef(_ty, c, self, c['name'], len(self.columns), _ty_args = _ty_args) - else: - col_object = c - c.table = self - self.columns_byname[c['name']] = col_object - self.columns.append(col_object) - - def add_alias(self, alias): - if alias in self.cxt.tables_byname.keys(): - print("Error: table alias already exists") - return - self.cxt.tables_byname[alias] = self - self.alias.add(alias) - - def parse_col_names(self, colExpr) -> ColRef: - parsedColExpr = colExpr.split('.') - if len(parsedColExpr) <= 1: - col = self.columns_byname[colExpr] - if type(self.rec) is set: - self.rec.add(col) - return col - else: - datasource = self.cxt.tables_byname[parsedColExpr[0]] - if datasource is None: - raise ValueError(f'Table name/alias not defined{parsedColExpr[0]}') - else: - return datasource.parse_col_names(parsedColExpr[1]) - - -class Context: - def new(self): - self.headers = set(['\"./server/libaquery.h\"', - '\"./server/monetdb_conn.h\"']) - - self.ccode = '' - self.sql = '' - self.finalized = False - self.udf = None - self.scans = [] - self.procs = [] - self.queries = [] - self.module_init_loc = 0 - - def __init__(self): - self.tables_byname = dict() - self.col_byname = dict() - self.tables = [] - self.cols = [] - self.datasource = None - self.module_stubs = '' - self.module_map = {} - self.udf_map = dict() - self.udf_agg_map = dict() - self.use_columnstore = False - self.print = print - self.has_dll = False - self.dialect = 'MonetDB' - self.is_msvc = False - self.have_hge = False - self.Error = lambda *args: print(*args) - self.Info = lambda *_: None - - def emit(self, sql:str): - self.sql += sql + ' ' - def emitc(self, c:str): - self.ccode += c + '\n' - def add_table(self, table_name, cols): - tbl = TableInfo(table_name, cols, self) - self.tables.append(tbl) - return tbl - def remove_scan(self, scan, str_scan): - self.emitc(str_scan) - self.scans.remove(scan) - - function_deco = '__AQEXPORT__(int) ' - function_head = ('(Context* cxt) {\n' + - '\tusing namespace std;\n' + - '\tusing namespace types;\n' + - '\tauto server = static_cast(cxt->alt_server);\n') - - udf_head = ('#pragma once\n' - '#include \"./server/libaquery.h\"\n' - '#include \"./server/aggregations.h\"\n\n' - ) - - def get_init_func(self): - if not self.module_map: - return '' - ret = '__AQEXPORT__(void) __builtin_init_user_module(Context* 
cxt){\n' - for fname in self.module_map.keys(): - ret += f'{fname} = (decltype({fname}))(cxt->get_module_function("{fname}"));\n' - self.queries.insert(self.module_init_loc, 'P__builtin_init_user_module') - return ret + '}\n' - - def sql_begin(self): - self.sql = '' - - def sql_end(self): - # eliminate empty queries - s = self.sql.strip() - while(s and s[-1] == ';'): - s = s[:-1].strip() - if s and s.lower() != 'select': - self.queries.append('Q' + self.sql) - self.sql = '' - - def postproc_begin(self, proc_name: str): - self.ccode = self.function_deco + proc_name + self.function_head - - def postproc_end(self, proc_name: str): - self.procs.append(self.ccode + 'return 0;\n}') - self.ccode = '' - self.queries.append('P' + proc_name) - - def finalize_udf(self): - if self.udf is not None: - return (Context.udf_head - + self.module_stubs - + self.get_init_func() - + self.udf - ) - else: - return None - - def finalize(self): - from aquery_config import build_driver, os_platform - if not self.finalized: - headers = '' - if build_driver == 'MSBuild': - headers ='#include \"./server/pch.hpp\"\n' - - for h in self.headers: - if h[0] != '"': - headers += '#include <' + h + '>\n' - else: - headers += '#include ' + h + '\n' - if os_platform == 'win': - headers += '#undef max\n' - headers += '#undef min\n' - - self.ccode = headers + '\n'.join(self.procs) - self.headers = set() - return self.ccode +from engine.types import * +from engine.utils import enlist +from typing import List, Dict, Set + +class ColRef: + def __init__(self, _ty, cobj, table:'TableInfo', name, id, compound = False, _ty_args = None): + self.type : Types = AnyT + if type(_ty) is str: + self.type = builtin_types[_ty.lower()] + if _ty_args: + self.type = self.type(enlist(_ty_args)) + elif type(_ty) is Types: + self.type = _ty + self.cobj = cobj + self.table = table + self.name = name + self.alias = set() + self.id = id # position in table + self.compound = compound # compound field (list as a field) + # e.g. 
order by, group by, filter by expressions + + self.__arr__ = (_ty, cobj, table, name, id) + def __getitem__(self, key): + if type(key) is str: + return getattr(self, key) + else: + return self.__arr__[key] + + def __setitem__(self, key, value): + self.__arr__[key] = value + +class TableInfo: + def __init__(self, table_name, cols, cxt:'Context'): + # statics + self.table_name : str = table_name + self.contextname_cpp : str = '' + self.alias : Set[str] = set([table_name]) + self.columns_byname : Dict[str, ColRef] = dict() # column_name, type + self.columns : List[ColRef] = [] + self.cxt = cxt + # keep track of temp vars + self.rec = None + self.add_cols(cols) + # runtime + self.order = [] # assumptions + + cxt.tables_byname[self.table_name] = self # construct reverse map + + def add_cols(self, cols, new = True): + for i, c in enumerate(cols): + self.add_col(c, new, i) + + def add_col(self, c, new = True, i = 0): + _ty = c['type'] + _ty_args = None + if type(_ty) is dict: + _ty_val = list(_ty.keys())[0] + _ty_args = _ty[_ty_val] + _ty = _ty_val + if new: + col_object = ColRef(_ty, c, self, c['name'], len(self.columns), _ty_args = _ty_args) + else: + col_object = c + c.table = self + self.columns_byname[c['name']] = col_object + self.columns.append(col_object) + + def add_alias(self, alias): + if alias in self.cxt.tables_byname.keys(): + print("Error: table alias already exists") + return + self.cxt.tables_byname[alias] = self + self.alias.add(alias) + + def parse_col_names(self, colExpr) -> ColRef: + parsedColExpr = colExpr.split('.') + if len(parsedColExpr) <= 1: + col = self.columns_byname[colExpr] + if type(self.rec) is set: + self.rec.add(col) + return col + else: + datasource = self.cxt.tables_byname[parsedColExpr[0]] + if datasource is None: + raise ValueError(f'Table name/alias not defined{parsedColExpr[0]}') + else: + return datasource.parse_col_names(parsedColExpr[1]) + + +class Context: + def new(self): + self.headers = set(['\"./server/libaquery.h\"', + '\"./server/monetdb_conn.h\"']) + + self.ccode = '' + self.sql = '' + self.finalized = False + self.udf = None + self.scans = [] + self.procs = [] + self.queries = [] + self.module_init_loc = 0 + + def __init__(self): + self.tables_byname = dict() + self.col_byname = dict() + self.tables = [] + self.cols = [] + self.datasource = None + self.module_stubs = '' + self.module_map = {} + self.udf_map = dict() + self.udf_agg_map = dict() + self.use_columnstore = False + self.print = print + self.has_dll = False + self.dialect = 'MonetDB' + self.is_msvc = False + self.have_hge = False + self.Error = lambda *args: print(*args) + self.Info = lambda *_: None + + def emit(self, sql:str): + self.sql += sql + ' ' + def emitc(self, c:str): + self.ccode += c + '\n' + def add_table(self, table_name, cols): + tbl = TableInfo(table_name, cols, self) + self.tables.append(tbl) + return tbl + def remove_scan(self, scan, str_scan): + self.emitc(str_scan) + self.scans.remove(scan) + + function_deco = '__AQEXPORT__(int) ' + function_head = ('(Context* cxt) {\n' + + '\tusing namespace std;\n' + + '\tusing namespace types;\n' + + '\tauto server = static_cast(cxt->alt_server);\n') + + udf_head = ('#pragma once\n' + '#include \"./server/libaquery.h\"\n' + '#include \"./server/aggregations.h\"\n\n' + ) + + def get_init_func(self): + if not self.module_map: + return '' + ret = '__AQEXPORT__(void) __builtin_init_user_module(Context* cxt){\n' + for fname in self.module_map.keys(): + ret += f'{fname} = (decltype({fname}))(cxt->get_module_function("{fname}"));\n' + 
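For orientation: the Context above assembles each postprocessing procedure out of function_deco + proc name + function_head, streams its body through emitc(), and closes it in postproc_end(). A rough sketch of what one emitted procedure plausibly looks like — the static_cast target is assumed to be Server* (the template argument list does not survive in this rendering of function_head), and dll_ZF5Shg is a generated name that appears in server.cpp's test_main later in this patch:

    // Sketch of a generated postprocessing proc, not verbatim compiler output.
    __AQEXPORT__(int) dll_ZF5Shg(Context* cxt) {
        using namespace std;
        using namespace types;
        auto server = static_cast<Server*>(cxt->alt_server);  // assumed cast target
        // ...statements emitted via Context::emitc() land here...
        return 0;  // appended by postproc_end()
    }

The matching entry pushed into self.queries is 'P' + proc_name ('Q' prefixes raw SQL), which dll_main in server.cpp later resolves with dlsym.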
self.queries.insert(self.module_init_loc, 'P__builtin_init_user_module') + return ret + '}\n' + + def sql_begin(self): + self.sql = '' + + def sql_end(self): + # eliminate empty queries + s = self.sql.strip() + while(s and s[-1] == ';'): + s = s[:-1].strip() + if s and s.lower() != 'select': + self.queries.append('Q' + self.sql) + self.sql = '' + + def postproc_begin(self, proc_name: str): + self.ccode = self.function_deco + proc_name + self.function_head + + def postproc_end(self, proc_name: str): + self.procs.append(self.ccode + 'return 0;\n}') + self.ccode = '' + self.queries.append('P' + proc_name) + + def finalize_udf(self): + if self.udf is not None: + return (Context.udf_head + + self.module_stubs + + self.get_init_func() + + self.udf + ) + else: + return None + + def finalize(self): + from aquery_config import build_driver, os_platform + if not self.finalized: + headers = '' + if build_driver == 'MSBuild': + headers ='#include \"./server/pch.hpp\"\n' + + for h in self.headers: + if h[0] != '"': + headers += '#include <' + h + '>\n' + else: + headers += '#include ' + h + '\n' + if os_platform == 'win': + headers += '#undef max\n' + headers += '#undef min\n' + + self.ccode = headers + '\n'.join(self.procs) + self.headers = set() + return self.ccode diff --git a/requirements.txt b/requirements.txt index 9218a72..a1c6694 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,9 @@ -mo-future==6.2.21303 -mo-dots==9.173.22126 -mo-parsing==8.183.22158 -mo-imports==7.169.22121 -dataclasses; python_version < '3.7' -readline; sys_platform == 'linux' -numpy - +mo-future==6.2.21303 +mo-dots==9.173.22126 +mo-parsing==8.183.22158 +mo-imports==7.169.22121 +dataclasses; python_version < '3.7' +readline; sys_platform == 'linux' +vswhere; sys_platform == 'win32' +numpy + diff --git a/sample_ast.json b/sample_ast.json index 28709fe..068c35d 100644 --- a/sample_ast.json +++ b/sample_ast.json @@ -1,222 +1,222 @@ -{ - "stmts": { - "udf": { - "fname": "covariances2", - "params": ["x", "y", "w"], - "stmt": [{ - "assignment": { - "var": "xmeans", - "op": ":=", - "expr": 0.0 - } - }, { - "assignment": { - "var": "ymeans", - "op": ":=", - "expr": 0.0 - } - }, { - "assignment": { - "var": "l", - "op": ":=", - "expr": "_builtin_len" - } - }, { - "if": { - "cond": { - "gt": ["w", "l"] - }, - "assignment": { - "var": "w", - "op": ":=", - "expr": "l" - }, - "elif": [{ - "cond": { - "gt": ["w", { - "add": ["l", 2] - }] - }, - "stmt": [{ - "assignment": { - "var": "l", - "op": ":=", - "expr": 3 - } - }, { - "assignment": { - "var": "w", - "op": ":=", - "expr": 4 - } - }] - }, { - "cond": { - "lt": ["w", 99] - }, - "stmt": { - "assignment": { - "var": "l", - "op": ":=", - "expr": 8 - } - } - }, { - "cond": { - "lt": ["w", 999] - }, - "assignment": { - "var": "w", - "op": ":=", - "expr": 6 - } - }], - "else": { - "assignment": { - "var": "l", - "op": ":=", - "expr": { - "div": ["l", 2] - } - } - } - } - }, { - "for": { - "defs": { - "var": ["i", "j"], - "op": [":=", ":="], - "expr": [0, 0] - }, - "cond": { - "lt": ["i", "w"] - }, - "tail": { - "var": "i", - "op": ":=", - "expr": { - "add": ["i", 1] - } - }, - "stmt": [{ - "assignment": { - "var": "xmeans", - "op": "+=", - "expr": { - "get": ["x", "i"] - } - } - }, { - "assignment": { - "var": "ymeans", - "op": "+=", - "expr": { - "get": ["y", "i"] - } - } - }, { - "assignment": { - "var": { - "get": ["_builtin_ret", "i"] - }, - "op": ":=", - "expr": { - "avg": { - "mul": [{ - "sub": [{ - "x": [{ - "sub": ["l", "w"] - }, "l"] - }, "xmeans"] - }, { - "sub": [{ 
- "y": [{ - "sub": ["l", "w"] - }, "l"] - }, "ymeans"] - }] - } - } - } - }] - } - }, { - "for": { - "defs": { - "var": "i", - "op": ":=", - "expr": 0 - }, - "cond": { - "lt": ["i", "l"] - }, - "tail": { - "var": "i", - "op": "+=", - "expr": 1 - }, - "stmt": [{ - "assignment": { - "var": "xmeans", - "op": "+=", - "expr": { - "div": [{ - "sub": [{ - "get": ["x", "i"] - }, { - "get": ["x", { - "sub": ["i", "w"] - }] - }] - }, "w"] - } - } - }, { - "assignment": { - "var": "ymeans", - "op": "+=", - "expr": { - "div": [{ - "sub": [{ - "get": ["y", "i"] - }, { - "get": ["y", { - "sub": ["i", "w"] - }] - }] - }, "w"] - } - } - }, { - "assignment": { - "var": { - "get": ["_builtin_ret", "i"] - }, - "op": ":=", - "expr": { - "avg": { - "mul": [{ - "sub": [{ - "x": [{ - "sub": ["l", "w"] - }, "l"] - }, "xmeans"] - }, { - "sub": [{ - "y": [{ - "sub": ["l", "w"] - }, "l"] - }, "ymeans"] - }] - } - } - } - }] - } - }], - "ret": { - "null": {} - } - } - } +{ + "stmts": { + "udf": { + "fname": "covariances2", + "params": ["x", "y", "w"], + "stmt": [{ + "assignment": { + "var": "xmeans", + "op": ":=", + "expr": 0.0 + } + }, { + "assignment": { + "var": "ymeans", + "op": ":=", + "expr": 0.0 + } + }, { + "assignment": { + "var": "l", + "op": ":=", + "expr": "_builtin_len" + } + }, { + "if": { + "cond": { + "gt": ["w", "l"] + }, + "assignment": { + "var": "w", + "op": ":=", + "expr": "l" + }, + "elif": [{ + "cond": { + "gt": ["w", { + "add": ["l", 2] + }] + }, + "stmt": [{ + "assignment": { + "var": "l", + "op": ":=", + "expr": 3 + } + }, { + "assignment": { + "var": "w", + "op": ":=", + "expr": 4 + } + }] + }, { + "cond": { + "lt": ["w", 99] + }, + "stmt": { + "assignment": { + "var": "l", + "op": ":=", + "expr": 8 + } + } + }, { + "cond": { + "lt": ["w", 999] + }, + "assignment": { + "var": "w", + "op": ":=", + "expr": 6 + } + }], + "else": { + "assignment": { + "var": "l", + "op": ":=", + "expr": { + "div": ["l", 2] + } + } + } + } + }, { + "for": { + "defs": { + "var": ["i", "j"], + "op": [":=", ":="], + "expr": [0, 0] + }, + "cond": { + "lt": ["i", "w"] + }, + "tail": { + "var": "i", + "op": ":=", + "expr": { + "add": ["i", 1] + } + }, + "stmt": [{ + "assignment": { + "var": "xmeans", + "op": "+=", + "expr": { + "get": ["x", "i"] + } + } + }, { + "assignment": { + "var": "ymeans", + "op": "+=", + "expr": { + "get": ["y", "i"] + } + } + }, { + "assignment": { + "var": { + "get": ["_builtin_ret", "i"] + }, + "op": ":=", + "expr": { + "avg": { + "mul": [{ + "sub": [{ + "x": [{ + "sub": ["l", "w"] + }, "l"] + }, "xmeans"] + }, { + "sub": [{ + "y": [{ + "sub": ["l", "w"] + }, "l"] + }, "ymeans"] + }] + } + } + } + }] + } + }, { + "for": { + "defs": { + "var": "i", + "op": ":=", + "expr": 0 + }, + "cond": { + "lt": ["i", "l"] + }, + "tail": { + "var": "i", + "op": "+=", + "expr": 1 + }, + "stmt": [{ + "assignment": { + "var": "xmeans", + "op": "+=", + "expr": { + "div": [{ + "sub": [{ + "get": ["x", "i"] + }, { + "get": ["x", { + "sub": ["i", "w"] + }] + }] + }, "w"] + } + } + }, { + "assignment": { + "var": "ymeans", + "op": "+=", + "expr": { + "div": [{ + "sub": [{ + "get": ["y", "i"] + }, { + "get": ["y", { + "sub": ["i", "w"] + }] + }] + }, "w"] + } + } + }, { + "assignment": { + "var": { + "get": ["_builtin_ret", "i"] + }, + "op": ":=", + "expr": { + "avg": { + "mul": [{ + "sub": [{ + "x": [{ + "sub": ["l", "w"] + }, "l"] + }, "xmeans"] + }, { + "sub": [{ + "y": [{ + "sub": ["l", "w"] + }, "l"] + }, "ymeans"] + }] + } + } + } + }] + } + }], + "ret": { + "null": {} + } + } + } } \ No newline at end of 
file diff --git a/sdk/aquery.h b/sdk/aquery.h index 87c860b..3d855ce 100644 --- a/sdk/aquery.h +++ b/sdk/aquery.h @@ -1,89 +1,89 @@ -#ifndef _AQUERY_H -#define _AQUERY_H - -enum Log_level { - LOG_INFO, - LOG_ERROR, - LOG_SILENT -}; - -enum Backend_Type { - BACKEND_AQuery, - BACKEND_MonetDB, - BACKEND_MariaDB -}; - -struct Config{ - int running, new_query, server_mode, - backend_type, has_dll, n_buffers; - int buffer_sizes[]; -}; - -struct Session{ - struct Statistic{ - unsigned long long total_active; - unsigned long long cnt_object; - unsigned long long total_alloc; - }; - void* memory_map; -}; - -struct Context{ - typedef int (*printf_type) (const char *format, ...); - void* module_function_maps = 0; - Config* cfg; - - int n_buffers, *sz_bufs; - void **buffers; - - void* alt_server; - Log_level log_level = LOG_INFO; - - Session current; - - - template - void log(Types... args) { - if (log_level == LOG_INFO) - print(args...); - } - template - void err(Types... args) { - if (log_level <= LOG_ERROR) - print(args...); - } - void init_session(); - void end_session(); - void* get_module_function(const char*); - char remainder[]; -}; - -#ifdef _WIN32 -#define __DLLEXPORT__ __declspec(dllexport) __stdcall -#else -#define __DLLEXPORT__ -#endif - -#define __AQEXPORT__(_Ty) extern "C" _Ty __DLLEXPORT__ - -typedef void (*deallocator_t) (void*); - -extern void* Aalloc(unsigned long long sz); -extern void Afree(void * mem); -extern void register_memory(void* ptr, deallocator_t deallocator); - -__AQEXPORT__(void) init_session(Context* cxt); - -#define __AQ_NO_SESSION__ __AQEXPORT__(void) init_session(Context*) {} -void* memcpy(void*, void*, unsigned long long); -struct ColRef_storage { - void* container; - unsigned int capacity, size; - const char* name; - int ty; // what if enum is not int? - template class VT> - ColRef_storage(const VT& vt) { - memcpy(this, &vt, sizeof(ColRef_storage)); - } -}; +#ifndef _AQUERY_H +#define _AQUERY_H + +enum Log_level { + LOG_INFO, + LOG_ERROR, + LOG_SILENT +}; + +enum Backend_Type { + BACKEND_AQuery, + BACKEND_MonetDB, + BACKEND_MariaDB +}; + +struct Config{ + int running, new_query, server_mode, + backend_type, has_dll, n_buffers; + int buffer_sizes[]; +}; + +struct Session{ + struct Statistic{ + unsigned long long total_active; + unsigned long long cnt_object; + unsigned long long total_alloc; + }; + void* memory_map; +}; + +struct Context{ + typedef int (*printf_type) (const char *format, ...); + void* module_function_maps = 0; + Config* cfg; + + int n_buffers, *sz_bufs; + void **buffers; + + void* alt_server; + Log_level log_level = LOG_INFO; + + Session current; + + + template + void log(Types... args) { + if (log_level == LOG_INFO) + print(args...); + } + template + void err(Types... 
args) { + if (log_level <= LOG_ERROR) + print(args...); + } + void init_session(); + void end_session(); + void* get_module_function(const char*); + char remainder[]; +}; + +#ifdef _WIN32 +#define __DLLEXPORT__ __declspec(dllexport) __stdcall +#else +#define __DLLEXPORT__ +#endif + +#define __AQEXPORT__(_Ty) extern "C" _Ty __DLLEXPORT__ + +typedef void (*deallocator_t) (void*); + +extern void* Aalloc(unsigned long long sz); +extern void Afree(void * mem); +extern void register_memory(void* ptr, deallocator_t deallocator); + +__AQEXPORT__(void) init_session(Context* cxt); + +#define __AQ_NO_SESSION__ __AQEXPORT__(void) init_session(Context*) {} +void* memcpy(void*, void*, unsigned long long); +struct ColRef_storage { + void* container; + unsigned int capacity, size; + const char* name; + int ty; // what if enum is not int? + template class VT> + ColRef_storage(const VT& vt) { + memcpy(this, &vt, sizeof(ColRef_storage)); + } +}; #endif \ No newline at end of file diff --git a/sdk/aquery_mem.cpp b/sdk/aquery_mem.cpp index ebd9690..e0c3075 100644 --- a/sdk/aquery_mem.cpp +++ b/sdk/aquery_mem.cpp @@ -1,31 +1,31 @@ -#include "aquery.h" - -#include -#include -#include -#include - -Session* session; - - -void* Aalloc(size_t sz, deallocator_t deallocator){ - void* mem = malloc(sz); - auto memmap = (std::unordered_map*) session->memory_map; - memmap->operator[](mem) = deallocator; - return mem; -} - -void Afree(void* mem){ - auto memmap = (std::unordered_map*) session->memory_map; - memmap->operator[](mem)(mem); - memmap->erase(mem); -} - -void register_memory(void* ptr, deallocator_t deallocator){ - auto memmap = (std::unordered_map*) session->memory_map; - memmap->operator[](ptr) = deallocator; -} - -__AQEXPORT__(void) init_session(Context* cxt){ - session = &cxt->current; -} +#include "aquery.h" + +#include +#include +#include +#include + +Session* session; + + +void* Aalloc(size_t sz, deallocator_t deallocator){ + void* mem = malloc(sz); + auto memmap = (std::unordered_map*) session->memory_map; + memmap->operator[](mem) = deallocator; + return mem; +} + +void Afree(void* mem){ + auto memmap = (std::unordered_map*) session->memory_map; + memmap->operator[](mem)(mem); + memmap->erase(mem); +} + +void register_memory(void* ptr, deallocator_t deallocator){ + auto memmap = (std::unordered_map*) session->memory_map; + memmap->operator[](ptr) = deallocator; +} + +__AQEXPORT__(void) init_session(Context* cxt){ + session = &cxt->current; +} diff --git a/server/aggregations.h b/server/aggregations.h index 863585b..fa0ea25 100644 --- a/server/aggregations.h +++ b/server/aggregations.h @@ -1,171 +1,171 @@ -#pragma once -#include "types.h" -#include -#include -#include -#include -#undef max -#undef min -template class VT> -size_t count(const VT& v) { - return v.size; -} - -template -constexpr static inline size_t count(const T&) { return 1; } - -// TODO: Specializations for dt/str/none -template class VT> -types::GetLongType sum(const VT& v) { - types::GetLongType ret = 0; - for (const auto& _v : v) - ret += _v; - return ret; -} -template class VT> -types::GetFPType avg(const VT& v) { - return static_cast>( - sum(v) / static_cast(v.size)); -} - -template class VT> -VT sqrt(const VT& v) { - VT ret {v.size}; - for (uint32_t i = 0; i < v.size; ++i){ - ret[i] = sqrt(v[i]); - } - return ret; -} - -template class VT> -T max(const VT& v) { - T max_v = std::numeric_limits::min(); - for (const auto& _v : v) - max_v = max_v > _v ? 
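The session allocator just defined keeps a pointer-to-deallocator map on the Session, so Context::end_session() (see server.cpp later in this patch) can reclaim everything a query produced. A minimal sketch of a user module using it — the function name and values are made up, and it relies on the two-argument Aalloc from aquery_mem.cpp rather than the one-argument declaration in aquery.h (the two disagree as written):

    #include "aquery.h"
    // Hypothetical module function. It assumes the host already called
    // init_session(cxt) so the file-level `session` pointer is set.
    __AQEXPORT__(int) make_buffer(Context* cxt) {
        // allocated through the session registry; end_session() calls free()
        void* buf = Aalloc(1024, free);
        // an externally allocated pointer can be adopted the same way
        register_memory(new int(7),
            [](void* p) { delete static_cast<int*>(p); });
        return 0;
    }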
max_v : _v; - return max_v; -} -template class VT> -T min(const VT& v) { - T min_v = std::numeric_limits::max(); - for (const auto& _v : v) - min_v = min_v < _v ? min_v : _v; - return min_v; -} -template class VT> -decayed_t mins(const VT& arr) { - const uint32_t& len = arr.size; - std::deque> cache; - decayed_t ret(len); - T min = std::numeric_limits::max(); - for (int i = 0; i < len; ++i) { - if (arr[i] < min) - min = arr[i]; - ret[i] = min; - } - return ret; -} -template class VT> -decayed_t maxs(const VT& arr) { - const uint32_t& len = arr.size; - decayed_t ret(len); - T max = std::numeric_limits::min(); - for (int i = 0; i < len; ++i) { - if (arr[i] > max) - max = arr[i]; - ret[i] = max; - } - return ret; -} - -template class VT> -decayed_t minw(uint32_t w, const VT& arr) { - const uint32_t& len = arr.size; - decayed_t ret{len}; - std::deque> cache; - for (int i = 0; i < len; ++i) { - if (!cache.empty() && cache.front().second == i - w) cache.pop_front(); - while (!cache.empty() && cache.back().first > arr[i]) cache.pop_back(); - cache.push_back({ arr[i], i }); - ret[i] = cache.front().first; - } - return ret; -} - -template class VT> -decayed_t maxw(uint32_t w, const VT& arr) { - const uint32_t& len = arr.size; - decayed_t ret(len); - std::deque> cache; - for (int i = 0; i < len; ++i) { - if (!cache.empty() && cache.front().second == i - w) cache.pop_front(); - while (!cache.empty() && cache.back().first > arr[i]) cache.pop_back(); - cache.push_back({ arr[i], i }); - arr[i] = cache.front().first; - } - return ret; -} - -template class VT> -decayed_t> sums(const VT& arr) { - const uint32_t& len = arr.size; - decayed_t> ret(len); - uint32_t i = 0; - if(len) ret[i++] = arr[0]; - for (; i < len; ++i) - ret[i] = ret[i-1] + arr[i]; - return ret; -} -template class VT> -decayed_t> avgs(const VT& arr) { - const uint32_t& len = arr.size; - typedef types::GetFPType FPType; - decayed_t ret(len); - uint32_t i = 0; - types::GetLongType s; - if(len) s = ret[i++] = arr[0]; - for (; i < len; ++i) - ret[i] = (s+=arr[i])/(FPType)(i+1); - return ret; -} -template class VT> -decayed_t> sumw(uint32_t w, const VT& arr) { - const uint32_t& len = arr.size; - decayed_t> ret(len); - uint32_t i = 0; - w = w > len ? len : w; - if(len) ret[i++] = arr[0]; - for (; i < w; ++i) - ret[i] = ret[i-1] + arr[i]; - for (; i < len; ++i) - ret[i] = ret[i-1] + arr[i] - arr[i-w]; - return ret; -} -template class VT> -decayed_t>> avgw(uint32_t w, const VT& arr) { - typedef types::GetFPType> FPType; - const uint32_t& len = arr.size; - decayed_t ret(len); - uint32_t i = 0; - types::GetLongType s{}; - w = w > len ? 
len : w; - if(len) s = ret[i++] = arr[0]; - for (; i < w; ++i) - ret[i] = (s += arr[i])/(FPType)(i+1); - for (; i < len; ++i) - ret[i] = ret[i-1] + (arr[i] - arr[i-w])/(FPType)w; - return ret; -} - -template constexpr inline T count(const T& v) { return 1; } -template constexpr inline T max(const T& v) { return v; } -template constexpr inline T min(const T& v) { return v; } -template constexpr inline T avg(const T& v) { return v; } -template constexpr inline T sum(const T& v) { return v; } -template constexpr inline T maxw(uint32_t, const T& v) { return v; } -template constexpr inline T minw(uint32_t, const T& v) { return v; } -template constexpr inline T avgw(uint32_t, const T& v) { return v; } -template constexpr inline T sumw(uint32_t, const T& v) { return v; } -template constexpr inline T maxs(const T& v) { return v; } -template constexpr inline T mins(const T& v) { return v; } -template constexpr inline T avgs(const T& v) { return v; } -template constexpr inline T sums(const T& v) { return v; } +#pragma once +#include "types.h" +#include +#include +#include +#include +#undef max +#undef min +template class VT> +size_t count(const VT& v) { + return v.size; +} + +template +constexpr static inline size_t count(const T&) { return 1; } + +// TODO: Specializations for dt/str/none +template class VT> +types::GetLongType sum(const VT& v) { + types::GetLongType ret = 0; + for (const auto& _v : v) + ret += _v; + return ret; +} +template class VT> +types::GetFPType avg(const VT& v) { + return static_cast>( + sum(v) / static_cast(v.size)); +} + +template class VT> +VT sqrt(const VT& v) { + VT ret {v.size}; + for (uint32_t i = 0; i < v.size; ++i){ + ret[i] = sqrt(v[i]); + } + return ret; +} + +template class VT> +T max(const VT& v) { + T max_v = std::numeric_limits::min(); + for (const auto& _v : v) + max_v = max_v > _v ? max_v : _v; + return max_v; +} +template class VT> +T min(const VT& v) { + T min_v = std::numeric_limits::max(); + for (const auto& _v : v) + min_v = min_v < _v ? 
min_v : _v; + return min_v; +} +template class VT> +decayed_t mins(const VT& arr) { + const uint32_t& len = arr.size; + std::deque> cache; + decayed_t ret(len); + T min = std::numeric_limits::max(); + for (int i = 0; i < len; ++i) { + if (arr[i] < min) + min = arr[i]; + ret[i] = min; + } + return ret; +} +template class VT> +decayed_t maxs(const VT& arr) { + const uint32_t& len = arr.size; + decayed_t ret(len); + T max = std::numeric_limits::min(); + for (int i = 0; i < len; ++i) { + if (arr[i] > max) + max = arr[i]; + ret[i] = max; + } + return ret; +} + +template class VT> +decayed_t minw(uint32_t w, const VT& arr) { + const uint32_t& len = arr.size; + decayed_t ret{len}; + std::deque> cache; + for (int i = 0; i < len; ++i) { + if (!cache.empty() && cache.front().second == i - w) cache.pop_front(); + while (!cache.empty() && cache.back().first > arr[i]) cache.pop_back(); + cache.push_back({ arr[i], i }); + ret[i] = cache.front().first; + } + return ret; +} + +template class VT> +decayed_t maxw(uint32_t w, const VT& arr) { + const uint32_t& len = arr.size; + decayed_t ret(len); + std::deque> cache; + for (int i = 0; i < len; ++i) { + if (!cache.empty() && cache.front().second == i - w) cache.pop_front(); + while (!cache.empty() && cache.back().first < arr[i]) cache.pop_back(); // keep the deque decreasing so the window max stays at the front + cache.push_back({ arr[i], i }); + ret[i] = cache.front().first; // write the window max into the result, not the input + } + return ret; +} + +template class VT> +decayed_t> sums(const VT& arr) { + const uint32_t& len = arr.size; + decayed_t> ret(len); + uint32_t i = 0; + if(len) ret[i++] = arr[0]; + for (; i < len; ++i) + ret[i] = ret[i-1] + arr[i]; + return ret; +} +template class VT> +decayed_t> avgs(const VT& arr) { + const uint32_t& len = arr.size; + typedef types::GetFPType FPType; + decayed_t ret(len); + uint32_t i = 0; + types::GetLongType s; + if(len) s = ret[i++] = arr[0]; + for (; i < len; ++i) + ret[i] = (s+=arr[i])/(FPType)(i+1); + return ret; +} +template class VT> +decayed_t> sumw(uint32_t w, const VT& arr) { + const uint32_t& len = arr.size; + decayed_t> ret(len); + uint32_t i = 0; + w = w > len ? len : w; + if(len) ret[i++] = arr[0]; + for (; i < w; ++i) + ret[i] = ret[i-1] + arr[i]; + for (; i < len; ++i) + ret[i] = ret[i-1] + arr[i] - arr[i-w]; + return ret; +} +template class VT> +decayed_t>> avgw(uint32_t w, const VT& arr) { + typedef types::GetFPType> FPType; + const uint32_t& len = arr.size; + decayed_t ret(len); + uint32_t i = 0; + types::GetLongType s{}; + w = w > len ? 
len : w; + if(len) s = ret[i++] = arr[0]; + for (; i < w; ++i) + ret[i] = (s += arr[i])/(FPType)(i+1); + for (; i < len; ++i) + ret[i] = ret[i-1] + (arr[i] - arr[i-w])/(FPType)w; + return ret; +} + +template constexpr inline T count(const T& v) { return 1; } +template constexpr inline T max(const T& v) { return v; } +template constexpr inline T min(const T& v) { return v; } +template constexpr inline T avg(const T& v) { return v; } +template constexpr inline T sum(const T& v) { return v; } +template constexpr inline T maxw(uint32_t, const T& v) { return v; } +template constexpr inline T minw(uint32_t, const T& v) { return v; } +template constexpr inline T avgw(uint32_t, const T& v) { return v; } +template constexpr inline T sumw(uint32_t, const T& v) { return v; } +template constexpr inline T maxs(const T& v) { return v; } +template constexpr inline T mins(const T& v) { return v; } +template constexpr inline T avgs(const T& v) { return v; } +template constexpr inline T sums(const T& v) { return v; } diff --git a/server/cpp.hint b/server/cpp.hint index 90950e7..f076301 100644 --- a/server/cpp.hint +++ b/server/cpp.hint @@ -1,6 +1,6 @@ -// Hint files help the Visual Studio IDE interpret Visual C++ identifiers -// such as names of functions and macros. -// For more information see https://go.microsoft.com/fwlink/?linkid=865984 -#define Ops(o) template vector_type::type> operator##o(const vector_type& r) { [[likely]] if (r.size == size) { return add(r); } else if (r.size == 1 || size == 1) { const bool lr = size == 1; const uint32_t& _size = lr ? r.size : size; const auto& _container = lr ? r.container : container; const auto& scalar = *(lr ? container : r.container); vector_type::type> ret(_size); for (int i = 0; i < _size; ++i) ret[i] = _container[i] o scalar; return ret; } } -#define Op(o, x) template vector_type::type> inline x(const vector_type& r) { vector_type::type> ret(size); for (int i = 0; i < size; ++i) ret[i] = container[i] o r[i]; return ret; } -#define _Make_Ops(M) M(+, add) M(-, minus) M(*, multi) M(/, div) M(%, mod) M(&, and) M(|, or) M(^, xor) +// Hint files help the Visual Studio IDE interpret Visual C++ identifiers +// such as names of functions and macros. +// For more information see https://go.microsoft.com/fwlink/?linkid=865984 +#define Ops(o) template vector_type::type> operator##o(const vector_type& r) { [[likely]] if (r.size == size) { return add(r); } else if (r.size == 1 || size == 1) { const bool lr = size == 1; const uint32_t& _size = lr ? r.size : size; const auto& _container = lr ? r.container : container; const auto& scalar = *(lr ? 
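The windowed extrema above use the classic monotonic-deque technique: indices that slide out of the w-wide window are evicted from the front, dominated values are popped from the back, so every element enters and leaves the deque once and the window extremum is always cache.front() — O(n) overall instead of O(n*w). A small usage sketch, assuming vector_type<int> can be filled with emplace_back as it is elsewhere in this patch:

    #include "aggregations.h"
    #include "vector_type.hpp"
    void demo() {
        vector_type<int> v(0);
        for (int x : {1, 5, 2, 4, 3}) v.emplace_back(x);
        auto lo = minw(3u, v);   // 1 1 1 2 2   -- min over a trailing window of 3
        auto hi = maxw(3u, v);   // 1 5 5 5 4   -- max, deque kept decreasing
        auto ps = sums(v);       // 1 6 8 12 15 -- prefix sums in one pass
    }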
container : r.container); vector_type::type> ret(_size); for (int i = 0; i < _size; ++i) ret[i] = _container[i] o scalar; return ret; } } +#define Op(o, x) template vector_type::type> inline x(const vector_type& r) { vector_type::type> ret(size); for (int i = 0; i < size; ++i) ret[i] = container[i] o r[i]; return ret; } +#define _Make_Ops(M) M(+, add) M(-, minus) M(*, multi) M(/, div) M(%, mod) M(&, and) M(|, or) M(^, xor) diff --git a/server/gc.hpp b/server/gc.hpp index 4c66060..6d233ca 100644 --- a/server/gc.hpp +++ b/server/gc.hpp @@ -1,53 +1,53 @@ -#pragma once -#include -#include -#include -#include -class GC { - template - using vector = vector_type; - template - using tuple = std::tuple; - size_t current_size, max_size, interval, forced_clean; - bool running, alive; -// ptr, dealloc, ref, sz - vector> q; - std::thread handle; - void gc() - { - - } - void reg(void* v, uint32_t ref, uint32_t sz, - void(*f)(void*) = [](void* v) {free (v); }) { - current_size += sz; - if (current_size > max_size) - gc(); - q.push_back({ v, f }); - } - void daemon() { - using namespace std::chrono; - while (alive) { - if (running) { - gc(); - std::this_thread::sleep_for(microseconds(interval)); - } - else { - std::this_thread::sleep_for(10ms); - } - } - } - void start_deamon() { - handle = std::thread(&daemon); - alive = true; - } - void terminate_daemon() { - running = false; - alive = false; - using namespace std::chrono; - - if (handle.joinable()) { - std::this_thread::sleep_for(microseconds(1000 + std::max(static_cast(10000), interval))); - handle.join(); - } - } +#pragma once +#include +#include +#include +#include +class GC { + template + using vector = vector_type; + template + using tuple = std::tuple; + size_t current_size, max_size, interval, forced_clean; + bool running, alive; +// ptr, dealloc, ref, sz + vector> q; + std::thread handle; + void gc() + { + + } + void reg(void* v, uint32_t ref, uint32_t sz, + void(*f)(void*) = [](void* v) {free (v); }) { + current_size += sz; + if (current_size > max_size) + gc(); + q.push_back({ v, f }); + } + void daemon() { + using namespace std::chrono; + while (alive) { + if (running) { + gc(); + std::this_thread::sleep_for(microseconds(interval)); + } + else { + std::this_thread::sleep_for(10ms); + } + } + } + void start_deamon() { + handle = std::thread(&daemon); + alive = true; + } + void terminate_daemon() { + running = false; + alive = false; + using namespace std::chrono; + + if (handle.joinable()) { + std::this_thread::sleep_for(microseconds(1000 + std::max(static_cast(10000), interval))); + handle.join(); + } + } }; \ No newline at end of file diff --git a/server/hasher.h b/server/hasher.h index 780d24c..bde7bf0 100644 --- a/server/hasher.h +++ b/server/hasher.h @@ -1,20 +1,20 @@ -#pragma once - -#include -#include -template -struct hasher { - template typename std::enable_if< i == sizeof...(Types), - size_t>::type hashi(const std::tuple& record) const { - return 0; - } - - template typename std::enable_if< i < sizeof ...(Types), - size_t>::type hashi(const std::tuple& record) const { - using current_type = typename std::decay>::type>::type; - return std::hash()(std::get(record)) ^ hashi(record); - } - size_t operator()(const std::tuple& record) const { - return hashi(record); - } -}; +#pragma once + +#include +#include +template +struct hasher { + template typename std::enable_if< i == sizeof...(Types), + size_t>::type hashi(const std::tuple& record) const { + return 0; + } + + template typename std::enable_if< i < sizeof ...(Types), + 
size_t>::type hashi(const std::tuple& record) const { + using current_type = typename std::decay>::type>::type; + return std::hash()(std::get(record)) ^ hashi(record); + } + size_t operator()(const std::tuple& record) const { + return hashi(record); + } +}; diff --git a/server/io.h b/server/io.h index 56019d5..348aeca 100644 --- a/server/io.h +++ b/server/io.h @@ -1,81 +1,81 @@ -#pragma once -#include "types.h" -#include -#include -#include -#include -template -std::string generate_printf_string(const char* sep = " ", const char* end = "\n") { - std::string str; - ((str += types::printf_str[types::Types>::getType()], str += sep), ...); - const auto trim = str.size() - strlen(sep); - if (trim > 0) - str.resize(trim); - str += end; - return str; -} - -template -inline decltype(auto) print_hook(const T& v){ - return v; -} - -template<> -inline decltype(auto) print_hook(const bool& v) { - return v? "true" : "false"; -} - -#ifdef __SIZEOF_INT128__ -constexpr struct __int128__struct{ - uint64_t low, high; - // constexpr bool operator==(__int128_t x) const{ - // return (x>>64) == high and (x&0xffffffffffffffffull) == low; - // } - bool operator==(__int128_t x) const{ - return *((const __int128_t*) this) == x; - } -}__int128_max_v = {0x0000000000000000ull, 0x8000000000000000ull}; - -inline const char* get_int128str(__int128_t v, char* buf){ - bool neg = false; - if (v < 0) { - if(__int128_max_v == v) - return "-170141183460469231731687303715884105728"; - v = -v; - neg = true; - } - do { - *--buf = v%10 + '0'; - v /= 10; - } while(v); - if (neg) *--buf = '-'; - return buf; -} - -inline const char* get_uint128str(__uint128_t v, char* buf){ - do { - *--buf = v%10 + '0'; - v /= 10; - } while(v); - return buf; -} -extern char* gbuf; - -void setgbuf(char* buf = 0); - -template<> -inline decltype(auto) print_hook<__int128_t>(const __int128_t& v) { - *(gbuf+=40) = 0; - return get_int128str(v, gbuf++); -} - -template<> -inline decltype(auto) print_hook<__uint128_t>(const __uint128_t& v) { - *(gbuf+=40) = 0; - return get_uint128str(v, gbuf++); -} - -#else - -#define setgbuf() -#endif +#pragma once +#include "types.h" +#include +#include +#include +#include +template +std::string generate_printf_string(const char* sep = " ", const char* end = "\n") { + std::string str; + ((str += types::printf_str[types::Types>::getType()], str += sep), ...); + const auto trim = str.size() - strlen(sep); + if (trim > 0) + str.resize(trim); + str += end; + return str; +} + +template +inline decltype(auto) print_hook(const T& v){ + return v; +} + +template<> +inline decltype(auto) print_hook(const bool& v) { + return v? 
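hasher<Types...> above folds std::hash over the fields of a std::tuple by compile-time recursion — the enable_if pair selects the terminating overload once the index reaches sizeof...(Types) — XOR-combining the per-field hashes. A sketch of the intended use, as the Hash parameter of an unordered container keyed on a record tuple:

    #include "hasher.h"
    #include <string>
    #include <tuple>
    #include <unordered_map>
    // Group-by style accumulator keyed on (int, string) records.
    std::unordered_map<std::tuple<int, std::string>, long,
                       hasher<int, std::string>> groups;
    void demo() {
        groups[{1, "a"}] += 10;
        groups[{1, "a"}] += 5;   // same key, same bucket: now 15
    }

XOR of per-field hashes is cheap but symmetric, so (a, b) and (b, a) collide whenever the fields share a type; mixing (shift or multiply) between fields would be a plausible hardening.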
"true" : "false"; +} + +#ifdef __SIZEOF_INT128__ +constexpr struct __int128__struct{ + uint64_t low, high; + // constexpr bool operator==(__int128_t x) const{ + // return (x>>64) == high and (x&0xffffffffffffffffull) == low; + // } + bool operator==(__int128_t x) const{ + return *((const __int128_t*) this) == x; + } +}__int128_max_v = {0x0000000000000000ull, 0x8000000000000000ull}; + +inline const char* get_int128str(__int128_t v, char* buf){ + bool neg = false; + if (v < 0) { + if(__int128_max_v == v) + return "-170141183460469231731687303715884105728"; + v = -v; + neg = true; + } + do { + *--buf = v%10 + '0'; + v /= 10; + } while(v); + if (neg) *--buf = '-'; + return buf; +} + +inline const char* get_uint128str(__uint128_t v, char* buf){ + do { + *--buf = v%10 + '0'; + v /= 10; + } while(v); + return buf; +} +extern char* gbuf; + +void setgbuf(char* buf = 0); + +template<> +inline decltype(auto) print_hook<__int128_t>(const __int128_t& v) { + *(gbuf+=40) = 0; + return get_int128str(v, gbuf++); +} + +template<> +inline decltype(auto) print_hook<__uint128_t>(const __uint128_t& v) { + *(gbuf+=40) = 0; + return get_uint128str(v, gbuf++); +} + +#else + +#define setgbuf() +#endif diff --git a/server/libaquery.h b/server/libaquery.h index 0475156..4f82403 100644 --- a/server/libaquery.h +++ b/server/libaquery.h @@ -1,79 +1,79 @@ -#ifndef _AQUERY_H -#define _AQUERY_H - -#include "table.h" -#include - -enum Log_level { - LOG_INFO, - LOG_ERROR, - LOG_SILENT -}; - -enum Backend_Type { - BACKEND_AQuery, - BACKEND_MonetDB, - BACKEND_MariaDB -}; -struct Config{ - int running, new_query, server_mode, - backend_type, has_dll, n_buffers; - int buffer_sizes[]; -}; - -struct Session{ - struct Statistic{ - size_t total_active; - size_t cnt_object; - size_t total_alloc; - } stats; - void* memory_map; -}; - -struct Context{ - typedef int (*printf_type) (const char *format, ...); - - void* module_function_maps = 0; - Config* cfg; - - int n_buffers, *sz_bufs; - void **buffers; - - void* alt_server = 0; - Log_level log_level = LOG_INFO; - - Session current; - -#ifdef THREADING - void* thread_pool; -#endif - printf_type print = printf; - Context(); - virtual ~Context(); - template - void log(Types... args) { - if (log_level == LOG_INFO) - print(args...); - } - template - void err(Types... 
args) { - if (log_level <= LOG_ERROR) - print(args...); - } - void init_session(); - void end_session(); - void* get_module_function(const char*); - std::unordered_map tables; - std::unordered_map cols; -}; - -#ifdef _WIN32 -#define __DLLEXPORT__ __declspec(dllexport) __stdcall -#else -#define __DLLEXPORT__ -#endif - -#define __AQEXPORT__(_Ty) extern "C" _Ty __DLLEXPORT__ -typedef void (*deallocator_t) (void*); - -#endif +#ifndef _AQUERY_H +#define _AQUERY_H + +#include "table.h" +#include + +enum Log_level { + LOG_INFO, + LOG_ERROR, + LOG_SILENT +}; + +enum Backend_Type { + BACKEND_AQuery, + BACKEND_MonetDB, + BACKEND_MariaDB +}; +struct Config{ + int running, new_query, server_mode, + backend_type, has_dll, n_buffers; + int buffer_sizes[]; +}; + +struct Session{ + struct Statistic{ + size_t total_active; + size_t cnt_object; + size_t total_alloc; + } stats; + void* memory_map; +}; + +struct Context{ + typedef int (*printf_type) (const char *format, ...); + + void* module_function_maps = 0; + Config* cfg; + + int n_buffers, *sz_bufs; + void **buffers; + + void* alt_server = 0; + Log_level log_level = LOG_INFO; + + Session current; + +#ifdef THREADING + void* thread_pool; +#endif + printf_type print = printf; + Context(); + virtual ~Context(); + template + void log(Types... args) { + if (log_level == LOG_INFO) + print(args...); + } + template + void err(Types... args) { + if (log_level <= LOG_ERROR) + print(args...); + } + void init_session(); + void end_session(); + void* get_module_function(const char*); + std::unordered_map tables; + std::unordered_map cols; +}; + +#ifdef _WIN32 +#define __DLLEXPORT__ __declspec(dllexport) __stdcall +#else +#define __DLLEXPORT__ +#endif + +#define __AQEXPORT__(_Ty) extern "C" _Ty __DLLEXPORT__ +typedef void (*deallocator_t) (void*); + +#endif diff --git a/server/priority_vector.hpp b/server/priority_vector.hpp index f1c7d6f..b8c4f77 100644 --- a/server/priority_vector.hpp +++ b/server/priority_vector.hpp @@ -1,19 +1,19 @@ -#pragma once -#include "vector_type.hpp" -#include -#include -template -class priority_vector : public vector_type { - const Comparator comp; -public: - priority_vector(Comparator comp = std::less{}) : - comp(comp), vector_type(0) {} - void emplace_back(T val) { - vector_type::emplace_back(val); - std::push_heap(container, container + size, comp); - } - void pop_back() { - std::pop_heap(container, container + size, comp); - --size; - } +#pragma once +#include "vector_type.hpp" +#include +#include +template +class priority_vector : public vector_type { + const Comparator comp; +public: + priority_vector(Comparator comp = std::less{}) : + comp(comp), vector_type(0) {} + void emplace_back(T val) { + vector_type::emplace_back(val); + std::push_heap(container, container + size, comp); + } + void pop_back() { + std::pop_heap(container, container + size, comp); + --size; + } }; \ No newline at end of file diff --git a/server/server.cpp b/server/server.cpp index bdaaefd..0f066e5 100644 --- a/server/server.cpp +++ b/server/server.cpp @@ -1,367 +1,369 @@ -#include "pch.hpp" - -#include "../csv.h" -#include -#include -#include - -#include "libaquery.h" -#include "monetdb_conn.h" -#ifdef THREADING -#include "threading.h" -#endif -#ifdef _WIN32 -#include "winhelper.h" -#else -#include -#include -#include -struct SharedMemory -{ - int hFileMap; - void* pData; - SharedMemory(const char* fname) { - hFileMap = open(fname, O_RDWR, 0); - if (hFileMap != -1) - pData = mmap(NULL, 8, PROT_READ | PROT_WRITE, MAP_SHARED, hFileMap, 0); - else - pData = 0; - } 
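priority_vector, just above, reuses vector_type's flat storage and keeps it heap-ordered with std::push_heap/std::pop_heap; with the defaulted std::less comparator the maximum sits at index 0, like std::priority_queue but with the underlying vector still directly indexable. A usage sketch (the template parameter list is assumed to be priority_vector<T, Comparator>, since angle-bracket contents are not preserved in this rendering):

    #include "priority_vector.hpp"
    void demo() {
        priority_vector<int> pq;  // assumed signature; max-heap via std::less
        pq.emplace_back(3);       // append, then push_heap restores the heap
        pq.emplace_back(7);
        pq.emplace_back(1);
        int top = pq[0];          // 7 -- heap root via vector_type indexing
        pq.pop_back();            // pop_heap moves the root out, --size drops it
    }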
- void FreeMemoryMap() { - - } -}; -#endif - -#include "aggregations.h" -typedef int (*code_snippet)(void*); -typedef void (*module_init_fn)(Context*); - -int test_main(); - -int n_recv = 0; -char** n_recvd = nullptr; - -extern "C" void __DLLEXPORT__ receive_args(int argc, char**argv){ - n_recv = argc; - n_recvd = argv; -} - -enum BinaryInfo_t { - MSVC, MSYS, GCC, CLANG, AppleClang -}; - -extern "C" int __DLLEXPORT__ binary_info() { -#if defined(_MSC_VER) && !defined (__llvm__) - return MSVC; -#elif defined(__CYGWIN__) || defined(__MINGW32__) || defined(__MINGW64__) - return MSYS; -#elif defined(__clang__) - return CLANG; -#elif defined(__GNUC__) - return GCC; -#endif -} - -__AQEXPORT__(bool) have_hge(){ -#if defined(__MONETDB_CONN_H__) - return Server::havehge(); -#else - return false; -#endif -} - -Context::Context() { - current.memory_map = new std::unordered_map; - init_session(); -} - -Context::~Context() { - auto memmap = (std::unordered_map*) this->current.memory_map; - delete memmap; -} - -void Context::init_session(){ - if (log_level == LOG_INFO){ - memset(&(this->current.stats), 0, sizeof(Session::Statistic)); - } - auto memmap = (std::unordered_map*) this->current.memory_map; - memmap->clear(); -} - -void Context::end_session(){ - auto memmap = (std::unordered_map*) this->current.memory_map; - for (auto& mem : *memmap) { - mem.second(mem.first); - } - memmap->clear(); -} - -void* Context::get_module_function(const char* fname){ - auto fmap = static_cast*> - (this->module_function_maps); - printf("%p\n", fmap->find("mydiv")->second); - for (const auto& [key, value] : *fmap){ - printf("%s %p\n", key.c_str(), value); - } - auto ret = fmap->find(fname); - return ret == fmap->end() ? nullptr : ret->second; -} - -void initialize_module(const char* module_name, void* module_handle, Context* cxt){ - auto _init_module = reinterpret_cast(dlsym(module_handle, "init_session")); - if (_init_module) { - _init_module(cxt); - } - else { - printf("Warning: module %s have no session support.\n", module_name); - } -} - -int dll_main(int argc, char** argv, Context* cxt){ - Config *cfg = reinterpret_cast(argv[0]); - std::unordered_map user_module_map; - if (cxt->module_function_maps == 0) - cxt->module_function_maps = new std::unordered_map(); - auto module_fn_map = - static_cast*>(cxt->module_function_maps); - - auto buf_szs = cfg->buffer_sizes; - void** buffers = (void**)malloc(sizeof(void*) * cfg->n_buffers); - for (int i = 0; i < cfg->n_buffers; i++) - buffers[i] = static_cast(argv[i + 1]); - - cxt->buffers = buffers; - cxt->cfg = cfg; - cxt->n_buffers = cfg->n_buffers; - cxt->sz_bufs = buf_szs; - cxt->alt_server = NULL; - - while(cfg->running){ - if (cfg->new_query) { - void *handle = 0; - void *user_module_handle = 0; - if (cfg->backend_type == BACKEND_MonetDB){ - if (cxt->alt_server == 0) - cxt->alt_server = new Server(cxt); - Server* server = reinterpret_cast(cxt->alt_server); - if(n_recv > 0){ - if (cfg->backend_type == BACKEND_AQuery || cfg->has_dll) { - handle = dlopen("./dll.so", RTLD_LAZY); - } - for (const auto& module : user_module_map){ - initialize_module(module.first.c_str(), module.second, cxt); - } - cxt->init_session(); - for(int i = 0; i < n_recv; ++i) - { - //printf("%s, %d\n", n_recvd[i], n_recvd[i][0] == 'Q'); - switch(n_recvd[i][0]){ - case 'Q': // SQL query for monetdbe - { - server->exec(n_recvd[i] + 1); - printf("Exec Q%d: %s", i, n_recvd[i]); - } - break; - case 'P': // Postprocessing procedure - if(handle && !server->haserror()) { - code_snippet c = 
reinterpret_cast(dlsym(handle, n_recvd[i]+1)); - c(cxt); - } - break; - case 'M': // Load Module - { - auto mname = n_recvd[i] + 1; - user_module_handle = dlopen(mname, RTLD_LAZY); - //getlasterror - -#ifndef _MSC_VER - if (!user_module_handle) - puts(dlerror()); -#endif - user_module_map[mname] = user_module_handle; - initialize_module(mname, user_module_handle, cxt); - } - break; - case 'F': // Register Function in Module - { - auto fname = n_recvd[i] + 1; - printf("F:: %s: %p, %p\n", fname, user_module_handle, dlsym(user_module_handle, fname)); - module_fn_map->insert_or_assign(fname, dlsym(user_module_handle, fname)); - printf("F::: %p\n", module_fn_map->find("mydiv") != module_fn_map->end() ? module_fn_map->find("mydiv")->second : nullptr); - } - break; - case 'U': // Unload Module - { - auto mname = n_recvd[i] + 1; - auto it = user_module_map.find(mname); - if (user_module_handle == it->second) - user_module_handle = 0; - dlclose(it->second); - user_module_map.erase(it); - } - break; - } - } - if(handle) { - dlclose(handle); - handle = 0; - } - cxt->end_session(); - n_recv = 0; - } - if(server->last_error == nullptr){ - // TODO: Add feedback to prompt. - } - else{ - server->last_error = nullptr; - continue; - } - } - - // puts(cfg->has_dll ? "true" : "false"); - if (cfg->backend_type == BACKEND_AQuery) { - handle = dlopen("./dll.so", RTLD_LAZY); - code_snippet c = reinterpret_cast(dlsym(handle, "dllmain")); - c(cxt); - } - if (handle) dlclose(handle); - cfg->new_query = 0; - } - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - - return 0; -} - -int launcher(int argc, char** argv){ -#ifdef _WIN32 - constexpr char sep = '\\'; -#else - constexpr char sep = '/'; -#endif - std::string str = " "; - std::string pwd = ""; - if (argc > 0) - pwd = argv[0]; - - auto pos = pwd.find_last_of(sep); - if (pos == std::string::npos) - pos = 0; - pwd = pwd.substr(0, pos); - for (int i = 1; i < argc; i++){ - str += argv[i]; - str += " "; - } - str = std::string("cd ") + pwd + std::string("&& python3 ./prompt.py ") + str; - return system(str.c_str()); -} - -extern "C" int __DLLEXPORT__ main(int argc, char** argv) { -#ifdef __AQ_BUILD_LAUNCHER__ - return launcher(argc, argv); -#endif - puts("running"); - Context* cxt = new Context(); - cxt->log("%d %s\n", argc, argv[1]); - -#ifdef THREADING - auto tp = new ThreadPool(); - cxt->thread_pool = tp; -#endif - - const char* shmname; - if (argc < 0) - return dll_main(argc, argv, cxt); - else if (argc <= 1) - return test_main(); - else - shmname = argv[1]; - SharedMemory shm = SharedMemory(shmname); - if (!shm.pData) - return 1; - bool &running = static_cast(shm.pData)[0], - &ready = static_cast(shm.pData)[1]; - using namespace std::chrono_literals; - cxt->log("running: %s\n", running? "true":"false"); - cxt->log("ready: %s\n", ready? "true":"false"); - while (running) { - std::this_thread::sleep_for(1ms); - if(ready){ - cxt->log("running: %s\n", running? "true":"false"); - cxt->log("ready: %s\n", ready? 
"true":"false"); - void* handle = dlopen("./dll.so", RTLD_LAZY); - cxt->log("handle: %p\n", handle); - if (handle) { - cxt->log("inner\n"); - code_snippet c = reinterpret_cast(dlsym(handle, "dllmain")); - cxt->log("routine: %p\n", c); - if (c) { - cxt->log("inner\n"); - cxt->err("return: %d\n", c(cxt)); - } - } - ready = false; - } - } - shm.FreeMemoryMap(); - return 0; -} - -#include "utils.h" -#include "table_ext_monetdb.hpp" -int test_main() -{ - Context* cxt = new Context(); - if (cxt->alt_server == 0) - cxt->alt_server = new Server(cxt); - Server* server = reinterpret_cast(cxt->alt_server); - - - //TableInfo table("sibal"); - //int col0[] = { 1,2,3,4,5 }; - //float col1[] = { 5.f, 4.f, 3.f, 2.f, 1.f }; - //table.get_col<0>().initfrom(5, col0, "a"); - //table.get_col<1>().initfrom(5, col1, "b"); - //table.monetdb_append_table(server); - // - //server->exec("select * from sibal;"); - //auto aa = server->getCol(0); - //auto bb = server->getCol(1); - //printf("sibal: %p %p\n", aa, bb); - - const char* qs[]= { - "CREATE TABLE test(a INT, b INT, c INT, d INT);", - "COPY OFFSET 2 INTO test FROM 'c:/Users/sunyi/Desktop/AQuery2/data/test2.csv' ON SERVER USING DELIMITERS ',';", - "SELECT (a + b), a,b,c FROM test ;", - }; - n_recv = sizeof(qs)/(sizeof (char*)); - n_recvd = const_cast(qs); - if (n_recv > 0) { - for (int i = 0; i < n_recv; ++i) - { - server->exec(n_recvd[i]); - printf("Exec Q%d: %s\n", i, n_recvd[i]); - } - n_recv = 0; - } - - cxt->log_level = LOG_INFO; - puts(cpp_17 ?"true":"false"); - void* handle = dlopen("./dll.so", RTLD_LAZY); - cxt->log("handle: %p\n", handle); - if (handle) { - cxt->log("inner\n"); - code_snippet c = reinterpret_cast(dlsym(handle, "dll_ZF5Shg")); - cxt->log("routine: %p\n", c); - if (c) { - cxt->log("inner\n"); - cxt->log("return: %d\n", c(cxt)); - } - dlclose(handle); - } - //static_assert(std::is_same_v()), std::integer_sequence>, ""); - - return 0; -} - +#include "pch.hpp" + +#include "../csv.h" +#include +#include +#include + +#include "libaquery.h" +#include "monetdb_conn.h" +#ifdef THREADING +#include "threading.h" +#endif +#ifdef _WIN32 +#include "winhelper.h" +#else +#include +#include +#include +struct SharedMemory +{ + int hFileMap; + void* pData; + SharedMemory(const char* fname) { + hFileMap = open(fname, O_RDWR, 0); + if (hFileMap != -1) + pData = mmap(NULL, 8, PROT_READ | PROT_WRITE, MAP_SHARED, hFileMap, 0); + else + pData = 0; + } + void FreeMemoryMap() { + + } +}; +#endif + +#include "aggregations.h" +typedef int (*code_snippet)(void*); +typedef void (*module_init_fn)(Context*); + +int test_main(); + +int n_recv = 0; +char** n_recvd = nullptr; + +extern "C" void __DLLEXPORT__ receive_args(int argc, char**argv){ + n_recv = argc; + n_recvd = argv; +} + +enum BinaryInfo_t { + MSVC, MSYS, GCC, CLANG, AppleClang +}; + +extern "C" int __DLLEXPORT__ binary_info() { +#if defined(_MSC_VER) && !defined (__llvm__) + return MSVC; +#elif defined(__CYGWIN__) || defined(__MINGW32__) || defined(__MINGW64__) + return MSYS; +#elif defined(__clang__) + return CLANG; +#elif defined(__GNUC__) + return GCC; +#endif +} + +__AQEXPORT__(bool) have_hge(){ +#if defined(__MONETDB_CONN_H__) + return Server::havehge(); +#else + return false; +#endif +} + +Context::Context() { + current.memory_map = new std::unordered_map; + init_session(); +} + +Context::~Context() { + auto memmap = (std::unordered_map*) this->current.memory_map; + delete memmap; +} + +void Context::init_session(){ + if (log_level == LOG_INFO){ + memset(&(this->current.stats), 0, 
sizeof(Session::Statistic)); + } + auto memmap = (std::unordered_map*) this->current.memory_map; + memmap->clear(); +} + +void Context::end_session(){ + auto memmap = (std::unordered_map*) this->current.memory_map; + for (auto& mem : *memmap) { + mem.second(mem.first); + } + memmap->clear(); +} + +void* Context::get_module_function(const char* fname){ + auto fmap = static_cast*> + (this->module_function_maps); + printf("%p\n", fmap->find("mydiv")->second); + for (const auto& [key, value] : *fmap){ + printf("%s %p\n", key.c_str(), value); + } + auto ret = fmap->find(fname); + return ret == fmap->end() ? nullptr : ret->second; +} + +void initialize_module(const char* module_name, void* module_handle, Context* cxt){ + auto _init_module = reinterpret_cast(dlsym(module_handle, "init_session")); + if (_init_module) { + _init_module(cxt); + } + else { + printf("Warning: module %s have no session support.\n", module_name); + } +} + +int dll_main(int argc, char** argv, Context* cxt){ + Config *cfg = reinterpret_cast(argv[0]); + std::unordered_map user_module_map; + if (cxt->module_function_maps == 0) + cxt->module_function_maps = new std::unordered_map(); + auto module_fn_map = + static_cast*>(cxt->module_function_maps); + + auto buf_szs = cfg->buffer_sizes; + void** buffers = (void**)malloc(sizeof(void*) * cfg->n_buffers); + for (int i = 0; i < cfg->n_buffers; i++) + buffers[i] = static_cast(argv[i + 1]); + + cxt->buffers = buffers; + cxt->cfg = cfg; + cxt->n_buffers = cfg->n_buffers; + cxt->sz_bufs = buf_szs; + cxt->alt_server = NULL; + + while(cfg->running){ + if (cfg->new_query) { + void *handle = 0; + void *user_module_handle = 0; + if (cfg->backend_type == BACKEND_MonetDB){ + if (cxt->alt_server == 0) + cxt->alt_server = new Server(cxt); + Server* server = reinterpret_cast(cxt->alt_server); + if(n_recv > 0){ + if (cfg->backend_type == BACKEND_AQuery || cfg->has_dll) { + handle = dlopen("./dll.so", RTLD_LAZY); + } + for (const auto& module : user_module_map){ + initialize_module(module.first.c_str(), module.second, cxt); + } + cxt->init_session(); + for(int i = 0; i < n_recv; ++i) + { + //printf("%s, %d\n", n_recvd[i], n_recvd[i][0] == 'Q'); + switch(n_recvd[i][0]){ + case 'Q': // SQL query for monetdbe + { + server->exec(n_recvd[i] + 1); + printf("Exec Q%d: %s", i, n_recvd[i]); + } + break; + case 'P': // Postprocessing procedure + if(handle && !server->haserror()) { + code_snippet c = reinterpret_cast(dlsym(handle, n_recvd[i]+1)); + c(cxt); + } + break; + case 'M': // Load Module + { + auto mname = n_recvd[i] + 1; + user_module_handle = dlopen(mname, RTLD_LAZY); + //getlasterror + + if (!user_module_handle) +#ifndef _MSC_VER + puts(dlerror()); +#else + printf("Fatal Error: Module %s failed to load with error code %d.\n", mname, GetLastError()); +#endif + user_module_map[mname] = user_module_handle; + initialize_module(mname, user_module_handle, cxt); + } + break; + case 'F': // Register Function in Module + { + auto fname = n_recvd[i] + 1; + printf("F:: %s: %p, %p\n", fname, user_module_handle, dlsym(user_module_handle, fname)); + module_fn_map->insert_or_assign(fname, dlsym(user_module_handle, fname)); + printf("F::: %p\n", module_fn_map->find("mydiv") != module_fn_map->end() ? 
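dll_main above is driven by one-character opcodes on each received string: 'Q' runs SQL on the MonetDB backend, 'P' resolves a postprocessing symbol in ./dll.so, and 'M'/'F'/'U' load, register functions from, and unload user modules. A hypothetical driver handing a batch over through receive_args — the real frontend is prompt.py; the module path is made up, while "mydiv" and dll_ZF5Shg both appear elsewhere in this patch:

    void submit_batch() {
        static const char* batch[] = {
            "Qcreate table t(a int);",   // 'Q': SQL for the backend
            "Qselect a from t;",
            "M./libusermod.so",          // 'M': dlopen a user module (hypothetical path)
            "Fmydiv",                    // 'F': dlsym "mydiv" into module_fn_map
            "Pdll_ZF5Shg",               // 'P': run this proc from ./dll.so
            "U./libusermod.so",          // 'U': dlclose and forget the module
        };
        receive_args(6, const_cast<char**>(batch));
        // setting cfg->new_query = 1 then lets the dll_main loop consume it
    }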
+
+int launcher(int argc, char** argv){
+#ifdef _WIN32
+    constexpr char sep = '\\';
+#else
+    constexpr char sep = '/';
+#endif
+    std::string str = " ";
+    std::string pwd = "";
+    if (argc > 0)
+        pwd = argv[0];
+
+    auto pos = pwd.find_last_of(sep);
+    if (pos == std::string::npos)
+        pos = 0;
+    pwd = pwd.substr(0, pos);
+    for (int i = 1; i < argc; i++){
+        str += argv[i];
+        str += " ";
+    }
+    str = std::string("cd ") + pwd + std::string(" && python3 ./prompt.py ") + str;
+    return system(str.c_str());
+}
+
+extern "C" int __DLLEXPORT__ main(int argc, char** argv) {
+#ifdef __AQ_BUILD_LAUNCHER__
+    return launcher(argc, argv);
+#endif
+    puts("running");
+    Context* cxt = new Context();
+    cxt->log("%d %s\n", argc, argc > 1 ? argv[1] : "(no argument)"); // guard: argv[1] may not exist
+
+#ifdef THREADING
+    auto tp = new ThreadPool();
+    cxt->thread_pool = tp;
+#endif
+
+    const char* shmname;
+    if (argc < 0)       // negative argc: invoked in-process, argv[0] carries a Config*
+        return dll_main(argc, argv, cxt);
+    else if (argc <= 1) // no shared-memory name given: run the built-in test driver
+        return test_main();
+    else
+        shmname = argv[1];
+    SharedMemory shm = SharedMemory(shmname);
+    if (!shm.pData)
+        return 1;
+    // byte 0 of the mapping is the "running" flag, byte 1 the "ready" flag
+    bool &running = static_cast<bool*>(shm.pData)[0],
+         &ready = static_cast<bool*>(shm.pData)[1];
+    using namespace std::chrono_literals;
+    cxt->log("running: %s\n", running? "true":"false");
+    cxt->log("ready: %s\n", ready? "true":"false");
+    while (running) {
+        std::this_thread::sleep_for(1ms);
+        if(ready){
+            cxt->log("running: %s\n", running? "true":"false");
+            cxt->log("ready: %s\n", ready? "true":"false");
+            void* handle = dlopen("./dll.so", RTLD_LAZY);
+            cxt->log("handle: %p\n", handle);
+            if (handle) {
+                cxt->log("inner\n");
+                code_snippet c = reinterpret_cast<code_snippet>(dlsym(handle, "dllmain"));
+                cxt->log("routine: %p\n", c);
+                if (c) {
+                    cxt->log("inner\n");
+                    cxt->err("return: %d\n", c(cxt));
+                }
+                dlclose(handle); // close the handle so each iteration reloads a fresh dll.so
+            }
+            ready = false;
+        }
+    }
+    shm.FreeMemoryMap();
+    return 0;
+}
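+
+// Host-side sketch of the shared-memory handshake consumed by main() above
+// (hypothetical; this patch only contains the server side). The host creates
+// and maps the flag file, raises "running", compiles dll.so, then raises
+// "ready"; the server clears "ready" once dllmain has run:
+//
+//     int fd = open("shmfile", O_RDWR | O_CREAT, 0600);
+//     ftruncate(fd, 8);
+//     bool* flags = (bool*)mmap(NULL, 8, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+//     flags[0] = true;            // running
+//     /* ...emit and compile dll.so... */
+//     flags[1] = true;            // ready: ask the server to run dllmain
+//     while (flags[1]) ;          // wait until the server consumes the request
+//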
"true":"false"); + void* handle = dlopen("./dll.so", RTLD_LAZY); + cxt->log("handle: %p\n", handle); + if (handle) { + cxt->log("inner\n"); + code_snippet c = reinterpret_cast(dlsym(handle, "dllmain")); + cxt->log("routine: %p\n", c); + if (c) { + cxt->log("inner\n"); + cxt->err("return: %d\n", c(cxt)); + } + } + ready = false; + } + } + shm.FreeMemoryMap(); + return 0; +} + +#include "utils.h" +#include "table_ext_monetdb.hpp" +int test_main() +{ + Context* cxt = new Context(); + if (cxt->alt_server == 0) + cxt->alt_server = new Server(cxt); + Server* server = reinterpret_cast(cxt->alt_server); + + + //TableInfo table("sibal"); + //int col0[] = { 1,2,3,4,5 }; + //float col1[] = { 5.f, 4.f, 3.f, 2.f, 1.f }; + //table.get_col<0>().initfrom(5, col0, "a"); + //table.get_col<1>().initfrom(5, col1, "b"); + //table.monetdb_append_table(server); + // + //server->exec("select * from sibal;"); + //auto aa = server->getCol(0); + //auto bb = server->getCol(1); + //printf("sibal: %p %p\n", aa, bb); + + const char* qs[]= { + "CREATE TABLE test(a INT, b INT, c INT, d INT);", + "COPY OFFSET 2 INTO test FROM 'c:/Users/sunyi/Desktop/AQuery2/data/test2.csv' ON SERVER USING DELIMITERS ',';", + "SELECT (a + b), a,b,c FROM test ;", + }; + n_recv = sizeof(qs)/(sizeof (char*)); + n_recvd = const_cast(qs); + if (n_recv > 0) { + for (int i = 0; i < n_recv; ++i) + { + server->exec(n_recvd[i]); + printf("Exec Q%d: %s\n", i, n_recvd[i]); + } + n_recv = 0; + } + + cxt->log_level = LOG_INFO; + puts(cpp_17 ?"true":"false"); + void* handle = dlopen("./dll.so", RTLD_LAZY); + cxt->log("handle: %p\n", handle); + if (handle) { + cxt->log("inner\n"); + code_snippet c = reinterpret_cast(dlsym(handle, "dll_ZF5Shg")); + cxt->log("routine: %p\n", c); + if (c) { + cxt->log("inner\n"); + cxt->log("return: %d\n", c(cxt)); + } + dlclose(handle); + } + //static_assert(std::is_same_v()), std::integer_sequence>, ""); + + return 0; +} + diff --git a/server/table.h b/server/table.h index e122f73..3a8e250 100644 --- a/server/table.h +++ b/server/table.h @@ -1,631 +1,631 @@ -// TODO: Replace `cout, printf` with sprintf&fputs and custom buffers - -#ifndef _TABLE_H -#define _TABLE_H - -#include "types.h" -#include "vector_type.hpp" -#include -#include -#include -#include "io.h" -#undef ERROR -template -class vector_type; -template <> -class vector_type; - -#ifdef _MSC_VER -namespace types { - enum Type_t; - template - struct Types; - template - struct Coercion; -} -#endif -template -class ColView; -template -class ColRef : public vector_type<_Ty> -{ -public: - typedef ColRef<_Ty> Decayed_t; - const char* name; - types::Type_t ty = types::Type_t::ERROR; - ColRef(const ColRef<_Ty>& vt) : vector_type<_Ty>(vt) {} - ColRef(ColRef<_Ty>&& vt) : vector_type<_Ty>(std::move(vt)) {} - ColRef() : vector_type<_Ty>(0), name("") {} - ColRef(const uint32_t& size, const char* name = "") : vector_type<_Ty>(size), name(name) {} - ColRef(const char* name) : name(name) {} - ColRef(const uint32_t size, void* data, const char* name = "") : vector_type<_Ty>(size, data), name(name) {} - void init(const char* name = "") { ty = types::Types<_Ty>::getType(); this->size = this->capacity = 0; this->container = 0; this->name = name; } - void initfrom(uint32_t sz, void*container, const char* name = "") { - ty = types::Types<_Ty>::getType(); - this->size = sz; - this->capacity = 0; - this->container = (_Ty*)container; - this->name = name; - } - template