Compare commits
19 commits
master
...
ast2_comme
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ae5a3f7e7a | ||
|
|
6895132749 | ||
|
|
42fbaf4c4f | ||
|
|
83a63881f1 | ||
|
|
14e42a0a22 | ||
|
|
e24ced7826 | ||
|
|
2bd8b297a9 | ||
|
|
2faebddcaa | ||
|
|
63367a16a5 | ||
|
|
418e5db825 | ||
|
|
c083b443e4 | ||
|
|
522178913e | ||
|
|
df3e73e965 | ||
|
|
c78dfd087c | ||
|
|
8ecac1f09c | ||
|
|
6a8d05dae2 | ||
|
|
f2975fde55 | ||
|
|
4ea1f0f340 | ||
|
|
dac496e6c2 |
55 changed files with 4601 additions and 5586 deletions
48
.travis.yml
48
.travis.yml
|
|
@ -1,47 +1,20 @@
|
|||
os: linux
|
||||
dist: bionic
|
||||
language: c
|
||||
os:
|
||||
- windows
|
||||
- linux
|
||||
- osx
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- autopoint
|
||||
|
||||
matrix:
|
||||
include:
|
||||
# Linux - amd64
|
||||
- env: BRANCH=0.20.2
|
||||
- env: BRANCH=1.0.8
|
||||
- env: BRANCH=1.2.4
|
||||
- env: BRANCH=devel
|
||||
language: c
|
||||
|
||||
# Linux - arm64
|
||||
# - arch: arm64
|
||||
# env: BRANCH=1.2.4
|
||||
|
||||
# Linux - ppc64
|
||||
# - arch: ppc64le
|
||||
# env: BRANCH=1.2.4
|
||||
|
||||
# macOS - amd64
|
||||
- os: osx
|
||||
env: BRANCH=0.20.2
|
||||
- os: osx
|
||||
env: BRANCH=1.0.8
|
||||
- os: osx
|
||||
env: BRANCH=1.2.4
|
||||
- os: osx
|
||||
env: BRANCH=devel
|
||||
|
||||
# windows - amd64
|
||||
- os: windows
|
||||
env: BRANCH=0.20.2
|
||||
- os: windows
|
||||
env: BRANCH=1.0.8
|
||||
- os: windows
|
||||
env: BRANCH=1.2.4
|
||||
- os: windows
|
||||
env: BRANCH=devel
|
||||
env:
|
||||
- BRANCH=0.20.2
|
||||
- BRANCH=1.0.6
|
||||
- BRANCH=1.2.0
|
||||
- BRANCH=devel
|
||||
|
||||
cache:
|
||||
directories:
|
||||
|
|
@ -53,7 +26,6 @@ install:
|
|||
- source travis.sh
|
||||
|
||||
script:
|
||||
- set -e
|
||||
- nimble develop -y
|
||||
- nimble test
|
||||
- nimble --verbose --nimbleDir:`pwd`/build/fakenimble install nimterop@#head -y
|
||||
|
|
|
|||
158
CHANGES.md
158
CHANGES.md
|
|
@ -1,158 +0,0 @@
|
|||
# Nimterop Change History
|
||||
|
||||
## Version 0.6.0
|
||||
|
||||
This release adds the ability to download precompiled binaries from [Conan.io](https://conan.io/center) and Julia's [BinaryBuilder.org](https://binarybuilder.org). This alleviates the headache of searching and downloading libraries manually both for wrapper writers as well as end users. There are some known limitations but it should prove to become more useful as these sites expand their capabilities.
|
||||
|
||||
Conan.io shared builds tend to have all dependencies statically linked into the binary so a single so/dll/dylib has everything. For Conan.io static builds and all libraries on BinaryBuilder.org, dependencies are also downloaded and linked as needed. They are returned in the new `const xxxLDeps` in case wrapper writers need it for some reason.
|
||||
|
||||
Known concerns:
|
||||
- Conan.io only compiles Windows builds with Microsoft's VC++ compiler so static .lib files may not always work with MinGW on Windows.
|
||||
- Conan.io compiles all Mac builds on OSX 10.14 so older versions of the OS will grumble when statically linking these libraries.
|
||||
- BinaryBuilder.org does not include static libs for all their projects.
|
||||
|
||||
Refer to the documentation for `getHeader()` for details on how to use this new capability.
|
||||
|
||||
See the full list of changes here:
|
||||
|
||||
https://github.com/nimterop/nimterop/compare/v0.5.9...v0.6.5
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- The legacy algorithm has been removed as promised. `ast2` is now the default and wrappers no longer need to explicitly specify `-f:ast2` in order to use it.
|
||||
|
||||
- All shared libraries installed by `getHeader()` will now get copied into the `libdir` parameter specified. If left blank, `libdir` will default to the directory where the executable binary gets created (outdir). While this is not really a breaking change, it is a change in behavior compared to older versions of nimterop. Note that `Std` libraries are not copied over. [#154][i154]
|
||||
|
||||
- `git.nim` has been removed. This module was an artifact from the early days and was renamed to `build.nim` back in v0.2.0.
|
||||
|
||||
- Nameless enum values are no longer typed to the made-up enum type name, they are instead typed as `cint` to match the underlying type. This allows using such enums without having to depend on the made-up name which could change if enum ordering changes upstream. [#236][i236] (since v0.6.1)
|
||||
|
||||
- Static libraries installed and linked with `getHeader()` now have their `{.passL.}` pragmas forwarded to the generated wrapper. This might lead to link errors in existing wrappers if other dependencies are specified with `{.passL.}` calls and the order of linking is wrong. This can be fixed by changing such explicit `{.passL.}` calls with `cPassL()` which will forward the link call to the generated wrapper as well. (since v0.6.5)
|
||||
|
||||
### New functionality
|
||||
|
||||
- `getHeader()` now detects and links against `.lib` files as part of enabling Conan.io. Not all `.lib` files are compatible with MinGW as already stated above but for those that work, this is a required capability.
|
||||
|
||||
- The `dynlib` command line parameter to `toast` and `cImport()` can also be the path to a shared library (dll|so|dylib) in place of a Nim const string containing the path. This allows for the traditional use case of passing `"xxxLPath"` to `cImport()` as well as simply passing the path to the library on the command line as is. This allows the creation of standalone cached wrappers as well as the usage of the `--check` and the `--stub` functionality that `toast` provides via `cImport()`.
|
||||
|
||||
- `gitPull()` now checks if an existing repository is at the `checkout` value specified. If not, it will pull the latest changes and checkout the specified commit, tag or branch.
|
||||
|
||||
- `cImport()` can now write the generated wrapper output to a user-defined file with the `nimFile` param. [#127][i127] (since v0.6.1)
|
||||
|
||||
- Nimterop now supports anonymous nested structs/unions but it only works correctly for unions when `noHeader` is turned off (the default). This is because Nim does not support nested structs/unions and is unaware of the underlying memory structure. [#237][i237] (since v0.6.1)
|
||||
|
||||
- `xxxJBB` now allows for customizing the base location to search packages with the `jbbFlags` param to `getHeader()`. Specifying `giturl=xxx` where `xxx` could be a full Git URL or just the username for Github.com allows changing the default Git repo. In addition, `url=xxx` is also supported to download project info and binaries compiled with BinaryBuilder.org but hosted at another non-Git location. (since v0.6.3)
|
||||
|
||||
- It is now possible to exclude the contents of specific files or entire directories from the wrapped output using `--exclude | -X` with `toast` or `cExclude()` from a wrapper. This might be required when a header uses `#include` to pull in external dependencies. E.g. `sciter` has a `#include <gtk/gtk.h>` which pulls in the entire GTK ecosystem which is needed for successful preprocessing but we do not want to include those headers in the wrapped output when using `--recurse | -r`. (since v0.6.4)
|
||||
|
||||
- All `cDefine()`, `cIncludeDir()` and `cCompile()` calls now forward relevant pragmas into the generated wrapper further enabling standalone wrappers. [#239][i239]
|
||||
|
||||
- Added `cPassC()` and `cPassL()` to forward C/C++ compilation pragmas into the generated wrapper. These should be used in place of `{.passC.}` and `{.passL.}` and need to be called before `cImport()` to take effect. (since v0.6.5)
|
||||
|
||||
- Added `--compile`, `--passC` and `--passL` flags to `toast` to enable the previous two improvements. (since v0.6.5)
|
||||
|
||||
- Added `renderPragma()` to create pragmas inline in case `cImport()` is not being used. (since v0.6.5)
|
||||
|
||||
- `xxxConan` and `xxxJBB` now allow skipping required dependencies by specifying `skip=pkg1,pkg2` to the `conanFlags` and `jbbFlags` params to `getHeader()`. (since v0.6.6)
|
||||
|
||||
### Other improvements
|
||||
|
||||
- Generated wrappers no longer depend on nimterop being present - no more `import nimterop/types`. Supporting code is directly included in the wrapper output and only when required. E.g. enum macro is only included if wrapper contains enums. [#125][i125] (since v0.6.1)
|
||||
|
||||
- `cImport()` now includes wrapper output from a file rather than inline. Errors in generated wrappers will no longer point to a line in `macros.nim` making debugging easier. (since v0.6.1)
|
||||
|
||||
- `cIncludeDir()` can now accept a `seq[string]` of directories and an optional `exclude` param which sets those include directories to not be included in the wrapped output. (since v0.6.4)
|
||||
|
||||
- `cDefine()` can now accept a `seq[string]` of values. (since v0.6.5)
|
||||
|
||||
|
||||
## Version 0.5.0
|
||||
|
||||
This release introduces a new backend for wrapper generation dubbed `ast2` that leverages the Nim compiler AST and renderer. The new design simplifies feature development and already includes all the functionality of the legacy algorithm plus fixes for several open issues.
|
||||
|
||||
The new backend can be leveraged with the `-f:ast2` flag to `toast` or `flags = "-f:ast2"` to `cImport()`. The legacy algorithm will be the default backend for this release but no new functionality or bugfixes are expected going forward. Usage of the legacy algorithm will display a *deprecated* hint to encourage users to test their wrappers with `-f:ast2` and remove any overrides that the new algorithm supports.
|
||||
|
||||
Version 0.6.0 of Nimterop will make `ast2` the default backend and the legacy algorithm will be removed altogether.
|
||||
|
||||
See the full list of changes here:
|
||||
|
||||
https://github.com/nimterop/nimterop/compare/v0.4.4...v0.5.4
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Nimterop used to default to C++ mode for preprocessing and tree-sitter parsing in all cases unless explicitly informed to use C mode. This has been changed and is now detected based on the file extension. This means some existing wrappers could break since they might contain C++ code or include C++ headers like `#include <string>` which will not work in C mode. Explicitly setting `mode = "cpp"` or `-mcpp` should fix such issues. [#176][i176]
|
||||
|
||||
- Enums were originally being mapped to `distinct int` - this has been changed to `distinct cint` since the sizes are incorrect on 64-bit and is especially noticeable when types or unions have enum fields.
|
||||
|
||||
- `static inline` functions are no longer wrapped by the legacy backend. The `ast2` backend correctly generates wrappers for such functions but they are only generated when `--noHeader | -H` is not in effect. This is because such functions do not exist in the binary and can only be referenced when the header is compiled in.
|
||||
|
||||
- Support for Nim v0.19.6 has been dropped and the test matrix now covers v0.20.2, v1.0.6, v1.2.0 and devel.
|
||||
|
||||
### New functionality
|
||||
|
||||
- Nimterop can now skip generating the `{.header.}` pragma when the `--noHeader | -H` flag is used. This skips the header file `#include` in the generated code and allows creation of wrappers that do not require presence of the header during compile time. Note that `static inline` functions will only be wrapped when the header is compiled in. This change applies to both `ast2` and the legacy backend, although `ast2` can also generate wrappers with both `{.header.}` and `{.dynlib.}` in effect enabling type size checking with `-d:checkAbi`. More information is available in the [README.md](README.md). [#169][i169]
|
||||
|
||||
- `ast2` includes support for various C constructs that were issues with the legacy backend. These changes should reduce the reliance on `cOverride()` and existing wrappers should attempt to clean up such sections where possible.
|
||||
- N-dimensional arrays and pointers - [#54][i54]
|
||||
- Synonyms for types - [#74][i74]
|
||||
- Varargs support - [#76][i76]
|
||||
- Nested structs, unions and enums - [#137][i137] [#147][i147]
|
||||
- Forward declarations of types - [#148][i148]
|
||||
- Nested function pointers - [#155][i155] [#156][i156]
|
||||
- Various enum fixes - [#159][i159] [#171][i171]
|
||||
- Map `int arr[]` to `arr: UncheckedArray[cint]` - [#174][i174]
|
||||
- Global variables including arrays and procs (since v0.5.4)
|
||||
|
||||
- `ast2` also includes an advanced expression parser that can reliably handle constructs typically seen with `#define` statements and enumeration values:
|
||||
- Integers + integer like expressions (hex, octal, suffixes)
|
||||
- Floating point expressions
|
||||
- Strings and character literals, including C's escape characters
|
||||
- Math operators `+ - / *`
|
||||
- Some Unary operators `- ! ~`
|
||||
- Any identifiers
|
||||
- C type descriptors `int char` etc
|
||||
- Boolean values `true false`
|
||||
- Shift, cast, math or sizeof expressions
|
||||
- Most type coercions
|
||||
|
||||
- Wrappers can now point to an external plugin file with `cPluginPath()` instead of having to declaring plugins inline with `cPlugin()`. This allows multiple wrappers to share the same plugin. [#181][i181]
|
||||
|
||||
- `cImport()` adds support for importing multiple headers in a single call - this enables support for libraries that have many header files that include shared headers and typically cannot be imported in multiple `cImport()` calls since it results in duplicate symbols. Calling `toast` with multiple headers uses the same algorithm.
|
||||
|
||||
- `ast2` now creates Nim doc comments instead of regular comments which get rendered when the wrapper is run through `nim doc` or the `buildDocs()` API. [#197][i197]
|
||||
|
||||
- `toast` now includes `--replace | -G` to manipulate identifier names beyond `--prefix` and `--suffix`. `-G:X=Y` replaces X with Y and `-G:@_[_]+=_` replaces multiple `_` with a single instance using the `@` prefix to enable regular expressions.
|
||||
|
||||
- `toast` also includes `--typeMap | -T` to map C types to another type. E.g. `--typeMap:GLint64=int64` generates a wrapper where all instances of `GLint64` are remapped to the Nim type `int64` and `GLint64` is not defined. (since v0.5.2)
|
||||
|
||||
- CLI flags can now be specified one or more per line in a file and path provided to `toast`. They will be expanded in place. [#196][i196] (since v0.5.3)
|
||||
|
||||
- Nimterop is now able to detect Nim configuration of projects and can better handle cases where defaults such as `nimcacheDir` or `nimblePath` are overridden. This especially enables better interop with workflows that do not depend on Nimble. [#151][i151] [#153][i153]
|
||||
|
||||
- Nimterop defaults to `cmake`, followed by `autoconf` for building libraries with `getHeader()`. It is now possible to change the order of discovery with the `buildType` value. [#200][i200]
|
||||
|
||||
[i54]: https://github.com/nimterop/nimterop/issues/54
|
||||
[i74]: https://github.com/nimterop/nimterop/issues/74
|
||||
[i76]: https://github.com/nimterop/nimterop/issues/76
|
||||
[i125]: https://github.com/nimterop/nimterop/issues/125
|
||||
[i127]: https://github.com/nimterop/nimterop/issues/127
|
||||
[i137]: https://github.com/nimterop/nimterop/issues/137
|
||||
[i147]: https://github.com/nimterop/nimterop/issues/147
|
||||
[i148]: https://github.com/nimterop/nimterop/issues/148
|
||||
[i151]: https://github.com/nimterop/nimterop/issues/151
|
||||
[i153]: https://github.com/nimterop/nimterop/issues/153
|
||||
[i154]: https://github.com/nimterop/nimterop/issues/154
|
||||
[i155]: https://github.com/nimterop/nimterop/issues/155
|
||||
[i156]: https://github.com/nimterop/nimterop/issues/156
|
||||
[i159]: https://github.com/nimterop/nimterop/issues/159
|
||||
[i169]: https://github.com/nimterop/nimterop/issues/169
|
||||
[i171]: https://github.com/nimterop/nimterop/issues/171
|
||||
[i174]: https://github.com/nimterop/nimterop/issues/174
|
||||
[i176]: https://github.com/nimterop/nimterop/issues/176
|
||||
[i181]: https://github.com/nimterop/nimterop/issues/181
|
||||
[i196]: https://github.com/nimterop/nimterop/issues/196
|
||||
[i197]: https://github.com/nimterop/nimterop/issues/197
|
||||
[i200]: https://github.com/nimterop/nimterop/issues/200
|
||||
[i236]: https://github.com/nimterop/nimterop/issues/236
|
||||
[i237]: https://github.com/nimterop/nimterop/issues/237
|
||||
[i239]: https://github.com/nimterop/nimterop/issues/239
|
||||
117
README.md
117
README.md
|
|
@ -28,15 +28,12 @@ This will download and install nimterop in the standard Nimble package location,
|
|||
|
||||
## Usage
|
||||
|
||||
Nimterop can be used in three ways:
|
||||
Nimterop can be used in two ways:
|
||||
- Creating a wrapper file - a `.nim` file that contains calls to the high-level API that can download and build the C library as well as generate the required Nim code to interface with the library. This wrapper file can then be imported into Nim code like any other module and it will be processed at compile time.
|
||||
- Same as the first option except using the `nimFile` param to `cImport()` to write the generated wrapper to a file during build time just once and then importing that generated wrapper into the application like any other Nim module.
|
||||
- Using the command line `toast` tool to generate the Nim code which can then be stored into a file and imported separately.
|
||||
|
||||
Any combination of the above is possible - only download, build or wrapping and nimterop avoids imposing any particular workflow.
|
||||
|
||||
Refer to [CHANGES.md](CHANGES.md) for history and information around breaking changes.
|
||||
|
||||
### Build API
|
||||
|
||||
Creating a wrapper has two parts, the first is to setup the C library. This includes downloading it or finding it if already installed, and building it if applicable. The `getHeader()` high-level API provides all of this functionality as a convenience. The following `.nim` wrapper file is an example of using the high-level `getHeader()` API to perform all building, wrapping and linking automatically:
|
||||
|
|
@ -54,8 +51,6 @@ getHeader(
|
|||
"header.h", # The header file to wrap, full path is returned in `headerPath`
|
||||
giturl = "https://github.com/username/repo", # Git repo URL
|
||||
dlurl = "https://website.org/download/repo-$1.tar.gz", # Download URL for archive or raw file
|
||||
conanuri = "repo/$1", # Conan.io URI
|
||||
jbburi = "repo/$1", # BinaryBuilder.org URI
|
||||
outdir = baseDir, # Where to download/build/search
|
||||
conFlags = "--disable-comp --enable-feature", # Flags to pass configure script
|
||||
cmakeFlags = "-DENABLE_STATIC_LIB=ON" # Flags to pass to Cmake
|
||||
|
|
@ -64,7 +59,7 @@ getHeader(
|
|||
|
||||
# Wrap headerPath as returned from getHeader() and link statically
|
||||
# or dynamically depending on user input
|
||||
when not isDefined(headerStatic):
|
||||
when not defined(headerStatic):
|
||||
cImport(headerPath, recurse = true, dynlib = "headerLPath") # Pass dynlib if not static link
|
||||
else:
|
||||
cImport(headerPath, recurse = true)
|
||||
|
|
@ -72,46 +67,41 @@ else:
|
|||
|
||||
Module documentation for the build API can be found [here](https://nimterop.github.io/nimterop/build.html). Refer to the ```tests``` directory for additional examples on how the library can be used. Also, check out the [wiki](https://github.com/nimterop/nimterop/wiki/Wrappers) for a list of all known wrappers that have been created using nimterop. They will provide real world examples of how to wrap libraries. Please do add your project once you are done so that others can benefit from your work.
|
||||
|
||||
#### Download / Search
|
||||
__Download / Search__
|
||||
|
||||
The above wrapper is generic and allows the end user to control how it works. Note that `headerPath` is derived from `header.h` so if you have `SDL.h` as the argument to `getHeader()`, it generates `SDLPath` and `SDLLPath` and is controlled by `-d:SDLStatic`, `-d:SDLGit` and so forth.
|
||||
|
||||
- If the library is already installed in `/usr/include` then the `-d:headerStd` define to Nim can be used to instruct `getHeader()` to search for `header.h` in the standard system path.
|
||||
- If the library needs to be downloaded, the user can use `-d:headerGit` to clone the source from the specified git URL, `-d:headerDL` to get the source from download URL, `-d:headerConan` to download from https://conan.io/center or `-d:headerJBB` to download from https://binarybuilder.org.
|
||||
- The `-d:headerSetVer=X.Y.Z` flag can be used to specify which version to download. It is used as the tag name for Git and for DL, Conan and JBB, it replaces `$1` in the URL if specified.
|
||||
- If the library needs to be downloaded, the user can use `-d:headerGit` to clone the source from the specified git URL or `-d:headerDL` to get the source from download URL.
|
||||
- The `-d:headerSetVer=X.Y.Z` flag can be used to specify which version to download. It is used as the tag name for Git whereas for DL, it replaces `$1` in the URL if defined.
|
||||
- If no flag is provided, `getHeader()` simply looks for the library in `outdir`. The user could use Git submodules or manually download or check-in the library to that directory and `getHeader()` will use it directly.
|
||||
|
||||
#### Pre build
|
||||
__Pre build__
|
||||
|
||||
`getHeader()` provides a `headerPreBuild()` hook that gets called after the library is downloaded but before it is built. This allows for any manipulations of the source files or build scripts before build. [archive](https://github.com/genotrance/nimarchive/blob/master/nimarchive/archive.nim) has such an example.
|
||||
|
||||
The build API also includes various compile time helper procs that aid in file manipulation, Cmake shortcuts, library linking, etc. Refer to [build](https://nimterop.github.io/nimterop/build.html) for more details.
|
||||
|
||||
#### Build
|
||||
__Build__
|
||||
|
||||
Nimterop currently supports `configure` and `cmake` based building of libraries, with `cmake` taking precedence if a project supports both. Nimterop verifies that the tool selected is available and notifies the user if any issues are found. Bash is required on Windows for `configure` and the binary shipped with Git has been tested.
|
||||
|
||||
Flags can be specified to these tools via `getHeader()` or directly via the underlying `configure()` and `cmake()` calls. Once the build scripts are ready, `getHeader()` then calls `make()`. At every step, `getHeader()` checks for the presence of created artifacts and does not redo steps that have been successfully completed.
|
||||
|
||||
#### Linking
|
||||
__Linking__
|
||||
|
||||
If `-d:headerStatic` is specified, `getHeader()` will return the static library path in `headerLPath`. The wrapper writer can check for this and call `cImport()` accordingly as in the example above. If `-d:headerStatic` is omitted, the dynamic library is returned in `headerLPath`.
|
||||
- If `-d:headerStatic` is specified, `getHeader()` will return the static library path in `headerLPath`. The wrapper writer can check for this and call `cImport()` accordingly as in the example above. If it is omitted, the dynamic library is returned in `headerLPath`.
|
||||
- `getHeader()` searches for libraries based on the header name by default:
|
||||
- `libheader.so` or `libheader.a` on Linux
|
||||
- `libheader.dylib` on OSX
|
||||
- `header.dll` or `header.a` on Windows
|
||||
- If a library has a different header and library binary name, `altNames` can be used to configure an alternate name for the library binary.
|
||||
- For example, Bzip2 has `bzlib.h` but the library is `libbz2.so` so `altNames = "bz2"`.
|
||||
- In the example above, `altNames = "hdr"` so `getHeader()` will look for `libhdr.so`, `hdr.dll`, etc.
|
||||
- See [bzlib.nim](https://github.com/genotrance/nimarchive/blob/master/nimarchive/bzlib.nim) for an example.
|
||||
- [lzma.nim](https://github.com/nimterop/nimterop/blob/master/tests/lzma.nim) is an example of a library that allows both static and dynamic linking.
|
||||
|
||||
All dependency libraries (supported by Conan and JBB) will be returned in `headerLDeps`. Static libraries and dependencies are automatically linked using `cPassL()`. Conan shared libs typically include dependencies compiled in whereas JBB shared libs expect the required dependencies to be in the same location or in `LD_LIBRARY_PATH`. `conanFlags` and `jbbFlags` can be used to skip required dependencies from being downloaded in case another source is preferred. This can be done with `skip=pkg1,pkg2` to these flags.
|
||||
|
||||
`getHeader()` searches for libraries based on the header name by default:
|
||||
- `libheader.so` or `libheader.a` on Linux
|
||||
- `libheader.dylib` on OSX
|
||||
- `header.dll`, `header.a` or `header.lib` on Windows
|
||||
|
||||
If a library has a different header and library binary name, `altNames` can be used to configure an alternate name of library binary.
|
||||
- For example, Bzip2 has `bzlib.h` but the library is `libbz2.so` so `altNames = "bz2"`.
|
||||
- In the example above, `altNames = "hdr"` so `getHeader()` will look for `libhdr.so`, `hdr.dll`, etc.
|
||||
- See [bzlib.nim](https://github.com/genotrance/nimarchive/blob/master/nimarchive/bzlib.nim) for an example.
|
||||
|
||||
[lzma.nim](https://github.com/nimterop/nimterop/blob/master/tests/lzma.nim) is an example of a library that allows both static and dynamic linking.
|
||||
|
||||
#### User control
|
||||
__User control__
|
||||
|
||||
The `-d:xxxYYY` Nim define flags have already been described above and can be specified on the command line or in a nim.cfg file. It is also possible to specify them within the wrapper itself using `setDefines()` if required. Further, all defines, regardless of how they are specified, can be generically checked using `isDefined()`.
|
||||
|
||||
|
|
@ -132,18 +122,15 @@ cDefine("HAS_ABC") # Set #defines for preprocessor and compiler
|
|||
cDefine("HAS_ABC", "DEF")
|
||||
|
||||
cIncludeDir("clib/include") # Setup any include directories
|
||||
cExclude("clib/file.h") # Exclude file from wrapped output
|
||||
|
||||
cImport("clib.h") # Generate wrappers for header specified
|
||||
|
||||
cCompile("clib/src/*.c") # Compile in any implementation source files
|
||||
```
|
||||
|
||||
All `{.compileTime.}` procs must be used in a compile time context, like `cDebug()` and `cDisableCaching()` above.
|
||||
|
||||
Module documentation for the wrapper API can be found [here](https://nimterop.github.io/nimterop/cimport.html).
|
||||
|
||||
#### Preprocessing
|
||||
__Preprocessing__
|
||||
|
||||
In order to leverage the preprocessor, certain projects might need `cDefine()` calls to set `#define` values. Simpler libraries may have documentation that covers this but larger ones will rely on build tools that discover and set values in a `config.h` which is loaded with `#include`. Projects might also require some `cIncludeDir()` calls to specify paths to directories that contain other headers. This might be within the library or refer to another library.
|
||||
|
||||
|
|
@ -151,7 +138,7 @@ The wrapper API always runs headers through the C preprocessor before wrapping.
|
|||
|
||||
By default, the `$CC` environment variable is used for the compiler path. If not found, `toast` defaults to `gcc`.
|
||||
|
||||
#### Wrapping
|
||||
__Wrapping__
|
||||
|
||||
The `cImport()` call invokes the `toast` binary with appropriate command line flags including any `cDefine()` and `cInclude()` parameters configured. The output of `toast` is then pulled into the module as Nim code and printed if `cDebug()` is specified. This allows for an end user to simply import the wrapper into their code and access the library API as Nim types and procs. Output is cached to save time on subsequent runs. It is also possible to just redirect the output to a file and import that instead if preferred.
|
||||
|
||||
|
|
@ -159,44 +146,13 @@ The `recurse` flag can be set to enable the recursion capability which runs thro
|
|||
|
||||
There may be cases where the wrapper generated by `toast` for certain types or procs is not preferred, or may be skipped or altogether wrong due to limitations or bugs. In these instances, the `cOverride()` macro can be used to define consts, types or procs to use in place of the wrapper generated output. `cImport()` will forward this information to `toast` and the values will be inserted in context in the generated wrapper. This allows wrapper authors to work around tool limitations or to improve the wrapper output - say change `ptr X` to `var X` or to create more Nim friendly types or proc signatures.
|
||||
|
||||
Several C libraries also use leading and/or trailing `_` in identifiers and since Nim does not allow this, the `cPlugin()` macro can be used to modify such symbols or `cSkipSymbol()` them altogether. Instead of a full `cPlugin()` section, it might also be preferred to set `flags = "-E_ -F_"` to the `cImport()` call to trim out such characters. These features can also be used to remove common prefixes like `SDL_` to generate a cleaner wrapper. The `--replace | -G` flag can be used for replacements. `cPlugin()` is real Nim code though so anything Nim allows is fair game. Note that `cPlugin()` overrides any `-E -F -G` flags. Also, behind the scenes, `cOverride()` is communicated to `toast` via `cPlugin()`.
|
||||
Several C libraries also use leading and/or trailing `_` in identifiers and since Nim does not allow this, the `cPlugin()` macro can be used to modify such symbols or `cSkipSymbol()` them altogether. Instead of a full `cPlugin()` section, it might also be preferred to set `flags = "-E_ -F_"` to the `cImport()` call to trim out such characters. These features can also be used to remove common prefixes like `SDL_` to generate a cleaner wrapper. `cPlugin()` is real Nim code though so anything Nim allows is fair game. Note that `cPlugin()` overrides any `-E -F` flags. Also, behind the scenes, `cOverride()` is communicated to `toast` via `cPlugin()`.
|
||||
|
||||
If the same `cPlugin()` is needed in multiple wrapper files, the code can be moved into a standalone file and be used with the `cPluginPath()` call.
|
||||
|
||||
Lastly, `c2nImport()` provides access to calling `c2nim` from the wrapper instead of `toast`. Note that `c2nImport()` does not use any of the above described features like `cPlugin()` and needs to be controlled with `c2nim` specific flags via the `flags` param.
|
||||
Lastly, `c2nImport()` provides access to calling `c2nim` from the wrapper instead of `toast`. Note that `c2nImport()` does not use any of the above described features like `cPlugin()` and needs to be controlled with the `flags` param.
|
||||
|
||||
#### Header vs. Dynlib
|
||||
|
||||
Nim provides some flexibility when it comes to using C/C++ libraries. In order to understand this better, some Nim pragmas need to be introduced. The main one is `{.importc.}` which informs Nim to use a symbol defined in a C library. This applies to both types and procs but how Nim should find the symbol is slightly different for each.
|
||||
|
||||
For types, `{.header: "header.h".}` informs Nim that `header.h` has the symbol and to `#include "header.h"` in the generated code. However, types can be mostly recreated in pure Nim so it is also possible to omit both `{.importc.}` and `{.header}` and it will work just fine except with a different name in the generated C code. This allows the user to compile the wrapper without requiring `header.h` to be present.
|
||||
|
||||
For functions, `{.header.}` works the same as types and can be omitted if preferred. The `{.importc.}` pragma is still required, unlike types since functions need to be linked to the implementation in the library. The user will need to provide this information at link time with `cPassL()` and linking to a library with `-lheader` or `path/to/libheader.a`. It is also possible to just use `cCompile()` or `{.compile.}` to compile some C source files which contain the implementation.
|
||||
|
||||
While `{.header.}` can be omitted for convenience, it does prevent wrapping of `static inline` functions as well as type checking of the wrapper ABI with `-d:checkAbi` at compile time. Further, anonymous nested structs/unions within unions will be rendered incorrectly by Nim since it is unaware of the true memory structure of the type. The user will need to choose based on the library in question.
|
||||
|
||||
Going further, the `{.dynlib: "path/to/libheader.so".}` pragma can be used to inform Nim to load the library at runtime and link the function instead of linking at compile time. This enables creation of a wrapper that does not need the library present at compile time.
|
||||
|
||||
Now that this is understood, a user might want any combination of the above in the wrapper rendered by Nimterop. This can be controlled with various flags to `cImport()` and `toast`.
|
||||
- By default, generated wrappers will include the `{.header, importc.}` pragmas for types and procs. This can be disabled with the `--noHeader | -H` flag to `toast` or `flags = "-H"` param to `cImport()` which will remove `{.header}` for both and `{.importc.}` for types only.
|
||||
- By default, generated wrappers will assume that the user will link the library implementation themselves. The `--dynlib | -l` flag to `toast` or `dynlib = "headerLPath"` param to `cImport()` will configure the wrapper to generate `{.dynlib.}` pragmas for procs.
|
||||
|
||||
This results in four supported cases:
|
||||
1. Default: `{.header, importc.}` for both types and procs
|
||||
2. With `--noHeader`, types will be pure Nim and procs will be just `{.importc.}`
|
||||
3. With `--dynlib`, types will still be `{.header, importc.}` but procs will be `{.dynlib, importc.}`
|
||||
4. With `--dynlib` and `--noHeader`, types will be pure Nim, procs will be `{.dynlib, importc.}`
|
||||
|
||||
Creation of a standalone wrapper (case 4) which does not require the header or library at compile time will require an explicit `--noHeader` and `--dynlib`.
|
||||
|
||||
More documentation on on these pragmas can be found in the Nim manual:
|
||||
- [{.importc.}](https://nim-lang.org/docs/manual.html#foreign-function-interface-importc-pragma)
|
||||
- [{.header.}](https://nim-lang.org/docs/manual.html#implementation-specific-pragmas-header-pragma)
|
||||
- [{.dynlib.}](https://nim-lang.org/docs/manual.html#foreign-function-interface-dynlib-pragma-for-import)
|
||||
- [{.passL.}](https://nim-lang.org/docs/manual.html#implementation-specific-pragmas-passl-pragma)
|
||||
- [{.compile.}](https://nim-lang.org/docs/manual.html#implementation-specific-pragmas-compile-pragma)
|
||||
|
||||
#### Compiling the source
|
||||
__Compiling source__
|
||||
|
||||
The job of building and compiling the underlying C library is best left to the build mechanism selected by the library author so using `getHeader()` is recommended. For simpler projects with a few `.c` files though, `cCompile()` should be more than enough. It is not recommended for larger projects which heavily rely on functionality offered by build tools. Recreating reliable logic in Nim can be tedious and one can expect minimal support from that author if their tested build mechanism is not used.
|
||||
|
||||
|
|
@ -206,45 +162,40 @@ Nimterop also provides a [docs](https://nimterop.github.io/nimterop/docs.html) A
|
|||
|
||||
### Command line API
|
||||
|
||||
The `toast` binary can also be used directly on the CLI, similar to `c2nim`. These flags can be specified on the command line or via a file, one or more flags per line, and the path provided to `toast` instead, or a combination. The file contents will be expanded in place.
|
||||
The `toast` binary can also be used directly on the CLI, similar to `c2nim`. The `cPlugin()` interface
|
||||
|
||||
Note: unlike the wrapper API, the `-p | --preprocess` flag is not enabled by default but is *highly* recommended.
|
||||
|
||||
```
|
||||
> toast -h
|
||||
Usage:
|
||||
main [optional-params] C/C++ source/header(s) and command line file(s)
|
||||
main [optional-params] C/C++ source/header
|
||||
Options:
|
||||
-h, --help print this cligen-erated help
|
||||
--help-syntax advanced: prepend,plurals,..
|
||||
-k, --check bool false check generated wrapper with compiler
|
||||
--compile= strings {} create {.compile.} entries in generated wrapper
|
||||
-C=, --convention= string "cdecl" calling convention for wrapped procs
|
||||
-d, --debug bool false enable debug output
|
||||
-D=, --defines= strings {} definitions to pass to preprocessor
|
||||
-l=, --dynlib= string "" {.dynlib.} pragma to import symbols - Nim const string or
|
||||
file path
|
||||
-X=, --exclude= strings {} files or directories to exclude from the wrapped output
|
||||
-l=, --dynlib= string "" import symbols from library in specified Nim string
|
||||
-f=, --feature= Features {} flags to enable experimental features
|
||||
-H, --includeHeader bool false add {.header.} pragma to wrapper
|
||||
-I=, --includeDirs= strings {} include directory to pass to preprocessor
|
||||
-m=, --mode= string "" language parser: c or cpp
|
||||
--nim= string "nim" use a particular Nim executable
|
||||
-c, --noComments bool false exclude top-level comments from output
|
||||
-H, --noHeader bool false skip {.header.} pragma in wrapper
|
||||
-o=, --output= string "" file to output content - default: stdout
|
||||
--passC= strings {} create {.passC.} entries in generated wrapper
|
||||
--passL= strings {} create {.passL.} entries in generated wrapper
|
||||
-c, --nocomments bool false exclude top-level comments from output
|
||||
-o=, --output= string "" file to output content
|
||||
-a, --past bool false print AST output
|
||||
-g, --pgrammar bool false print grammar
|
||||
--pluginSourcePath= string "" nim file to build and load as a plugin
|
||||
-n, --pnim bool false print Nim output
|
||||
-E=, --prefix= strings {} strip prefix from identifiers
|
||||
-p, --preprocess bool false run preprocessor on header
|
||||
-r, --recurse bool false process #include files - implies --preprocess
|
||||
-r, --recurse bool false process #include files
|
||||
-G=, --replace= strings {} replace X with Y in identifiers, X1=Y1,X2=Y2, @X for regex
|
||||
-s, --stub bool false stub out undefined type references as objects
|
||||
-F=, --suffix= strings {} strip suffix from identifiers
|
||||
-O=, --symOverride= strings {} skip generating specified symbols
|
||||
-T=, --typeMap= strings {} map instances of type X to Y - e.g. ABC=cint
|
||||
```
|
||||
|
||||
## Why nimterop
|
||||
|
|
@ -259,7 +210,7 @@ The tree-sitter library is limited though - it may fail on some advanced languag
|
|||
|
||||
It is debatable whether a syntax highlighting engine like `tree-sitter` is the most reliable method to convert C code into AST. However, it is lightweight, cross-platform with no dependencies and handles error conditions gracefully. It has produced usable wrappers for C libraries though things could get murky when considering C++ but that will be a topic for another day. Nimterop relies heavily on the preprocessor, as discussed next, so having an engine which can run anywhere has been worth the compromise. Only time will tell though.
|
||||
|
||||
### Preprocessing
|
||||
__Preprocessing__
|
||||
|
||||
The wrapper API always runs headers through the C preprocessor before wrapping, unlike the command line interface where the `-p | --preprocess` flag is not set by default but *highly* recommended. This is because almost all platform, compiler and package discovery is handled by build tools like `configure` and `cmake` which then use preprocessor `#define` values to tweak what C code is applicable for that platform. While parsing preprocessor macros is possible in tools like `toast`, given how dependent the `#ifdef` branches are on values provided by these and many other build tools, preprocessing seems is best left to them than attempting to self-discover or intercept that information.
|
||||
|
||||
|
|
@ -273,8 +224,6 @@ This is part of the reason why Nimterop provides a wrapper API so that the gener
|
|||
|
||||
Nimterop depends on [tree-sitter](http://tree-sitter.github.io/tree-sitter/) and all licensing terms of [tree-sitter](https://github.com/tree-sitter/tree-sitter/blob/master/LICENSE) apply to the usage of this package. The tree-sitter functionality is pulled and wrapped using nimterop itself.
|
||||
|
||||
Thank you to all the [contributors](https://github.com/nimterop/nimterop/graphs/contributors), issue submitters, various people in [#nim](irc://freenode.net/nim) and users for helping improve Nimterop over the years.
|
||||
|
||||
## Feedback
|
||||
|
||||
Nimterop is a work in progress and any feedback or suggestions are welcome. It is hosted on [GitHub](https://github.com/nimterop/nimterop) with an MIT license so issues, forks and PRs are most appreciated.
|
||||
|
|
|
|||
|
|
@ -10,8 +10,7 @@ matrix:
|
|||
environment:
|
||||
matrix:
|
||||
- NIM_VERSION: 0.20.2
|
||||
- NIM_VERSION: 1.0.8
|
||||
- NIM_VERSION: 1.2.4
|
||||
- NIM_VERSION: 1.0.6
|
||||
|
||||
for:
|
||||
-
|
||||
|
|
|
|||
|
|
@ -1,15 +1,15 @@
|
|||
# Package
|
||||
|
||||
version = "0.6.13"
|
||||
version = "0.5.0"
|
||||
author = "genotrance"
|
||||
description = "C/C++ interop for Nim"
|
||||
license = "MIT"
|
||||
|
||||
bin = @["nimterop/toast", "nimterop/loaf"]
|
||||
bin = @["nimterop/toast"]
|
||||
installDirs = @["nimterop"]
|
||||
|
||||
# Dependencies
|
||||
requires "nim >= 0.20.2", "regex >= 0.15.0", "cligen >= 1.5.3"
|
||||
requires "nim >= 0.20.2", "regex >= 0.14.1", "cligen >= 0.9.45"
|
||||
|
||||
import nimterop/docs
|
||||
import os
|
||||
|
|
@ -18,29 +18,23 @@ proc execCmd(cmd: string) =
|
|||
exec "tests/timeit " & cmd
|
||||
|
||||
proc execTest(test: string, flags = "", runDocs = true) =
|
||||
execCmd "nim c --hints:off -f -d:checkAbi " & flags & " -r " & test
|
||||
let
|
||||
# -d:checkAbi broken in cpp mode until post 1.2.0
|
||||
cppAbi = when (NimMajor, NimMinor) >= (1, 3): "-d:checkAbi " else: ""
|
||||
execCmd "nim cpp --hints:off " & cppAbi & flags & " -r " & test
|
||||
execCmd "nim c --hints:off -f " & flags & " -r " & test
|
||||
execCmd "nim cpp --hints:off " & flags & " -r " & test
|
||||
|
||||
if runDocs:
|
||||
let docPath = "build/html_" & test.extractFileName.changeFileExt("") & "_docs"
|
||||
rmDir docPath
|
||||
mkDir docPath
|
||||
buildDocs(@[test], docPath, nimArgs = "--hints:off " & flags)
|
||||
buildDocs(@[test], docPath, nimArgs = flags)
|
||||
|
||||
task buildToast, "build toast":
|
||||
execCmd("nim c --hints:off nimterop/toast.nim")
|
||||
|
||||
task buildTimeit, "build timer":
|
||||
exec "nim c --hints:off -d:danger tests/timeit"
|
||||
|
||||
task buildLoaf, "build loaf":
|
||||
execCmd("nim c --hints:off -d:danger nimterop/loaf.nim")
|
||||
|
||||
task buildToast, "build toast":
|
||||
execCmd("nim c --hints:off -d:danger nimterop/toast.nim")
|
||||
|
||||
task bt, "build toast":
|
||||
buildToastTask()
|
||||
execCmd("nim c --hints:off -d:danger nimterop/toast.nim")
|
||||
|
||||
task btd, "build toast":
|
||||
execCmd("nim c -g nimterop/toast.nim")
|
||||
|
|
@ -48,25 +42,23 @@ task btd, "build toast":
|
|||
task docs, "Generate docs":
|
||||
buildDocs(@["nimterop/all.nim"], "build/htmldocs")
|
||||
|
||||
task minitest, "Test for Nim CI":
|
||||
exec "nim c -f -d:danger nimterop/loaf.nim"
|
||||
exec "nim c -f -d:danger nimterop/toast"
|
||||
exec "nim c -f -d:checkAbi -r tests/tast2.nim"
|
||||
exec "nim c -f -d:checkAbi -d:zlibJBB -d:zlibSetVer=1.2.11 -r tests/zlib.nim"
|
||||
task test, "Test":
|
||||
buildTimeitTask()
|
||||
buildToastTask()
|
||||
|
||||
task basic, "Basic tests":
|
||||
execTest "tests/tast2.nim"
|
||||
execTest "tests/tast2.nim", "-d:NOHEADER"
|
||||
execTest "tests/tast2.nim", "-d:NOHEADER -d:WRAPPED"
|
||||
execTest "tests/tast2.nim", "-d:HEADER"
|
||||
|
||||
execTest "tests/tnimterop_c.nim"
|
||||
execTest "tests/tnimterop_c.nim", "-d:FLAGS=\"-H\""
|
||||
execTest "tests/tnimterop_c.nim", "-d:FLAGS=\"-f:ast2\""
|
||||
execTest "tests/tnimterop_c.nim", "-d:FLAGS=\"-f:ast2 -H\""
|
||||
|
||||
execCmd "nim cpp --hints:off -f -r tests/tnimterop_cpp.nim"
|
||||
execCmd "./nimterop/toast tests/toast.cfg tests/include/toast.h"
|
||||
execCmd "./nimterop/toast -pnk -E=_ tests/include/toast.h"
|
||||
execCmd "./nimterop/toast -pnk -E=_ -f:ast2 tests/include/toast.h"
|
||||
|
||||
task wrapper, "Wrapper tests":
|
||||
execTest "tests/tpcre.nim"
|
||||
execTest "tests/tpcre.nim", "-d:FLAGS=\"-f:ast2\""
|
||||
|
||||
when defined(Linux):
|
||||
execTest "tests/rsa.nim"
|
||||
|
|
@ -75,35 +67,21 @@ task wrapper, "Wrapper tests":
|
|||
# Platform specific tests
|
||||
when defined(Windows):
|
||||
execTest "tests/tmath.nim"
|
||||
execTest "tests/tmath.nim", "-d:FLAGS=\"-H\""
|
||||
execTest "tests/tmath.nim", "-d:FLAGS=\"-f:ast2\""
|
||||
execTest "tests/tmath.nim", "-d:FLAGS=\"-f:ast2 -H\""
|
||||
if defined(OSX) or defined(Windows) or not existsEnv("TRAVIS"):
|
||||
execTest "tests/tsoloud.nim"
|
||||
execTest "tests/tsoloud.nim", "-d:FLAGS=\"-H\""
|
||||
execTest "tests/tsoloud.nim", "-d:FLAGS=\"-f:ast2\""
|
||||
execTest "tests/tsoloud.nim", "-d:FLAGS=\"-f:ast2 -H\""
|
||||
|
||||
task getheader, "getHeader tests":
|
||||
# getHeader tests
|
||||
withDir("tests"):
|
||||
exec "nim e getheader.nims"
|
||||
|
||||
task package, "Wrapper package tests":
|
||||
if not existsEnv("APPVEYOR"):
|
||||
withDir("tests"):
|
||||
exec "nim e wrappers.nims"
|
||||
|
||||
task test, "Test":
|
||||
rmFile("tests/timeit.txt")
|
||||
|
||||
buildTimeitTask()
|
||||
buildLoafTask()
|
||||
buildToastTask()
|
||||
|
||||
basicTask()
|
||||
|
||||
wrapperTask()
|
||||
|
||||
getheaderTask()
|
||||
|
||||
packageTask()
|
||||
when not defined(Windows):
|
||||
# Skip on Windows since very slow
|
||||
if not existsEnv("APPVEYOR"):
|
||||
exec "nim e wrappers.nims"
|
||||
|
||||
docsTask()
|
||||
|
||||
echo readFile("tests/timeit.txt")
|
||||
echo readFile("tests/timeit.txt")
|
||||
|
|
@ -2,4 +2,4 @@
|
|||
The following modules are available to users of Nimterop.
|
||||
]##
|
||||
|
||||
import "."/[build, cimport, docs, plugin]
|
||||
import "."/[docs, cimport, build, types, plugin]
|
||||
|
|
|
|||
250
nimterop/ast.nim
Normal file
250
nimterop/ast.nim
Normal file
|
|
@ -0,0 +1,250 @@
|
|||
import hashes, macros, os, sets, strformat, strutils, tables
|
||||
|
||||
import regex
|
||||
|
||||
import "."/[getters, globals, treesitter/api]
|
||||
|
||||
proc getHeaderPragma*(gState: State): string =
|
||||
result =
|
||||
if gState.isIncludeHeader():
|
||||
&", header: {gState.currentHeader}"
|
||||
else:
|
||||
""
|
||||
|
||||
proc getDynlib*(gState: State): string =
|
||||
result =
|
||||
if gState.dynlib.nBl:
|
||||
&", dynlib: {gState.dynlib}"
|
||||
else:
|
||||
""
|
||||
|
||||
proc getImportC*(gState: State, origName, nimName: string): string =
|
||||
if nimName != origName:
|
||||
result = &"importc: \"{origName}\"{gState.getHeaderPragma()}"
|
||||
else:
|
||||
result = gState.impShort
|
||||
|
||||
proc getPragma*(gState: State, pragmas: varargs[string]): string =
|
||||
result = ""
|
||||
for pragma in pragmas.items():
|
||||
if pragma.nBl:
|
||||
result &= pragma & ", "
|
||||
if result.nBl:
|
||||
result = " {." & result[0 .. ^3] & ".}"
|
||||
|
||||
result = result.replace(gState.impShort & ", cdecl", gState.impShort & "C")
|
||||
|
||||
let
|
||||
dy = gState.getDynlib()
|
||||
|
||||
if ", cdecl" in result and dy.nBl:
|
||||
result = result.replace(".}", dy & ".}")
|
||||
|
||||
proc saveNodeData(node: TSNode, gState: State): bool =
|
||||
let name = $node.tsNodeType()
|
||||
|
||||
# Atoms are nodes whose values are to be saved
|
||||
if name in gAtoms:
|
||||
let
|
||||
pname = node.getPxName(1)
|
||||
ppname = node.getPxName(2)
|
||||
pppname = node.getPxName(3)
|
||||
ppppname = node.getPxName(4)
|
||||
|
||||
var
|
||||
val = gState.getNodeVal(node)
|
||||
|
||||
# Skip since value already obtained from parent atom
|
||||
if name == "primitive_type" and pname == "sized_type_specifier":
|
||||
return true
|
||||
|
||||
# Skip since value already obtained from parent expression
|
||||
if name in ["number_literal", "identifier"] and pname in gExpressions:
|
||||
return true
|
||||
|
||||
# Add reference point in saved data for bitfield_clause
|
||||
if name in ["number_literal"] and pname == "bitfield_clause":
|
||||
gState.data.add(("bitfield_clause", val))
|
||||
return true
|
||||
|
||||
# Process value as a type
|
||||
if name in ["primitive_type", "sized_type_specifier"]:
|
||||
val = val.getType()
|
||||
|
||||
if node.tsNodePrevNamedSibling().tsNodeIsNull():
|
||||
if pname == "pointer_declarator":
|
||||
if ppname notin ["function_declarator", "array_declarator"]:
|
||||
gState.data.add(("pointer_declarator", ""))
|
||||
elif ppname == "array_declarator":
|
||||
gState.data.add(("array_pointer_declarator", ""))
|
||||
|
||||
# Double pointer
|
||||
if ppname == "pointer_declarator":
|
||||
gState.data.add(("pointer_declarator", ""))
|
||||
elif pname in ["function_declarator", "array_declarator"]:
|
||||
if ppname == "pointer_declarator":
|
||||
gState.data.add(("pointer_declarator", ""))
|
||||
if pppname == "pointer_declarator":
|
||||
gState.data.add(("pointer_declarator", ""))
|
||||
|
||||
gState.data.add((name, val))
|
||||
|
||||
if pname == "pointer_declarator" and
|
||||
ppname == "function_declarator":
|
||||
if name == "field_identifier":
|
||||
if pppname == "pointer_declarator":
|
||||
gState.data.insert(("pointer_declarator", ""), gState.data.len-1)
|
||||
if ppppname == "pointer_declarator":
|
||||
gState.data.insert(("pointer_declarator", ""), gState.data.len-1)
|
||||
gState.data.add(("function_declarator", ""))
|
||||
elif name == "identifier":
|
||||
gState.data.add(("pointer_declarator", ""))
|
||||
|
||||
# Save node value for a top-level expression
|
||||
elif name in gExpressions and name != "escape_sequence":
|
||||
if $node.tsNodeParent.tsNodeType() notin gExpressions:
|
||||
gState.data.add((name, gState.getNodeVal(node)))
|
||||
|
||||
elif name in ["abstract_pointer_declarator", "enumerator", "field_declaration", "function_declarator"]:
|
||||
gState.data.add((name.replace("abstract_", ""), ""))
|
||||
|
||||
return true
|
||||
|
||||
proc searchAstForNode(ast: ref Ast, node: TSNode, gState: State): bool =
|
||||
let
|
||||
childNames = node.getTSNodeNamedChildNames().join()
|
||||
|
||||
if ast.isNil:
|
||||
return
|
||||
|
||||
if gState.debug:
|
||||
gState.nodeBranch.add $node.tsNodeType()
|
||||
gecho "#" & spaces(gState.nodeBranch.len * 2) & gState.nodeBranch[^1]
|
||||
|
||||
if ast.children.nBl:
|
||||
if childNames.contains(ast.regex) or
|
||||
(childNames.Bl and ast.recursive):
|
||||
if node.getTSNodeNamedChildCountSansComments() != 0:
|
||||
var flag = true
|
||||
|
||||
for i in 0 .. node.tsNodeNamedChildCount()-1:
|
||||
if $node.tsNodeNamedChild(i).tsNodeType() != "comment":
|
||||
let
|
||||
nodeChild = node.tsNodeNamedChild(i)
|
||||
astChild =
|
||||
if not ast.recursive:
|
||||
ast.getAstChildByName($nodeChild.tsNodeType())
|
||||
else:
|
||||
ast
|
||||
|
||||
if not searchAstForNode(astChild, nodeChild, gState):
|
||||
flag = false
|
||||
break
|
||||
|
||||
if flag:
|
||||
result = node.saveNodeData(gState)
|
||||
else:
|
||||
result = node.saveNodeData(gState)
|
||||
else:
|
||||
if gState.debug:
|
||||
gecho "#" & spaces(gState.nodeBranch.len * 2) & &" {ast.getRegexForAstChildren()} !=~ {childNames}"
|
||||
elif node.getTSNodeNamedChildCountSansComments() == 0:
|
||||
result = node.saveNodeData(gState)
|
||||
|
||||
if gState.debug:
|
||||
discard gState.nodeBranch.pop()
|
||||
if gState.nodeBranch.Bl:
|
||||
gecho ""
|
||||
|
||||
proc searchAst(root: TSNode, astTable: AstTable, gState: State) =
|
||||
var
|
||||
node = root
|
||||
nextnode: TSNode
|
||||
depth = 0
|
||||
|
||||
while true:
|
||||
if not node.tsNodeIsNull() and depth > -1:
|
||||
let
|
||||
name = $node.tsNodeType()
|
||||
if name in astTable:
|
||||
for ast in astTable[name]:
|
||||
if gState.debug:
|
||||
gecho "\n# " & gState.getNodeVal(node).replace("\n", "\n# ") & "\n"
|
||||
if searchAstForNode(ast, node, gState):
|
||||
ast.tonim(ast, node, gState)
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# " & gState.data.join("\n# ") & "\n"
|
||||
break
|
||||
gState.data = @[]
|
||||
else:
|
||||
break
|
||||
|
||||
if $node.tsNodeType() notin astTable and node.tsNodeNamedChildCount() != 0:
|
||||
nextnode = node.tsNodeNamedChild(0)
|
||||
depth += 1
|
||||
else:
|
||||
nextnode = node.tsNodeNextNamedSibling()
|
||||
|
||||
if nextnode.tsNodeIsNull():
|
||||
while true:
|
||||
node = node.tsNodeParent()
|
||||
depth -= 1
|
||||
if depth == -1:
|
||||
break
|
||||
if node == root:
|
||||
break
|
||||
if not node.tsNodeNextNamedSibling().tsNodeIsNull():
|
||||
node = node.tsNodeNextNamedSibling()
|
||||
break
|
||||
else:
|
||||
node = nextnode
|
||||
|
||||
if node == root:
|
||||
break
|
||||
|
||||
proc parseNim*(gState: State, fullpath: string, root: TSNode, astTable: AstTable) =
|
||||
# Generate Nim from tree-sitter AST root node
|
||||
var
|
||||
fp = fullpath.replace("\\", "/")
|
||||
|
||||
gState.currentHeader = getCurrentHeader(fullpath)
|
||||
gState.impShort = gState.currentHeader.replace("header", "imp")
|
||||
gState.sourceFile = fullpath
|
||||
|
||||
if gState.isIncludeHeader():
|
||||
gState.constStr &= &"\n {gState.currentHeader} {{.used.}} = \"{fp}\""
|
||||
|
||||
root.searchAst(astTable, gState)
|
||||
|
||||
proc printNim*(gState: State) =
|
||||
# Print Nim generated by parseNim()
|
||||
if gState.enumStr.nBl:
|
||||
gecho &"{gState.enumStr}\n"
|
||||
|
||||
gState.constStr = gState.getOverrideFinal(nskConst) & gState.constStr
|
||||
if gState.constStr.nBl:
|
||||
gecho &"const{gState.constStr}\n"
|
||||
|
||||
gecho &"""
|
||||
{{.pragma: {gState.impShort}, importc{gState.getHeaderPragma()}.}}
|
||||
{{.pragma: {gState.impShort}C, {gState.impShort}, cdecl{gState.getDynlib()}.}}
|
||||
"""
|
||||
|
||||
gState.typeStr = gState.getOverrideFinal(nskType) & gState.typeStr
|
||||
if gState.typeStr.nBl:
|
||||
gecho &"type{gState.typeStr}\n"
|
||||
|
||||
gState.procStr = gState.getOverrideFinal(nskProc) & gState.procStr
|
||||
if gState.procStr.nBl:
|
||||
gecho &"{gState.procStr}\n"
|
||||
|
||||
if gState.debug:
|
||||
if gState.debugStr.nBl:
|
||||
gecho gState.debugStr
|
||||
|
||||
if gState.skipStr.nBl:
|
||||
let
|
||||
hash = gState.skipStr.hash().abs()
|
||||
sname = getTempDir() / &"nimterop_{$hash}.h"
|
||||
gecho &"# Writing skipped definitions to {sname}\n"
|
||||
writeFile(sname, gState.skipStr)
|
||||
File diff suppressed because it is too large
Load diff
1095
nimterop/build.nim
1095
nimterop/build.nim
File diff suppressed because it is too large
Load diff
|
|
@ -1,105 +0,0 @@
|
|||
import os, strformat, strutils
|
||||
|
||||
import "."/shell
|
||||
|
||||
proc getCompilerMode*(path: string): string =
|
||||
## Determines a target language mode from an input filename, if one is not already specified.
|
||||
let file = path.splitFile()
|
||||
if file.ext in [".hxx", ".hpp", ".hh", ".H", ".h++", ".cpp", ".cxx", ".cc", ".C", ".c++"]:
|
||||
result = "cpp"
|
||||
elif file.ext in [".h", ".c"]:
|
||||
result = "c"
|
||||
|
||||
proc getGccModeArg*(mode: string): string =
|
||||
## Produces a GCC argument that explicitly sets the language mode to be used by the compiler.
|
||||
if mode == "cpp":
|
||||
result = "-xc++"
|
||||
elif mode == "c":
|
||||
result = "-xc"
|
||||
|
||||
proc getCompiler*(): string =
|
||||
var
|
||||
compiler =
|
||||
when defined(gcc):
|
||||
"gcc"
|
||||
elif defined(clang):
|
||||
"clang"
|
||||
else:
|
||||
doAssert false, "Nimterop only supports gcc and clang at this time"
|
||||
|
||||
result = getEnv("CC", compiler)
|
||||
|
||||
proc getGccPaths*(mode: string): seq[string] =
|
||||
var
|
||||
nul = when defined(Windows): "nul" else: "/dev/null"
|
||||
inc = false
|
||||
|
||||
(outp, _) = execAction(&"""{getCompiler()} -Wp,-v {getGccModeArg(mode)} {nul}""", die = false)
|
||||
|
||||
for line in outp.splitLines():
|
||||
if "#include <...> search starts here" in line:
|
||||
inc = true
|
||||
continue
|
||||
elif "End of search list" in line:
|
||||
break
|
||||
if inc:
|
||||
var
|
||||
path = line.strip().normalizedPath()
|
||||
if path notin result:
|
||||
result.add path
|
||||
|
||||
when defined(osx):
|
||||
result.add(execAction("xcrun --show-sdk-path").output.strip() & "/usr/include")
|
||||
|
||||
proc getGccLibPaths*(mode: string): seq[string] =
|
||||
var
|
||||
nul = when defined(Windows): "nul" else: "/dev/null"
|
||||
linker = when defined(OSX): "-Xlinker" else: ""
|
||||
|
||||
(outp, _) = execAction(&"""{getCompiler()} {linker} -v {getGccModeArg(mode)} {nul}""", die = false)
|
||||
|
||||
for line in outp.splitLines():
|
||||
if "LIBRARY_PATH=" in line:
|
||||
for path in line[13 .. ^1].split(PathSep):
|
||||
var
|
||||
path = path.strip().normalizedPath()
|
||||
if path notin result:
|
||||
result.add path
|
||||
break
|
||||
elif '\t' in line:
|
||||
var
|
||||
path = line.strip().normalizedPath()
|
||||
if path notin result:
|
||||
result.add path
|
||||
|
||||
when defined(osx):
|
||||
result.add "/usr/lib"
|
||||
|
||||
proc getGccInfo*(): tuple[arch, os, compiler, version, libc: string] =
|
||||
let
|
||||
(outp, _) = execAction(&"{getCompiler()} -v")
|
||||
for line in outp.splitLines():
|
||||
if line.startsWith("Target: "):
|
||||
result.arch = line.split(' ')[1].split('-')[0]
|
||||
result.os =
|
||||
if "linux" in line:
|
||||
"linux"
|
||||
elif "android" in line:
|
||||
"android"
|
||||
elif "darwin" in line:
|
||||
"macos"
|
||||
elif "w64" in line or "mingw" in line:
|
||||
"windows"
|
||||
else:
|
||||
"unknown"
|
||||
elif " version " in line:
|
||||
result.version = line.split(" version ")[1].split(' ')[0]
|
||||
if "clang" in outp:
|
||||
if result.os == "macos":
|
||||
result.compiler = "apple-clang"
|
||||
else:
|
||||
result.compiler = "clang"
|
||||
else:
|
||||
result.compiler = "gcc"
|
||||
if "musl" in outp:
|
||||
result.libc = "musl"
|
||||
|
|
@ -1,466 +0,0 @@
|
|||
import json, os, strformat, strutils, tables
|
||||
|
||||
|
||||
import ".."/globals
|
||||
import "."/[ccompiler, misc, nimconf, shell]
|
||||
|
||||
when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
|
||||
import marshal
|
||||
|
||||
type
  ConanPackage* = ref object
    ## ConanPackage type that stores conan uri and recipes/builds/revisions
    name*: string                 # package name, e.g. "zlib"
    version*: string              # package version, e.g. "1.2.11"
    user*: string                 # conan user, "_" when unspecified
    channel*: string              # conan channel, "_" when unspecified
    recipes*: OrderedTableRef[string, seq[ConanBuild]]  # recipe hash => builds

    # Target platform/toolchain info detected from the C compiler
    arch*, os*, compiler*, compversion*: string

    bhash*: string                # specific build hash, empty => any build
    shared*: bool                 # prefer shared (true) or static libs
    sharedLibs*: seq[string]      # shared lib paths from the manifest
    staticLibs*: seq[string]      # static lib paths from the manifest
    requires*: seq[ConanPackage]  # downloaded dependency packages

    # Lowercased dependency names to skip during download
    skipRequires*: seq[string]

  ConanBuild* = ref object
    ## Build type that stores build specific info and revisions
    bhash*: string                        # build hash
    settings*: TableRef[string, string]   # conan settings (os, arch, ...)
    options*: TableRef[string, string]    # conan options (shared, ...)
    requires*: seq[string]                # dependency uris for this build
    recipe_hash*: string                  # owning recipe hash
    revisions*: seq[string]               # known revisions, newest first
||||
const
  # Conan API urls
  conanBaseUrl = "https://conan.bintray.com/v2/conans"
  conanSearchUrl = conanBaseUrl & "/search?q=$query"
  conanPkgUrl = conanBaseUrl & "/$name/$version/$user/$channel/search$query"
  conanCfgUrl = conanBaseUrl & "/$name/$version/$user/$channel/revisions/$recipe/packages/$build/revisions"
  conanDlUrl = conanBaseUrl & "/$name/$version/$user/$channel/revisions/$recipe/packages/$build/revisions/$revision/files/$file"

  # Bintray download sub-URL for explicit `user/channel` (not _/_)
  conanDlAltUrl = "/download_file?file_path=$user%2F$name%2F$version%2F$channel%2F0%2Fpackage%2F$build%2F0%2F$file"

  # Strings
  conanInfo = "conaninfo.json"        # cached package metadata filename
  conanPackage = "conan_package.tgz"  # binary package archive filename
  conanManifest = "conanmanifest.txt" # manifest listing package contents
||||
var
  # Bintray download URL for explicit `user/channel` - extensible via
  # `addAltConanBaseUrl()`
  conanBaseAltUrl {.compileTime.} = {
    "bincrafters": "https://bintray.com/bincrafters/public-conan",
    "conan": "https://bintray.com/conan-community/conan"
  }.toTable()

  # Reuse dependencies already downloaded (name => package)
  gConanRequires {.compileTime.}: Table[string, ConanPackage]
|
||||
proc addAltConanBaseUrl*(name, url: string) =
  ## Add an alternate base URL for a custom conan repo on bintray
  conanBaseAltUrl[name] = url
||||
proc jsonGet(url: string): JsonNode =
  ## Download `url` to a temp file and parse its contents as JSON.
  ##
  ## Returns `nil` when the download failed or the content is not valid
  ## JSON - callers check for nil.
  let
    temp = getTempDir()
    file = block:
      var
        file = temp / url.extractFilename()
      when defined(Windows):
        # '?' (query strings) is not a valid filename character on Windows
        file = file.replace('?', '_')
      file

  downloadUrl(url, temp, quiet = true)
  try:
    result = readFile(file).parseJson()
  except JsonParsingError, IOError, OSError:
    # Failed download leaves no/partial file; also covers malformed JSON.
    # Previously only JsonParsingError was caught, so a missing file
    # raised an uncaught IOError.
    discard
  rmFile(file)
||||
proc `==`*(pkg1, pkg2: ConanPackage): bool =
  ## Check if two ConanPackage objects are equal.
  ##
  ## Compares package identity, target toolchain and build selection.
  ## A nil package never equals anything (including another nil).
  if pkg1.isNil or pkg2.isNil:
    return false

  # Package identity
  if pkg1.name != pkg2.name or pkg1.version != pkg2.version or
      pkg1.user != pkg2.user or pkg1.channel != pkg2.channel:
    return false

  # Target platform / toolchain
  if pkg1.arch != pkg2.arch or pkg1.os != pkg2.os or
      pkg1.compiler != pkg2.compiler or pkg1.compversion != pkg2.compversion:
    return false

  # Specific build selection
  result = pkg1.bhash == pkg2.bhash and pkg1.shared == pkg2.shared
||||
proc newConanPackage*(name, version, user = "_", channel = "_", bhash = "", shared = true): ConanPackage =
  ## Create a new ConanPackage with specified name and version
  ##
  ## Target info (arch/os/compiler/version) is detected from the configured
  ## C compiler. Fails when the toolchain uses musl since Conan does not
  ## publish musl binaries.
  result = new(ConanPackage)
  result.name = name
  result.version = version
  result.user = user
  result.channel = channel
  result.recipes = newOrderedTable[string, seq[ConanBuild]](2)

  let
    (arch, os, compiler, compversion, libc) = getGccInfo()
  doAssert libc != "musl", "Conan does not provide precompiled binaries using musl"

  result.arch = arch
  result.os = os
  result.compiler = compiler
  result.compversion = compversion

  result.bhash = bhash
  result.shared = shared
||||
proc newConanPackageFromUri*(uri: string, shared = true): ConanPackage =
  ## Create a new ConanPackage from a conan uri typically formatted as
  ## name/version[@user/channel][:bhash]
  var
    version, bhash: string
    user = "_"
    channel = "_"

  # Peel off the optional build hash suffix first
  var parts = uri.split(":")
  if parts.len > 1:
    bhash = parts[1]

  # Remainder: name[/version@user[/channel]]
  parts = parts[0].split('/')
  let name = parts[0]
  if parts.len > 2:
    channel = parts[2]
  if parts.len > 1:
    # Middle segment may carry "version@user"
    let verUser = parts[1].split('@')
    version = verUser[0]
    if verUser.len > 1:
      user = verUser[1]

  result = newConanPackage(name, version, user, channel, bhash, shared)
||||
proc getUriFromConanPackage*(pkg: ConanPackage): string =
  ## Convert a ConanPackage back into a conan uri string:
  ## name[/version][@user[/channel]][:bhash]
  ##
  ## Each segment is appended only when its field is non-blank.
  result = pkg.name
  for (sep, field) in [("/", pkg.version), ("@", pkg.user),
                       ("/", pkg.channel), (":", pkg.bhash)]:
    if field.nBl:
      result &= sep & field
||||
proc searchConan*(name: string, version = "", user = "", channel = ""): ConanPackage =
  ## Search for package by `name` and optional `version`, `user` and `channel`
  ##
  ## Search is quite slow so it is preferable to specify a version and use `getConanBuilds()`
  ##
  ## Returns nil when no matching `@_/_` package is found.
  var
    query = name
  if version.nBl:
    query &= "/" & version
  if user.nBl:
    query &= "@" & user
  if channel.nBl:
    query &= "/" & channel

  gecho &"# Searching Conan.io for latest version of {name}"

  let
    j1 = jsonGet(conanSearchUrl % ["query", query])
    res = j1.getOrDefault("results").getElems()

  # Return latest comparing versions - prefer @_/_ (official) packages
  var
    latest = ""
    latestv = ""
  for i in 0 ..< res.len:
    let
      str = res[i].getStr()
    if "@_/_" in str:
      let
        # uri format is name/version@user/channel
        ver = str.split('/')[1].split('@')[0]
      if latestv.Bl or compareVersions(ver, latestv) > 0:
        latestv = ver
        latest = str

  if latest.nBl:
    result = newConanPackageFromUri(latest)
||||
proc searchConan*(pkg: ConanPackage): ConanPackage =
  ## Search for latest package based on incomplete package info
  result = searchConan(pkg.name, pkg.version, pkg.user, pkg.channel)
||||
proc getConanBuilds*(pkg: ConanPackage, filter = "") =
  ## Get all builds for a package based on the C compiler's target OS/arch info
  ##
  ## `filter` can be used to tweak search terms
  ## e.g. build_type=Debug&compiler=clang
  ##
  ## Populates `pkg.recipes` with the first (or bhash-matching) build found.
  let
    vsplit = pkg.compversion.split('.')

    # OSX apple-clang is versioned major.minor, others major only
    vfilter =
      when defined(OSX):
        vsplit[0 .. 1].join(".")
      else:
        vsplit[0]

    # With an explicit bhash, no search query is needed
    query =
      if pkg.bhash.Bl:
        block:
          var
            query = &"?q=arch={pkg.arch}&os={pkg.os.capitalizeAscii()}"
          if "build_type" notin filter:
            query &= "&build_type=Release"
          if "shared=" notin filter:
            query &= &"&options.shared={($pkg.shared).capitalizeAscii()}"
          if filter.nBl:
            query &= &"&{filter}"
          if "compiler=" notin filter and pkg.os != "windows":
            query &= &"&compiler={pkg.compiler}&compiler.version=" & vfilter
          # Windows binaries on Conan are built with Visual Studio
          if "compiler.runtime=" notin filter and pkg.os == "windows":
            query &= &"&compiler.runtime=MD"
          if "compiler.version=" notin filter and pkg.os == "windows":
            query &= &"&compiler.version=14"

          # Conan query syntax joins terms with "and"
          query.replace("&", "%20and%20")
      else: ""

    url = conanPkgUrl % [
      "name", pkg.name,
      "version", pkg.version,
      "user", pkg.user,
      "channel", pkg.channel,
      "query", query
    ]

    j1 = jsonGet(url)

  if not j1.isNil:
    for bhash, bdata in j1.getFields():
      if pkg.bhash.Bl or pkg.bhash == bhash:
        let
          bld = new(ConanBuild)
          settings = bdata.getOrDefault("settings")
          options = bdata.getOrDefault("options")
          requires = bdata.getOrDefault("requires")
        bld.bhash = bhash
        if not settings.isNil:
          bld.settings = newTable[string, string](8)
          for key, value in settings.getFields():
            bld.settings[key] = value.getStr()
        if not options.isNil:
          bld.options = newTable[string, string](8)
          for key, value in options.getFields():
            bld.options[key] = value.getStr()
        # FIX: `requires` may be absent in the response - `.to()` on a nil
        # JsonNode raises, unlike settings/options which were nil-checked
        if not requires.isNil:
          for req in requires.to(seq[string]):
            # Filter skipped dependencies
            if req.toLowerAscii() notin pkg.skipRequires:
              bld.requires.add req
        bld.recipe_hash = bdata.getOrDefault("recipe_hash").getStr()

        if pkg.recipes.hasKey(bld.recipe_hash):
          pkg.recipes[bld.recipe_hash].add bld
        else:
          pkg.recipes[bld.recipe_hash] = @[bld]

        # Only need first or matching build
        break
||||
proc getConanRevisions*(pkg: ConanPackage, bld: ConanBuild) =
  ## Get all revisions of a build
  ##
  ## Appends to `bld.revisions`; leaves it unchanged when the server
  ## response is missing or malformed.
  let
    url = conanCfgUrl % [
      "name", pkg.name,
      "version", pkg.version,
      "user", pkg.user,
      "channel", pkg.channel,
      "recipe", bld.recipe_hash,
      "build", bld.bhash
    ]

    j1 = jsonGet(url)

  if not j1.isNil:
    let
      revs = j1.getOrDefault("revisions")
    # FIX: guard against a response without a "revisions" array -
    # iterating a nil JsonNode raises
    if not revs.isNil:
      for i in revs:
        bld.revisions.add i.getOrDefault("revision").getStr()
||||
proc loadConanInfo*(outdir: string): ConanPackage =
  ## Load cached package info from `outdir/conaninfo.json`
  ##
  ## Returns nil when the cache file is missing or cannot be deserialized.
  let
    file = fixRelPath(outdir) / conanInfo

  if fileExists(file):
    when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
      # Older Nim: json-to-object at compile time unsupported - use marshal
      result = to[ConanPackage](readFile(file))
    else:
      try:
        result = to(readFile(file).parseJson(), ConanPackage)
      except:
        # Corrupt or schema-incompatible cache - treat as absent
        discard
||||
proc saveConanInfo*(pkg: ConanPackage, outdir: string) =
  ## Save downloaded package info to `outdir/conaninfo.json`
  ##
  ## Counterpart of `loadConanInfo()` - must use the same serialization.
  let
    file = fixRelPath(outdir) / conanInfo

  when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
    # Older Nim: marshal module serialization
    writeFile(file, $$pkg)
  else:
    writeFile(file, $(%pkg))
||||
proc parseConanManifest(pkg: ConanPackage, outdir: string) =
  ## Collect shared/static library paths listed in the downloaded conan
  ## manifest into `pkg.sharedLibs` / `pkg.staticLibs`.
  let
    manifest = outdir / conanManifest

  if fileExists(manifest):
    for rawLine in readFile(manifest).splitLines():
      # Each manifest entry is "path: checksum" - keep only the path
      let entry = rawLine.split(':')[0]
      if entry.startsWith("lib/"):
        if entry.endsWith(".a") or entry.endsWith(".lib"):
          pkg.staticLibs.add entry
        elif entry.endsWith(".so") or entry.endsWith(".dylib"):
          pkg.sharedLibs.add entry
      elif entry.startsWith("bin/") and entry.endsWith("dll"):
        # Windows DLLs live under bin/
        pkg.sharedLibs.add entry
||||
proc dlConanBuild*(pkg: ConanPackage, bld: ConanBuild, outdir: string, revision = "") =
  ## Download specific `revision` of `bld` to `outdir`
  ##
  ## If omitted, the latest revision (first) is downloaded
  doAssert bld.revisions.nBl, "No build revisions found for Conan.io package " & pkg.getUriFromConanPackage()

  let
    outdir = fixRelPath(outdir)

    revision =
      if revision.nBl:
        revision
      else:
        # revisions[0] is the latest - presumably newest-first; TODO confirm
        bld.revisions[0]

    # Official `_/_` packages use the v2 API URL; user/channel packages go
    # through the bintray download_file URL keyed by `conanBaseAltUrl`
    url =
      if pkg.user == "_":
        conanDlUrl % [
          "name", pkg.name,
          "version", pkg.version,
          "user", pkg.user,
          "channel", pkg.channel,
          "recipe", bld.recipe_hash,
          "build", bld.bhash,
          "revision", revision,
          "file", conanPackage
        ]
      else:
        conanBaseAltUrl[pkg.user] & conanDlAltUrl % [
          "name", pkg.name,
          "version", pkg.version,
          "user", pkg.user,
          "channel", pkg.channel,
          "build", bld.bhash,
          "file", conanPackage
        ]

  # Fetch the package archive (auto-extracted) and its manifest
  downloadUrl(url, outdir, quiet = true)
  downloadUrl(url.replace(conanPackage, conanManifest), outdir, quiet = true)

  # Record which libs the package ships
  pkg.parseConanManifest(outdir)

  # Clean up the archive and manifest - extracted contents remain
  rmFile(outdir / url.extractFilename())
  rmFile(outdir / conanManifest)
||||
proc dlConanRequires*(pkg: ConanPackage, bld: ConanBuild, outdir: string)
proc downloadConan*(pkg: ConanPackage, outdir: string, main = true) =
  ## Download latest recipe/build/revision of `pkg` to `outdir`
  ##
  ## High-level API that handles the end to end Conan process flow to find
  ## latest package binary and downloads and extracts it to `outdir`.
  ##
  ## `main = false` is used for recursive dependency downloads - the cache
  ## check and info save only happen for the top-level package.
  let
    outdir = fixRelPath(outdir)

    # No version specified - search for the latest one
    pkg =
      if pkg.version.Bl:
        searchConan(pkg)
      else:
        pkg

  if main:
    let
      cpkg = loadConanInfo(outdir)

    # Already downloaded exactly this package - nothing to do
    if cpkg == pkg:
      return

    cleanDir(outdir)

  pkg.getConanBuilds()

  doAssert pkg.recipes.nBl, &"Failed to download {pkg.name} v{pkg.version} from Conan - check https://conan.io/center"

  gecho &"# Downloading {pkg.name} v{pkg.version} from Conan.io"
  # Only the first recipe and first (or bhash-matching) build are used
  for recipe, builds in pkg.recipes:
    for build in builds:
      if pkg.bhash.Bl or pkg.bhash == build.bhash:
        pkg.getConanRevisions(build)
        pkg.dlConanBuild(build, outdir)
        pkg.dlConanRequires(build, outdir)
        break
    break

  if main:
    pkg.saveConanInfo(outdir)
||||
proc dlConanRequires*(pkg: ConanPackage, bld: ConanBuild, outdir: string) =
  ## Download all required dependencies of this `bld`
  ##
  ## This is not required for shared libs since conan builds them
  ## with all dependencies statically linked in
  let
    outdir = fixRelPath(outdir)
  # FIX: `options` can be nil or lack "shared" for some packages -
  # `bld.options["shared"]` raised in that case; only static builds need
  # their dependencies downloaded
  if not bld.options.isNil and bld.options.getOrDefault("shared") == "False":
    for req in bld.requires:
      let
        name = req.split('/')[0]
      if gConanRequires.hasKey(name):
        # Reuse dep already downloaded
        pkg.requires.add gConanRequires[name]
      else:
        let
          rpkg = newConanPackageFromUri(req, shared = false)
        # Propagate the skip list to transitive dependencies
        rpkg.skipRequires = pkg.skipRequires

        downloadConan(rpkg, outdir, main = false)
        pkg.requires.add rpkg
        gConanRequires[name] = rpkg
||||
proc getConanLDeps*(pkg: ConanPackage, outdir: string, main = true): seq[string] =
  ## Get all Conan libs - shared (.so|.dll) or static (.a|.lib) in pkg, including deps
  ## in descending order
  ##
  ## `outdir` is prefixed to each entry
  ##
  ## With `main = true` (top level) the package's own libs are excluded -
  ## only dependency libs are returned.
  let
    libs = if pkg.shared: pkg.sharedLibs else: pkg.staticLibs
    str = if pkg.shared: "shared" else: "static"

  doAssert libs.nBl, &"No {str} libs found for {pkg.name} in {outdir}"

  if not main:
    for lib in libs:
      result.add outdir / lib

  # Recurse into dependencies (depth-first)
  for cpkg in pkg.requires:
    result.add cpkg.getConanLDeps(outdir, main = false)
|
|
@ -1,568 +0,0 @@
|
|||
import macros, strformat, strutils, tables
|
||||
|
||||
import os except findExe
|
||||
|
||||
import ".."/globals
|
||||
import "."/[ccompiler, conan, jbb, nimconf, shell, tools]
|
||||
|
||||
var
|
||||
gDefines {.compileTime.} = initTable[string, string]()
|
||||
|
||||
macro setDefines*(defs: static openArray[string]): untyped =
  ## Specify `-d:xxx` values in code instead of having to rely on the command
  ## line or `cfg` or `nims` files.
  ##
  ## At this time, Nim does not allow creation of `-d:xxx` defines in code. In
  ## addition, Nim only loads config files for the module being compiled but not
  ## for imported packages. This becomes a challenge when wanting to ship a wrapper
  ## library that wants to control `getHeader()` for an underlying package.
  ##
  ## E.g. nimarchive wanting to set `-d:lzmaStatic`
  ##
  ## The consumer of nimarchive would need to set such defines as part of their
  ## project, making it inconvenient.
  ##
  ## By calling this proc with the defines preferred before importing such a module,
  ## the caller can set the behavior in code instead.
  ##
  ## .. code-block:: nim
  ##
  ##    setDefines(@["lzmaStatic", "lzmaDL", "lzmaSetVer=5.2.4"])
  ##
  ##    import lzma
  for def in defs:
    let
      # Split "name=value" - value is optional
      nv = def.strip().split("=", maxsplit = 1)
    if nv.nBl:
      let
        n = nv[0]
        v =
          if nv.len == 2:
            nv[1]
          else:
            ""
      gDefines[n] = v
||||
macro clearDefines*(): untyped =
  ## Clear all defines set using `setDefines()`.
  gDefines.clear()
||||
macro isDefined*(def: untyped): untyped =
  ## Check if `-d:xxx` is set globally or via `setDefines()`
  ##
  ## Expands to a compile-time boolean expression.
  let
    # Whether setDefines() registered this name
    sdef = gDefines.hasKey(def.strVal())
  result = newNimNode(nnkStmtList)
  result.add(quote do:
    when defined(`def`) or `sdef` != 0:
      true
    else:
      false
  )
||||
macro getDefine*(def: untyped): untyped =
  ## Get the string value of a define set via `-d:xxx=val` or `setDefines()`
  ##
  ## A command-line `-d:` value overrides the `setDefines()` value via the
  ## generated `{.strdefine.}` const.
  let version = newIdentNode(def.strVal())
  let verVal =
    if gDefines.hasKey(def.strVal()):
      gDefines[def.strVal()]
    else:
      ""
  result = quote do:
    const `version` {.strdefine.} = `verVal`
    `version`
||||
proc getDynlibExt(): string =
  ## Regex fragment matching the platform's shared library suffix,
  ## allowing for version digits embedded in the filename.
  ## Empty string on platforms without a known convention.
  when defined(Windows):
    "[0-9.\\-]*\\.dll"
  elif defined(linux) or defined(FreeBSD):
    "\\.so[0-9.]*"
  elif defined(macosx):
    "[0-9.\\-]*\\.dylib"
  else:
    ""
||||
proc getStdPath(header, mode: string): string =
  ## Locate `header` in the compiler's standard include paths for `mode`.
  ## Returns "" when not found.
  for incDir in getGccPaths(mode):
    result = findFile(header, incDir, recurse = false, first = true)
    if result.nBl:
      break
||||
proc getStdLibPath(lname, mode: string): string =
  ## Locate library matching regex `lname` in the compiler's standard
  ## library paths for `mode`. Returns "" when not found.
  for libDir in getGccLibPaths(mode):
    result = findFile(lname, libDir, recurse = false, first = true, regex = true)
    if result.nBl:
      break
||||
proc getGitPath(header, url, outdir, version: string): string =
  ## Clone `url` (checked out at `version`) into `outdir` and locate `header`.
  doAssert url.nBl, "No git url setup for " & header
  doAssert findExe("git").nBl, "git executable missing"

  gitPull(url, outdir, checkout = version)

  result = findFile(header, outdir)
||||
proc getDlPath(header, url, outdir, version: string): string =
  ## Download the source archive at `url` (with `$1`/`$#` replaced by
  ## `version`) into `outdir`, flatten a single extracted top-level
  ## directory if present, and locate `header`.
  doAssert url.nBl, "No download url setup for " & header

  var
    dlurl = url
  if "$#" in url or "$1" in url:
    doAssert version.nBl, "Need version for download url"
    dlurl = url % version
  else:
    doAssert version.Bl, "Download url does not contain version"

  downloadUrl(dlurl, outdir)

  # Detect the "archive extracts into a single subdir" layout: exactly one
  # directory and no files other than the downloaded archive itself
  var
    dirname = ""
  for kind, path in walkDir(outdir, relative = true):
    if kind == pcFile and path != dlurl.extractFilename():
      # Loose files present - contents are already flat
      dirname = ""
      break
    elif kind == pcDir:
      if dirname.Bl:
        dirname = path
      else:
        # More than one directory - already flat
        dirname = ""
        break

  # Move the single subdir's contents up into outdir
  if dirname.nBl:
    for kind, path in walkDir(outdir / dirname, relative = true):
      mvFile(outdir / dirname / path, outdir / path)

  result = findFile(header, outdir)
||||
proc getConanPath(header, uri, flags, outdir, version: string, shared: bool): string =
  ## Download the Conan package for `uri` into `outdir` and locate `header`.
  ##
  ## `version` replaces `$1`/`$#` in `uri`, or is appended as `/version`
  ## when the uri has no placeholder.
  var
    uri = uri

  if "$#" in uri or "$1" in uri:
    doAssert version.nBl, "Need version for Conan.io uri: " & uri
    uri = uri % version
  elif version.nBl:
    uri = uri & "/" & version

  let
    pkg = newConanPackageFromUri(uri, shared)

  # Handle `conanFlags` - currently only `skip=dep1,dep2`
  if flags.nBl:
    for flag in flags.split(" "):
      if flag.startsWith("skip="):
        for req in flag["skip=".len .. ^1].split(","):
          if req.nBl:
            pkg.skipRequires.add req.toLowerAscii()

  downloadConan(pkg, outdir)

  result = findFile(header, outdir)
||||
proc getConanLDeps(outdir: string): seq[string] =
  ## Return dependency lib paths for the Conan package cached in `outdir`.
  let
    pkg = loadConanInfo(outdir)

  result = pkg.getConanLDeps(outdir)
||||
proc getJBBPath(header, uri, flags, outdir, version: string): string =
  ## Download the BinaryBuilder.org package for `uri` into `outdir` and
  ## locate `header`.
  ##
  ## `uri` is `name[/version]`; `version` (from `-d:xxxSetVer`) replaces
  ## `$1`/`$#` in the uri's version part, or is used directly when the uri
  ## has none. Specifying a version in both places is an error.
  let
    spl = uri.split('/', 1)
    name = spl[0]
    hasVersion = version.nBl

  var
    ver = if spl.len == 2: spl[1] else: ""

  if ver.nBl:
    if "$#" in ver or "$1" in ver:
      doAssert hasVersion, "Need version for BinaryBuilder.org uri: " & uri
      ver = ver % version
    elif hasVersion:
      doAssert false, "Version in both uri `" & uri & "` and `-d:xxxSetVer=\"" &
        version & "\"` for BinaryBuilder.org"
  elif hasVersion:
    ver = version

  let
    pkg = newJBBPackage(name, ver)

  # Handle `jbbFlags` - giturl= / url= / skip=
  if flags.nBl:
    for flag in flags.split(" "):
      if flag.startsWith("giturl="):
        # Alternate git host - bare "user/repo" implies github.com
        let
          val = flag["giturl=".len .. ^1]
        if val.contains("://"):
          pkg.baseUrl = val
        else:
          pkg.baseUrl = "https://github.com/" & val
      elif flag.startsWith("url="):
        # Plain HTTP location instead of a git repo
        pkg.baseUrl = flag["url=".len .. ^1]
        pkg.isGit = false
      elif flag.startsWith("skip="):
        for req in flag["skip=".len .. ^1].split(","):
          if req.nBl:
            pkg.skipRequires.add req.toLowerAscii()

  downloadJBB(pkg, outdir)

  result = findFile(header, outdir)
||||
proc getJBBLDeps(outdir: string, shared: bool): seq[string] =
  ## Return dependency lib paths for the JBB package cached in `outdir`.
  let
    pkg = loadJBBInfo(outdir)

  result = pkg.getJBBLDeps(outdir, shared)
||||
proc getLocalPath(header, outdir: string): string =
  ## Locate `header` in a user-managed `outdir` (no download performed).
  if outdir.nBl:
    result = findFile(header, outdir)
||||
proc buildLibrary(lname, outdir, conFlags, cmakeFlags, makeFlags: string, buildTypes: openArray[BuildType]): string =
  ## Build the library in `outdir` using the first `buildTypes` strategy
  ## that succeeds (cmake/autoconf) and return the path of the library
  ## matching regex `lname`. Returns immediately if it is already built.
  var
    lpath = findFile(lname, outdir, regex = true)
    makePath = outdir
    buildStatus: BuildStatus
    errors: seq[string]

  # Already built in a previous run
  if lpath.nBl:
    return lpath

  # Try each configure strategy in order; stop on the first success
  for buildType in buildTypes:
    case buildType
    of btCmake:
      buildStatus = buildWithCmake(makePath, cmakeFlags)
    of btAutoconf:
      buildStatus = buildWithAutoConf(makePath, conFlags)

    if buildStatus.built:
      break
    elif buildStatus.error.nBl:
      errors.add buildStatus.error

  # Configuration produced a build dir - run make if the lib isn't there yet
  if buildStatus.buildPath.len > 0:
    let libraryExists = findFile(lname, buildStatus.buildPath, regex = true).len > 0

    if not libraryExists and fileExists(buildStatus.buildPath / "Makefile"):
      make(buildStatus.buildPath, lname, makeFlags, regex = true)
      buildStatus.built = true

  let error = if errors.len > 0: errors.join("\n") else: "No build files found in " & outdir
  doAssert buildStatus.built, &"\nBuild configuration failed - {error}\n"

  # Search again - the library may be anywhere under outdir
  result = findFile(lname, outdir, regex = true)
||||
macro getHeader*(
|
||||
header: static[string], giturl: static[string] = "", dlurl: static[string] = "",
|
||||
conanuri: static[string] = "", jbburi: static[string] = "",
|
||||
outdir: static[string] = "", libdir: static[string] = "",
|
||||
conFlags: static[string] = "", cmakeFlags: static[string] = "", makeFlags: static[string] = "",
|
||||
conanFlags: static[string] = "", jbbFlags: static[string] = "", altNames: static[string] = "",
|
||||
buildTypes: static[openArray[BuildType]] = [btCmake, btAutoconf]): untyped =
|
||||
## Get the path to a header file for wrapping with
|
||||
## `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_ or
|
||||
## `c2nImport() <cimport.html#c2nImport.m%2C%2Cstring%2Cstring%2Cstring>`_.
|
||||
##
|
||||
## This proc checks `-d:xxx` defines based on the header name (e.g. lzma from lzma.h),
|
||||
## and accordingly employs different ways to obtain the source.
|
||||
##
|
||||
## `-d:xxxStd` - search standard system paths. E.g. `/usr/include` and `/usr/lib` on Linux
|
||||
## `-d:xxxGit` - clone source from a git repo specified in `giturl`
|
||||
## `-d:xxxDL` - download source from `dlurl` and extract if required
|
||||
## `-d:xxxConan` - download headers and binary from Conan.io using `conanuri` with
|
||||
## format `pkgname[/version[@user/channel][:bhash]]`
|
||||
## `-d:xxxJBB` - download headers and binary from BinaryBuilder.org using `jbburi` with
|
||||
## format `pkgname[/version]`
|
||||
##
|
||||
## This allows a single wrapper to be used in different ways depending on the user's needs.
|
||||
## If no `-d:xxx` defines are specified, `outdir` will be searched for the header as is.
|
||||
## The user can opt to download the sources to `outdir` using any other method such as
|
||||
## git sub-modules, vendoring or pointing to a repository that was already cloned.
|
||||
##
|
||||
## If multiple `-d:xxx` defines are specified, precedence is `Std` and then `Git`, `DL`,
|
||||
## `Conan` or `JBB`. This allows using a system installed library if available before
|
||||
## falling back to manual building. The user would need to specify both `-d:xxxStd` and
|
||||
## one of the other methods.
|
||||
##
|
||||
## `-d:xxxSetVer=x.y.z` can be used to specify which version to use. It is used as a tag
|
||||
## name for `Git` whereas for `DL`, `Conan` and `JBB`, it replaces `$1` in the URL
|
||||
## if specified. Specifying `-d:xxxSetVer` without a `$1` will download that version for
|
||||
## `Conan` and `JBB` if available. If no version is specified, the latest release of the
|
||||
## package is downloaded. For `Conan`, `-d:xxxSetVer` can also be used to set additional
|
||||
## URI information:
|
||||
## `-d:xxxSetVer=1.9.0@bincrafters/stable:bhash`
|
||||
##
|
||||
## If `conanuri` or `jbburi` are not defined and `Conan` or `JBB` is selected, the `header`
|
||||
## filename is used instead.
|
||||
##
|
||||
## All defines can also be set in code using `setDefines()` and checked for using
|
||||
## `isDefined()` which checks for defines set from both `-d` and `setDefines()`.
|
||||
##
|
||||
## The library is then configured (with `cmake` or `autotools` if possible) and built
|
||||
## using `make`, unless using `-d:xxxStd` which presumes that the system package
|
||||
## manager was used to install prebuilt headers and binaries, or using `-d:xxxConan`
|
||||
## or `-d:xxxJBB` which download pre-built binaries.
|
||||
##
|
||||
## The header path is stored in `const xxxPath` and can be used in a `cImport()` call
|
||||
## in the calling wrapper. The dynamic library path is stored in `const xxxLPath` and can
|
||||
## be used for the `dynlib` parameter (within quotes) or with `cPassL()`. Any dependency
|
||||
## libraries downloaded by `Conan` or `JBB` are returned in `const xxxLDeps` as a seq[string].
|
||||
##
|
||||
## `libdir` can be used to instruct `getHeader()` to copy shared libraries and their
|
||||
## dependencies to that directory. This prevents any runtime failures if `outdir` gets
|
||||
## removed or its contents changed. By default, `libdir` is set to the output directory
|
||||
## where the program binary will be created. The values of `xxxLPath` and `xxxLDeps` will
|
||||
## reflect this new location. `libdir` is ignored for `Std` mode.
|
||||
##
|
||||
## `-d:xxxStatic` can be specified to statically link with the library instead. This
|
||||
## will automatically add a `cPassL()` call to the static library for convenience. Note
|
||||
## that `-d:xxxConan` and `-d:xxxJBB` download all dependency libs as well and the
|
||||
## `xxxLPath` will include paths to all of them separated by space in the right order for
|
||||
## linking.
|
||||
##
|
||||
## Note also that Conan currently builds all OSX binaries on 10.14 so older versions of
|
||||
## OSX will complain if statically linking to these binaries. Further, all Conan binaries
|
||||
## for Windows are built with Visual Studio so static linking the `.lib` files with gcc
|
||||
## or clang might lead to incompatibility issues if the library uses Visual Studio
|
||||
## specific compiler features.
|
||||
##
|
||||
## `conFlags`, `cmakeFlags` and `makeFlags` allow sending custom parameters to `configure`,
|
||||
## `cmake` and `make` in case additional configuration is required as part of the build
|
||||
## process.
|
||||
##
|
||||
## `conanFlags` and `jbbFlags` allow changing the Conan.io and BinaryBuilder.org defaults:
|
||||
## - `skip=pkg1,pkg2` skips the specified packages which are required dependencies of the
|
||||
## package in question. This enables downloading those dependencies from other sources
|
||||
## if required.
|
||||
##
|
||||
## `jbbFlags` allows two additional customizations:
|
||||
## - `giturl=customUrl` changes the default `https://github.com/JuliaBinaryWrappers` to
|
||||
## another Git URL. If no hostname is specified, `https://github.com` is assumed.
|
||||
## - `url=customUrl` uses regular HTTP instead of Git and looks for `Artifacts.toml` and
|
||||
## `Project.toml` files at that location. `$1` or `$#` are replaced with the version
|
||||
## if specified.
|
||||
##
|
||||
## `altNames` is a list of alternate names for the library - e.g. zlib uses `zlib.h` for
|
||||
## the header but the typical lib name is `libz.so` and not `libzlib.so`. However, it is
|
||||
## libzlib.dll on Windows if built with cmake. In this case, `altNames = "z,zlib"`. Comma
|
||||
## separate for multiple alternate names without spaces.
|
||||
##
|
||||
## The original header name is not included by default if `altNames` is set since it could
|
||||
## cause the wrong lib to be selected. E.g. `SDL2/SDL.h` could pick `libSDL.so` even if
|
||||
## `altNames = "SDL2"`. Explicitly include it in `altNames` like the `zlib` example when
|
||||
## required.
|
||||
##
|
||||
## `buildTypes` specifies a list of ordered build strategies to use when building the
|
||||
## downloaded source files. Default is [btCmake, btAutoconf]
|
||||
##
|
||||
## `xxxPreBuild` is a hook that is called after the source code is pulled from Git or
|
||||
## downloaded but before the library is built. This might be needed if some initial prep
|
||||
## needs to be done before compilation. A few values are provided to the hook to help
|
||||
## provide context:
|
||||
##
|
||||
## `outdir` is the same `outdir` passed in and `header` is the discovered header path
|
||||
## in the downloaded source code.
|
||||
##
|
||||
## Simply define `proc xxxPreBuild(outdir, header: string)` in the wrapper and it will get
|
||||
## called prior to the build process.
|
||||
var
|
||||
origname = header.extractFilename().split(".")[0]
|
||||
name = origname.split(seps = AllChars-Letters-Digits).join()
|
||||
|
||||
# Default to origname if not specified
|
||||
conanuri = if conanuri.nBl: conanuri else: origname
|
||||
jbburi = if jbburi.nBl: jbburi else: origname
|
||||
|
||||
# -d:xxx for this header
|
||||
stdStr = name & "Std"
|
||||
gitStr = name & "Git"
|
||||
dlStr = name & "DL"
|
||||
conanStr = name & "Conan"
|
||||
jbbStr = name & "JBB"
|
||||
|
||||
staticStr = name & "Static"
|
||||
verStr = name & "SetVer"
|
||||
getPath = name & "GetPath"
|
||||
|
||||
# Ident nodes of the -d:xxx to check in when statements
|
||||
nameStd = newIdentNode(stdStr)
|
||||
nameGit = newIdentNode(gitStr)
|
||||
nameDL = newIdentNode(dlStr)
|
||||
nameConan = newIdentNode(conanStr)
|
||||
nameJBB = newIdentNode(jbbStr)
|
||||
|
||||
nameStatic = newIdentNode(staticStr)
|
||||
nameGetPath = newIdentNode(getPath)
|
||||
|
||||
# Consts to generate
|
||||
path = newIdentNode(name & "Path")
|
||||
lpath = newIdentNode(name & "LPath")
|
||||
ldeps = newIdentNode(name & "LDeps")
|
||||
version = newIdentNode(verStr)
|
||||
lname = newIdentNode(name & "LName")
|
||||
preBuild = newIdentNode(name & "PreBuild")
|
||||
|
||||
# Regex for library search
|
||||
lre = "(lib)?$1[_-]?(static)?"
|
||||
|
||||
# If -d:xxx set with setDefines()
|
||||
stdVal = gDefines.hasKey(stdStr)
|
||||
gitVal = gDefines.hasKey(gitStr)
|
||||
dlVal = gDefines.hasKey(dlStr)
|
||||
conanVal = gDefines.hasKey(conanStr)
|
||||
jbbVal = gDefines.hasKey(jbbStr)
|
||||
staticVal = gDefines.hasKey(staticStr)
|
||||
verVal =
|
||||
if gDefines.hasKey(verStr):
|
||||
gDefines[verStr]
|
||||
else:
|
||||
""
|
||||
mode = getCompilerMode(header)
|
||||
|
||||
libdir = if libdir.nBl: libdir else: getOutDir()
|
||||
|
||||
# Use alternate library names if specified for regex search
|
||||
if altNames.nBl:
|
||||
lre = lre % ("(" & altNames.replace(",", "|") & ")")
|
||||
else:
|
||||
lre = lre % origname
|
||||
|
||||
result = newNimNode(nnkStmtList)
|
||||
result.add(quote do:
|
||||
# Need to check -d:xxx or setDefines()
|
||||
const
|
||||
`nameStd`* = when defined(`nameStd`): true else: `stdVal` == 1
|
||||
`nameGit`* = when defined(`nameGit`): true else: `gitVal` == 1
|
||||
`nameDL`* = when defined(`nameDL`): true else: `dlVal` == 1
|
||||
`nameConan`* = when defined(`nameConan`): true else: `conanVal` == 1
|
||||
`nameJBB`* = when defined(`nameJBB`): true else: `jbbVal` == 1
|
||||
`nameStatic`* = when defined(`nameStatic`): true else: `staticVal` == 1
|
||||
|
||||
# Search for header in outdir (after retrieving code) depending on -d:xxx mode
|
||||
proc `nameGetPath`(header, giturl, dlurl, conanuri, conanFlags, jbburi, jbbFlags,
|
||||
outdir, version: string, shared: bool): string =
|
||||
when `nameGit`:
|
||||
getGitPath(header, giturl, outdir, version)
|
||||
elif `nameDL`:
|
||||
getDlPath(header, dlurl, outdir, version)
|
||||
elif `nameConan`:
|
||||
getConanPath(header, conanuri, conanFlags, outdir, version, shared)
|
||||
elif `nameJBB`:
|
||||
getJBBPath(header, jbburi, jbbFlags, outdir, version)
|
||||
else:
|
||||
getLocalPath(header, outdir)
|
||||
|
||||
static:
|
||||
# Don't delete project
|
||||
when not `nameStd` and (`nameGit` or `nameDL` or `nameConan` or `nameJBB`):
|
||||
doAssert `outdir`.len != 0, "getHeader():outdir cannot be blank"
|
||||
doAssert `outdir` != getProjectPath(), "getHeader():outdir cannot be the project path"
|
||||
|
||||
const
|
||||
`version`* {.strdefine.} = `verVal`
|
||||
`lname` =
|
||||
when `nameStatic`:
|
||||
`lre` & "\\.(a|lib)"
|
||||
else:
|
||||
`lre` & getDynlibExt()
|
||||
|
||||
# Look in standard path if requested by user
|
||||
stdPath =
|
||||
when `nameStd`: getStdPath(`header`, `mode`) else: ""
|
||||
stdLPath =
|
||||
when `nameStd`: getStdLibPath(`lname`, `mode`) else: ""
|
||||
|
||||
useStd = stdPath.len != 0 and stdLPath.len != 0
|
||||
|
||||
# Look elsewhere if requested while prioritizing standard paths
|
||||
prePath =
|
||||
when useStd:
|
||||
stdPath
|
||||
else:
|
||||
`nameGetPath`(`header`, `giturl`, `dlurl`, `conanuri`, `conanFlags`, `jbburi`, `jbbFlags`,
|
||||
`outdir`, `version`, not `nameStatic`)
|
||||
|
||||
# Run preBuild hook before building library if not Std, Conan or JBB
|
||||
when not (useStd or `nameConan` or `nameJBB`) and declared(`preBuild`):
|
||||
static:
|
||||
`preBuild`(`outdir`, prePath)
|
||||
|
||||
let
|
||||
# Library binary path - build if not standard / conan / jbb
|
||||
lpath {.compileTime.} =
|
||||
when useStd:
|
||||
stdLPath
|
||||
elif `nameConan` or `nameJBB`:
|
||||
findFile(`lname`, `outdir`, regex = true)
|
||||
else:
|
||||
buildLibrary(`lname`, `outdir`, `conFlags`, `cmakeFlags`, `makeFlags`, `buildTypes`)
|
||||
|
||||
# Library dependecy paths
|
||||
ldeps {.compileTime.}: seq[string] =
|
||||
when not useStd:
|
||||
when `nameConan`:
|
||||
getConanLDeps(`outdir`)
|
||||
elif `nameJBB`:
|
||||
getJBBLDeps(`outdir`, not `nameStatic`)
|
||||
else:
|
||||
@[]
|
||||
else:
|
||||
@[]
|
||||
|
||||
const
|
||||
# Header path - search again in case header is generated in build
|
||||
`path`* =
|
||||
if prePath.len != 0:
|
||||
prePath
|
||||
else:
|
||||
`nameGetPath`(`header`, `giturl`, `dlurl`, `conanuri`, `conanFlags`, `jbburi`, `jbbFlags`,
|
||||
`outdir`, `version`, not `nameStatic`)
|
||||
|
||||
static:
|
||||
doAssert `path`.len != 0, "\nHeader " & `header` & " not found - " &
|
||||
"missing/empty outdir or -d:$1Std -d:$1Git -d:$1DL -d:$1Conan or -d:$1JBB not specified" % `name`
|
||||
doAssert lpath.len != 0, "\nLibrary " & `lname` & " not found"
|
||||
|
||||
when `nameStatic`:
|
||||
const
|
||||
`lpath`* = lpath
|
||||
`ldeps`* = ldeps
|
||||
|
||||
# Automatically link with static library and dependencies
|
||||
static:
|
||||
gecho "# Including library " & lpath
|
||||
gStateCT.passL.add lpath
|
||||
if ldeps.len != 0:
|
||||
gecho "# Including dependencies " & ldeps.join(" ")
|
||||
gStateCT.passL.add ldeps.join(" ")
|
||||
else:
|
||||
const
|
||||
`lpath`* = when not useStd: `libdir` / lpath.extractFilename() else: lpath
|
||||
`ldeps`* =
|
||||
when not useStd:
|
||||
block:
|
||||
var
|
||||
ldeps = ldeps
|
||||
copied: seq[string]
|
||||
for i in 0 ..< ldeps.len:
|
||||
let
|
||||
lname = ldeps[i].extractFilename()
|
||||
ldeptgt = `libdir` / lname
|
||||
if not fileExists(ldeptgt) or getFileDate(ldeps[i]) != getFileDate(ldeptgt):
|
||||
cpFile(ldeps[i], ldeptgt, psymlink = true)
|
||||
copied.add lname
|
||||
ldeps[i] = ldeptgt
|
||||
# Copy downloaded dependencies to `libdir`
|
||||
if copied.len != 0:
|
||||
gecho "# Copying dependencies: " & copied.join(" ") & "\n# to " & `libdir`
|
||||
ldeps
|
||||
else:
|
||||
ldeps
|
||||
|
||||
static:
|
||||
when not useStd:
|
||||
# Copy downloaded shared libraries to `libdir`
|
||||
if not fileExists(`lpath`) or getFileDate(lpath) != getFileDate(`lpath`):
|
||||
gecho "# Copying " & `lpath`.extractFilename() & " to " & `libdir`
|
||||
cpFile(lpath, `lpath`)
|
||||
|
||||
gecho "# Including library " & `lpath`
|
||||
)
|
||||
|
|
@ -1,277 +0,0 @@
|
|||
import json, os, strformat, strutils, tables
|
||||
|
||||
import ".."/globals
|
||||
import "."/[ccompiler, nimconf, shell]
|
||||
|
||||
when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
|
||||
import marshal
|
||||
|
||||
type
|
||||
JBBPackage* = ref object
|
||||
## JBBPackage type that stores package information
|
||||
name*: string
|
||||
version*: string
|
||||
|
||||
baseUrl*: string # Location to find package
|
||||
isGit*: bool # Git or HTTP
|
||||
|
||||
url*: string # Download URL
|
||||
|
||||
arch*, os*, libc*: string # Target
|
||||
|
||||
sharedLibs*: seq[string]
|
||||
staticLibs*: seq[string]
|
||||
requires*: seq[JBBPackage]
|
||||
|
||||
skipRequires*: seq[string]
|
||||
|
||||
const
|
||||
# JBB URLs
|
||||
jbbBaseUrl = "https://github.com/JuliaBinaryWrappers"
|
||||
|
||||
jbbInfo = "jbbinfo.json"
|
||||
jbbProject = "Project.toml"
|
||||
jbbArtifacts = "Artifacts.toml"
|
||||
|
||||
var
|
||||
# Reuse dependencies already downloaded
|
||||
gJBBRequires {.compileTime.}: Table[string, JBBPackage]
|
||||
|
||||
proc `==`*(pkg1, pkg2: JBBPackage): bool =
|
||||
## Check if two JBBPackage objects are equal
|
||||
(not pkg1.isNil and not pkg2.isNil and
|
||||
pkg1.name == pkg2.name and
|
||||
pkg1.version == pkg2.version and
|
||||
|
||||
pkg1.arch == pkg2.arch and
|
||||
pkg1.os == pkg2.os and
|
||||
pkg1.libc == pkg2.libc)
|
||||
|
||||
proc newJBBPackage*(name, version: string): JBBPackage =
|
||||
## Create a new JBBPackage with specified name and version
|
||||
result = new(JBBPackage)
|
||||
result.name = name
|
||||
result.version = version
|
||||
result.baseUrl = jbbBaseUrl
|
||||
result.isGit = true
|
||||
|
||||
let
|
||||
(arch, os, _, _, libc) = getGccInfo()
|
||||
result.arch = arch
|
||||
result.os = os
|
||||
result.libc = libc
|
||||
|
||||
proc parseJBBProject(pkg: JBBPackage, outdir: string) =
|
||||
# Get all dependencies from Project.toml
|
||||
let
|
||||
file = outdir / jbbProject
|
||||
|
||||
if fileExists(file):
|
||||
let
|
||||
data = readFile(file)
|
||||
var
|
||||
deps = false
|
||||
|
||||
doAssert pkg.version in data, &"{pkg.name} v{pkg.version} not found"
|
||||
|
||||
for line in data.splitLines():
|
||||
let
|
||||
line = line.strip()
|
||||
if line.nBl:
|
||||
if line.startsWith('['):
|
||||
if line == "[deps]":
|
||||
deps = true
|
||||
else:
|
||||
deps = false
|
||||
elif deps:
|
||||
let
|
||||
name = line.split()[0]
|
||||
if name.endsWith("_jll"):
|
||||
# Filter skipped dependencies
|
||||
let
|
||||
pname = name[0 .. ^5]
|
||||
if pname.toLowerAscii() notin pkg.skipRequires:
|
||||
pkg.requires.add newJBBPackage(pname, "")
|
||||
pkg.requires[^1].skipRequires = pkg.skipRequires
|
||||
|
||||
proc parseJBBArtifacts(pkg: JBBPackage, outdir: string) =
|
||||
# Get build information from Artifacts.toml
|
||||
let
|
||||
file = outdir / jbbArtifacts
|
||||
|
||||
if fileExists(file):
|
||||
let
|
||||
data = readFile(file)
|
||||
|
||||
doAssert pkg.version in data, &"{pkg.name} v{pkg.version} not found"
|
||||
|
||||
var
|
||||
found = false
|
||||
for line in data.splitLines():
|
||||
let
|
||||
line = line.strip()
|
||||
if line.nBl:
|
||||
let
|
||||
spl = line.split(" = ", 1)
|
||||
name = spl[0]
|
||||
val = if spl.len == 2: spl[1].strip(chars = {'"', ' '}) else: ""
|
||||
|
||||
# Match arch, os and glibc on Linux to find download URL
|
||||
case name
|
||||
of "arch":
|
||||
if val == pkg.arch and not found: found = true
|
||||
of "os":
|
||||
if val != pkg.os and found: found = false
|
||||
of "libc":
|
||||
when defined(Linux):
|
||||
if found:
|
||||
let libc = if pkg.libc.nBl: pkg.libc else: "glibc"
|
||||
if val != libc: found = false
|
||||
of "url":
|
||||
if found:
|
||||
pkg.url = val
|
||||
break
|
||||
else:
|
||||
discard
|
||||
|
||||
proc findJBBLibs(pkg: JBBPackage, outdir: string) =
|
||||
pkg.sharedLibs = findFiles("(bin|lib)[\\\\/].*\\.(so|dll|dylib)[0-9.]*", outdir)
|
||||
|
||||
for lib in findFiles("lib[\\\\/].*\\.(a|lib)", outdir):
|
||||
if not lib.endsWith(".dll.a"):
|
||||
pkg.staticLibs.add lib
|
||||
|
||||
proc getJBBRepo*(pkg: JBBPackage, outdir: string) =
|
||||
## Clone JBB package repo and checkout version tag if version is
|
||||
## specified in package
|
||||
let
|
||||
path = outdir / "repos" / pkg.name
|
||||
|
||||
if pkg.isGit:
|
||||
# Get package info using Git
|
||||
gitPull(
|
||||
pkg.baseUrl & ("/$1_jll.jl" % pkg.name),
|
||||
outdir = path,
|
||||
plist = "*.toml",
|
||||
"master",
|
||||
quiet = true
|
||||
)
|
||||
|
||||
if pkg.version.nBl:
|
||||
# Checkout correct tag
|
||||
let
|
||||
tags = gitTags(path)
|
||||
for i in tags.len - 1 .. 0:
|
||||
if pkg.version in tags[i] and i != tags.len - 1:
|
||||
gitCheckout(path, tags[i-1])
|
||||
else:
|
||||
# Download package info from HTTP
|
||||
var
|
||||
url = pkg.baseUrl
|
||||
if "$#" in url or "$1" in url:
|
||||
doAssert pkg.version.nBl, "Need version for custom BinaryBuilder.org url: " & url
|
||||
url = url % pkg.version
|
||||
downloadUrl(url & "Artifacts.toml", path, quiet = true)
|
||||
downloadUrl(url & "Project.toml", path, quiet = true)
|
||||
|
||||
pkg.parseJBBProject(path)
|
||||
pkg.parseJBBArtifacts(path)
|
||||
|
||||
proc loadJBBInfo*(outdir: string): JBBPackage =
|
||||
## Load cached package info from `outdir/jbbinfo.json`
|
||||
let
|
||||
file = fixRelPath(outdir) / jbbInfo
|
||||
|
||||
if fileExists(file):
|
||||
when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
|
||||
result = to[JBBPackage](readFile(file))
|
||||
else:
|
||||
try:
|
||||
result = to(readFile(file).parseJson(), JBBPackage)
|
||||
except:
|
||||
discard
|
||||
|
||||
proc saveJBBInfo*(pkg: JBBPackage, outdir: string) =
|
||||
## Save downloaded package info to `outdir/jbbinfo.json`
|
||||
let
|
||||
file = fixRelPath(outdir) / jbbInfo
|
||||
|
||||
when (NimMajor, NimMinor, NimPatch) < (1, 2, 0):
|
||||
writeFile(file, $$pkg)
|
||||
else:
|
||||
writeFile(file, $(%pkg))
|
||||
|
||||
proc dlJBBRequires*(pkg: JBBPackage, outdir: string)
|
||||
proc downloadJBB*(pkg: JBBPackage, outdir: string, main = true) =
|
||||
## Download `pkg` from BinaryBuilder.org to `outdir`
|
||||
##
|
||||
## High-level API that handles the end to end JBB process flow to find
|
||||
## latest package binary and downloads and extracts it to `outdir`.
|
||||
let
|
||||
outdir = fixRelPath(outdir)
|
||||
|
||||
if main:
|
||||
let
|
||||
cpkg = loadJBBInfo(outdir)
|
||||
|
||||
if cpkg == pkg:
|
||||
return
|
||||
|
||||
cleanDir(outdir)
|
||||
|
||||
pkg.getJBBRepo(outdir)
|
||||
|
||||
if pkg.url.Bl:
|
||||
# No url for deps means no package for that os/arch combo - e.g. Attr
|
||||
doAssert not main, &"Failed to download {pkg.name} info from BinaryBuilder.org"
|
||||
return
|
||||
|
||||
let
|
||||
vstr =
|
||||
if pkg.version.nBl:
|
||||
&" v{pkg.version}"
|
||||
else:
|
||||
""
|
||||
path = outdir / pkg.name
|
||||
gecho &"# Downloading {pkg.name}{vstr} from BinaryBuilder.org"
|
||||
downloadUrl(pkg.url, path, quiet = true)
|
||||
pkg.findJBBLibs(path)
|
||||
|
||||
pkg.dlJBBRequires(outdir)
|
||||
|
||||
if main:
|
||||
pkg.saveJBBInfo(outdir)
|
||||
|
||||
proc dlJBBRequires*(pkg: JBBPackage, outdir: string) =
|
||||
## Download all required dependencies of this `pkg`
|
||||
let
|
||||
outdir = fixRelPath(outdir)
|
||||
for i in 0 ..< pkg.requires.len:
|
||||
let
|
||||
rpkg = pkg.requires[i]
|
||||
if gJBBRequires.hasKey(rpkg.name):
|
||||
# Reuse dep already downloaded
|
||||
pkg.requires[i] = gJBBRequires[rpkg.name]
|
||||
else:
|
||||
downloadJBB(rpkg, outdir, main = false)
|
||||
gJBBRequires[rpkg.name] = rpkg
|
||||
|
||||
proc getJBBLDeps*(pkg: JBBPackage, outdir: string, shared: bool, main = true): seq[string] =
|
||||
## Get all BinaryBuilder.org libs - shared (.so|.dll) or static (.a|.lib) in pkg, including deps
|
||||
## in descending order
|
||||
##
|
||||
## `outdir` is prefixed to each entry
|
||||
let
|
||||
libs = if shared: pkg.sharedLibs else: pkg.staticLibs
|
||||
str = if shared: "shared" else: "static"
|
||||
|
||||
doAssert libs.nBl, &"No {str} libs found for {pkg.name} in {outdir}"
|
||||
|
||||
if not main:
|
||||
for lib in libs:
|
||||
result.add lib
|
||||
|
||||
for cpkg in pkg.requires:
|
||||
# No url for deps means no package for that os/arch combo - e.g. Attr
|
||||
if cpkg.url.nBl:
|
||||
result.add cpkg.getJBBLDeps(outdir, shared, main = false)
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
import os, strutils
|
||||
|
||||
when defined(Windows):
|
||||
import strformat
|
||||
|
||||
import ".."/globals
|
||||
|
||||
proc sanitizePath*(path: string, noQuote = false, sep = $DirSep): string =
|
||||
result = path.multiReplace([("\\\\", sep), ("\\", sep), ("/", sep)])
|
||||
if not noQuote:
|
||||
result = result.quoteShell
|
||||
|
||||
proc getCurrentNimCompiler*(): string =
|
||||
when nimvm:
|
||||
result = getCurrentCompilerExe()
|
||||
when defined(nimsuggest):
|
||||
result = result.replace("nimsuggest", "nim")
|
||||
else:
|
||||
result = gState.nim
|
||||
|
||||
proc compareVersions*(ver1, ver2: string): int =
|
||||
## Compare two version strings x.y.z and return -1, 0, 1
|
||||
##
|
||||
## ver1 < ver2 = -1
|
||||
## ver1 = ver2 = 0
|
||||
## ver1 > ver2 = 1
|
||||
let
|
||||
ver1seq = ver1.replace("-", "").split('.')
|
||||
ver2seq = ver2.replace("-", "").split('.')
|
||||
for i in 0 ..< ver1seq.len:
|
||||
let
|
||||
p1 = ver1seq[i]
|
||||
p2 = if i < ver2seq.len: ver2seq[i] else: "0"
|
||||
|
||||
try:
|
||||
let
|
||||
h1 = p1.parseHexInt()
|
||||
h2 = p2.parseHexInt()
|
||||
|
||||
if h1 < h2: return -1
|
||||
elif h1 > h2: return 1
|
||||
except ValueError:
|
||||
if p1 < p2: return -1
|
||||
elif p1 > p2: return 1
|
||||
|
||||
proc fixCmd*(cmd: string): string =
|
||||
when defined(Windows):
|
||||
# Replace 'cd d:\abc' with 'd: && cd d:\abc`
|
||||
var filteredCmd = cmd
|
||||
if cmd.toLower().startsWith("cd"):
|
||||
var
|
||||
colonIndex = cmd.find(":")
|
||||
driveLetter = cmd.substr(colonIndex-1, colonIndex)
|
||||
if (driveLetter[0].isAlphaAscii() and
|
||||
driveLetter[1] == ':' and
|
||||
colonIndex == 4):
|
||||
filteredCmd = &"{driveLetter} && {cmd}"
|
||||
result = "cmd /c " & filteredCmd
|
||||
elif defined(posix):
|
||||
result = cmd
|
||||
else:
|
||||
doAssert false
|
||||
|
|
@ -1,528 +0,0 @@
|
|||
import hashes, osproc, sets, strformat, strutils
|
||||
|
||||
when not defined(TOAST):
|
||||
import os except findExe, sleep
|
||||
else:
|
||||
import os
|
||||
|
||||
import ".."/globals
|
||||
import "."/[misc, nimconf]
|
||||
|
||||
when not defined(TOAST):
|
||||
proc sleep*(milsecs: int) =
|
||||
## Sleep at compile time
|
||||
let
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
"cmd /c timeout "
|
||||
else:
|
||||
"sleep "
|
||||
|
||||
discard gorgeEx(cmd & $(milsecs / 1000))
|
||||
else:
|
||||
export sleep
|
||||
|
||||
proc execAction*(cmd: string, retry = 0, die = true, cache = false,
|
||||
cacheKey = "", onRetry: proc() = nil,
|
||||
onError: proc(output: string, err: int) = nil): tuple[output: string, ret: int] =
|
||||
## Execute an external command - supported at compile time
|
||||
##
|
||||
## Checks if command exits successfully before returning. If not, an
|
||||
## error is raised. Always caches results to be used in nimsuggest or nimcheck
|
||||
## mode.
|
||||
##
|
||||
## `retry` - number of times command should be retried before error
|
||||
## `die = false` - return on errors
|
||||
## `cache = true` - cache results unless cleared with -f
|
||||
## `cacheKey` - key to create unique cache entry
|
||||
## `onRetry()` - proc to call before retrying
|
||||
## `onError(output, err)` - proc to call on error
|
||||
let
|
||||
ccmd = fixCmd(cmd)
|
||||
|
||||
when nimvm:
|
||||
# Cache results for speedup if cache = true
|
||||
# Else cache for preserving functionality in nimsuggest and nimcheck
|
||||
let
|
||||
hash = (ccmd & cacheKey).hash().abs()
|
||||
cachePath = getNimteropCacheDir() / "execCache" / "nimterop_" & $hash
|
||||
cacheFile = cachePath & ".txt"
|
||||
retFile = cachePath & "_ret.txt"
|
||||
|
||||
when defined(nimsuggest) or defined(nimcheck):
|
||||
# Load results from cache file if generated in previous run
|
||||
if fileExists(cacheFile) and fileExists(retFile):
|
||||
result.output = cacheFile.readFile()
|
||||
result.ret = retFile.readFile().parseInt()
|
||||
elif die:
|
||||
doAssert false, "Results not cached - run nim c/cpp at least once\n" & ccmd
|
||||
else:
|
||||
if cache and fileExists(cacheFile) and fileExists(retFile) and not compileOption("forceBuild"):
|
||||
# Return from cache when requested
|
||||
result.output = cacheFile.readFile()
|
||||
result.ret = retFile.readFile().parseInt()
|
||||
else:
|
||||
# Execute command and store results in cache
|
||||
(result.output, result.ret) = gorgeEx(ccmd)
|
||||
if result.ret == 0 or die == false:
|
||||
# mkdir for execCache dir (circular dependency)
|
||||
let dir = cacheFile.parentDir()
|
||||
if not dirExists(dir):
|
||||
let flag = when not defined(Windows): "-p" else: ""
|
||||
discard execAction(&"mkdir {flag} {dir.sanitizePath}")
|
||||
cacheFile.writeFile(result.output)
|
||||
retFile.writeFile($result.ret)
|
||||
else:
|
||||
# Used by toast
|
||||
(result.output, result.ret) = execCmdEx(ccmd)
|
||||
|
||||
# On failure, retry or die as requested
|
||||
if result.ret != 0:
|
||||
if retry > 0:
|
||||
if not onRetry.isNil:
|
||||
onRetry()
|
||||
sleep(500)
|
||||
result = execAction(cmd, retry = retry - 1, die, cache, cacheKey)
|
||||
else:
|
||||
if not onError.isNil:
|
||||
onError(result.output, result.ret)
|
||||
|
||||
doAssert not die, "Command failed: " & $result.ret & "\ncmd: " & ccmd &
|
||||
"\nresult:\n" & result.output
|
||||
|
||||
when not defined(TOAST):
|
||||
proc findExe*(exe: string): string =
|
||||
## Find the specified executable using the `which`/`where` command - supported
|
||||
## at compile time
|
||||
var
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
"where " & exe
|
||||
else:
|
||||
"which " & exe
|
||||
|
||||
(output, ret) = execAction(cmd, die = false)
|
||||
|
||||
if ret == 0:
|
||||
return output.splitLines()[0].strip().sanitizePath
|
||||
else:
|
||||
export findExe
|
||||
|
||||
proc mkDir*(dir: string) =
|
||||
## Create a directory at compile time
|
||||
##
|
||||
## The `os` module is not available at compile time so a few
|
||||
## crucial helper functions are included with nimterop.
|
||||
if not dirExists(dir):
|
||||
let
|
||||
flag = when not defined(Windows): "-p" else: ""
|
||||
discard execAction(&"mkdir {flag} {dir.sanitizePath}", retry = 2)
|
||||
|
||||
proc cpFile*(source, dest: string, psymlink = false, move = false) =
|
||||
## Copy a file from `source` to `dest` at compile time
|
||||
##
|
||||
## `psymlink = true` preserves symlinks instead of dereferencing on posix
|
||||
let
|
||||
source = source.replace("/", $DirSep)
|
||||
dest = dest.replace("/", $DirSep)
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
if move:
|
||||
"move /y"
|
||||
else:
|
||||
"copy /y"
|
||||
else:
|
||||
if move:
|
||||
"mv -f"
|
||||
else:
|
||||
if psymlink:
|
||||
"cp -fa"
|
||||
else:
|
||||
"cp -f"
|
||||
|
||||
discard execAction(&"{cmd} {source.sanitizePath} {dest.sanitizePath}", retry = 2)
|
||||
|
||||
proc mvFile*(source, dest: string) =
|
||||
## Move a file from `source` to `dest` at compile time
|
||||
cpFile(source, dest, move=true)
|
||||
|
||||
proc rmFile*(source: string, dir = false) =
|
||||
## Remove a file or pattern at compile time
|
||||
let
|
||||
source = source.replace("/", $DirSep)
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
if dir:
|
||||
"rd /s/q"
|
||||
else:
|
||||
"del /q/f"
|
||||
else:
|
||||
"rm -rf"
|
||||
exists =
|
||||
if dir:
|
||||
dirExists(source)
|
||||
else:
|
||||
fileExists(source)
|
||||
|
||||
if exists:
|
||||
discard execAction(&"{cmd} {source.sanitizePath}", retry = 2)
|
||||
|
||||
proc rmDir*(dir: string) =
|
||||
## Remove a directory or pattern at compile time
|
||||
rmFile(dir, dir = true)
|
||||
|
||||
proc cleanDir*(dir: string) =
|
||||
## Remove all contents of a directory at compile time
|
||||
for kind, path in walkDir(dir):
|
||||
if kind == pcDir:
|
||||
rmDir(path)
|
||||
else:
|
||||
rmFile(path)
|
||||
|
||||
proc cpTree*(source, dest: string, move = false) =
|
||||
## Copy contents of source dir to the destination, not the directory itself
|
||||
for kind, path in walkDir(source, relative = true):
|
||||
if kind == pcDir:
|
||||
cpTree(source / path, dest / path, move)
|
||||
if move:
|
||||
rmDir(source / path)
|
||||
else:
|
||||
if not dirExists(dest):
|
||||
mkDir(dest)
|
||||
if move:
|
||||
mvFile(source / path, dest / path)
|
||||
else:
|
||||
cpFile(source / path, dest / path)
|
||||
|
||||
proc mvTree*(source, dest: string) =
|
||||
## Move contents of source dir to the destination, not the directory itself
|
||||
cpTree(source, dest, move = true)
|
||||
|
||||
proc getFileDate*(fullpath: string): string =
|
||||
## Get file date for `fullpath`
|
||||
var
|
||||
ret = 0
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
let
|
||||
(head, tail) = fullpath.splitPath()
|
||||
&"forfiles /P {head.sanitizePath()} /M {tail.sanitizePath} /C \"cmd /c echo @fdate @ftime\""
|
||||
elif defined(Linux):
|
||||
&"stat -c %Y {fullpath.sanitizePath}"
|
||||
elif defined(OSX) or defined(FreeBSD):
|
||||
&"stat -f %m {fullpath.sanitizePath}"
|
||||
|
||||
(result, ret) = execAction(cmd, die=false)
|
||||
|
||||
proc touchFile*(fullpath: string) =
|
||||
## Touch file to update modified date
|
||||
var
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
&"cmd /c copy /b {fullpath.sanitizePath}+"
|
||||
else:
|
||||
&"touch {fullpath.sanitizePath}"
|
||||
|
||||
discard execAction(cmd)
|
||||
|
||||
proc extractZip*(zipfile, outdir: string, quiet = false) =
|
||||
## Extract a zip file using `powershell` on Windows and `unzip` on other
|
||||
## systems to the specified output directory
|
||||
var cmd = "unzip -o $#"
|
||||
if defined(Windows):
|
||||
cmd = "powershell -nologo -noprofile -command \"& { Add-Type -A " &
|
||||
"'System.IO.Compression.FileSystem'; " &
|
||||
"[IO.Compression.ZipFile]::ExtractToDirectory('$#', '.'); }\""
|
||||
|
||||
if not quiet:
|
||||
gecho "# Extracting " & zipfile
|
||||
discard execAction(&"cd {outdir.sanitizePath} && {cmd % zipfile}")
|
||||
|
||||
proc extractTar*(tarfile, outdir: string, quiet = false) =
|
||||
## Extract a tar file using `tar`, `7z` or `7za` to the specified output directory
|
||||
var
|
||||
cmd = ""
|
||||
name = ""
|
||||
|
||||
if findExe("tar").len != 0:
|
||||
let
|
||||
ext = tarfile.splitFile().ext.toLowerAscii()
|
||||
typ =
|
||||
case ext
|
||||
of ".gz", ".tgz": "z"
|
||||
of ".xz": "J"
|
||||
of ".bz2": "j"
|
||||
else: ""
|
||||
|
||||
cmd = "tar xvf" & typ & " " & tarfile.sanitizePath
|
||||
else:
|
||||
for i in ["7z", "7za"]:
|
||||
if findExe(i).len != 0:
|
||||
cmd = i & " x $#" % tarfile.sanitizePath
|
||||
|
||||
name = tarfile.splitFile().name
|
||||
if ".tar" in name.toLowerAscii():
|
||||
cmd &= " && " & i & " x $#" % name.sanitizePath
|
||||
|
||||
break
|
||||
|
||||
doAssert cmd.len != 0, "No extraction tool - tar, 7z, 7za - available for " & tarfile.sanitizePath
|
||||
|
||||
if not quiet:
|
||||
gecho "# Extracting " & tarfile
|
||||
discard execAction(&"cd {outdir.sanitizePath} && {cmd}")
|
||||
if name.len != 0:
|
||||
rmFile(outdir / name)
|
||||
|
||||
proc downloadUrl*(url, outdir: string, quiet = false, retry = 1) =
|
||||
## Download a file using `curl` or `wget` (or `powershell` on Windows) to the specified directory
|
||||
##
|
||||
## If an archive file, it is automatically extracted after download.
|
||||
let
|
||||
file = url.extractFilename()
|
||||
filePath = outdir / file
|
||||
ext = file.splitFile().ext.toLowerAscii()
|
||||
archives = @[".zip", ".xz", ".gz", ".bz2", ".tgz", ".tar"]
|
||||
|
||||
if not (ext in archives and fileExists(filePath)):
|
||||
if not quiet:
|
||||
gecho "# Downloading " & file
|
||||
mkDir(outdir)
|
||||
var cmd = findExe("curl")
|
||||
if cmd.len != 0:
|
||||
cmd &= " -Lk $# -o $#"
|
||||
else:
|
||||
cmd = findExe("wget")
|
||||
if cmd.len != 0:
|
||||
cmd &= " $# -O $#"
|
||||
elif defined(Windows):
|
||||
cmd = "powershell [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; wget $# -OutFile $#"
|
||||
else:
|
||||
doAssert false, "No download tool available - curl, wget"
|
||||
discard execAction(cmd % [url.quoteShell, (filePath).sanitizePath], retry = 3,
|
||||
onRetry = proc() = rmFile(filePath))
|
||||
|
||||
if ext == ".zip":
|
||||
extractZip(file, outdir, quiet)
|
||||
elif ext in archives:
|
||||
extractTar(file, outdir, quiet)
|
||||
|
||||
proc gitReset*(outdir: string) =
|
||||
## Hard reset the git repository at the specified directory
|
||||
gecho "# Resetting " & outdir
|
||||
|
||||
let cmd = &"cd {outdir.sanitizePath} && git reset --hard"
|
||||
while execAction(cmd).output.contains("Permission denied"):
|
||||
sleep(1000)
|
||||
gecho "# Retrying ..."
|
||||
|
||||
proc gitCheckout*(file, outdir: string) =
|
||||
## Checkout the specified `file` in the git repository at `outdir`
|
||||
##
|
||||
## This effectively resets all changes in the file and can be
|
||||
## used to undo any changes that were made to source files to enable
|
||||
## successful wrapping with `cImport()` or `c2nImport()`.
|
||||
gecho "# Resetting " & file
|
||||
let file2 = file.relativePath outdir
|
||||
let cmd = &"cd {outdir.sanitizePath} && git checkout {file2.sanitizePath}"
|
||||
while execAction(cmd).output.contains("Permission denied"):
|
||||
sleep(500)
|
||||
gecho "# Retrying ..."
|
||||
|
||||
proc gitAtCheckout*(outdir, checkout: string): bool =
|
||||
## Check if specified git repository is checked out to the specified
|
||||
## commit hash, tag or branch
|
||||
result = checkout in execAction(
|
||||
&"cd {outdir.sanitizePath} && git log --decorate --no-color -n 1 --format=oneline").output
|
||||
|
||||
proc gitDefaultBranch*(outdir: string): string =
|
||||
## Get the default branch for a git repository before it is pulled
|
||||
result = "master"
|
||||
let
|
||||
output = execAction(
|
||||
&"cd {outdir.sanitizePath} && git remote show origin"
|
||||
).output
|
||||
|
||||
for line in output.splitLines():
|
||||
if "HEAD branch: " in line:
|
||||
result = line.split("branch: ")[1].strip()
|
||||
|
||||
proc gitPull*(url: string, outdir = "", plist = "", checkout = "", quiet = false) =
|
||||
## Pull the specified git repository to the output directory
|
||||
##
|
||||
## `plist` is the list of specific files and directories or wildcards
|
||||
## to sparsely checkout. Multiple values can be specified one entry per
|
||||
## line. It is optional and if omitted, the entire repository will be
|
||||
## checked out.
|
||||
##
|
||||
## `checkout` is the git tag, branch or commit hash to checkout once
|
||||
## the repository is downloaded. This allows for pinning to a specific
|
||||
## version of the code.
|
||||
let
|
||||
outdirQ = outdir.sanitizePath
|
||||
|
||||
if dirExists(outdir/".git"):
|
||||
gitReset(outdir)
|
||||
if checkout.nBl and not gitAtCheckout(outdir, checkout):
|
||||
gecho &"# Updating repository to checkout {checkout}"
|
||||
discard execAction(
|
||||
&"cd {outdirQ} && git clean -fxd && git fetch && git checkout {checkout}", retry = 3)
|
||||
return
|
||||
|
||||
mkDir(outdir)
|
||||
|
||||
if not quiet:
|
||||
gecho "# Setting up Git repo: " & url
|
||||
discard execAction(&"cd {outdirQ} && git init .")
|
||||
discard execAction(&"cd {outdirQ} && git remote add origin {url}")
|
||||
|
||||
if plist.len != 0:
|
||||
# If a specific list of files is required, create a sparse checkout
|
||||
# file for git in its config directory
|
||||
let sparsefile = outdir / ".git/info/sparse-checkout"
|
||||
|
||||
discard execAction(&"cd {outdirQ} && git config core.sparsecheckout true")
|
||||
writeFile(sparsefile, plist)
|
||||
|
||||
# In case directory has old files from another run
|
||||
discard execAction(&"cd {outdirQ} && git clean -fxd")
|
||||
|
||||
# Checkout specified branch/tag/commit or default branch - typically master
|
||||
let
|
||||
checkout = if checkout.Bl: gitDefaultBranch(outdir) else: checkout
|
||||
|
||||
if not quiet:
|
||||
gecho "# Checking out " & checkout
|
||||
discard execAction(&"cd {outdirQ} && git fetch", retry = 3)
|
||||
discard execAction(&"cd {outdirQ} && git checkout {checkout}")
|
||||
|
||||
proc gitTags*(outdir: string): seq[string] =
|
||||
## Get all the git tags in the specified directory
|
||||
let
|
||||
cmd = &"cd {outdir.sanitizePath} && git tag"
|
||||
tags = execAction(cmd).output.splitLines()
|
||||
for tag in tags:
|
||||
let
|
||||
tag = tag.strip()
|
||||
if tag.len != 0:
|
||||
result.add tag
|
||||
|
||||
proc loafExePath(): string =
|
||||
currentSourcePath.parentDir.parentDir / ("loaf".addFileExt ExeExt)
|
||||
|
||||
proc findFiles*(file: string, dir: string, recurse = true, regex = false): seq[string] =
|
||||
## Find all matching files in the specified directory
|
||||
##
|
||||
## `file` is a regular expression if `regex` is true
|
||||
##
|
||||
## Turn off recursive search with `recurse`
|
||||
let
|
||||
loafExe = loafExePath()
|
||||
|
||||
doAssert fileExists(loafExe), "loaf not compiled: " & loafExe.sanitizePath &
|
||||
" make sure 'nimble build' or 'nimble install' built it"
|
||||
|
||||
var
|
||||
cmd = loafExe.quoteShell & " find --rexp $1 \"$2\" $3"
|
||||
recursive = if recurse: "--recurse" else: ""
|
||||
|
||||
var
|
||||
dir = dir
|
||||
file = file
|
||||
# If file = `path/file`, adjust dir = `dir/path` and search for new file
|
||||
if not (recurse or regex):
|
||||
let
|
||||
pdir = file.parentDir()
|
||||
if pdir.len != 0:
|
||||
dir = dir / pdir
|
||||
|
||||
file = file.extractFilename
|
||||
|
||||
cmd = cmd % [recursive, (".*[\\\\/]" & file & "$"), dir.sanitizePath]
|
||||
|
||||
let
|
||||
(files, ret) = execAction(cmd, die = false)
|
||||
if ret == 0:
|
||||
for line in files.splitLines():
|
||||
if line.len != 0:
|
||||
result.add line
|
||||
|
||||
proc findFile*(file: string, dir: string, recurse = true, first = false, regex = false): string =
|
||||
## Find the file in the specified directory
|
||||
##
|
||||
## `file` is a regular expression if `regex` is true
|
||||
##
|
||||
## Turn off recursive search with `recurse` and stop on first match with
|
||||
## `first`. Without it, the shortest match is returned.
|
||||
let
|
||||
matches = findFiles(file, dir, recurse, regex)
|
||||
for match in matches:
|
||||
if (result.len == 0 or result.len > match.len):
|
||||
result = match
|
||||
if first: break
|
||||
|
||||
proc linkLibs*(names: openArray[string], staticLink = true): string =
|
||||
## Create linker flags for specified libraries using pkg-config
|
||||
##
|
||||
## Prepends `lib` to the name so you only need `ssl` for `libssl`.
|
||||
var
|
||||
stat = if staticLink: "--static" else: ""
|
||||
resSet: OrderedSet[string]
|
||||
cmd = &"pkg-config --libs --silence-errors {stat}"
|
||||
resSet.init()
|
||||
|
||||
for name in names:
|
||||
for n in ["lib" & name, name]:
|
||||
# Try libname and name - e.g. MagickWand doesn't have lib
|
||||
let
|
||||
cmd = &"{cmd} {n}"
|
||||
(libs, _) = execAction(cmd, die = false)
|
||||
if libs.len != 0:
|
||||
for lib in libs.split(" "):
|
||||
resSet.incl lib
|
||||
break
|
||||
|
||||
if staticLink:
|
||||
resSet.incl "--static"
|
||||
|
||||
for res in resSet:
|
||||
result &= " " & res
|
||||
|
||||
proc getNumProcs*(): string =
|
||||
## Get number of processors
|
||||
when defined(Windows):
|
||||
getEnv("NUMBER_OF_PROCESSORS").strip()
|
||||
elif defined(linux):
|
||||
execAction("nproc").output.strip()
|
||||
elif defined(macosx) or defined(FreeBSD):
|
||||
execAction("sysctl -n hw.ncpu").output.strip()
|
||||
else:
|
||||
"1"
|
||||
|
||||
proc getProjectCacheDir*(name: string, forceClean = true): string =
|
||||
## Get a cache directory where all nimterop artifacts can be stored
|
||||
##
|
||||
## Projects can use this location to download source code and build binaries
|
||||
## that can be then accessed by multiple apps. This is created under the
|
||||
## per-user Nim cache directory.
|
||||
##
|
||||
## Use `name` to specify the subdirectory name for a project.
|
||||
##
|
||||
## `forceClean` is enabled by default and effectively deletes the folder
|
||||
## if Nim is compiled with the `-f` or `--forceBuild` flag. This allows
|
||||
## any project to start out with a clean cache dir on a forced build.
|
||||
##
|
||||
## NOTE: avoid calling `getProjectCacheDir()` multiple times on the same
|
||||
## `name` when `forceClean = true` else checked out source might get deleted
|
||||
## at the wrong time during build.
|
||||
##
|
||||
## E.g.
|
||||
## `nimgit2` downloads `libgit2` source so `name = "libgit2"`
|
||||
##
|
||||
## `nimarchive` downloads `libarchive`, `bzlib`, `liblzma` and `zlib` so
|
||||
## `name = "nimarchive" / "libarchive"` for `libarchive`, etc.
|
||||
result = getNimteropCacheDir() / name
|
||||
|
||||
if forceClean and compileOption("forceBuild"):
|
||||
gecho "# Removing " & result
|
||||
rmDir(result)
|
||||
|
|
@ -1,256 +0,0 @@
|
|||
import strformat, strutils
|
||||
|
||||
import os except findExe
|
||||
|
||||
import ".."/globals
|
||||
import "."/[misc, shell]
|
||||
|
||||
proc configure*(path, check: string, flags = "") =
|
||||
## Run the GNU `configure` command to generate all Makefiles or other
|
||||
## build scripts in the specified path
|
||||
##
|
||||
## If a `configure` script is not present and an `autogen.sh` script
|
||||
## is present, it will be run before attempting `configure`.
|
||||
##
|
||||
## Next, if `configure.ac` or `configure.in` exist, `autoreconf` will
|
||||
## be executed.
|
||||
##
|
||||
## `check` is a file that will be generated by the `configure` command.
|
||||
## This is required to prevent configure from running on every build. It
|
||||
## is relative to the `path` and should not be an absolute path.
|
||||
##
|
||||
## `flags` are any flags that should be passed to the `configure` command.
|
||||
if (path / check).fileExists():
|
||||
return
|
||||
|
||||
gecho "# Configuring " & path
|
||||
|
||||
if not fileExists(path / "configure"):
|
||||
for i in @["autogen.sh", "build" / "autogen.sh"]:
|
||||
if fileExists(path / i):
|
||||
gecho "# Running autogen.sh"
|
||||
|
||||
when defined(unix):
|
||||
decho execAction(
|
||||
&"cd {(path / i).parentDir().sanitizePath} && ./autogen.sh").output
|
||||
else:
|
||||
decho execAction(
|
||||
&"cd {(path / i).parentDir().sanitizePath} && bash ./autogen.sh").output
|
||||
|
||||
break
|
||||
|
||||
if not fileExists(path / "configure"):
|
||||
for i in @["configure.ac", "configure.in"]:
|
||||
if fileExists(path / i):
|
||||
gecho "# Running autoreconf"
|
||||
|
||||
decho execAction(&"cd {path.sanitizePath} && autoreconf -fi").output
|
||||
|
||||
break
|
||||
|
||||
if fileExists(path / "configure"):
|
||||
gecho "# Running configure " & flags
|
||||
|
||||
when defined(unix):
|
||||
var
|
||||
cmd = &"cd {path.sanitizePath} && ./configure"
|
||||
else:
|
||||
var
|
||||
cmd = &"cd {path.sanitizePath} && bash ./configure"
|
||||
if flags.len != 0:
|
||||
cmd &= &" {flags}"
|
||||
|
||||
decho execAction(cmd).output
|
||||
|
||||
doAssert (path / check).fileExists(), "Configure failed"
|
||||
|
||||
proc getCmakePropertyStr(name, property, value: string): string =
|
||||
&"\nset_target_properties({name} PROPERTIES {property} \"{value}\")\n"
|
||||
|
||||
proc getCmakeIncludePath*(paths: openArray[string]): string =
|
||||
## Create a `cmake` flag to specify custom include paths
|
||||
##
|
||||
## Result can be included in the `flag` parameter for `cmake()` or
|
||||
## the `cmakeFlags` parameter for `getHeader()`.
|
||||
for path in paths:
|
||||
result &= path & ";"
|
||||
result = " -DCMAKE_INCLUDE_PATH=" & result[0 .. ^2].sanitizePath(sep = "/")
|
||||
|
||||
proc setCmakeProperty*(outdir, name, property, value: string) =
|
||||
## Set a `cmake` property in `outdir / CMakeLists.txt` - usable in the `xxxPreBuild` hook
|
||||
## for `getHeader()`
|
||||
##
|
||||
## `set_target_properties(name PROPERTIES property "value")`
|
||||
let
|
||||
cm = outdir / "CMakeLists.txt"
|
||||
if cm.fileExists():
|
||||
cm.writeFile(
|
||||
cm.readFile() & getCmakePropertyStr(name, property, value)
|
||||
)
|
||||
|
||||
proc setCmakeLibName*(outdir, name, prefix = "", oname = "", suffix = "") =
|
||||
## Set a `cmake` property in `outdir / CMakeLists.txt` to specify a custom library output
|
||||
## name - usable in the `xxxPreBuild` hook for `getHeader()`
|
||||
##
|
||||
## `prefix` is typically `lib`
|
||||
## `oname` is the library name
|
||||
## `suffix` is typically `.a`
|
||||
##
|
||||
## Sometimes, `cmake` generates non-standard library names - e.g. zlib compiles to
|
||||
## `libzlibstatic.a` on Windows. This proc can help rename it to `libzlib.a` so that `getHeader()`
|
||||
## can find it after the library is compiled.
|
||||
##
|
||||
## ```
|
||||
## set_target_properties(name PROPERTIES PREFIX "prefix")
|
||||
## set_target_properties(name PROPERTIES OUTPUT_NAME "oname")
|
||||
## set_target_properties(name PROPERTIES SUFFIX "suffix")
|
||||
## ```
|
||||
let
|
||||
cm = outdir / "CMakeLists.txt"
|
||||
if cm.fileExists():
|
||||
var
|
||||
str = ""
|
||||
if prefix.len != 0:
|
||||
str &= getCmakePropertyStr(name, "PREFIX", prefix)
|
||||
if oname.len != 0:
|
||||
str &= getCmakePropertyStr(name, "OUTPUT_NAME", oname)
|
||||
if suffix.len != 0:
|
||||
str &= getCmakePropertyStr(name, "SUFFIX", suffix)
|
||||
if str.len != 0:
|
||||
cm.writeFile(cm.readFile() & str)
|
||||
|
||||
proc setCmakePositionIndependentCode*(outdir: string) =
|
||||
## Set a `cmake` directive to create libraries with -fPIC enabled
|
||||
let
|
||||
cm = outdir / "CMakeLists.txt"
|
||||
if cm.fileExists():
|
||||
let
|
||||
pic = "set(CMAKE_POSITION_INDEPENDENT_CODE ON)"
|
||||
cmd = cm.readFile()
|
||||
if not cmd.contains(pic):
|
||||
cm.writeFile(
|
||||
pic & "\n" & cmd
|
||||
)
|
||||
|
||||
proc cmake*(path, check, flags: string) =
|
||||
## Run the `cmake` command to generate all Makefiles or other
|
||||
## build scripts in the specified path
|
||||
##
|
||||
## `path` will be created since typically `cmake` is run in an
|
||||
## empty directory.
|
||||
##
|
||||
## `check` is a file that will be generated by the `cmake` command.
|
||||
## This is required to prevent `cmake` from running on every build. It
|
||||
## is relative to the `path` and should not be an absolute path.
|
||||
##
|
||||
## `flags` are any flags that should be passed to the `cmake` command.
|
||||
## Unlike `configure`, it is required since typically it will be the
|
||||
## path to the repository, typically `..` when `path` is a subdir.
|
||||
if (path / check).fileExists():
|
||||
return
|
||||
|
||||
gecho "# Running cmake " & flags
|
||||
gecho "# Path: " & path
|
||||
|
||||
mkDir(path)
|
||||
|
||||
let
|
||||
cmd = &"cd {path.sanitizePath} && cmake {flags}"
|
||||
|
||||
decho execAction(cmd).output
|
||||
|
||||
doAssert (path / check).fileExists(), "cmake failed"
|
||||
|
||||
proc make*(path, check: string, flags = "", regex = false) =
|
||||
## Run the `make` command to build all binaries in the specified path
|
||||
##
|
||||
## `check` is a file that will be generated by the `make` command.
|
||||
## This is required to prevent `make` from running on every build. It
|
||||
## is relative to the `path` and should not be an absolute path.
|
||||
##
|
||||
## `flags` are any flags that should be passed to the `make` command.
|
||||
##
|
||||
## `regex` can be set to true if `check` is a regular expression.
|
||||
##
|
||||
## If `make.exe` is missing and `mingw32-make.exe` is available, it will
|
||||
## be copied over to make.exe in the same location.
|
||||
if findFile(check, path, regex = regex).len != 0:
|
||||
return
|
||||
|
||||
gecho "# Running make " & flags
|
||||
gecho "# Path: " & path
|
||||
|
||||
var
|
||||
cmd = findExe("make")
|
||||
|
||||
if cmd.len == 0:
|
||||
cmd = findExe("mingw32-make")
|
||||
if cmd.len != 0:
|
||||
cpFile(cmd, cmd.replace("mingw32-make", "make"))
|
||||
doAssert cmd.len != 0, "Make not found"
|
||||
|
||||
cmd = &"cd {path.sanitizePath} && make -j {getNumProcs()}"
|
||||
if flags.len != 0:
|
||||
cmd &= &" {flags}"
|
||||
|
||||
decho execAction(cmd).output
|
||||
|
||||
doAssert findFile(check, path, regex = regex).len != 0, "make failed"
|
||||
|
||||
proc buildWithCmake*(outdir, flags: string): BuildStatus =
|
||||
if not fileExists(outdir / "Makefile"):
|
||||
if fileExists(outdir / "CMakeLists.txt"):
|
||||
if findExe("cmake").len != 0:
|
||||
var
|
||||
gen = ""
|
||||
when defined(Windows):
|
||||
if findExe("sh").len != 0:
|
||||
let
|
||||
uname = execAction("sh -c uname -a").output.toLowerAscii()
|
||||
if uname.contains("msys"):
|
||||
gen = "MSYS Makefiles".quoteShell
|
||||
elif uname.contains("mingw"):
|
||||
gen = "MinGW Makefiles".quoteShell & " -DCMAKE_SH=\"CMAKE_SH-NOTFOUND\""
|
||||
else:
|
||||
gecho "Unsupported system: " & uname
|
||||
else:
|
||||
gen = "MinGW Makefiles".quoteShell
|
||||
else:
|
||||
gen = "Unix Makefiles".quoteShell
|
||||
if findExe("ccache").len != 0:
|
||||
gen &= " -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
|
||||
result.buildPath = outdir / "buildcache"
|
||||
cmake(result.buildPath, "Makefile", &".. -G {gen} {flags}")
|
||||
result.built = true
|
||||
else:
|
||||
result.error = "cmake capable but cmake executable missing"
|
||||
else:
|
||||
result.buildPath = outdir
|
||||
|
||||
proc buildWithAutoConf*(outdir, flags: string): BuildStatus =
|
||||
if not fileExists(outdir / "Makefile"):
|
||||
if findExe("bash").len != 0:
|
||||
for file in @["configure", "configure.ac", "configure.in", "autogen.sh", "build/autogen.sh"]:
|
||||
if fileExists(outdir / file):
|
||||
configure(outdir, "Makefile", flags)
|
||||
result.buildPath = outdir
|
||||
result.built = true
|
||||
break
|
||||
else:
|
||||
result.error = "configure capable but bash executable missing"
|
||||
else:
|
||||
result.buildPath = outdir
|
||||
|
||||
proc flagBuild*(base: string, flags: openArray[string]): string =
|
||||
## Simple helper proc to generate flags for `configure`, `cmake`, etc.
|
||||
##
|
||||
## Every entry in `flags` is replaced into the `base` string and
|
||||
## concatenated to the result.
|
||||
##
|
||||
## E.g.
|
||||
## `base = "--disable-$#"`
|
||||
## `flags = @["one", "two"]`
|
||||
##
|
||||
## `flagBuild(base, flags) => " --disable-one --disable-two"`
|
||||
for i in flags:
|
||||
result &= " " & base % i
|
||||
|
|
@ -1,17 +1,90 @@
|
|||
##[
|
||||
This is the main nimterop import file to help with wrapping C/C++ source code.
|
||||
|
||||
Check out `template.nim <https://github.com/nimterop/nimterop/blob/master/nimterop/template.nim>`_
|
||||
as a starting point for wrapping a new library. The template can be copied and
|
||||
trimmed down and modified as required. `templite.nim <https://github.com/nimterop/nimterop/blob/master/nimterop/templite.nim>`_ is a shorter
|
||||
version for more experienced users.
|
||||
|
||||
All `{.compileTime.}` procs must be used in a compile time context, e.g. using:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
static:
|
||||
cAddStdDir()
|
||||
|
||||
]##
|
||||
|
||||
import hashes, macros, os, strformat, strutils
|
||||
|
||||
import "."/[globals, paths]
|
||||
import "."/build/[ccompiler, misc, nimconf, shell]
|
||||
import regex
|
||||
|
||||
import "."/[build, globals, paths, types]
|
||||
export types
|
||||
|
||||
proc interpPath(dir: string): string=
|
||||
# TODO: more robust: needs a DirSep after "$projpath"
|
||||
# disabling this interpolation as this is error prone, but other less
|
||||
# interpolations can be added, eg see https://github.com/nim-lang/Nim/pull/10530
|
||||
# result = dir.replace("$projpath", getProjectPath())
|
||||
result = dir
|
||||
|
||||
proc joinPathIfRel(path1: string, path2: string): string =
|
||||
if path2.isAbsolute:
|
||||
result = path2
|
||||
else:
|
||||
result = joinPath(path1, path2)
|
||||
|
||||
proc findPath(path: string, fail = true): string =
|
||||
# Relative to project path
|
||||
let
|
||||
path = fixRelPath(path)
|
||||
result = path.replace("\\", "/")
|
||||
result = joinPathIfRel(getProjectPath(), path).replace("\\", "/")
|
||||
if not fileExists(result) and not dirExists(result):
|
||||
doAssert (not fail), "File or directory not found: " & path
|
||||
result = ""
|
||||
|
||||
proc walkDirImpl(indir, inext: string, file=true): seq[string] =
|
||||
let
|
||||
dir = joinPathIfRel(getProjectPath(), indir)
|
||||
ext =
|
||||
if inext.nBl:
|
||||
when not defined(Windows):
|
||||
"-name " & inext
|
||||
else:
|
||||
"\\" & inext
|
||||
else:
|
||||
""
|
||||
|
||||
let
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
if file:
|
||||
"cmd /c dir /s/b/a-d " & dir.replace("/", "\\") & ext
|
||||
else:
|
||||
"cmd /c dir /s/b/ad " & dir.replace("/", "\\")
|
||||
else:
|
||||
if file:
|
||||
"find $1 -type f $2" % [dir, ext]
|
||||
else:
|
||||
"find $1 -type d" % dir
|
||||
|
||||
(output, ret) = execAction(cmd, die = false)
|
||||
|
||||
if ret == 0:
|
||||
result = output.splitLines()
|
||||
|
||||
proc getFileDate(fullpath: string): string =
|
||||
var
|
||||
ret = 0
|
||||
cmd =
|
||||
when defined(Windows):
|
||||
&"cmd /c for %a in ({fullpath.sanitizePath}) do echo %~ta"
|
||||
elif defined(Linux):
|
||||
&"stat -c %y {fullpath.sanitizePath}"
|
||||
elif defined(OSX) or defined(FreeBSD):
|
||||
&"stat -f %m {fullpath.sanitizePath}"
|
||||
|
||||
(result, ret) = execAction(cmd)
|
||||
|
||||
proc getCacheValue(fullpath: string): string =
|
||||
if not gStateCT.nocache:
|
||||
result = fullpath.getFileDate()
|
||||
|
|
@ -21,30 +94,47 @@ proc getCacheValue(fullpaths: seq[string]): string =
|
|||
for fullpath in fullpaths:
|
||||
result &= getCacheValue(fullpath)
|
||||
|
||||
proc getNimCheckError(nimFile: string) =
|
||||
proc getToastError(output: string): string =
|
||||
# Filter out preprocessor errors
|
||||
for line in output.splitLines():
|
||||
if "fatal error:" in line.toLowerAscii:
|
||||
if result.len == 0:
|
||||
result = "\n\nFailed in preprocessing, check if `cIncludeDir()` is needed or compiler `mode` is correct (c/cpp)"
|
||||
result &= "\n\nERROR:$1\n" % line.split("fatal error:")[1]
|
||||
|
||||
# Toast error
|
||||
if result.Bl:
|
||||
result = "\n\n" & output
|
||||
|
||||
proc getNimCheckError(output: string): tuple[tmpFile, errors: string] =
|
||||
let
|
||||
hash = output.hash().abs()
|
||||
|
||||
result.tmpFile = getProjectCacheDir("failed", forceClean = false) / "nimterop_" & $hash & ".nim"
|
||||
|
||||
if not fileExists(result.tmpFile) or gStateCT.nocache or compileOption("forceBuild"):
|
||||
mkDir(result.tmpFile.parentDir())
|
||||
writeFile(result.tmpFile, output)
|
||||
|
||||
doAssert fileExists(result.tmpFile), "Failed to write to cache dir: " & result.tmpFile
|
||||
|
||||
let
|
||||
(check, _) = execAction(
|
||||
&"{getCurrentNimCompiler()} check {nimFile.sanitizePath}",
|
||||
&"{getCurrentNimCompiler()} check {result.tmpFile.sanitizePath}",
|
||||
die = false
|
||||
)
|
||||
|
||||
doAssert false, &"\n\n{check}\n\n" &
|
||||
"Codegen limitation or error - review 'nim check' output above generated for " & nimFile
|
||||
result.errors = "\n\n" & check
|
||||
|
||||
proc getToast(fullpaths: seq[string], recurse: bool = false, dynlib: string = "",
|
||||
mode = "c", flags = "", outFile = "", noNimout = false): string =
|
||||
mode = "c", flags = "", noNimout = false): string =
|
||||
var
|
||||
ret = 0
|
||||
cmd = when defined(Windows): "cmd /c " else: ""
|
||||
ext = "h"
|
||||
|
||||
let
|
||||
toastExe = toastExePath()
|
||||
# see https://github.com/nimterop/nimterop/issues/69
|
||||
cacheKey = getCacheValue(toastExe) & getCacheValue(fullpaths)
|
||||
|
||||
let toastExe = toastExePath()
|
||||
doAssert fileExists(toastExe), "toast not compiled: " & toastExe.sanitizePath &
|
||||
" make sure 'nimble build' or 'nimble install' built it"
|
||||
|
||||
cmd &= &"{toastExe} --preprocess -m:{mode}"
|
||||
|
||||
if recurse:
|
||||
|
|
@ -59,21 +149,6 @@ proc getToast(fullpaths: seq[string], recurse: bool = false, dynlib: string = ""
|
|||
for i in gStateCT.includeDirs:
|
||||
cmd.add &" --includeDirs+={i.sanitizePath}"
|
||||
|
||||
for i in gStateCT.exclude:
|
||||
cmd.add &" --exclude+={i.sanitizePath}"
|
||||
|
||||
for i in gStateCT.passC:
|
||||
cmd.add &" --passC+={i.quoteShell}"
|
||||
gStateCT.passC = @[]
|
||||
|
||||
for i in gStateCT.passL:
|
||||
cmd.add &" --passL+={i.quoteShell}"
|
||||
gStateCT.passL = @[]
|
||||
|
||||
for i in gStateCT.compile:
|
||||
cmd.add &" --compile+={i.sanitizePath}"
|
||||
gStateCT.compile = @[]
|
||||
|
||||
if not noNimout:
|
||||
cmd.add &" --pnim"
|
||||
|
||||
|
|
@ -88,349 +163,19 @@ proc getToast(fullpaths: seq[string], recurse: bool = false, dynlib: string = ""
|
|||
if gStateCT.pluginSourcePath.nBl:
|
||||
cmd.add &" --pluginSourcePath={gStateCT.pluginSourcePath.sanitizePath}"
|
||||
|
||||
ext = "nim"
|
||||
|
||||
for fullpath in fullpaths:
|
||||
cmd.add &" {fullpath.sanitizePath}"
|
||||
|
||||
let
|
||||
cacheFile = getNimteropCacheDir() / "toastCache" / "nimterop_" &
|
||||
($(cmd & cacheKey).hash().abs()).addFileExt(ext)
|
||||
|
||||
if outFile.nBl:
|
||||
result = fixRelPath(outFile)
|
||||
else:
|
||||
result = cacheFile
|
||||
|
||||
when defined(Windows):
|
||||
result = result.replace(DirSep, '/')
|
||||
|
||||
let
|
||||
# When to regenerate the wrapper
|
||||
regen =
|
||||
if gStateCT.nocache or compileOption("forceBuild"):
|
||||
# No caching or forced
|
||||
true
|
||||
elif not fileExists(result):
|
||||
# Cache or outfile doesn't exist
|
||||
true
|
||||
elif outFile.nBl and (not fileExists(cacheFile) or
|
||||
result.getFileDate() > cacheFile.getFileDate()):
|
||||
# Outfile exists but cache doesn't or outdated
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
if regen:
|
||||
let
|
||||
dir = result.parentDir()
|
||||
if not dirExists(dir):
|
||||
mkDir(dir)
|
||||
|
||||
cmd.add &" -o {result.sanitizePath}"
|
||||
|
||||
var
|
||||
(output, ret) = execAction(cmd, die = false)
|
||||
if ret != 0:
|
||||
# If toast fails, print failure to output and delete any generated files
|
||||
let errout = if result.fileExists(): result.readFile() & output else: output
|
||||
rmFile(result)
|
||||
doAssert false, "\n\n" & errout & "\n"
|
||||
|
||||
# Write empty cache file to track changes when outFile specified
|
||||
if outFile.nBl:
|
||||
let dir = cacheFile.parentDir()
|
||||
if not dirExists(dir):
|
||||
mkdir(dir)
|
||||
|
||||
writeFile(cacheFile, "")
|
||||
|
||||
proc cDebug*() {.compileTime.} =
|
||||
## Enable debug messages and display the generated Nim code
|
||||
gStateCT.debug = true
|
||||
|
||||
proc cDisableCaching*() {.compileTime.} =
|
||||
## Disable caching of generated Nim code - useful during wrapper development
|
||||
##
|
||||
## If files included by header being processed by
|
||||
## `cImport()` change and affect the generated content, they will be ignored
|
||||
## and the cached value will continue to be used . Use `cDisableCaching()` to
|
||||
## avoid this scenario during development.
|
||||
##
|
||||
## `nim -f` can also be used to flush the cached content.
|
||||
gStateCT.nocache = true
|
||||
|
||||
proc cSearchPath*(path: string): string {.compileTime.} =
|
||||
## Get full path to file or directory `path` in search path configured
|
||||
## using `cAddSearchDir()` and `cAddStdDir()`.
|
||||
##
|
||||
## This can be used to locate files or directories that can be passed onto
|
||||
## `cCompile()`, `cIncludeDir()` and `cImport()`.
|
||||
result = findPath(path, fail = false)
|
||||
if result.Bl:
|
||||
var found = false
|
||||
for inc in gStateCT.searchDirs:
|
||||
result = findPath(inc / path, fail = false)
|
||||
if result.nBl:
|
||||
found = true
|
||||
break
|
||||
doAssert found, "File or directory not found: " & path &
|
||||
" gStateCT.searchDirs: " & $gStateCT.searchDirs
|
||||
|
||||
proc cAddSearchDir*(dir: string) {.compileTime.} =
|
||||
## Add directory `dir` to the search path used in calls to
|
||||
## `cSearchPath()`.
|
||||
runnableExamples:
|
||||
import nimterop/paths, os
|
||||
static:
|
||||
cAddSearchDir testsIncludeDir()
|
||||
doAssert cSearchPath("test.h").fileExists
|
||||
|
||||
if dir notin gStateCT.searchDirs:
|
||||
gStateCT.searchDirs.add(dir)
|
||||
|
||||
proc cAddStdDir*(mode = "c") {.compileTime.} =
|
||||
## Add the standard `c` [default] or `cpp` include paths to search
|
||||
## path used in calls to `cSearchPath()`.
|
||||
runnableExamples:
|
||||
import os
|
||||
static:
|
||||
cAddStdDir()
|
||||
doAssert cSearchPath("math.h").fileExists
|
||||
for inc in getGccPaths(mode):
|
||||
cAddSearchDir inc
|
||||
|
||||
macro cDefine*(name: static[string], val: static[string] = ""): untyped =
|
||||
## `#define` an identifer that is forwarded to the C/C++ preprocessor if
|
||||
## called within `cImport()` or `c2nImport()` as well as to the C/C++
|
||||
## compiler during Nim compilation using `{.passC: "-DXXX".}`
|
||||
##
|
||||
## This needs to be called before `cImport()` to take effect.
|
||||
var str = name
|
||||
if val.nBl:
|
||||
str &= &"={val.quoteShell}"
|
||||
|
||||
if str notin gStateCT.defines:
|
||||
gStateCT.defines.add(str)
|
||||
|
||||
macro cDefine*(values: static seq[string]): untyped =
|
||||
## `#define` multiple identifers that are forwarded to the C/C++ preprocessor
|
||||
## if called within `cImport()` or `c2nImport()` as well as to the C/C++
|
||||
## compiler during Nim compilation using `{.passC: "-DXXX".}`
|
||||
##
|
||||
## This needs to be called before `cImport()` to take effect.
|
||||
for value in values:
|
||||
let
|
||||
spl = value.split("=", maxsplit = 1)
|
||||
name = spl[0]
|
||||
val = if spl.len == 2: spl[1] else: ""
|
||||
discard quote do:
|
||||
cDefine(`name`, `val`)
|
||||
|
||||
macro cIncludeDir*(dirs: static seq[string], exclude: static[bool] = false): untyped =
|
||||
## Add include directories that are forwarded to the C/C++ preprocessor if
|
||||
## called within `cImport()` or `c2nImport()` as well as to the C/C++
|
||||
## compiler during Nim compilation using `{.passC: "-IXXX".}`.
|
||||
##
|
||||
## Set `exclude = true` if the contents of these include directories should
|
||||
## not be included in the wrapped output.
|
||||
##
|
||||
## This needs to be called before `cImport()` to take effect.
|
||||
for dir in dirs:
|
||||
let fullpath = findPath(dir)
|
||||
if fullpath notin gStateCT.includeDirs:
|
||||
gStateCT.includeDirs.add(fullpath)
|
||||
if exclude:
|
||||
gStateCT.exclude.add(fullpath)
|
||||
|
||||
macro cIncludeDir*(dir: static[string], exclude: static[bool] = false): untyped =
|
||||
## Add an include directory that is forwarded to the C/C++ preprocessor if
|
||||
## called within `cImport()` or `c2nImport()` as well as to the C/C++
|
||||
## compiler during Nim compilation using `{.passC: "-IXXX".}`.
|
||||
##
|
||||
## Set `exclude = true` if the contents of this include directory should
|
||||
## not be included in the wrapped output.
|
||||
##
|
||||
## This needs to be called before `cImport()` to take effect.
|
||||
return quote do:
|
||||
cIncludeDir(@[`dir`], `exclude` == 1)
|
||||
|
||||
macro cExclude*(paths: static seq[string]): untyped =
|
||||
## Exclude specified paths - files or directories from the wrapped output
|
||||
##
|
||||
## Full path to file or directory is required.
|
||||
result = newNimNode(nnkStmtList)
|
||||
for path in paths:
|
||||
gStateCT.exclude.add path
|
||||
|
||||
macro cExclude*(path: static string): untyped =
|
||||
## Exclude specified path - file or directory from the wrapped output.
|
||||
##
|
||||
## Full path to file or directory is required.
|
||||
return quote do:
|
||||
cExclude(@[`path`])
|
||||
|
||||
macro cPassC*(value: static string): untyped =
|
||||
## Create a `{.passC.}` entry that gets forwarded to the C/C++ compiler
|
||||
## during Nim compilation.
|
||||
##
|
||||
## `cPassC()` needs to be called before `cImport()` to take effect and gets
|
||||
## consumed and reset so as not to impact subsequent `cImport()` calls.
|
||||
gStateCT.passC.add value
|
||||
|
||||
macro cPassL*(value: static string): untyped =
|
||||
## Create a `{.passL.}` entry that gets forwarded to the C/C++ compiler
|
||||
## during Nim compilation.
|
||||
##
|
||||
## `cPassL()` needs to be called before `cImport()` to take effect and gets
|
||||
## consumed and reset so as not to impact subsequent `cImport()` calls.
|
||||
gStateCT.passL.add value
|
||||
|
||||
macro cCompile*(path: static string, mode: static[string] = "c", exclude: static[string] = ""): untyped =
|
||||
## Compile and link C/C++ implementation into resulting binary using `{.compile.}`
|
||||
##
|
||||
## `path` can be a specific file or contain `*` wildcard for filename:
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("file.c")
|
||||
## cCompile("path/to/*.c")
|
||||
##
|
||||
## `mode` recursively searches for code files in `path`.
|
||||
##
|
||||
## `c` searches for `*.c` whereas `cpp` searches for `*.C *.cpp *.c++ *.cc *.cxx`
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("path/to/dir", "cpp")
|
||||
##
|
||||
## `exclude` can be used to exclude files by partial string match. Comma separated to
|
||||
## specify multiple exclude strings
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("path/to/dir", exclude="test2.c")
|
||||
##
|
||||
## `cCompile()` needs to be called before `cImport()` to take effect and gets
|
||||
## consumed and reset so as not to impact subsequent `cImport()` calls.
|
||||
|
||||
proc fcompile(file: string) =
|
||||
let
|
||||
(_, fn, ext) = file.splitFile()
|
||||
var
|
||||
ufn = fn
|
||||
uniq = 1
|
||||
while ufn in gStateCT.compcache:
|
||||
ufn = fn & $uniq
|
||||
uniq += 1
|
||||
|
||||
# - https://github.com/nim-lang/Nim/issues/10299
|
||||
# - https://github.com/nim-lang/Nim/issues/10486
|
||||
gStateCT.compcache.add(ufn)
|
||||
if fn == ufn:
|
||||
gStateCT.compile.add file.replace("\\", "/")
|
||||
else:
|
||||
# - https://github.com/nim-lang/Nim/issues/9370
|
||||
let
|
||||
hash = file.hash().abs()
|
||||
tmpFile = file.parentDir() / &"_nimterop_{$hash}_{ufn}{ext}"
|
||||
if not tmpFile.fileExists() or file.getFileDate() > tmpFile.getFileDate():
|
||||
cpFile(file, tmpFile)
|
||||
gStateCT.compile.add tmpFile.replace("\\", "/")
|
||||
|
||||
# Due to https://github.com/nim-lang/Nim/issues/9863
|
||||
# cannot use seq[string] for excludes
|
||||
proc notExcluded(file, exclude: string): bool =
|
||||
result = true
|
||||
if "_nimterop_" in file:
|
||||
result = false
|
||||
elif exclude.nBl:
|
||||
for excl in exclude.split(","):
|
||||
if excl in file:
|
||||
result = false
|
||||
|
||||
proc dcompile(dir, exclude: string, ext="") =
|
||||
let
|
||||
(dir, pat) =
|
||||
if "*" in dir:
|
||||
dir.splitPath()
|
||||
else:
|
||||
(dir, "")
|
||||
|
||||
for file in walkDirRec(dir):
|
||||
if ext.nBl or pat.nBl:
|
||||
let
|
||||
fext = file.splitFile().ext
|
||||
if (ext.nBl and fext != ext) or (pat.nBl and fext != pat[1 .. ^1]):
|
||||
continue
|
||||
if file.notExcluded(exclude):
|
||||
fcompile(file)
|
||||
|
||||
if "*" in path:
|
||||
dcompile(path, exclude)
|
||||
else:
|
||||
let fpath = findPath(path)
|
||||
if fileExists(fpath) and fpath.notExcluded(exclude):
|
||||
fcompile(fpath)
|
||||
elif dirExists(fpath):
|
||||
if mode.contains("cpp"):
|
||||
for i in @[".cpp", ".c++", ".cc", ".cxx"]:
|
||||
dcompile(fpath, exclude, i)
|
||||
when not defined(Windows):
|
||||
dcompile(fpath, exclude, ".C")
|
||||
else:
|
||||
dcompile(fpath, exclude, ".c")
|
||||
|
||||
macro renderPragma*(): untyped =
|
||||
## All `cDefine()`, `cIncludeDir()`, `cCompile()`, `cPassC()` and `cPassL()`
|
||||
## content typically gets forwarded via `cImport()` to the generated wrapper to be
|
||||
## rendered as part of the output so as to enable standalone wrappers. If `cImport()`
|
||||
## is not being used for some reason, `renderPragma()` can create these pragmas
|
||||
## in the nimterop wrapper itself. A good example is using `getHeader()` without
|
||||
## calling `cImport()`.
|
||||
##
|
||||
## `c2nImport()` already uses this macro so there's no need to use it when typically
|
||||
## wrapping headers.
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
for i in gStateCT.defines:
|
||||
let str = "-D" & i
|
||||
result.add quote do:
|
||||
{.passC: `str`.}
|
||||
|
||||
for i in gStateCT.includeDirs:
|
||||
let str = &"-I{i.quoteShell}"
|
||||
result.add quote do:
|
||||
{.passC: `str`.}
|
||||
|
||||
for i in gStateCT.passC:
|
||||
result.add quote do:
|
||||
{.passC: `i`.}
|
||||
gStateCT.passC = @[]
|
||||
|
||||
for i in gStateCT.passL:
|
||||
result.add quote do:
|
||||
{.passL: `i`.}
|
||||
gStateCT.passL = @[]
|
||||
|
||||
for i in gStateCT.compile:
|
||||
result.add quote do:
|
||||
{.compile: `i`.}
|
||||
gStateCT.compile = @[]
|
||||
|
||||
proc cSkipSymbol*(skips: seq[string]) {.compileTime.} =
|
||||
## Similar to `cOverride()`, this macro allows filtering out symbols not of
|
||||
## interest from the generated output.
|
||||
##
|
||||
## `cSkipSymbol()` only affects calls to `cImport()` that follow it.
|
||||
runnableExamples:
|
||||
static: cSkipSymbol @["proc1", "Type2"]
|
||||
gStateCT.symOverride.add skips
|
||||
# see https://github.com/nimterop/nimterop/issues/69
|
||||
(result, ret) = execAction(cmd, die = false, cache = (not gStateCT.nocache),
|
||||
cacheKey = getCacheValue(fullpaths))
|
||||
doAssert ret == 0, getToastError(result)
|
||||
|
||||
macro cOverride*(body): untyped =
|
||||
## When the wrapper code generated by nimterop is missing certain symbols or not
|
||||
## accurate, it may be required to hand wrap them. Define them in a `cOverride()`
|
||||
## macro block so that Nimterop uses these definitions instead.
|
||||
## accurate, it may be required to hand wrap them. Define them in a
|
||||
## `cOverride() <cimport.html#cOverride.m>`_ macro block so that Nimterop uses
|
||||
## these definitions instead.
|
||||
##
|
||||
## For example:
|
||||
##
|
||||
|
|
@ -451,9 +196,9 @@ macro cOverride*(body): untyped =
|
|||
## cOverride:
|
||||
## proc svGetCallerInfo(fileName: var cstring; lineNumber: var cint)
|
||||
##
|
||||
## Using the `cOverride()` block, nimterop can be instructed to use this
|
||||
## definition of `svGetCallerInfo()` instead. This works for procs, consts
|
||||
## and types.
|
||||
## Using the `cOverride() <cimport.html#cOverride.m>`_ block, nimterop
|
||||
## can be instructed to use this definition of `svGetCallerInfo()` instead.
|
||||
## This works for procs, consts and types.
|
||||
##
|
||||
## `cOverride()` only affects the next `cImport()` call. This is because any
|
||||
## recognized symbols get overridden in place and any remaining symbols get
|
||||
|
|
@ -516,8 +261,18 @@ proc onSymbolOverride*(sym: var Symbol) {.exportc, dynlib.} =
|
|||
|
||||
gStateCT.symOverride.add name
|
||||
|
||||
if names.nBl:
|
||||
decho "Overriding " & names.join(" ")
|
||||
if gStateCT.debug and names.nBl:
|
||||
echo "# Overriding " & names.join(" ")
|
||||
|
||||
proc cSkipSymbol*(skips: seq[string]) {.compileTime.} =
|
||||
## Similar to `cOverride() <cimport.html#cOverride.m>`_, this macro allows
|
||||
## filtering out symbols not of interest from the generated output.
|
||||
##
|
||||
## `cSkipSymbol() <cimport.html#cSkipSymbol%2Cseq[T][string]>`_ only affects calls to
|
||||
## `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_ that follow it.
|
||||
runnableExamples:
|
||||
static: cSkipSymbol @["proc1", "Type2"]
|
||||
gStateCT.symOverride.add skips
|
||||
|
||||
proc cPluginHelper(body: string, imports = "import macros, nimterop/plugin\n\n") =
|
||||
gStateCT.pluginSource = body
|
||||
|
|
@ -538,9 +293,11 @@ proc cPluginHelper(body: string, imports = "import macros, nimterop/plugin\n\n")
|
|||
gStateCT.pluginSourcePath = path
|
||||
|
||||
macro cPlugin*(body): untyped =
|
||||
## When `cOverride()` and `cSkipSymbol()` are not adequate, the `cPlugin()`
|
||||
## macro can be used to customize the generated Nim output. The following
|
||||
## callbacks are available at this time.
|
||||
## When `cOverride() <cimport.html#cOverride.m>`_ and
|
||||
## `cSkipSymbol() <cimport.html#cSkipSymbol%2Cseq[T][string]>`_
|
||||
## are not adequate, the `cPlugin() <cimport.html#cPlugin.m>`_ macro can be used
|
||||
## to customize the generated Nim output. The following callbacks are available at
|
||||
## this time.
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
|
|
@ -570,7 +327,9 @@ macro cPlugin*(body): untyped =
|
|||
## `macros` and `nimterop/plugins` are implicitly imported to provide access to standard
|
||||
## plugin facilities.
|
||||
##
|
||||
## `cPlugin()` only affects calls to `cImport()` that follow it.
|
||||
## `cPlugin() <cimport.html#cPlugin.m>`_ only affects calls to
|
||||
## `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_ that
|
||||
## follow it.
|
||||
runnableExamples:
|
||||
cPlugin:
|
||||
import strutils
|
||||
|
|
@ -608,8 +367,204 @@ macro cPluginPath*(path: static[string]): untyped =
|
|||
doAssert fileExists(path), "Plugin file not found: " & path
|
||||
cPluginHelper(readFile(path), imports = "")
|
||||
|
||||
proc cSearchPath*(path: string): string {.compileTime.}=
|
||||
## Get full path to file or directory `path` in search path configured
|
||||
## using `cAddSearchDir() <cimport.html#cAddSearchDir%2Cstring>`_ and
|
||||
## `cAddStdDir() <cimport.html#cAddStdDir,string>`_.
|
||||
##
|
||||
## This can be used to locate files or directories that can be passed onto
|
||||
## `cCompile() <cimport.html#cCompile.m%2C%2Cstring%2Cstring>`_,
|
||||
## `cIncludeDir() <cimport.html#cIncludeDir.m>`_ and
|
||||
## `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_.
|
||||
|
||||
result = findPath(path, fail = false)
|
||||
if result.Bl:
|
||||
var found = false
|
||||
for inc in gStateCT.searchDirs:
|
||||
result = findPath(inc / path, fail = false)
|
||||
if result.nBl:
|
||||
found = true
|
||||
break
|
||||
doAssert found, "File or directory not found: " & path &
|
||||
" gStateCT.searchDirs: " & $gStateCT.searchDirs
|
||||
|
||||
proc cDebug*() {.compileTime.} =
|
||||
## Enable debug messages and display the generated Nim code
|
||||
gStateCT.debug = true
|
||||
build.gDebugCT = true
|
||||
|
||||
proc cDisableCaching*() {.compileTime.} =
|
||||
## Disable caching of generated Nim code - useful during wrapper development
|
||||
##
|
||||
## If files included by header being processed by
|
||||
## `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## change and affect the generated content, they will be ignored and the cached
|
||||
## value will continue to be used . Use `cDisableCaching() <cimport.html#cDisableCaching>`_
|
||||
## to avoid this scenario during development.
|
||||
##
|
||||
## `nim -f` was broken prior to 0.19.4 but can also be used to flush the cached content.
|
||||
|
||||
gStateCT.nocache = true
|
||||
|
||||
macro cDefine*(name: static string, val: static string = ""): untyped =
|
||||
## `#define` an identifer that is forwarded to the C/C++ preprocessor if
|
||||
## called within `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## or `c2nImport() <cimport.html#c2nImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## as well as to the C/C++ compiler during Nim compilation using `{.passC: "-DXXX".}`
|
||||
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
var str = name
|
||||
# todo: see https://github.com/nimterop/nimterop/issues/100 for
|
||||
# edge case of empty strings
|
||||
if val.nBl:
|
||||
str &= &"={val.quoteShell}"
|
||||
|
||||
if str notin gStateCT.defines:
|
||||
gStateCT.defines.add(str)
|
||||
str = "-D" & str
|
||||
|
||||
result.add quote do:
|
||||
{.passC: `str`.}
|
||||
|
||||
if gStateCT.debug:
|
||||
echo result.repr & "\n"
|
||||
|
||||
proc cAddSearchDir*(dir: string) {.compileTime.} =
|
||||
## Add directory `dir` to the search path used in calls to
|
||||
## `cSearchPath() <cimport.html#cSearchPath,string>`_.
|
||||
runnableExamples:
|
||||
import nimterop/paths, os
|
||||
static:
|
||||
cAddSearchDir testsIncludeDir()
|
||||
doAssert cSearchPath("test.h").existsFile
|
||||
var dir = interpPath(dir)
|
||||
if dir notin gStateCT.searchDirs:
|
||||
gStateCT.searchDirs.add(dir)
|
||||
|
||||
macro cIncludeDir*(dir: static string): untyped =
|
||||
## Add an include directory that is forwarded to the C/C++ preprocessor if
|
||||
## called within `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## or `c2nImport() <cimport.html#c2nImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## as well as to the C/C++ compiler during Nim compilation using `{.passC: "-IXXX".}`.
|
||||
|
||||
var dir = interpPath(dir)
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
let fullpath = findPath(dir)
|
||||
if fullpath notin gStateCT.includeDirs:
|
||||
gStateCT.includeDirs.add(fullpath)
|
||||
let str = &"-I{fullpath.quoteShell}"
|
||||
result.add quote do:
|
||||
{.passC: `str`.}
|
||||
if gStateCT.debug:
|
||||
echo result.repr
|
||||
|
||||
proc cAddStdDir*(mode = "c") {.compileTime.} =
|
||||
## Add the standard `c` [default] or `cpp` include paths to search
|
||||
## path used in calls to `cSearchPath() <cimport.html#cSearchPath,string>`_
|
||||
runnableExamples:
|
||||
static: cAddStdDir()
|
||||
import os
|
||||
doAssert cSearchPath("math.h").existsFile
|
||||
for inc in getGccPaths(mode):
|
||||
cAddSearchDir inc
|
||||
|
||||
macro cCompile*(path: static string, mode = "c", exclude = ""): untyped =
|
||||
## Compile and link C/C++ implementation into resulting binary using `{.compile.}`
|
||||
##
|
||||
## `path` can be a specific file or contain wildcards:
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("file.c")
|
||||
## cCompile("path/to/*.c")
|
||||
##
|
||||
## `mode` recursively searches for code files in `path`.
|
||||
##
|
||||
## `c` searches for `*.c` whereas `cpp` searches for `*.C *.cpp *.c++ *.cc *.cxx`
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("path/to/dir", "cpp")
|
||||
##
|
||||
## `exclude` can be used to exclude files by partial string match. Comma separated to
|
||||
## specify multiple exclude strings
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## cCompile("path/to/dir", exclude="test2.c")
|
||||
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
var
|
||||
stmt = ""
|
||||
|
||||
proc fcompile(file: string): string =
|
||||
let
|
||||
(_, fn, ext) = file.splitFile()
|
||||
var
|
||||
ufn = fn
|
||||
uniq = 1
|
||||
while ufn in gStateCT.compile:
|
||||
ufn = fn & $uniq
|
||||
uniq += 1
|
||||
|
||||
# - https://github.com/nim-lang/Nim/issues/10299
|
||||
# - https://github.com/nim-lang/Nim/issues/10486
|
||||
gStateCT.compile.add(ufn)
|
||||
if fn == ufn:
|
||||
return "{.compile: \"$#\".}\n" % file.replace("\\", "/")
|
||||
else:
|
||||
# - https://github.com/nim-lang/Nim/issues/9370
|
||||
let
|
||||
hash = file.hash().abs()
|
||||
tmpFile = file.parentDir() / &"_nimterop_{$hash}_{ufn}{ext}"
|
||||
if not tmpFile.fileExists() or file.getFileDate() > tmpFile.getFileDate():
|
||||
cpFile(file, tmpFile)
|
||||
return "{.compile: \"$#\".}\n" % tmpFile.replace("\\", "/")
|
||||
|
||||
# Due to https://github.com/nim-lang/Nim/issues/9863
|
||||
# cannot use seq[string] for excludes
|
||||
proc notExcluded(file, exclude: string): bool =
|
||||
result = true
|
||||
if "_nimterop_" in file:
|
||||
result = false
|
||||
elif exclude.nBl:
|
||||
for excl in exclude.split(","):
|
||||
if excl in file:
|
||||
result = false
|
||||
|
||||
proc dcompile(dir, exclude: string, ext=""): string =
|
||||
let
|
||||
files = walkDirImpl(dir, ext)
|
||||
|
||||
for f in files:
|
||||
if f.nBl and f.notExcluded(exclude):
|
||||
result &= fcompile(f)
|
||||
|
||||
if path.contains("*") or path.contains("?"):
|
||||
stmt &= dcompile(path, exclude.strVal())
|
||||
else:
|
||||
let fpath = findPath(path)
|
||||
if fileExists(fpath) and fpath.notExcluded(exclude.strVal()):
|
||||
stmt &= fcompile(fpath)
|
||||
elif dirExists(fpath):
|
||||
if mode.strVal().contains("cpp"):
|
||||
for i in @["*.cpp", "*.c++", "*.cc", "*.cxx"]:
|
||||
stmt &= dcompile(fpath, exclude.strVal(), i)
|
||||
when not defined(Windows):
|
||||
stmt &= dcompile(fpath, exclude.strVal(), "*.C")
|
||||
else:
|
||||
stmt &= dcompile(fpath, exclude.strVal(), "*.c")
|
||||
|
||||
result.add stmt.parseStmt()
|
||||
|
||||
if gStateCT.debug:
|
||||
echo result.repr
|
||||
|
||||
macro cImport*(filenames: static seq[string], recurse: static bool = false, dynlib: static string = "",
|
||||
mode: static string = "c", flags: static string = "", nimFile: static string = ""): untyped =
|
||||
mode: static string = "c", flags: static string = ""): untyped =
|
||||
## Import multiple headers in one shot
|
||||
##
|
||||
## This macro is preferable over multiple individual `cImport()` calls, especially
|
||||
|
|
@ -626,10 +581,10 @@ macro cImport*(filenames: static seq[string], recurse: static bool = false, dynl
|
|||
if gStateCT.pluginSourcePath.Bl:
|
||||
cPluginHelper(gStateCT.pluginSource)
|
||||
|
||||
gecho "# Importing " & fullpaths.join(", ").sanitizePath
|
||||
echo "# Importing " & fullpaths.join(", ").sanitizePath
|
||||
|
||||
let
|
||||
nimFile = getToast(fullpaths, recurse, dynlib, mode, flags, nimFile)
|
||||
output = getToast(fullpaths, recurse, dynlib, mode, flags)
|
||||
|
||||
# Reset plugin and overrides for next cImport
|
||||
if gStateCT.overrides.nBl:
|
||||
|
|
@ -637,28 +592,28 @@ macro cImport*(filenames: static seq[string], recurse: static bool = false, dynl
|
|||
gStateCT.overrides = ""
|
||||
|
||||
if gStateCT.debug:
|
||||
gecho nimFile.readFile()
|
||||
|
||||
gecho "# Saved to " & nimFile
|
||||
echo output
|
||||
|
||||
try:
|
||||
let
|
||||
nimFileNode = newStrLitNode(nimFile.changeFileExt(""))
|
||||
result.add quote do:
|
||||
include `nimFileNode`
|
||||
let body = parseStmt(output)
|
||||
|
||||
result.add body
|
||||
except:
|
||||
getNimCheckError(nimFile)
|
||||
let
|
||||
(tmpFile, errors) = getNimCheckError(output)
|
||||
doAssert false, errors & "\n\nNimterop codegen limitation or error - review 'nim check' output above generated for " & tmpFile
|
||||
|
||||
macro cImport*(filename: static string, recurse: static bool = false, dynlib: static string = "",
|
||||
mode: static string = "c", flags: static string = "", nimFile: static string = ""): untyped =
|
||||
mode: static string = "c", flags: static string = ""): untyped =
|
||||
## Import all supported definitions from specified header file. Generated
|
||||
## content is cached in `nimcache` until `filename` changes unless
|
||||
## `cDisableCaching()` is set. `nim -f` can also be used to flush the cache.
|
||||
## `cDisableCaching() <cimport.html#cDisableCaching>`_ is set. `nim -f`
|
||||
## can also be used after Nim v0.19.4 to flush the cache.
|
||||
##
|
||||
## `recurse` can be used to generate Nim wrappers from `#include` files
|
||||
## referenced in `filename`. This is only done for files in the same
|
||||
## directory as `filename` or in a directory added using
|
||||
## `cIncludeDir()`.
|
||||
## `cIncludeDir() <cimport.html#cIncludeDir.m>`_
|
||||
##
|
||||
## `dynlib` can be used to specify the Nim string to use to specify the dynamic
|
||||
## library to load the imported symbols from. For example:
|
||||
|
|
@ -679,9 +634,9 @@ macro cImport*(filename: static string, recurse: static bool = false, dynlib: st
|
|||
##
|
||||
## cImport("pcre.h", dynlib="dynpcre")
|
||||
##
|
||||
## If `dynlib` is not specified, the C/C++ implementation files can be compiled
|
||||
## in with `cCompile()`, or the `{.passL.}` pragma can be used to specify the
|
||||
## static lib to link.
|
||||
## If `dynlib` is not specified, the C/C++ implementation files can be compiled in
|
||||
## with `cCompile() <cimport.html#cCompile.m%2C%2Cstring%2Cstring>`_, or the
|
||||
## `{.passL.}` pragma can be used to specify the static lib to link.
|
||||
##
|
||||
## `mode` selects the preprocessor and tree-sitter parser to be used to process
|
||||
## the header.
|
||||
|
|
@ -690,89 +645,88 @@ macro cImport*(filename: static string, recurse: static bool = false, dynlib: st
|
|||
## good example would be `--prefix` and `--suffix` which strip leading and
|
||||
## trailing strings from identifiers, `_` being quite common.
|
||||
##
|
||||
## `nimFile` is the location where the generated wrapper should get written.
|
||||
## By default, the generated wrapper is written to `nimcache` and included from
|
||||
## there. `nimFile` makes it possible to write the wrapper to a predetermined
|
||||
## location which can then be directly imported into the main application and
|
||||
## checked into source control if preferred. Importing the nimterop wrapper with
|
||||
## `nimFile` specified still works per usual. If `nimFile` is not an absolute
|
||||
## path, it is relative to the project path.
|
||||
##
|
||||
## `cImport()` consumes and resets preceding `cOverride()` calls. `cPlugin()`
|
||||
## is retained for the next `cImport()` call unless a new `cPlugin()` call is
|
||||
## defined.
|
||||
return quote do:
|
||||
cImport(@[`filename`], bool(`recurse`), `dynlib`, `mode`, `flags`, `nimFile`)
|
||||
cImport(@[`filename`], bool(`recurse`), `dynlib`, `mode`, `flags`)
|
||||
|
||||
macro c2nImport*(filename: static string, recurse: static bool = false, dynlib: static string = "",
|
||||
mode: static string = "c", flags: static string = "", nimFile: static string = ""): untyped =
|
||||
mode: static string = "c", flags: static string = ""): untyped =
|
||||
## Import all supported definitions from specified header file using `c2nim`
|
||||
##
|
||||
## Similar to `cImport()` but uses `c2nim` to generate the Nim wrapper instead
|
||||
## of `toast`. Note that neither `cOverride()`, `cSkipSymbol()` nor `cPlugin()`
|
||||
## have any impact on `c2nim`.
|
||||
## Similar to `cImport() <cimport.html#cImport.m%2C%2Cstring%2Cstring%2Cstring>`_
|
||||
## but uses `c2nim` to generate the Nim wrapper instead of `toast`. Note that neither
|
||||
## `cOverride() <cimport.html#cOverride.m>`_, `cSkipSymbol() <cimport.html#cSkipSymbol%2Cseq[T][string]>`_
|
||||
## nor `cPlugin() <cimport.html#cPlugin.m>`_ have any impact on `c2nim`.
|
||||
##
|
||||
## `toast` is only used to preprocess the header file and `recurse` if specified.
|
||||
## `toast` is only used to preprocess the header file and recurse
|
||||
## if specified.
|
||||
##
|
||||
## `mode` should be set to `cpp` for c2nim to wrap C++ headers.
|
||||
##
|
||||
## `flags` can be used to pass other command line arguments to `c2nim`.
|
||||
##
|
||||
## `nimFile` is the location where the generated wrapper should get written,
|
||||
## similar to `cImport()`.
|
||||
##
|
||||
## `nimterop` does not depend on `c2nim` as a `nimble` dependency so it does not
|
||||
## get installed automatically. Any wrapper or library that requires this proc
|
||||
## needs to install `c2nim` with `nimble install c2nim` or add it as a dependency
|
||||
## in its own `.nimble` file.
|
||||
## `nimterop` does not depend on `c2nim` as a `nimble` dependency so it
|
||||
## does not get installed automatically. Any wrapper or library that requires this proc
|
||||
## needs to install `c2nim` with `nimble install c2nim` or add it as a dependency in
|
||||
## its own `.nimble` file.
|
||||
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
let
|
||||
fullpath = findPath(filename)
|
||||
|
||||
gecho "# Importing " & fullpath & " with c2nim"
|
||||
echo "# Importing " & fullpath & " with c2nim"
|
||||
|
||||
let
|
||||
hFile = getToast(@[fullpath], recurse, dynlib, mode, noNimout = true)
|
||||
nimFile = if nimFile.nBl: fixRelPath(nimFile) else: hFile.changeFileExt("nim")
|
||||
header = "header" & fullpath.splitFile().name.split(seps = {'-', '.'}).join()
|
||||
output = getToast(@[fullpath], recurse, dynlib, mode, noNimout = true)
|
||||
hash = output.hash().abs()
|
||||
hpath = getProjectCacheDir("c2nimCache", forceClean = false) / "nimterop_" & $hash & ".h"
|
||||
npath = hpath[0 .. hpath.rfind('.')] & "nim"
|
||||
header = ("header" & fullpath.splitFile().name.replace(re"[-.]+", ""))
|
||||
|
||||
if not fileExists(nimFile) or gStateCT.nocache or compileOption("forceBuild"):
|
||||
var
|
||||
cmd = when defined(Windows): "cmd /c " else: ""
|
||||
cmd &= &"c2nim {hFile} --header:{header} --out:{nimFile.sanitizePath}"
|
||||
if not fileExists(hpath) or gStateCT.nocache or compileOption("forceBuild"):
|
||||
mkDir(hpath.parentDir())
|
||||
writeFile(hpath, output)
|
||||
|
||||
if dynlib.nBl:
|
||||
cmd.add &" --dynlib:{dynlib}"
|
||||
if mode.contains("cpp"):
|
||||
cmd.add " --cpp"
|
||||
if flags.nBl:
|
||||
cmd.add &" {flags}"
|
||||
doAssert fileExists(hpath), "Unable to write temporary header file: " & hpath
|
||||
|
||||
for i in gStateCT.defines:
|
||||
cmd.add &" --assumedef:{i.quoteShell}"
|
||||
var
|
||||
cmd = when defined(Windows): "cmd /c " else: ""
|
||||
cmd &= &"c2nim {hpath} --header:{header}"
|
||||
|
||||
# Have to create pragmas for c2nim since toast handles this at runtime
|
||||
result.add quote do:
|
||||
renderPragma()
|
||||
if dynlib.nBl:
|
||||
cmd.add &" --dynlib:{dynlib}"
|
||||
if mode.contains("cpp"):
|
||||
cmd.add " --cpp"
|
||||
if flags.nBl:
|
||||
cmd.add &" {flags}"
|
||||
|
||||
let
|
||||
(c2nimout, ret) = execAction(cmd)
|
||||
if ret != 0:
|
||||
rmFile(nimFile)
|
||||
doAssert false, "\n\nc2nim codegen limitation or error - " & c2nimout
|
||||
for i in gStateCT.defines:
|
||||
cmd.add &" --assumedef:{i.quoteShell}"
|
||||
|
||||
nimFile.writeFile(&"const {header} = \"{fullpath}\"\n\n" & readFile(nimFile))
|
||||
let
|
||||
(c2nimout, ret) = execAction(cmd, cache = not gStateCT.nocache,
|
||||
cacheKey = getCacheValue(hpath))
|
||||
|
||||
doAssert ret == 0, "\n\nc2nim codegen limitation or error - " & c2nimout
|
||||
|
||||
var
|
||||
nimout = &"const {header} = \"{fullpath}\"\n\n" & readFile(npath)
|
||||
|
||||
nimout = nimout.
|
||||
replace(re"([u]?int[\d]+)_t", "$1").
|
||||
replace(re"([u]?int)ptr_t", "ptr $1")
|
||||
|
||||
if gStateCT.debug:
|
||||
gecho nimFile.readFile()
|
||||
|
||||
gecho "# Saved to " & nimFile
|
||||
echo nimout
|
||||
|
||||
try:
|
||||
let
|
||||
nimFileNode = newStrLitNode(nimFile.changeFileExt(""))
|
||||
result.add quote do:
|
||||
include `nimFileNode`
|
||||
let body = parseStmt(nimout)
|
||||
|
||||
result.add body
|
||||
except:
|
||||
getNimCheckError(nimFile)
|
||||
let
|
||||
(tmpFile, errors) = getNimCheckError(nimout)
|
||||
doAssert false, errors & "\n\nc2nim codegen limitation or error - review 'nim check' output above generated for " & tmpFile
|
||||
|
|
|
|||
18
nimterop/comphelp.nim
Normal file
18
nimterop/comphelp.nim
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
import compiler/[ast, lineinfos, msgs, options, parser, renderer]
|
||||
|
||||
import "."/[globals, getters]
|
||||
|
||||
proc handleError*(conf: ConfigRef, info: TLineInfo, msg: TMsgKind, arg: string) =
|
||||
# Raise exception in parseString() instead of exiting for errors
|
||||
if msg < warnMin:
|
||||
raise newException(Exception, msgKindToString(msg))
|
||||
|
||||
proc parseString*(gState: State, str: string): PNode =
|
||||
# Parse a string into Nim AST - use custom error handler that raises
|
||||
# an exception rather than exiting on failure
|
||||
try:
|
||||
result = parseString(
|
||||
str, gState.identCache, gState.config, errorHandler = handleError
|
||||
)
|
||||
except:
|
||||
decho getCurrentExceptionMsg()
|
||||
|
|
@ -1,9 +1,12 @@
|
|||
import strformat
|
||||
import macros, strformat
|
||||
|
||||
from os import parentDir, getCurrentCompilerExe, DirSep
|
||||
|
||||
when defined(nimdoc) or (NimMajor, NimMinor) >= (1, 3):
|
||||
from os import paramCount, paramStr
|
||||
|
||||
when defined(nimdoc):
|
||||
from os import getCurrentDir, paramCount, paramStr
|
||||
from os import getCurrentDir
|
||||
|
||||
proc getNimRootDir(): string =
|
||||
#[
|
||||
|
|
@ -32,7 +35,7 @@ proc execAction(cmd: string): string =
|
|||
(result, ret) = gorgeEx(ccmd)
|
||||
doAssert ret == 0, "Command failed: " & $ret & "\ncmd: " & ccmd & "\nresult:\n" & result
|
||||
|
||||
proc buildDocs*(files: openArray[string], path: string, baseDir = getCurrentDir() & $DirSep,
|
||||
proc buildDocs*(files: openArray[string], path: string, baseDir = getProjectPath() & $DirSep,
|
||||
defines: openArray[string] = @[], nimArgs = "") =
|
||||
## Generate docs for all specified nim `files` to the specified `path`
|
||||
##
|
||||
|
|
@ -71,13 +74,13 @@ proc buildDocs*(files: openArray[string], path: string, baseDir = getCurrentDir(
|
|||
for file in files:
|
||||
echo execAction(&"{nim} doc {defStr} {nimArgs} -o:{path} --project --index:on {baseDir & file}")
|
||||
|
||||
echo execAction(&"{nim} buildIndex {nimArgs} -o:{path}/theindex.html {path}")
|
||||
echo execAction(&"{nim} buildIndex -o:{path}/theindex.html {path}")
|
||||
when declared(getNimRootDir):
|
||||
#[
|
||||
this enables doc search, works at least locally with:
|
||||
cd {path} && python -m SimpleHTTPServer 9009
|
||||
]#
|
||||
echo execAction(&"{nim} js {nimArgs} -o:{path}/dochack.js {getNimRootDir()}/tools/dochack/dochack.nim")
|
||||
echo execAction(&"{nim} js -o:{path}/dochack.js {getNimRootDir()}/tools/dochack/dochack.nim")
|
||||
|
||||
for i in 0 .. paramCount():
|
||||
if paramStr(i) == "--publish":
|
||||
|
|
|
|||
|
|
@ -1,42 +0,0 @@
|
|||
import macros
|
||||
|
||||
macro defineEnum(typ: untyped): untyped =
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
# Enum mapped to distinct cint
|
||||
result.add quote do:
|
||||
type `typ`* = distinct cint
|
||||
|
||||
for i in ["+", "-", "*", "div", "mod", "shl", "shr", "or", "and", "xor", "<", "<=", "==", ">", ">="]:
|
||||
let
|
||||
ni = newIdentNode(i)
|
||||
typout = if i[0] in "<=>": newIdentNode("bool") else: typ # comparisons return bool
|
||||
if i[0] == '>': # cannot borrow `>` and `>=` from templates
|
||||
let
|
||||
nopp = if i.len == 2: newIdentNode("<=") else: newIdentNode("<")
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: cint): `typout` = `nopp`(y, x)
|
||||
proc `ni`*(x: cint, y: `typ`): `typout` = `nopp`(y, x)
|
||||
proc `ni`*(x, y: `typ`): `typout` = `nopp`(y, x)
|
||||
else:
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: cint): `typout` {.borrow.}
|
||||
proc `ni`*(x: cint, y: `typ`): `typout` {.borrow.}
|
||||
proc `ni`*(x, y: `typ`): `typout` {.borrow.}
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: int): `typout` = `ni`(x, y.cint)
|
||||
proc `ni`*(x: int, y: `typ`): `typout` = `ni`(x.cint, y)
|
||||
|
||||
let
|
||||
divop = newIdentNode("/") # `/`()
|
||||
dlrop = newIdentNode("$") # `$`()
|
||||
notop = newIdentNode("not") # `not`()
|
||||
result.add quote do:
|
||||
proc `divop`*(x, y: `typ`): `typ` = `typ`((x.float / y.float).cint)
|
||||
proc `divop`*(x: `typ`, y: cint): `typ` = `divop`(x, `typ`(y))
|
||||
proc `divop`*(x: cint, y: `typ`): `typ` = `divop`(`typ`(x), y)
|
||||
proc `divop`*(x: `typ`, y: int): `typ` = `divop`(x, y.cint)
|
||||
proc `divop`*(x: int, y: `typ`): `typ` = `divop`(x.cint, y)
|
||||
|
||||
proc `dlrop`*(x: `typ`): string {.borrow.}
|
||||
proc `notop`*(x: `typ`): `typ` {.borrow.}
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
import macros
|
||||
|
||||
macro defineEnum*(typ: untyped): untyped =
|
||||
result = newNimNode(nnkStmtList)
|
||||
|
||||
# Enum mapped to distinct cint
|
||||
result.add quote do:
|
||||
type `typ`* = distinct cint
|
||||
|
||||
for i in ["+", "-", "*", "div", "mod", "shl", "shr", "or", "and", "xor", "<", "<=", "==", ">", ">="]:
|
||||
let
|
||||
ni = newIdentNode(i)
|
||||
typout = if i[0] in "<=>": newIdentNode("bool") else: typ # comparisons return bool
|
||||
if i[0] == '>': # cannot borrow `>` and `>=` from templates
|
||||
let
|
||||
nopp = if i.len == 2: newIdentNode("<=") else: newIdentNode("<")
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: cint): `typout` = `nopp`(y, x)
|
||||
proc `ni`*(x: cint, y: `typ`): `typout` = `nopp`(y, x)
|
||||
proc `ni`*(x, y: `typ`): `typout` = `nopp`(y, x)
|
||||
else:
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: cint): `typout` {.borrow.}
|
||||
proc `ni`*(x: cint, y: `typ`): `typout` {.borrow.}
|
||||
proc `ni`*(x, y: `typ`): `typout` {.borrow.}
|
||||
result.add quote do:
|
||||
proc `ni`*(x: `typ`, y: int): `typout` = `ni`(x, y.cint)
|
||||
proc `ni`*(x: int, y: `typ`): `typout` = `ni`(x.cint, y)
|
||||
|
||||
let
|
||||
divop = newIdentNode("/") # `/`()
|
||||
dlrop = newIdentNode("$") # `$`()
|
||||
notop = newIdentNode("not") # `not`()
|
||||
result.add quote do:
|
||||
proc `divop`*(x, y: `typ`): `typ` = `typ`((x.float / y.float).cint)
|
||||
proc `divop`*(x: `typ`, y: cint): `typ` = `divop`(x, `typ`(y))
|
||||
proc `divop`*(x: cint, y: `typ`): `typ` = `divop`(`typ`(x), y)
|
||||
proc `divop`*(x: `typ`, y: int): `typ` = `divop`(x, y.cint)
|
||||
proc `divop`*(x: int, y: `typ`): `typ` = `divop`(x.cint, y)
|
||||
|
||||
proc `dlrop`*(x: `typ`): string {.borrow.}
|
||||
proc `notop`*(x: `typ`): `typ` {.borrow.}
|
||||
|
|
@ -1,12 +1,12 @@
|
|||
import strformat, strutils, macros, sets, sequtils
|
||||
import strformat, strutils, macros, sets
|
||||
|
||||
import regex
|
||||
|
||||
import compiler/[ast, renderer]
|
||||
|
||||
import ".."/treesitter/[api, c, cpp]
|
||||
import ".."/globals
|
||||
import "."/[getters, comphelp, tshelp]
|
||||
import "."/treesitter/[api, c, cpp]
|
||||
|
||||
import "."/[globals, getters, comphelp, tshelp]
|
||||
|
||||
# This version of exprparser should be able to handle:
|
||||
#
|
||||
|
|
@ -32,10 +32,6 @@ import "."/[getters, comphelp, tshelp]
|
|||
type
|
||||
ExprParseError* = object of CatchableError
|
||||
|
||||
const
|
||||
CharRegStr = "(\\\\x[[:xdigit:]]{2}|\\\\\\d{3}|\\\\0|\\\\a|\\\\b|\\\\e|\\\\f|\\\\n|\\\\r|\\\\t|\\\\v|\\\\\\\\|\\\\'|\\\\\"|[[:ascii:]])"
|
||||
CharRegex = re(CharRegStr)
|
||||
|
||||
template val(node: TSNode): string =
|
||||
gState.currentExpr.getNodeVal(node)
|
||||
|
||||
|
|
@ -49,14 +45,14 @@ proc getExprIdent*(gState: State, identName: string, kind = nskConst, parent = "
|
|||
##
|
||||
## Returns PNode(nkNone) if the identifier is blank
|
||||
result = newNode(nkNone)
|
||||
if gState.skipIdentValidation or identName notin gState.skippedSyms:
|
||||
if identName notin gState.skippedSyms:
|
||||
var ident = identName
|
||||
if ident != "_":
|
||||
# Process the identifier through cPlugin
|
||||
ident = gState.getIdentifier(ident, kind, parent)
|
||||
if kind == nskType:
|
||||
result = gState.getIdent(ident)
|
||||
elif gState.skipIdentValidation or ident.nBl and ident in gState.constIdentifiers:
|
||||
elif ident.nBl and ident in gState.constIdentifiers:
|
||||
if gState.currentTyCastName.nBl:
|
||||
ident = ident & "." & gState.currentTyCastName
|
||||
result = gState.getIdent(ident)
|
||||
|
|
@ -140,16 +136,17 @@ proc getIntNode(number, suffix: string): PNode {.inline.} =
|
|||
var
|
||||
val: BiggestInt
|
||||
flags: TNodeFlags
|
||||
if number.len > 1 and number[0] == '0':
|
||||
if number[1] in ['x', 'X']:
|
||||
val = parseHexInt(number)
|
||||
flags = {nfBase16}
|
||||
elif number[1] in ['b', 'B']:
|
||||
val = parseBinInt(number)
|
||||
flags = {nfBase2}
|
||||
else:
|
||||
val = parseOctInt(number)
|
||||
flags = {nfBase8}
|
||||
# I realize these regex are wasteful on performance, but
|
||||
# couldn't come up with a better idea.
|
||||
if number.contains(re"0[xX]"):
|
||||
val = parseHexInt(number)
|
||||
flags = {nfBase16}
|
||||
elif number.contains(re"0[bB]"):
|
||||
val = parseBinInt(number)
|
||||
flags = {nfBase2}
|
||||
elif number.contains(re"0[oO]"):
|
||||
val = parseOctInt(number)
|
||||
flags = {nfBase8}
|
||||
else:
|
||||
val = parseInt(number)
|
||||
|
||||
|
|
@ -189,36 +186,25 @@ proc processNumberLiteral(gState: State, node: TSNode): PNode =
|
|||
## Parse a number literal from a TSNode. Can be a float, hex, long, etc
|
||||
result = newNode(nkNone)
|
||||
let nodeVal = node.val
|
||||
var
|
||||
prefix: string
|
||||
number = nodeVal
|
||||
suffix: string
|
||||
|
||||
const
|
||||
singleEndings = ["u", "l", "U", "L"]
|
||||
doubleEndings = ["ul", "UL", "ll", "LL"]
|
||||
tripleEndings = ["ull", "ULL"]
|
||||
var match: RegexMatch
|
||||
const reg = re"(\-)?(0\d+|0[xX][0-9a-fA-F]+|0[bB][01]+|\d+\.\d*[fFlL]?|\d*\.\d+[fFlL]?|\d+)([ulUL]*)"
|
||||
let found = nodeVal.find(reg, match)
|
||||
if found:
|
||||
let
|
||||
prefix = if match.group(0).len > 0: nodeVal[match.group(0)[0]] else: ""
|
||||
number = nodeVal[match.group(1)[0]]
|
||||
suffix = nodeVal[match.group(2)[0]]
|
||||
|
||||
if number.startsWith("-"):
|
||||
number = number[1 ..< number.len]
|
||||
prefix = "-"
|
||||
if tripleEndings.any(proc (s: string): bool = number.endsWith(s)):
|
||||
suffix = number[^3 .. ^1]
|
||||
number = number[0 ..< ^3]
|
||||
elif doubleEndings.any(proc (s: string): bool = number.endsWith(s)):
|
||||
suffix = number[^2 .. ^1]
|
||||
number = number[0 ..< ^2]
|
||||
elif singleEndings.any(proc (s: string): bool = number.endsWith(s)):
|
||||
suffix = $number[number.len - 1]
|
||||
number = number[0 ..< ^1]
|
||||
result = getNumNode(number, suffix)
|
||||
|
||||
result = getNumNode(number, suffix)
|
||||
|
||||
if result.kind != nkNone and prefix == "-":
|
||||
result = nkPrefix.newTree(
|
||||
gState.getIdent("-"),
|
||||
result
|
||||
)
|
||||
if result.kind != nkNone and prefix == "-":
|
||||
result = nkPrefix.newTree(
|
||||
gState.getIdent("-"),
|
||||
result
|
||||
)
|
||||
else:
|
||||
raise newException(ExprParseError, &"Could not find a number in number_literal: \"{nodeVal}\"")
|
||||
|
||||
proc processCharacterLiteral(gState: State, node: TSNode): PNode =
|
||||
# Input => 'G'
|
||||
|
|
@ -248,9 +234,13 @@ proc processStringLiteral(gState: State, node: TSNode): PNode =
|
|||
nodeVal = node.val
|
||||
strVal = nodeVal[1 ..< nodeVal.len - 1]
|
||||
|
||||
const
|
||||
str = "(\\\\x[[:xdigit:]]{2}|\\\\\\d{3}|\\\\0|\\\\a|\\\\b|\\\\e|\\\\f|\\\\n|\\\\r|\\\\t|\\\\v|\\\\\\\\|\\\\'|\\\\\"|[[:ascii:]])"
|
||||
reg = re(str)
|
||||
|
||||
# Convert the c string escape sequences/etc to Nim chars
|
||||
var nimStr = newStringOfCap(nodeVal.len)
|
||||
for m in strVal.findAll(CharRegex):
|
||||
for m in strVal.findAll(reg):
|
||||
nimStr.add(parseChar(strVal[m.group(0)[0]]).chr)
|
||||
|
||||
result = newStrNode(nkStrLit, nimStr)
|
||||
|
|
@ -379,28 +369,18 @@ proc processBinaryExpression(gState: State, node: TSNode, typeofNode: var PNode)
|
|||
|
||||
result.add gState.getIdent(nimSym)
|
||||
let leftNode = gState.processTSNode(left, typeofNode)
|
||||
var tyNode = typeofNode
|
||||
|
||||
if typeofNode.isNil:
|
||||
typeofNode = nkCall.newTree(
|
||||
gState.getIdent("typeof"),
|
||||
leftNode
|
||||
)
|
||||
tyNode = typeofNode
|
||||
|
||||
# Special case of setting the shift left/right type
|
||||
# to be the type of the direct left operand
|
||||
if binarySym in [">>", "<<"]:
|
||||
tyNode = nkCall.newTree(
|
||||
gState.getIdent("typeof"),
|
||||
leftNode
|
||||
)
|
||||
|
||||
let rightNode = gState.processTSNode(right, tyNode)
|
||||
let rightNode = gState.processTSNode(right, typeofNode)
|
||||
|
||||
result.add leftNode
|
||||
result.add nkCall.newTree(
|
||||
tyNode,
|
||||
typeofNode,
|
||||
rightNode
|
||||
)
|
||||
if binarySym == "/":
|
||||
|
|
@ -409,7 +389,7 @@ proc processBinaryExpression(gState: State, node: TSNode, typeofNode: var PNode)
|
|||
# So we need to emulate C here and cast the whole
|
||||
# expression to the type of the first arg
|
||||
result = nkCall.newTree(
|
||||
tyNode,
|
||||
typeofNode,
|
||||
result
|
||||
)
|
||||
|
||||
|
|
@ -545,7 +525,7 @@ proc processTSNode(gState: State, node: TSNode, typeofNode: var PNode): PNode =
|
|||
# once we upgrade
|
||||
of "math_expression", "logical_expression", "relational_expression",
|
||||
"bitwise_expression", "equality_expression", "binary_expression",
|
||||
"shift_expression", "unary_expression":
|
||||
"shift_expression":
|
||||
# Input -> a == b, a != b, !a, ~a, a < b, a > b, a <= b, a >= b, a >> b, a << b
|
||||
# Output ->
|
||||
# typeof(a)(a == typeof(a)(b))
|
||||
|
|
@ -568,29 +548,10 @@ proc processTSNode(gState: State, node: TSNode, typeofNode: var PNode): PNode =
|
|||
# Input -> true, false
|
||||
# Output -> true, false
|
||||
result = gState.parseString(node.val)
|
||||
of "type_descriptor":
|
||||
# Input => int*
|
||||
# (type_descriptor 1 2 4 "int*"
|
||||
# (type_identifier 1 2 3 "int")
|
||||
# (abstract_pointer_declarator 1 3 1 "*")
|
||||
# )
|
||||
#
|
||||
# Output => ptr int
|
||||
#
|
||||
# nkPtrTy(
|
||||
# nkIdent("int")
|
||||
# )
|
||||
let pointerDecl = node.anyChildInTree("abstract_pointer_declarator")
|
||||
|
||||
if pointerDecl.isNil:
|
||||
result = gState.processTSNode(node[0], typeofNode)
|
||||
else:
|
||||
let pointerCount = pointerDecl.getXCount("abstract_pointer_declarator")
|
||||
result = gState.newPtrTree(pointerCount, gState.processTSNode(node[0], typeofNode))
|
||||
of "sized_type_specifier", "primitive_type", "type_identifier":
|
||||
of "type_descriptor", "sized_type_specifier":
|
||||
# Input -> int, unsigned int, long int, etc
|
||||
# Output -> cint, cuint, clong, etc
|
||||
let ty = gState.getType(node.val, parent = node.getName())
|
||||
let ty = getType(node.val)
|
||||
if ty.len > 0:
|
||||
# If ty is not empty, one of C's builtin types has been found
|
||||
result = gState.getExprIdent(ty, nskType, parent=node.getName())
|
||||
|
|
@ -609,10 +570,9 @@ proc processTSNode(gState: State, node: TSNode, typeofNode: var PNode): PNode =
|
|||
else:
|
||||
raise newException(ExprParseError, &"Unsupported node type \"{nodeName}\" for node \"{node.val}\"")
|
||||
|
||||
if result.kind != nkNone:
|
||||
decho "NODE RESULT: ", result
|
||||
decho "NODE RESULT: ", result
|
||||
|
||||
proc parseCExpression*(gState: State, codeRoot: TSNode): PNode =
|
||||
proc parseCExpression*(gState: State, codeRoot: TSNode, name = ""): PNode =
|
||||
## Parse a c expression from a root ts node
|
||||
|
||||
# This var is used for keeping track of the type of the first
|
||||
|
|
@ -628,16 +588,14 @@ proc parseCExpression*(gState: State, codeRoot: TSNode): PNode =
|
|||
decho "UNEXPECTED EXCEPTION: ", e.msg
|
||||
result = newNode(nkNone)
|
||||
|
||||
proc parseCExpression*(gState: State, code: string, name = "", skipIdentValidation = false): PNode =
|
||||
proc parseCExpression*(gState: State, code: string, name = ""): PNode =
|
||||
## Convert the C string to a nim PNode tree
|
||||
gState.currentExpr = code
|
||||
gState.currentTyCastName = name
|
||||
gState.skipIdentValidation = skipIdentValidation
|
||||
|
||||
withCodeAst(gState.currentExpr, gState.mode):
|
||||
result = gState.parseCExpression(root)
|
||||
result = gState.parseCExpression(root, name)
|
||||
|
||||
# Clear the state
|
||||
gState.currentExpr = ""
|
||||
gState.currentTyCastName = ""
|
||||
gState.skipIdentValidation = false
|
||||
gState.currentTyCastName = ""
|
||||
895
nimterop/getters.nim
Normal file
895
nimterop/getters.nim
Normal file
|
|
@ -0,0 +1,895 @@
|
|||
import dynlib, macros, os, sequtils, sets, strformat, strutils, tables, times
|
||||
|
||||
import regex
|
||||
|
||||
import compiler/[ast, idents, lineinfos, msgs, pathutils, renderer]
|
||||
|
||||
import "."/[build, globals, plugin, treesitter/api]
|
||||
|
||||
const gReserved = """
|
||||
addr and as asm
|
||||
bind block break
|
||||
case cast concept const continue converter
|
||||
defer discard distinct div do
|
||||
elif else end enum except export
|
||||
finally for from func
|
||||
if import in include interface is isnot iterator
|
||||
let
|
||||
macro method mixin mod
|
||||
nil not notin
|
||||
of or out
|
||||
proc ptr
|
||||
raise ref return
|
||||
shl shr static
|
||||
template try tuple type
|
||||
using
|
||||
var
|
||||
when while
|
||||
xor
|
||||
yield""".split(Whitespace).toHashSet()
|
||||
|
||||
# Types related
|
||||
|
||||
const
|
||||
gTypeMap* = {
|
||||
# char
|
||||
"char": "cchar",
|
||||
"signed char": "cschar",
|
||||
"unsigned char": "cuchar",
|
||||
|
||||
# short
|
||||
"short": "cshort",
|
||||
"short int": "cshort",
|
||||
"signed short": "cshort",
|
||||
"signed short int": "cshort",
|
||||
"unsigned short": "cushort",
|
||||
"unsigned short int": "cushort",
|
||||
"uShort": "cushort",
|
||||
"u_short": "cushort",
|
||||
|
||||
# int
|
||||
"int": "cint",
|
||||
"signed": "cint",
|
||||
"signed int": "cint",
|
||||
"ssize_t": "int",
|
||||
"unsigned": "cuint",
|
||||
"unsigned int": "cuint",
|
||||
"uInt": "cuint",
|
||||
"u_int": "cuint",
|
||||
"size_t": "uint",
|
||||
|
||||
"int8_t": "int8",
|
||||
"int16_t": "int16",
|
||||
"int32_t": "int32",
|
||||
"int64_t": "int64",
|
||||
|
||||
"intptr_t": "ptr int",
|
||||
|
||||
"Int8": "int8",
|
||||
"Int16": "int16",
|
||||
"Int32": "int32",
|
||||
"Int64": "int64",
|
||||
|
||||
"uint8_t": "uint8",
|
||||
"uint16_t": "uint16",
|
||||
"uint32_t": "uint32",
|
||||
"uint64_t": "uint64",
|
||||
|
||||
"uintptr_t": "ptr uint",
|
||||
|
||||
"Uint8": "uint8",
|
||||
"Uint16": "uint16",
|
||||
"Uint32": "uint32",
|
||||
"Uint64": "uint64",
|
||||
|
||||
# long
|
||||
"long": "clong",
|
||||
"long int": "clong",
|
||||
"signed long": "clong",
|
||||
"signed long int": "clong",
|
||||
"off_t": "clong",
|
||||
"unsigned long": "culong",
|
||||
"unsigned long int": "culong",
|
||||
"uLong": "culong",
|
||||
"u_long": "culong",
|
||||
|
||||
# long long
|
||||
"long long": "clonglong",
|
||||
"long long int": "clonglong",
|
||||
"signed long long": "clonglong",
|
||||
"signed long long int": "clonglong",
|
||||
"off64_t": "clonglong",
|
||||
"unsigned long long": "culonglong",
|
||||
"unsigned long long int": "culonglong",
|
||||
|
||||
# floating point
|
||||
"float": "cfloat",
|
||||
"double": "cdouble",
|
||||
"long double": "clongdouble"
|
||||
}.toTable()
|
||||
|
||||
# Nim type names that shouldn't need to be wrapped again
|
||||
gTypeMapValues* = toSeq(gTypeMap.values).toHashSet()
|
||||
|
||||
proc getType*(str: string): string =
|
||||
if str == "void":
|
||||
return "object"
|
||||
|
||||
result = str.strip(chars={'_'}).replace(re"\s+", " ")
|
||||
|
||||
if gTypeMap.hasKey(result):
|
||||
result = gTypeMap[result]
|
||||
|
||||
# Identifier related
|
||||
|
||||
proc checkIdentifier(name, kind, parent, origName: string) =
|
||||
let
|
||||
parentStr = if parent.nBl: parent & ":" else: ""
|
||||
|
||||
if name.nBl:
|
||||
let
|
||||
origStr = if name != origName: &", originally '{origName}' before 'cPlugin:onSymbol()', still" else: ""
|
||||
errmsg = &"Identifier '{parentStr}{name}' ({kind}){origStr} contains"
|
||||
|
||||
doAssert name[0] != '_' and name[^1] != '_', errmsg & " leading/trailing underscores '_'"
|
||||
|
||||
doAssert (not name.contains("__")): errmsg & " consecutive underscores '_'"
|
||||
|
||||
if parent.nBl:
|
||||
doAssert name.nBl, &"Blank identifier, originally '{parentStr}{origName}' ({kind}), cannot be empty"
|
||||
|
||||
proc getIdentifier*(gState: State, name: string, kind: NimSymKind, parent=""): string =
|
||||
doAssert name.nBl, "Blank identifier error"
|
||||
|
||||
if name notin gState.symOverride or parent.nBl:
|
||||
if gState.onSymbol != nil:
|
||||
# Use onSymbol from plugin provided by user
|
||||
var
|
||||
sym = Symbol(name: name, parent: parent, kind: kind)
|
||||
gState.onSymbol(sym)
|
||||
|
||||
result = sym.name
|
||||
else:
|
||||
result = name
|
||||
|
||||
# Strip out --prefix from CLI if specified
|
||||
for str in gState.prefix:
|
||||
if result.startsWith(str):
|
||||
result = result[str.len .. ^1]
|
||||
|
||||
# Strip out --suffix from CLI if specified
|
||||
for str in gState.suffix:
|
||||
if result.endsWith(str):
|
||||
result = result[0 .. ^(str.len+1)]
|
||||
|
||||
# --replace from CLI if specified
|
||||
for name, value in gState.replace.pairs:
|
||||
if name.len > 1 and name[0] == '@':
|
||||
result = result.replace(re(name[1 .. ^1]), value)
|
||||
else:
|
||||
result = result.replace(name, value)
|
||||
|
||||
checkIdentifier(result, $kind, parent, name)
|
||||
|
||||
if result in gReserved or (result == "object" and kind != nskType):
|
||||
# Enclose in backticks since Nim reserved word
|
||||
result = &"`{result}`"
|
||||
else:
|
||||
# Skip identifier since in symOverride
|
||||
result = ""
|
||||
|
||||
proc getUniqueIdentifier*(gState: State, prefix = ""): string =
|
||||
var
|
||||
name = prefix & "_" & gState.sourceFile.extractFilename().multiReplace([(".", ""), ("-", "")])
|
||||
nimName = name[0] & name[1 .. ^1].replace("_", "").toLowerAscii
|
||||
count = 1
|
||||
|
||||
while (nimName & $count) in gState.identifiers:
|
||||
count += 1
|
||||
|
||||
return name & $count
|
||||
|
||||
proc addNewIdentifer*(gState: State, name: string, override = false): bool =
|
||||
if override or name notin gState.symOverride:
|
||||
let
|
||||
nimName = name[0] & name[1 .. ^1].replace("_", "").toLowerAscii
|
||||
|
||||
if gState.identifiers.hasKey(nimName):
|
||||
doAssert name == gState.identifiers[nimName],
|
||||
&"Identifier '{name}' is a stylistic duplicate of identifier " &
|
||||
&"'{gState.identifiers[nimName]}', use 'cPlugin:onSymbol()' to rename"
|
||||
result = false
|
||||
else:
|
||||
gState.identifiers[nimName] = name
|
||||
result = true
|
||||
|
||||
# Overrides related
|
||||
|
||||
proc getOverride*(gState: State, name: string, kind: NimSymKind): string =
|
||||
# Get cOverride for identifier `name` of `kind` if defined
|
||||
doAssert name.nBl, "Blank identifier error"
|
||||
|
||||
if gState.onSymbolOverride != nil:
|
||||
var
|
||||
nname = gState.getIdentifier(name, kind, "Override")
|
||||
sym = Symbol(name: nname, kind: kind)
|
||||
if nname.nBl:
|
||||
gState.onSymbolOverride(sym)
|
||||
|
||||
if sym.override.nBl and gState.addNewIdentifer(nname, override = true):
|
||||
result = sym.override
|
||||
|
||||
if kind != nskProc:
|
||||
result = result.replace(re"(?m)^(.*?)$", " $1")
|
||||
|
||||
proc getOverrideFinal*(gState: State, kind: NimSymKind): string =
|
||||
# Get all unused cOverride symbols of `kind`
|
||||
let
|
||||
typ = $kind
|
||||
|
||||
if gState.onSymbolOverrideFinal != nil:
|
||||
for i in gState.onSymbolOverrideFinal(typ):
|
||||
result &= "\n" & gState.getOverride(i, kind)
|
||||
|
||||
proc getKeyword*(kind: NimSymKind): string =
|
||||
# Convert `kind` into a Nim keyword
|
||||
# cOverride procs already include `proc` keyword
|
||||
result = ($kind).replace("nsk", "").toLowerAscii()
|
||||
|
||||
# TSNode shortcuts
|
||||
|
||||
proc isNil*(node: TSNode): bool =
|
||||
node.tsNodeIsNull()
|
||||
|
||||
proc len*(node: TSNode): int =
|
||||
if not node.isNil:
|
||||
result = node.tsNodeNamedChildCount().int
|
||||
|
||||
proc `[]`*(node: TSNode, i: SomeInteger): TSNode =
|
||||
if i < type(i)(node.len()):
|
||||
result = node.tsNodeNamedChild(i.uint32)
|
||||
|
||||
proc getName*(node: TSNode): string {.inline.} =
|
||||
if not node.isNil:
|
||||
return $node.tsNodeType()
|
||||
|
||||
proc getNodeVal*(code: var string, node: TSNode): string =
|
||||
if not node.isNil:
|
||||
return code[node.tsNodeStartByte() .. node.tsNodeEndByte()-1].strip()
|
||||
|
||||
proc getNodeVal*(gState: State, node: TSNode): string =
|
||||
gState.code.getNodeVal(node)
|
||||
|
||||
proc getAtom*(node: TSNode): TSNode =
|
||||
if not node.isNil:
|
||||
# Get child node which is topmost atom
|
||||
if node.getName() in gAtoms:
|
||||
return node
|
||||
elif node.len != 0:
|
||||
if node[0].getName() == "type_qualifier":
|
||||
# Skip const, volatile
|
||||
if node.len > 1:
|
||||
return node[1].getAtom()
|
||||
else:
|
||||
return
|
||||
else:
|
||||
return node[0].getAtom()
|
||||
|
||||
proc getStartAtom*(node: TSNode): int =
|
||||
if not node.isNil:
|
||||
# Skip const, volatile and other type qualifiers
|
||||
for i in 0 .. node.len - 1:
|
||||
if node[i].getAtom().getName() notin gAtoms:
|
||||
result += 1
|
||||
else:
|
||||
break
|
||||
|
||||
proc getXCount*(node: TSNode, ntype: string, reverse = false): int =
|
||||
if not node.isNil:
|
||||
# Get number of ntype nodes nested in tree
|
||||
var
|
||||
cnode = node
|
||||
while ntype in cnode.getName():
|
||||
result += 1
|
||||
if reverse:
|
||||
cnode = cnode.tsNodeParent()
|
||||
else:
|
||||
if cnode.len != 0:
|
||||
if cnode[0].getName() == "type_qualifier":
|
||||
# Skip const, volatile
|
||||
if cnode.len > 1:
|
||||
cnode = cnode[1]
|
||||
else:
|
||||
break
|
||||
else:
|
||||
cnode = cnode[0]
|
||||
else:
|
||||
break
|
||||
|
||||
proc getPtrCount*(node: TSNode, reverse = false): int =
|
||||
node.getXCount("pointer_declarator")
|
||||
|
||||
proc getArrayCount*(node: TSNode, reverse = false): int =
|
||||
node.getXCount("array_declarator")
|
||||
|
||||
proc getDeclarator*(node: TSNode): TSNode =
|
||||
if not node.isNil:
|
||||
# Return if child is a function or array declarator
|
||||
if node.getName() in ["function_declarator", "array_declarator"]:
|
||||
return node
|
||||
elif node.len != 0:
|
||||
return node[0].getDeclarator()
|
||||
|
||||
proc getVarargs*(node: TSNode): bool =
|
||||
# Detect ... and add {.varargs.}
|
||||
#
|
||||
# `node` is the param list
|
||||
#
|
||||
# ... is an unnamed node, second last node and ) is last node
|
||||
let
|
||||
nlen = node.tsNodeChildCount()
|
||||
if nlen > 1.uint32:
|
||||
let
|
||||
nval = node.tsNodeChild(nlen - 2.uint32).getName()
|
||||
if nval == "...":
|
||||
result = true
|
||||
|
||||
proc firstChildInTree*(node: TSNode, ntype: string): TSNode =
|
||||
# Search for node type in tree - first children
|
||||
var
|
||||
cnode = node
|
||||
while not cnode.isNil:
|
||||
if cnode.getName() == ntype:
|
||||
return cnode
|
||||
cnode = cnode[0]
|
||||
|
||||
proc anyChildInTree*(node: TSNode, ntype: string): TSNode =
|
||||
# Search for node type anywhere in tree - depth first
|
||||
var
|
||||
cnode = node
|
||||
while not cnode.isNil:
|
||||
if cnode.getName() == ntype:
|
||||
return cnode
|
||||
for i in 0 ..< cnode.len:
|
||||
let
|
||||
ccnode = cnode[i].anyChildInTree(ntype)
|
||||
if not ccnode.isNil:
|
||||
return ccnode
|
||||
if cnode != node:
|
||||
cnode = cnode.tsNodeNextNamedSibling()
|
||||
else:
|
||||
break
|
||||
|
||||
proc mostNestedChildInTree*(node: TSNode): TSNode =
|
||||
# Search for the most nested child of node's type in tree
|
||||
var
|
||||
cnode = node
|
||||
ntype = cnode.getName()
|
||||
while not cnode.isNil and cnode.len != 0 and cnode[0].getName() == ntype:
|
||||
cnode = cnode[0]
|
||||
result = cnode
|
||||
|
||||
proc inChildren*(node: TSNode, ntype: string): bool =
|
||||
# Search for node type in immediate children
|
||||
result = false
|
||||
for i in 0 ..< node.len:
|
||||
if (node[i]).getName() == ntype:
|
||||
result = true
|
||||
break
|
||||
|
||||
proc getLineCol*(code: var string, node: TSNode): tuple[line, col: int] =
|
||||
# Get line number and column info for node
|
||||
let
|
||||
point = node.tsNodeStartPoint()
|
||||
result.line = point.row.int + 1
|
||||
result.col = point.column.int + 1
|
||||
|
||||
proc getLineCol*(gState: State, node: TSNode): tuple[line, col: int] =
|
||||
getLineCol(gState.code, node)
|
||||
|
||||
proc getEndLineCol*(code: var string, node: TSNode): tuple[line, col: int] =
|
||||
# Get line number and column info for node
|
||||
let
|
||||
point = node.tsNodeEndPoint()
|
||||
result.line = point.row.int + 1
|
||||
result.col = point.column.int + 1
|
||||
|
||||
proc getEndLineCol*(gState: State, node: TSNode): tuple[line, col: int] =
|
||||
getEndLineCol(gState.code, node)
|
||||
|
||||
proc getTSNodeNamedChildCountSansComments*(node: TSNode): int =
|
||||
for i in 0 ..< node.len:
|
||||
if node.getName() != "comment":
|
||||
result += 1
|
||||
|
||||
proc getPxName*(node: TSNode, offset: int): string =
|
||||
# Get the xth (grand)parent of the node
|
||||
var
|
||||
np = node
|
||||
count = 0
|
||||
|
||||
while not np.isNil and count < offset:
|
||||
np = np.tsNodeParent()
|
||||
count += 1
|
||||
|
||||
if count == offset and not np.isNil:
|
||||
return np.getName()
|
||||
|
||||
proc printLisp*(code: var string, root: TSNode): string =
|
||||
var
|
||||
node = root
|
||||
nextnode: TSNode
|
||||
depth = 0
|
||||
|
||||
while true:
|
||||
if not node.isNil and depth > -1:
|
||||
result &= spaces(depth)
|
||||
let
|
||||
(line, col) = code.getLineCol(node)
|
||||
result &= &"({$node.tsNodeType()} {line} {col} {node.tsNodeEndByte() - node.tsNodeStartByte()}"
|
||||
let
|
||||
val = code.getNodeVal(node)
|
||||
if "\n" notin val and " " notin val:
|
||||
result &= &" \"{val}\""
|
||||
else:
|
||||
break
|
||||
|
||||
if node.len() != 0:
|
||||
result &= "\n"
|
||||
nextnode = node[0]
|
||||
depth += 1
|
||||
else:
|
||||
result &= ")\n"
|
||||
nextnode = node.tsNodeNextNamedSibling()
|
||||
|
||||
if nextnode.isNil:
|
||||
while true:
|
||||
node = node.tsNodeParent()
|
||||
depth -= 1
|
||||
if depth == -1:
|
||||
break
|
||||
result &= spaces(depth) & ")\n"
|
||||
if node == root:
|
||||
break
|
||||
if not node.tsNodeNextNamedSibling().isNil:
|
||||
node = node.tsNodeNextNamedSibling()
|
||||
break
|
||||
else:
|
||||
node = nextnode
|
||||
|
||||
if node == root:
|
||||
break
|
||||
|
||||
proc printLisp*(gState: State, root: TSNode): string =
|
||||
printLisp(gState.code, root)
|
||||
|
||||
proc getCommented*(str: string): string =
|
||||
"\n# " & str.strip().replace("\n", "\n# ")
|
||||
|
||||
proc printTree*(gState: State, pnode: PNode, offset = ""): string =
|
||||
if not pnode.isNil and gState.debug and pnode.kind != nkNone:
|
||||
result &= "\n# " & offset & $pnode.kind & "("
|
||||
case pnode.kind
|
||||
of nkCharLit:
|
||||
result &= ($pnode.intVal.char).escape & ")"
|
||||
of nkIntLit..nkUInt64Lit:
|
||||
result &= $pnode.intVal & ")"
|
||||
of nkFloatLit..nkFloat128Lit:
|
||||
result &= $pnode.floatVal & ")"
|
||||
of nkStrLit..nkTripleStrLit:
|
||||
result &= pnode.strVal.escape & ")"
|
||||
of nkSym:
|
||||
result &= $pnode.sym & ")"
|
||||
of nkIdent:
|
||||
result &= "\"" & $pnode.ident.s & "\")"
|
||||
else:
|
||||
if pnode.sons.len != 0:
|
||||
for i in 0 ..< pnode.sons.len:
|
||||
result &= gState.printTree(pnode.sons[i], offset & " ")
|
||||
if i != pnode.sons.len - 1:
|
||||
result &= ","
|
||||
result &= "\n# " & offset & ")"
|
||||
else:
|
||||
result &= ")"
|
||||
if offset.len == 0:
|
||||
result &= "\n"
|
||||
|
||||
proc printDebug*(gState: State, node: TSNode) =
|
||||
if gState.debug:
|
||||
gecho ("Input => " & gState.getNodeVal(node)).getCommented()
|
||||
gecho gState.printLisp(node).getCommented()
|
||||
|
||||
proc printDebug*(gState: State, pnode: PNode) =
|
||||
if gState.debug and pnode.kind != nkNone:
|
||||
gecho ("Output => " & $pnode).getCommented()
|
||||
gecho gState.printTree(pnode)
|
||||
|
||||
# Compiler shortcuts
|
||||
|
||||
proc getDefaultLineInfo*(gState: State): TLineInfo =
|
||||
result = newLineInfo(gState.config, gState.sourceFile.AbsoluteFile, 0, 0)
|
||||
|
||||
proc getLineInfo*(gState: State, node: TSNode): TLineInfo =
|
||||
# Get Nim equivalent line:col info from node
|
||||
let
|
||||
(line, col) = gState.getLineCol(node)
|
||||
|
||||
result = newLineInfo(gState.config, gState.sourceFile.AbsoluteFile, line, col)
|
||||
|
||||
proc getIdent*(gState: State, name: string, info: TLineInfo, exported = true): PNode =
|
||||
if name.nBl:
|
||||
# Get ident PNode for name + info
|
||||
let
|
||||
exp = getIdent(gState.identCache, "*")
|
||||
ident = getIdent(gState.identCache, name)
|
||||
|
||||
if exported:
|
||||
result = newNode(nkPostfix)
|
||||
result.add newIdentNode(exp, info)
|
||||
result.add newIdentNode(ident, info)
|
||||
else:
|
||||
result = newIdentNode(ident, info)
|
||||
|
||||
proc getIdent*(gState: State, name: string): PNode =
|
||||
gState.getIdent(name, gState.getDefaultLineInfo(), exported = false)
|
||||
|
||||
proc getIdentName*(node: PNode): string =
|
||||
if not node.isNil:
|
||||
for i in 0 ..< node.len:
|
||||
if node[i].kind == nkIdent and $node[i] != "*":
|
||||
result = $node[i]
|
||||
if result.Bl and node.len > 0:
|
||||
result = node[0].getIdentName()
|
||||
|
||||
proc getNameInfo*(gState: State, node: TSNode, kind: NimSymKind, parent = ""):
|
||||
tuple[name, origname: string, info: TLineInfo] =
|
||||
# Shortcut to get identifier name and info (node value and line:col)
|
||||
result.origname = gState.getNodeVal(node)
|
||||
result.name = gState.getIdentifier(result.origname, kind, parent)
|
||||
if result.name.nBl:
|
||||
if kind == nskType:
|
||||
result.name = result.name.getType()
|
||||
result.info = gState.getLineInfo(node)
|
||||
|
||||
proc getCurrentHeader*(fullpath: string): string =
|
||||
("header" & fullpath.splitFile().name.multiReplace([(".", ""), ("-", "")]))
|
||||
|
||||
proc getPreprocessor*(gState: State, fullpath: string): string =
|
||||
var
|
||||
cmts = if gState.nocomments: "" else: "-CC"
|
||||
cmd = &"""{getCompiler()} -E {cmts} -dD {getGccModeArg(gState.mode)} -w """
|
||||
|
||||
rdata: seq[string] = @[]
|
||||
start = false
|
||||
sfile = fullpath.sanitizePath(noQuote = true)
|
||||
|
||||
for inc in gState.includeDirs:
|
||||
cmd &= &"-I{inc.sanitizePath} "
|
||||
|
||||
for def in gState.defines:
|
||||
cmd &= &"-D{def} "
|
||||
|
||||
# Remove gcc special calls
|
||||
if defined(posix):
|
||||
cmd &= "-D__attribute__\\(x\\)= "
|
||||
else:
|
||||
cmd &= "-D__attribute__(x)= "
|
||||
|
||||
cmd &= "-D__restrict= -D__extension__= "
|
||||
|
||||
cmd &= &"{fullpath.sanitizePath}"
|
||||
|
||||
# Include content only from file
|
||||
for line in execAction(cmd).output.splitLines():
|
||||
# We want to keep blank lines here for comment processing
|
||||
if line.len > 1 and line[0 .. 1] == "# ":
|
||||
start = false
|
||||
let
|
||||
saniLine = line.sanitizePath(noQuote = true)
|
||||
if sfile in saniLine:
|
||||
start = true
|
||||
elif not ("\\" in line) and not ("/" in line) and extractFilename(sfile) in line:
|
||||
start = true
|
||||
elif gState.recurse:
|
||||
let
|
||||
pDir = sfile.expandFilename().parentDir().sanitizePath(noQuote = true)
|
||||
if pDir.Bl or pDir in saniLine:
|
||||
start = true
|
||||
else:
|
||||
for inc in gState.includeDirs:
|
||||
if inc.absolutePath().sanitizePath(noQuote = true) in saniLine:
|
||||
start = true
|
||||
break
|
||||
else:
|
||||
if start:
|
||||
if "#undef" in line:
|
||||
continue
|
||||
rdata.add line
|
||||
return rdata.join("\n")
|
||||
|
||||
converter toString*(kind: Kind): string =
|
||||
return case kind:
|
||||
of exactlyOne:
|
||||
""
|
||||
of oneOrMore:
|
||||
"+"
|
||||
of zeroOrMore:
|
||||
"*"
|
||||
of zeroOrOne:
|
||||
"?"
|
||||
of orWithNext:
|
||||
"!"
|
||||
|
||||
converter toKind*(kind: string): Kind =
|
||||
return case kind:
|
||||
of "+":
|
||||
oneOrMore
|
||||
of "*":
|
||||
zeroOrMore
|
||||
of "?":
|
||||
zeroOrOne
|
||||
of "!":
|
||||
orWithNext
|
||||
else:
|
||||
exactlyOne
|
||||
|
||||
proc getNameKind*(name: string): tuple[name: string, kind: Kind, recursive: bool] =
|
||||
if name[0] == '^':
|
||||
result.recursive = true
|
||||
result.name = name[1 .. ^1]
|
||||
else:
|
||||
result.name = name
|
||||
result.kind = $name[^1]
|
||||
|
||||
if result.kind != exactlyOne:
|
||||
result.name = result.name[0 .. ^2]
|
||||
|
||||
proc getCommentsStr*(gState: State, commentNodes: seq[TSNode]): string =
|
||||
## Generate a comment from a set of comment nodes. Comment is guaranteed
|
||||
## to be able to be rendered using nim doc
|
||||
if commentNodes.len > 0:
|
||||
result = "::"
|
||||
for commentNode in commentNodes:
|
||||
result &= "\n " & gState.getNodeVal(commentNode).strip()
|
||||
|
||||
result = result.replace(re" *(//|/\*\*|\*\*/|/\*|\*/|\*)", "")
|
||||
result = result.multiReplace([("\n", "\n "), ("`", "")]).strip()
|
||||
|
||||
proc getCommentNodes*(gState: State, node: TSNode, maxSearch=1): seq[TSNode] =
|
||||
## Get a set of comment nodes in order of priority. Will search up to ``maxSearch``
|
||||
## nodes before and after the current node
|
||||
##
|
||||
## Priority is (closest line number) > comment before > comment after.
|
||||
## This priority might need to be changed based on the project, but
|
||||
## for now it is good enough
|
||||
|
||||
# Skip this if we don't want comments
|
||||
if gState.nocomments:
|
||||
return
|
||||
|
||||
let (line, _) = gState.getLineCol(node)
|
||||
|
||||
# Keep track of both directions from a node
|
||||
var
|
||||
prevSibling = node.tsNodePrevNamedSibling()
|
||||
nextSibling = node.tsNodeNextNamedSibling()
|
||||
nilNode: TSNode
|
||||
|
||||
var
|
||||
i = 0
|
||||
prevSiblingDistance, nextSiblingDistance: int = int.high
|
||||
lowestDistance: int
|
||||
commentsFound = false
|
||||
|
||||
while not commentsFound and i < maxSearch:
|
||||
# Distance from the current node will tell us approximately if the
|
||||
# comment belongs to the node. The closer it is in terms of line
|
||||
# numbers, the more we can be sure it's the comment we want
|
||||
if not prevSibling.isNil:
|
||||
if prevSibling.getName() == "comment":
|
||||
prevSiblingDistance = abs(gState.getEndLineCol(prevSibling)[0] - line)
|
||||
else:
|
||||
prevSiblingDistance = int.high
|
||||
if not nextSibling.isNil:
|
||||
if nextSibling.getName() == "comment":
|
||||
nextSiblingDistance = abs(gState.getLineCol(nextSibling)[0] - line)
|
||||
else:
|
||||
nextSiblingDistance = int.high
|
||||
|
||||
lowestDistance = min(prevSiblingDistance, nextSiblingDistance)
|
||||
|
||||
if prevSiblingDistance > maxSearch:
|
||||
# If the line is out of range, skip searching
|
||||
prevSibling = nilNode # Can't do `= nil`
|
||||
|
||||
if nextSiblingDistance > maxSearch:
|
||||
# If the line is out of range, skip searching
|
||||
nextSibling = nilNode
|
||||
|
||||
# Search above the current line for comments. When one is found
|
||||
# keep going to retrieve successive comments for cases with multiple
|
||||
# `//` style comments
|
||||
while (
|
||||
not prevSibling.isNil and
|
||||
prevSibling.getName() == "comment" and
|
||||
prevSiblingDistance == lowestDistance
|
||||
):
|
||||
# Put the previous nodes in reverse order so the comments
|
||||
# make logical sense
|
||||
result.insert(prevSibling, 0)
|
||||
prevSibling = prevSibling.tsNodePrevNamedSibling()
|
||||
commentsFound = true
|
||||
|
||||
# If we've already found comments above the current line, quit
|
||||
if commentsFound:
|
||||
break
|
||||
|
||||
# Search below or at the current line for comments. When one is found
|
||||
# keep going to retrieve successive comments for cases with multiple
|
||||
# `//` style comments
|
||||
while (
|
||||
not nextSibling.isNil and
|
||||
nextSibling.getName() == "comment" and
|
||||
nextSiblingDistance == lowestDistance
|
||||
):
|
||||
result.add(nextSibling)
|
||||
nextSibling = nextSibling.tsNodeNextNamedSibling()
|
||||
commentsFound = true
|
||||
|
||||
if commentsFound:
|
||||
break
|
||||
|
||||
# Go to next sibling pair
|
||||
if not prevSibling.isNil:
|
||||
prevSibling = prevSibling.tsNodePrevNamedSibling()
|
||||
if not nextSibling.isNil:
|
||||
nextSibling = nextSibling.tsNodeNextNamedSibling()
|
||||
|
||||
i += 1
|
||||
|
||||
proc getTSNodeNamedChildNames*(node: TSNode): seq[string] =
|
||||
if node.tsNodeNamedChildCount() != 0:
|
||||
for i in 0 .. node.tsNodeNamedChildCount()-1:
|
||||
let
|
||||
name = $node.tsNodeNamedChild(i).tsNodeType()
|
||||
|
||||
if name != "comment":
|
||||
result.add(name)
|
||||
|
||||
proc getRegexForAstChildren*(ast: ref Ast): string =
|
||||
result = "^"
|
||||
for i in 0 .. ast.children.len-1:
|
||||
let
|
||||
kind: string = ast.children[i].kind
|
||||
begin = if result[^1] == '|': "" else: "(?:"
|
||||
case kind:
|
||||
of "!":
|
||||
result &= &"{begin}{ast.children[i].name}|"
|
||||
else:
|
||||
result &= &"{begin}{ast.children[i].name}){kind}"
|
||||
result &= "$"
|
||||
|
||||
proc getAstChildByName*(ast: ref Ast, name: string): ref Ast =
|
||||
for i in 0 .. ast.children.len-1:
|
||||
if name in ast.children[i].name.split("|"):
|
||||
return ast.children[i]
|
||||
|
||||
if ast.children.len == 1 and ast.children[0].name == ".":
|
||||
return ast.children[0]
|
||||
|
||||
proc getNimExpression*(gState: State, expr: string, name = ""): string =
|
||||
# Convert C/C++ expression into Nim - cast identifiers to `name` if specified
|
||||
var
|
||||
clean = expr.multiReplace([("\n", " "), ("\r", "")])
|
||||
ident = ""
|
||||
gen = ""
|
||||
hex = false
|
||||
|
||||
for i in 0 .. clean.len:
|
||||
if i != clean.len:
|
||||
if clean[i] in IdentChars:
|
||||
if clean[i] in Digits and ident.Bl:
|
||||
# Identifiers cannot start with digits
|
||||
gen = $clean[i]
|
||||
elif clean[i] in HexDigits and hex == true:
|
||||
# Part of a hex number
|
||||
gen = $clean[i]
|
||||
elif i > 0 and i < clean.len-1 and clean[i] in ['x', 'X'] and
|
||||
clean[i-1] == '0' and clean[i+1] in HexDigits:
|
||||
# Found a hex number
|
||||
gen = $clean[i]
|
||||
hex = true
|
||||
else:
|
||||
# Part of an identifier
|
||||
ident &= clean[i]
|
||||
hex = false
|
||||
else:
|
||||
gen = (block:
|
||||
if (i == 0 or clean[i-1] != '\'') or
|
||||
(i == clean.len - 1 or clean[i+1] != '\''):
|
||||
# If unquoted, convert logical ops to Nim
|
||||
case clean[i]
|
||||
of '^': " xor "
|
||||
of '&': " and "
|
||||
of '|': " or "
|
||||
of '~': " not "
|
||||
else: $clean[i]
|
||||
else:
|
||||
$clean[i]
|
||||
)
|
||||
hex = false
|
||||
|
||||
if i == clean.len or gen.nBl:
|
||||
# Process identifier
|
||||
if ident.nBl:
|
||||
# Issue #178
|
||||
if ident != "_":
|
||||
ident = gState.getIdentifier(ident, nskConst, name)
|
||||
if name.nBl and ident in gState.constIdentifiers:
|
||||
ident = ident & "." & name
|
||||
result &= ident
|
||||
ident = ""
|
||||
result &= gen
|
||||
gen = ""
|
||||
|
||||
# Convert shift ops to Nim
|
||||
result = result.multiReplace([
|
||||
("<<", " shl "), (">>", " shr ")
|
||||
])
|
||||
|
||||
proc getSplitComma*(joined: seq[string]): seq[string] =
|
||||
for i in joined:
|
||||
result = result.concat(i.split(","))
|
||||
|
||||
template isIncludeHeader*(gState: State): bool =
|
||||
gState.dynlib.Bl and gState.includeHeader
|
||||
|
||||
proc getComments*(gState: State, strip = false): string =
|
||||
if not gState.nocomments and gState.commentStr.nBl:
|
||||
result = "\n" & gState.commentStr
|
||||
if strip:
|
||||
result = result.replace("\n ", "\n")
|
||||
gState.commentStr = ""
|
||||
|
||||
proc dll*(path: string): string =
|
||||
let
|
||||
(dir, name, _) = path.splitFile()
|
||||
|
||||
result = dir / (DynlibFormat % name)
|
||||
|
||||
proc loadPlugin*(gState: State, sourcePath: string) =
|
||||
doAssert fileExists(sourcePath), "Plugin file does not exist: " & sourcePath
|
||||
|
||||
let
|
||||
pdll = sourcePath.dll
|
||||
if not fileExists(pdll) or
|
||||
sourcePath.getLastModificationTime() > pdll.getLastModificationTime():
|
||||
let
|
||||
# Get Nim configuration flags if not already specified in a .cfg file
|
||||
flags =
|
||||
if fileExists(sourcePath & ".cfg"): ""
|
||||
else: getNimConfigFlags(getCurrentDir())
|
||||
|
||||
# Always set output to same directory as source, prevents override
|
||||
outflags = &"--out:\"{pdll}\""
|
||||
|
||||
# Compile plugin as library with `markAndSweep` GC
|
||||
cmd = &"{gState.nim.sanitizePath} c --app:lib --gc:markAndSweep {flags} {outflags} {sourcePath.sanitizePath}"
|
||||
|
||||
discard execAction(cmd)
|
||||
doAssert fileExists(pdll), "No plugin binary generated for " & sourcePath
|
||||
|
||||
let lib = loadLib(pdll)
|
||||
doAssert lib != nil, "Plugin $1 compiled to $2 failed to load" % [sourcePath, pdll]
|
||||
|
||||
gState.onSymbol = cast[OnSymbol](lib.symAddr("onSymbol"))
|
||||
|
||||
gState.onSymbolOverride = cast[OnSymbol](lib.symAddr("onSymbolOverride"))
|
||||
|
||||
gState.onSymbolOverrideFinal = cast[OnSymbolOverrideFinal](lib.symAddr("onSymbolOverrideFinal"))
|
||||
|
||||
proc expandSymlinkAbs*(path: string): string =
|
||||
try:
|
||||
result = path.expandFilename().normalizedPath()
|
||||
except:
|
||||
result = path
|
||||
1
nimterop/git.nim
Normal file
1
nimterop/git.nim
Normal file
|
|
@ -0,0 +1 @@
|
|||
include build
|
||||
|
|
@ -1,167 +1,129 @@
|
|||
import strutils, tables
|
||||
import sequtils, sets, tables, strutils
|
||||
|
||||
import regex
|
||||
|
||||
import "."/plugin
|
||||
|
||||
when defined(TOAST):
|
||||
import sets, sequtils, strutils
|
||||
|
||||
import "."/plugin
|
||||
|
||||
import compiler/[ast, idents, modulegraphs, options]
|
||||
|
||||
# import "."/treesitter/api
|
||||
import "."/treesitter/api
|
||||
|
||||
const
|
||||
gAtoms* {.used.} = @[
|
||||
"field_identifier",
|
||||
"identifier",
|
||||
"number_literal",
|
||||
"char_literal",
|
||||
"preproc_arg",
|
||||
"primitive_type",
|
||||
"sized_type_specifier",
|
||||
"type_identifier"
|
||||
].toHashSet()
|
||||
|
||||
gExpressions* {.used.} = @[
|
||||
"parenthesized_expression",
|
||||
"bitwise_expression",
|
||||
"shift_expression",
|
||||
"math_expression",
|
||||
"escape_sequence"
|
||||
].toHashSet()
|
||||
|
||||
gEnumVals* {.used.} = @[
|
||||
"identifier",
|
||||
"number_literal",
|
||||
"char_literal"
|
||||
].concat(toSeq(gExpressions.items))
|
||||
|
||||
type
|
||||
Feature* = enum
|
||||
ast2
|
||||
Kind* = enum
|
||||
exactlyOne
|
||||
oneOrMore # +
|
||||
zeroOrMore # *
|
||||
zeroOrOne # ?
|
||||
orWithNext # !
|
||||
|
||||
Ast* = object
|
||||
name*: string
|
||||
kind*: Kind
|
||||
recursive*: bool
|
||||
children*: seq[ref Ast]
|
||||
when defined(TOAST):
|
||||
tonim*: proc (ast: ref Ast, node: TSNode, gState: State)
|
||||
regex*: Regex
|
||||
|
||||
AstTable* {.used.} = TableRef[string, seq[ref Ast]]
|
||||
|
||||
State* = ref object
|
||||
# Command line arguments to toast - some forwarded from cimport.nim
|
||||
compile*: seq[string] # `--compile` to create `{.compile.}` entries in generated wrapper
|
||||
convention*: string # `--convention | -C` to change calling convention from cdecl default
|
||||
debug*: bool # `cDebug()` or `--debug | -d` to enable debug mode
|
||||
defines*: seq[string] # Symbols added by `cDefine()` and `--define | -D` for C/C++ preprocessor/compiler
|
||||
dynlib*: string # `cImport(dynlib)` or `--dynlib | -l` to specify variable containing library name
|
||||
exclude*: seq[string] # files or directories to exclude from the wrapped output
|
||||
feature*: seq[Feature] # `--feature | -f` feature flags enabled
|
||||
includeDirs*: seq[string] # Paths added by `cIncludeDir()` and `--includeDirs | -I` for C/C++ preprocessor/compiler
|
||||
mode*: string # `cImport(mode)` or `--mode | -m` to override detected compiler mode - c or cpp
|
||||
nim*: string # `--nim` to specify full path to Nim compiler
|
||||
noComments*: bool # `--noComments | -c` to disable rendering comments in wrappers
|
||||
noHeader*: bool # `--noHeader | -H` to skip {.header.} pragma in wrapper
|
||||
passC*: seq[string] # `--passC` to create `{.passC.}` entries in the generated wrapper
|
||||
passL*: seq[string] # `--passL` to create `{.passL.}` entries in the generated wrapper
|
||||
past*: bool # `--past | -a` to print tree-sitter AST of code
|
||||
pluginSourcePath*: string # `--pluginSourcePath` specified path to plugin file to compile and load
|
||||
pnim*: bool # `--pnim | -n` to render Nim wrapper for header
|
||||
preprocess*: bool # `--preprocess | -p` to enable preprocessing of code before wrapping
|
||||
prefix*: seq[string] # `--prefix` strings to strip from start of identifiers
|
||||
recurse*: bool # `--recurse | -r` to recurse into #include files in headers specified
|
||||
compile*, defines*, headers*, includeDirs*, searchDirs*, prefix*, suffix*, symOverride*: seq[string]
|
||||
|
||||
debug*, includeHeader*, nocache*, nocomments*, past*, preprocess*, pnim*, recurse*: bool
|
||||
|
||||
code*, convention*, dynlib*, mode*, nim*, overrides*, pluginSource*, pluginSourcePath*: string
|
||||
|
||||
replace*: OrderedTableRef[string, string]
|
||||
# `--replace | -G` replacement rules for identifiers
|
||||
suffix*: seq[string] # `--suffix` strings to strip from end of identifiers
|
||||
symOverride*: seq[string] # `cSkipSymbol()`, `cOverride()` and `--symOverride | -O` symbols to skip during wrapping
|
||||
typeMap*: TableRef[string, string]
|
||||
# `--typeMap | -T` to map instances of type X to Y - e.g. ABC=cint
|
||||
|
||||
feature*: seq[Feature]
|
||||
|
||||
onSymbol*, onSymbolOverride*: OnSymbol
|
||||
onSymbolOverrideFinal*: OnSymbolOverrideFinal
|
||||
|
||||
outputHandle*: File
|
||||
|
||||
# All symbols that have been declared so far indexed by nimName
|
||||
identifiers*: TableRef[string, string]
|
||||
|
||||
# All const names for enum casting
|
||||
constIdentifiers*: HashSet[string]
|
||||
|
||||
# All symbols that have been skipped due to
|
||||
# being unwrappable or the user provided
|
||||
# override is blank
|
||||
skippedSyms*: HashSet[string]
|
||||
|
||||
# Legacy ast fields, remove when ast2 becomes default
|
||||
constStr*, enumStr*, procStr*, typeStr*: string
|
||||
|
||||
commentStr*, debugStr*, skipStr*: string
|
||||
|
||||
# Nim compiler objects
|
||||
when defined(TOAST):
|
||||
# Data fields
|
||||
code*: string # Contents of header file currently being processed
|
||||
currentHeader*: string # Const name of header being currently processed
|
||||
impShort*: string # Short base name for pragma in output
|
||||
outputHandle*: File # `--output | -o` open file handle
|
||||
sourceFile*: string # Full path of header being currently processed
|
||||
|
||||
# Plugin callbacks
|
||||
onSymbol*, onSymbolOverride*: OnSymbol
|
||||
onSymbolOverrideFinal*: OnSymbolOverrideFinal
|
||||
|
||||
# Symbol tables
|
||||
constIdentifiers*: HashSet[string] # Const names for enum casting
|
||||
identifiers*: TableRef[string, string] # Symbols that have been declared so far indexed by nimName
|
||||
skippedSyms*: HashSet[string] # Symbols that have been skipped due to being unwrappable or
|
||||
# the user provided override is blank
|
||||
headersProcessed*: HashSet[string] # Headers already processed directly or recursively
|
||||
|
||||
# Nim compiler objects
|
||||
constSection*, enumSection*, pragmaSection*, procSection*, typeSection*, varSection*: PNode
|
||||
identCache*: IdentCache
|
||||
config*: ConfigRef
|
||||
graph*: ModuleGraph
|
||||
|
||||
# Table of symbols to generated AST PNode - used to implement forward declarations
|
||||
# Craeted symbols to generated AST - forward declaration tracking
|
||||
identifierNodes*: TableRef[string, PNode]
|
||||
|
||||
# Used for the exprparser.nim module
|
||||
currentExpr*, currentTyCastName*: string
|
||||
# Controls whether or not the current expression
|
||||
# should validate idents against currently defined idents
|
||||
skipIdentValidation*: bool
|
||||
currentHeader*, impShort*, sourceFile*: string
|
||||
|
||||
# Top level header for wrapper output - include imported types, pragmas and other info
|
||||
wrapperHeader*: string
|
||||
else:
|
||||
# cimport.nim specific
|
||||
compcache*: seq[string] # `cCompile()` list of files already processed
|
||||
nocache*: bool # `cDisableCaching()` to disable caching of artifacts
|
||||
overrides*: string # `cOverride()` code which gets added to `cPlugin()` output
|
||||
pluginSource*: string # `cPlugin()` generated code to write to plugin file from
|
||||
searchDirs*: seq[string] # `cSearchPath()` added directories for header search
|
||||
# Used for the exprparser.nim module
|
||||
currentExpr*, currentTyCastName*: string
|
||||
|
||||
BuildType* = enum
|
||||
btAutoconf, btCmake
|
||||
data*: seq[tuple[name, val: string]]
|
||||
|
||||
BuildStatus* = object
|
||||
built*: bool
|
||||
buildPath*: string
|
||||
error*: string
|
||||
nodeBranch*: seq[string]
|
||||
|
||||
when nimvm:
|
||||
var
|
||||
gStateCT* {.compileTime, used.} = new(State)
|
||||
else:
|
||||
var
|
||||
gState*: State
|
||||
Feature* = enum
|
||||
ast1, ast2
|
||||
|
||||
when defined(TOAST):
|
||||
const
|
||||
gAtoms* {.used.} = @[
|
||||
"field_identifier",
|
||||
"identifier",
|
||||
"number_literal",
|
||||
"char_literal",
|
||||
"preproc_arg",
|
||||
"primitive_type",
|
||||
"sized_type_specifier",
|
||||
"type_identifier"
|
||||
].toHashSet()
|
||||
|
||||
gExpressions* {.used.} = @[
|
||||
"parenthesized_expression",
|
||||
"bitwise_expression",
|
||||
"shift_expression",
|
||||
"math_expression",
|
||||
"escape_sequence",
|
||||
"binary_expression",
|
||||
"unary_expression"
|
||||
].toHashSet()
|
||||
|
||||
gEnumVals* {.used.} = @[
|
||||
"identifier",
|
||||
"number_literal",
|
||||
"char_literal"
|
||||
].concat(toSeq(gExpressions.items))
|
||||
|
||||
type
|
||||
Status* = enum
|
||||
success, unknown, error
|
||||
|
||||
template getCommented*(str: string): string =
|
||||
"\n# " & str.strip().replace("\n", "\n# ")
|
||||
|
||||
# Redirect output to file when required
|
||||
template gecho*(args: string) =
|
||||
when defined(TOAST):
|
||||
when nimvm:
|
||||
echo args
|
||||
else:
|
||||
if gState.outputHandle.isNil:
|
||||
echo args
|
||||
else:
|
||||
gState.outputHandle.writeLine(args)
|
||||
else:
|
||||
echo args
|
||||
|
||||
template decho*(args: varargs[string, `$`]): untyped =
|
||||
let
|
||||
str = join(args, "")
|
||||
when defined(TOAST):
|
||||
if gState.debug:
|
||||
gecho str.getCommented()
|
||||
else:
|
||||
if gStateCT.debug:
|
||||
echo str.getCommented()
|
||||
var
|
||||
gStateCT* {.compiletime, used.} = new(State)
|
||||
|
||||
template nBl*(s: typed): untyped {.used.} =
|
||||
(s.len != 0)
|
||||
|
||||
template Bl*(s: typed): untyped {.used.} =
|
||||
(s.len == 0)
|
||||
(s.len == 0)
|
||||
|
||||
# Redirect output to file when required
|
||||
template gecho*(args: string) =
|
||||
if gState.outputHandle.isNil:
|
||||
stdout.writeLine(args)
|
||||
else:
|
||||
gState.outputHandle.writeLine(args)
|
||||
|
||||
template decho*(args: varargs[string, `$`]): untyped =
|
||||
if gState.debug:
|
||||
gecho join(args, "").getCommented()
|
||||
767
nimterop/grammar.nim
Normal file
767
nimterop/grammar.nim
Normal file
|
|
@ -0,0 +1,767 @@
|
|||
import macros, strformat, strutils, tables
|
||||
|
||||
import regex
|
||||
|
||||
import "."/[ast, getters, globals, lisp, treesitter/api]
|
||||
|
||||
type
|
||||
Grammar = seq[tuple[grammar: string, call: proc(ast: ref Ast, node: TSNode, gState: State) {.nimcall.}]]
|
||||
|
||||
proc getPtrType(str: string): string =
|
||||
result = case str:
|
||||
of "ptr cchar":
|
||||
"cstring"
|
||||
of "ptr ptr cchar":
|
||||
"ptr cstring"
|
||||
of "ptr object":
|
||||
"pointer"
|
||||
of "ptr ptr object":
|
||||
"ptr pointer"
|
||||
of "ptr FILE":
|
||||
"File"
|
||||
else:
|
||||
str
|
||||
|
||||
proc getLit(str: string): string =
|
||||
# Used to convert #define literals into const
|
||||
let
|
||||
str = str.replace(re"/[/*].*?(?:\*/)?$", "").strip()
|
||||
|
||||
if str.contains(re"^[\-]?[\d]*[.]?[\d]+$") or # decimal
|
||||
str.contains(re"^0x[\da-fA-F]+$") or # hexadecimal
|
||||
str.contains(re"^'[[:ascii:]]'$") or # char
|
||||
str.contains(re"""^"[[:ascii:]]+"$"""): # char *
|
||||
return str
|
||||
|
||||
proc initGrammar(): Grammar =
|
||||
# #define X Y
|
||||
result.add(("""
|
||||
(preproc_def
|
||||
(identifier)
|
||||
(preproc_arg)
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# define X Y"
|
||||
|
||||
let
|
||||
name = gState.data[0].val
|
||||
nname = gState.getIdentifier(name, nskConst)
|
||||
val = gState.data[1].val.getLit()
|
||||
|
||||
if not nname.nBl:
|
||||
let
|
||||
override = gState.getOverride(name, nskConst)
|
||||
if override.nBl:
|
||||
gState.constStr &= &"{gState.getComments()}\n{override}"
|
||||
else:
|
||||
gState.constStr &= &"{gState.getComments()}\n # Const '{name}' skipped"
|
||||
if gState.debug:
|
||||
gState.skipStr &= &"\n{gState.getNodeVal(node)}"
|
||||
elif val.nBl and gState.addNewIdentifer(nname):
|
||||
gState.constStr &= &"{gState.getComments()}\n {nname}* = {val}"
|
||||
))
|
||||
|
||||
let
|
||||
typeGrammar = """
|
||||
(type_qualifier?)
|
||||
(primitive_type|type_identifier?)
|
||||
(type_qualifier?)
|
||||
(sized_type_specifier?
|
||||
(primitive_type?)
|
||||
)
|
||||
(struct_specifier|union_specifier|enum_specifier?
|
||||
(type_identifier)
|
||||
)
|
||||
"""
|
||||
|
||||
arrGrammar = &"""
|
||||
(array_declarator!
|
||||
(pointer_declarator!
|
||||
(pointer_declarator!
|
||||
(type_identifier)
|
||||
)
|
||||
(type_identifier)
|
||||
)
|
||||
(type_identifier|identifier)
|
||||
(identifier|number_literal)
|
||||
)
|
||||
"""
|
||||
|
||||
paramListGrammar = &"""
|
||||
(parameter_list
|
||||
(parameter_declaration*
|
||||
{typeGrammar}
|
||||
(identifier|type_identifier?)
|
||||
(pointer_declarator?
|
||||
(type_qualifier?)
|
||||
(pointer_declarator!
|
||||
(type_qualifier?)
|
||||
{arrGrammar}
|
||||
(identifier|type_identifier)
|
||||
)
|
||||
{arrGrammar}
|
||||
(identifier|type_identifier)
|
||||
)
|
||||
{arrGrammar}
|
||||
(abstract_pointer_declarator?
|
||||
(abstract_pointer_declarator?)
|
||||
)
|
||||
)
|
||||
)
|
||||
"""
|
||||
|
||||
funcGrammar = &"""
|
||||
(function_declarator*
|
||||
(identifier|type_identifier!)
|
||||
(pointer_declarator
|
||||
(pointer_declarator!
|
||||
(type_identifier)
|
||||
)
|
||||
(type_identifier|identifier)
|
||||
)
|
||||
{paramListGrammar}
|
||||
(noexcept|throw_specifier?)
|
||||
)
|
||||
"""
|
||||
|
||||
template funcParamCommon(fname, pname, ptyp, pptr, pout, count, i, flen: untyped): untyped =
|
||||
ptyp = gState.getIdentifier(gState.data[i].val, nskType, fname).getType()
|
||||
|
||||
pptr = ""
|
||||
while i+1 < gState.data.len and gState.data[i+1].name == "pointer_declarator":
|
||||
pptr &= "ptr "
|
||||
i += 1
|
||||
|
||||
if i+1 < gState.data.len and gState.data[i+1].name == "identifier":
|
||||
pname = gState.getIdentifier(gState.data[i+1].val, nskParam, fname)
|
||||
i += 2
|
||||
else:
|
||||
pname = "a" & $count
|
||||
count += 1
|
||||
i += 1
|
||||
|
||||
if i < gState.data.len and gState.data[i].name in ["identifier", "number_literal"]:
|
||||
flen = gState.data[i].val
|
||||
if gState.data[i].name == "identifier":
|
||||
flen = gState.getIdentifier(flen, nskConst, fname)
|
||||
|
||||
pout &= &"{pname}: array[{flen}, {getPtrType(pptr&ptyp)}], "
|
||||
i += 1
|
||||
elif pptr.nBl or ptyp != "object":
|
||||
pout &= &"{pname}: {getPtrType(pptr&ptyp)}, "
|
||||
|
||||
# typedef int X
|
||||
# typedef X Y
|
||||
# typedef struct X Y
|
||||
# typedef ?* Y
|
||||
result.add((&"""
|
||||
(type_definition
|
||||
{typeGrammar}
|
||||
(type_identifier!)
|
||||
{arrGrammar}
|
||||
(pointer_declarator!
|
||||
(pointer_declarator!
|
||||
(type_identifier!)
|
||||
{arrGrammar}
|
||||
{funcGrammar}
|
||||
)
|
||||
(type_identifier!)
|
||||
{arrGrammar}
|
||||
{funcGrammar}
|
||||
)
|
||||
{funcGrammar}
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# typedef X Y"
|
||||
|
||||
var
|
||||
i = 0
|
||||
typ = gState.getIdentifier(gState.data[i].val, nskType, "Parent").getType()
|
||||
name = ""
|
||||
nname = ""
|
||||
tptr = ""
|
||||
aptr = ""
|
||||
pragmas: seq[string] = @[]
|
||||
|
||||
i += 1
|
||||
while i < gState.data.len and "pointer" in gState.data[i].name:
|
||||
case gState.data[i].name:
|
||||
of "pointer_declarator":
|
||||
tptr &= "ptr "
|
||||
i += 1
|
||||
of "array_pointer_declarator":
|
||||
aptr &= "ptr "
|
||||
i += 1
|
||||
|
||||
if i < gState.data.len:
|
||||
name = gState.data[i].val
|
||||
nname = gState.getIdentifier(name, nskType)
|
||||
i += 1
|
||||
|
||||
if gState.isIncludeHeader():
|
||||
pragmas.add gState.getImportC(name, nname)
|
||||
|
||||
let
|
||||
pragma = gState.getPragma(pragmas)
|
||||
|
||||
if not nname.nBl:
|
||||
let
|
||||
override = gState.getOverride(name, nskType)
|
||||
if override.nBl:
|
||||
gState.typeStr &= &"{gState.getComments()}\n{override}"
|
||||
elif nname notin gTypeMap and typ.nBl and gState.addNewIdentifer(nname):
|
||||
if i < gState.data.len and gState.data[^1].name == "function_declarator":
|
||||
var
|
||||
fname = nname
|
||||
pout, pname, ptyp, pptr = ""
|
||||
count = 1
|
||||
flen = ""
|
||||
|
||||
while i < gState.data.len:
|
||||
if gState.data[i].name == "function_declarator":
|
||||
break
|
||||
|
||||
funcParamCommon(fname, pname, ptyp, pptr, pout, count, i, flen)
|
||||
|
||||
if pout.nBl and pout[^2 .. ^1] == ", ":
|
||||
pout = pout[0 .. ^3]
|
||||
|
||||
if tptr.nBl or typ != "object":
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = proc({pout}): {getPtrType(tptr&typ)} {{.cdecl.}}"
|
||||
else:
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = proc({pout}) {{.cdecl.}}"
|
||||
else:
|
||||
if i < gState.data.len and gState.data[i].name in ["identifier", "number_literal"]:
|
||||
var
|
||||
flen = gState.data[i].val
|
||||
if gState.data[i].name == "identifier":
|
||||
flen = gState.getIdentifier(flen, nskConst, nname)
|
||||
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = {aptr}array[{flen}, {getPtrType(tptr&typ)}]"
|
||||
else:
|
||||
if nname == typ:
|
||||
pragmas.add "incompleteStruct"
|
||||
let
|
||||
pragma = gState.getPragma(pragmas)
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = object"
|
||||
else:
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = {getPtrType(tptr&typ)}"
|
||||
))
|
||||
|
||||
proc pDupTypeCommon(nname: string, fend: int, gState: State, isEnum=false) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# pDupTypeCommon()"
|
||||
|
||||
var
|
||||
dname = gState.data[^1].val
|
||||
ndname = gState.getIdentifier(dname, nskType)
|
||||
dptr =
|
||||
if fend == 2:
|
||||
"ptr "
|
||||
else:
|
||||
""
|
||||
|
||||
if ndname.nBl and ndname != nname:
|
||||
if isEnum:
|
||||
if gState.addNewIdentifer(ndname):
|
||||
gState.enumStr &= &"{gState.getComments(true)}\ntype {ndname}* = {dptr}{nname}"
|
||||
else:
|
||||
if gState.addNewIdentifer(ndname):
|
||||
let
|
||||
pragma = gState.getPragma(gState.getImportc(dname, ndname), "bycopy")
|
||||
gState.typeStr &=
|
||||
&"{gState.getComments()}\n {ndname}*{pragma} = {dptr}{nname}"
|
||||
|
||||
proc pStructCommon(ast: ref Ast, node: TSNode, name: string, fstart, fend: int, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# pStructCommon"
|
||||
|
||||
var
|
||||
nname = gState.getIdentifier(name, nskType)
|
||||
prefix = ""
|
||||
union = ""
|
||||
|
||||
case $node.tsNodeType():
|
||||
of "struct_specifier":
|
||||
prefix = "struct "
|
||||
of "union_specifier":
|
||||
prefix = "union "
|
||||
union = ", union"
|
||||
of "type_definition":
|
||||
if node.getTSNodeNamedChildCountSansComments() != 0:
|
||||
for i in 0 .. node.tsNodeNamedChildCount()-1:
|
||||
let
|
||||
nchild = $node.tsNodeNamedChild(i).tsNodeType()
|
||||
if nchild != "comment":
|
||||
case nchild:
|
||||
of "struct_specifier":
|
||||
if fstart == 1:
|
||||
prefix = "struct "
|
||||
of "union_specifier":
|
||||
if fstart == 1:
|
||||
prefix = "union "
|
||||
union = ", union"
|
||||
break
|
||||
|
||||
if not nname.nBl:
|
||||
let
|
||||
override = gState.getOverride(name, nskType)
|
||||
if override.nBl:
|
||||
gState.typeStr &= &"{gState.getComments()}\n{override}"
|
||||
elif gState.addNewIdentifer(nname):
|
||||
if gState.data.len == 1:
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}* {{.bycopy{union}.}} = object"
|
||||
else:
|
||||
var
|
||||
pragmas: seq[string] = @[]
|
||||
if gState.isIncludeHeader():
|
||||
pragmas.add gState.getImportC(prefix & name, nname)
|
||||
pragmas.add "bycopy"
|
||||
if union.nBl:
|
||||
pragmas.add "union"
|
||||
|
||||
let
|
||||
pragma = gState.getPragma(pragmas)
|
||||
|
||||
gState.typeStr &= &"{gState.getComments()}\n {nname}*{pragma} = object"
|
||||
|
||||
var
|
||||
i = fstart
|
||||
ftyp, fname: string
|
||||
fptr = ""
|
||||
aptr = ""
|
||||
flen = ""
|
||||
while i < gState.data.len-fend:
|
||||
fptr = ""
|
||||
aptr = ""
|
||||
if gState.data[i].name == "field_declaration":
|
||||
i += 1
|
||||
continue
|
||||
|
||||
if gState.data[i].name notin ["field_identifier", "pointer_declarator", "array_pointer_declarator"]:
|
||||
ftyp = gState.getIdentifier(gState.data[i].val, nskType, nname).getType()
|
||||
i += 1
|
||||
|
||||
while i < gState.data.len-fend and "pointer" in gState.data[i].name:
|
||||
case gState.data[i].name:
|
||||
of "pointer_declarator":
|
||||
fptr &= "ptr "
|
||||
i += 1
|
||||
of "array_pointer_declarator":
|
||||
aptr &= "ptr "
|
||||
i += 1
|
||||
|
||||
fname = gState.getIdentifier(gState.data[i].val, nskField, nname)
|
||||
|
||||
if i+1 < gState.data.len-fend and gState.data[i+1].name in gEnumVals:
|
||||
# Struct field is an array where size is an expression
|
||||
var
|
||||
flen = gState.getNimExpression(gState.data[i+1].val)
|
||||
if "/" in flen:
|
||||
flen = &"({flen}).int"
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}*: {aptr}array[{flen}, {getPtrType(fptr&ftyp)}]"
|
||||
i += 2
|
||||
elif i+1 < gState.data.len-fend and gState.data[i+1].name == "bitfield_clause":
|
||||
let
|
||||
size = gState.data[i+1].val
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}* {{.bitsize: {size}.}} : {getPtrType(fptr&ftyp)} "
|
||||
i += 2
|
||||
elif i+1 < gState.data.len-fend and gState.data[i+1].name == "function_declarator":
|
||||
var
|
||||
pout, pname, ptyp, pptr = ""
|
||||
count = 1
|
||||
|
||||
i += 2
|
||||
while i < gState.data.len-fend:
|
||||
if gState.data[i].name == "function_declarator":
|
||||
i += 1
|
||||
continue
|
||||
|
||||
if gState.data[i].name == "field_declaration":
|
||||
break
|
||||
|
||||
funcParamCommon(fname, pname, ptyp, pptr, pout, count, i, flen)
|
||||
|
||||
if pout.nBl and pout[^2 .. ^1] == ", ":
|
||||
pout = pout[0 .. ^3]
|
||||
if fptr.nBl or ftyp != "object":
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}*: proc({pout}): {getPtrType(fptr&ftyp)} {{.cdecl.}}"
|
||||
else:
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}*: proc({pout}) {{.cdecl.}}"
|
||||
i += 1
|
||||
else:
|
||||
if ftyp == "object":
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}*: pointer"
|
||||
else:
|
||||
gState.typeStr &= &"{gState.getComments()}\n {fname}*: {getPtrType(fptr&ftyp)}"
|
||||
i += 1
|
||||
|
||||
if node.tsNodeType() == "type_definition" and
|
||||
gState.data[^1].name == "type_identifier" and gState.data[^1].val.nBl:
|
||||
pDupTypeCommon(nname, fend, gState, false)
|
||||
|
||||
let
|
||||
fieldGrammar = &"""
|
||||
(field_identifier!)
|
||||
(bitfield_clause!
|
||||
(number_literal)
|
||||
)
|
||||
(array_declarator!
|
||||
(field_identifier!)
|
||||
(pointer_declarator
|
||||
(pointer_declarator!
|
||||
(field_identifier)
|
||||
)
|
||||
(field_identifier)
|
||||
)
|
||||
(^$1+)
|
||||
)
|
||||
(function_declarator+
|
||||
(pointer_declarator
|
||||
(pointer_declarator!
|
||||
(field_identifier)
|
||||
)
|
||||
(field_identifier)
|
||||
)
|
||||
{paramListGrammar}
|
||||
)
|
||||
""" % gEnumVals.join("|")
|
||||
|
||||
fieldListGrammar = &"""
|
||||
(field_declaration_list?
|
||||
(field_declaration+
|
||||
{typeGrammar}
|
||||
(pointer_declarator!
|
||||
(pointer_declarator!
|
||||
{fieldGrammar}
|
||||
)
|
||||
{fieldGrammar}
|
||||
)
|
||||
{fieldGrammar}
|
||||
)
|
||||
)
|
||||
"""
|
||||
|
||||
# struct X {}
|
||||
result.add((&"""
|
||||
(struct_specifier|union_specifier
|
||||
(type_identifier)
|
||||
{fieldListGrammar}
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# struct X {}"
|
||||
|
||||
pStructCommon(ast, node, gState.data[0].val, 1, 1, gState)
|
||||
))
|
||||
|
||||
# typedef struct X {}
|
||||
result.add((&"""
|
||||
(type_definition
|
||||
(struct_specifier|union_specifier
|
||||
(type_identifier?)
|
||||
{fieldListGrammar}
|
||||
)
|
||||
(type_identifier!)
|
||||
(pointer_declarator
|
||||
(pointer_declarator!
|
||||
(type_identifier)
|
||||
)
|
||||
(type_identifier)
|
||||
)
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# typedef struct X {}"
|
||||
|
||||
var
|
||||
fstart = 0
|
||||
fend = 1
|
||||
|
||||
if gState.data[^2].name == "pointer_declarator":
|
||||
fend = 2
|
||||
|
||||
if gState.data.len > 1 and
|
||||
gState.data[0].name == "type_identifier" and
|
||||
gState.data[1].name notin ["field_identifier", "pointer_declarator"]:
|
||||
|
||||
fstart = 1
|
||||
pStructCommon(ast, node, gState.data[0].val, fstart, fend, gState)
|
||||
else:
|
||||
pStructCommon(ast, node, gState.data[^1].val, fstart, fend, gState)
|
||||
))
|
||||
|
||||
proc pEnumCommon(ast: ref Ast, node: TSNode, name: string, fstart, fend: int, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# pEnumCommon()"
|
||||
|
||||
let nname =
|
||||
if name.Bl:
|
||||
getUniqueIdentifier(gState, "Enum")
|
||||
else:
|
||||
gState.getIdentifier(name, nskType)
|
||||
|
||||
if nname.nBl and gState.addNewIdentifer(nname):
|
||||
gState.enumStr &= &"{gState.getComments(true)}\ndefineEnum({nname})"
|
||||
|
||||
var
|
||||
i = fstart
|
||||
count = 0
|
||||
while i < gState.data.len-fend:
|
||||
if gState.data[i].name == "enumerator":
|
||||
i += 1
|
||||
continue
|
||||
|
||||
let
|
||||
fname = gState.getIdentifier(gState.data[i].val, nskEnumField)
|
||||
|
||||
if i+1 < gState.data.len-fend and
|
||||
gState.data[i+1].name in gEnumVals:
|
||||
if fname.nBl and gState.addNewIdentifer(fname):
|
||||
gState.constStr &= &"{gState.getComments()}\n {fname}* = ({gState.getNimExpression(gState.data[i+1].val)}).{nname}"
|
||||
try:
|
||||
count = gState.data[i+1].val.parseInt() + 1
|
||||
except:
|
||||
count += 1
|
||||
i += 2
|
||||
else:
|
||||
if fname.nBl and gState.addNewIdentifer(fname):
|
||||
gState.constStr &= &"{gState.getComments()}\n {fname}* = {count}.{nname}"
|
||||
i += 1
|
||||
count += 1
|
||||
|
||||
if node.tsNodeType() == "type_definition" and
|
||||
gState.data[^1].name == "type_identifier" and gState.data[^1].val.nBl:
|
||||
pDupTypeCommon(nname, fend, gState, true)
|
||||
|
||||
# enum X {}
|
||||
result.add(("""
|
||||
(enum_specifier
|
||||
(type_identifier?)
|
||||
(enumerator_list
|
||||
(enumerator+
|
||||
(identifier?)
|
||||
(^$1+)
|
||||
)
|
||||
)
|
||||
)
|
||||
""" % gEnumVals.join("|"),
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# enum X {}"
|
||||
|
||||
var
|
||||
name = ""
|
||||
offset = 0
|
||||
|
||||
if gState.data[0].name == "type_identifier":
|
||||
name = gState.data[0].val
|
||||
offset = 1
|
||||
|
||||
pEnumCommon(ast, node, name, offset, 0, gState)
|
||||
))
|
||||
|
||||
# typedef enum {} X
|
||||
result.add((&"""
|
||||
(type_definition
|
||||
{result[^1].grammar}
|
||||
(type_identifier!)
|
||||
(pointer_declarator
|
||||
(pointer_declarator!
|
||||
(type_identifier)
|
||||
)
|
||||
(type_identifier)
|
||||
)
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# typedef enum {}"
|
||||
|
||||
var
|
||||
fstart = 0
|
||||
fend = 1
|
||||
|
||||
if gState.data[^2].name == "pointer_declarator":
|
||||
fend = 2
|
||||
|
||||
if gState.data[0].name == "type_identifier":
|
||||
fstart = 1
|
||||
|
||||
pEnumCommon(ast, node, gState.data[0].val, fstart, fend, gState)
|
||||
else:
|
||||
pEnumCommon(ast, node, gState.data[^1].val, fstart, fend, gState)
|
||||
))
|
||||
|
||||
# typ function(typ param1, ...)
|
||||
result.add((&"""
|
||||
(declaration
|
||||
(storage_class_specifier?)
|
||||
{typeGrammar}
|
||||
(pointer_declarator!
|
||||
(pointer_declarator!
|
||||
{funcGrammar}
|
||||
)
|
||||
{funcGrammar}
|
||||
)
|
||||
{funcGrammar}
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
if gState.debug:
|
||||
gState.debugStr &= "\n# typ function"
|
||||
|
||||
var
|
||||
fptr = ""
|
||||
i = 1
|
||||
|
||||
while i < gState.data.len:
|
||||
if gState.data[i].name == "function_declarator":
|
||||
i += 1
|
||||
continue
|
||||
|
||||
fptr = ""
|
||||
while i < gState.data.len and gState.data[i].name == "pointer_declarator":
|
||||
fptr &= "ptr "
|
||||
i += 1
|
||||
|
||||
var
|
||||
fname = gState.data[i].val
|
||||
fnname = gState.getIdentifier(fname, nskProc)
|
||||
pout, pname, ptyp, pptr = ""
|
||||
count = 1
|
||||
flen = ""
|
||||
fVar = false
|
||||
|
||||
i += 1
|
||||
if i < gState.data.len and gState.data[i].name == "pointer_declarator":
|
||||
fVar = true
|
||||
i += 1
|
||||
|
||||
while i < gState.data.len:
|
||||
if gState.data[i].name == "function_declarator":
|
||||
break
|
||||
|
||||
funcParamCommon(fnname, pname, ptyp, pptr, pout, count, i, flen)
|
||||
|
||||
if pout.nBl and pout[^2 .. ^1] == ", ":
|
||||
pout = pout[0 .. ^3]
|
||||
|
||||
if not fnname.nBl:
|
||||
let
|
||||
override = gState.getOverride(fname, nskProc)
|
||||
if override.nBl:
|
||||
gState.typeStr &= &"{gState.getComments()}\n{override}"
|
||||
elif gState.addNewIdentifer(fnname):
|
||||
let
|
||||
ftyp = gState.getIdentifier(gState.data[0].val, nskType, fnname).getType()
|
||||
pragma = gState.getPragma(gState.getImportC(fname, fnname), "cdecl")
|
||||
|
||||
if fptr.nBl or ftyp != "object":
|
||||
if fVar:
|
||||
gState.procStr &= &"{gState.getComments(true)}\nvar {fnname}*: proc ({pout}): {getPtrType(fptr&ftyp)}{{.cdecl.}}"
|
||||
else:
|
||||
gState.procStr &= &"{gState.getComments(true)}\nproc {fnname}*({pout}): {getPtrType(fptr&ftyp)}{pragma}"
|
||||
else:
|
||||
if fVar:
|
||||
gState.procStr &= &"{gState.getComments(true)}\nvar {fnname}*: proc ({pout}){{.cdecl.}}"
|
||||
else:
|
||||
gState.procStr &= &"{gState.getComments(true)}\nproc {fnname}*({pout}){pragma}"
|
||||
))
|
||||
|
||||
# // comment
|
||||
result.add((&"""
|
||||
(comment
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
let
|
||||
cmt = $gState.getNodeVal(node)
|
||||
|
||||
for line in cmt.splitLines():
|
||||
let
|
||||
line = line.multiReplace([("//", ""), ("/*", ""), ("*/", "")])
|
||||
|
||||
gState.commentStr &= &"\n # {line.strip(leading=false)}"
|
||||
))
|
||||
|
||||
# // unknown
|
||||
result.add((&"""
|
||||
(type_definition|struct_specifier|union_specifier|enum_specifier|declaration
|
||||
(^.*)
|
||||
)
|
||||
""",
|
||||
proc (ast: ref Ast, node: TSNode, gState: State) =
|
||||
var
|
||||
done = false
|
||||
for i in gState.data:
|
||||
case $node.tsNodeType()
|
||||
of "declaration":
|
||||
if i.name == "identifier":
|
||||
let
|
||||
override = gState.getOverride(i.val, nskProc)
|
||||
|
||||
if override.nBl:
|
||||
gState.procStr &= &"{gState.getComments(true)}\n{override}"
|
||||
done = true
|
||||
break
|
||||
else:
|
||||
gState.procStr &= &"{gState.getComments(true)}\n# Declaration '{i.val}' skipped"
|
||||
|
||||
else:
|
||||
if i.name == "type_identifier":
|
||||
let
|
||||
override = gState.getOverride(i.val, nskType)
|
||||
|
||||
if override.nBl:
|
||||
gState.typeStr &= &"{gState.getComments()}\n{override}"
|
||||
done = true
|
||||
break
|
||||
else:
|
||||
gState.typeStr &= &"{gState.getComments()}\n # Type '{i.val}' skipped"
|
||||
|
||||
if gState.debug and not done:
|
||||
gState.skipStr &= &"\n{gState.getNodeVal(node)}"
|
||||
))
|
||||
|
||||
proc initRegex(ast: ref Ast) =
|
||||
if ast.children.nBl:
|
||||
if not ast.recursive:
|
||||
for child in ast.children:
|
||||
child.initRegex()
|
||||
|
||||
var
|
||||
reg: string
|
||||
try:
|
||||
reg = ast.getRegexForAstChildren()
|
||||
ast.regex = reg.re()
|
||||
except:
|
||||
echo reg
|
||||
raise newException(Exception, getCurrentExceptionMsg())
|
||||
|
||||
proc parseGrammar*(): AstTable =
|
||||
const grammars = initGrammar()
|
||||
|
||||
result = newTable[string, seq[ref Ast]]()
|
||||
for i in 0 .. grammars.len-1:
|
||||
var
|
||||
ast = grammars[i].grammar.parseLisp()
|
||||
|
||||
ast.tonim = grammars[i].call
|
||||
ast.initRegex()
|
||||
for n in ast.name.split("|"):
|
||||
if n notin result:
|
||||
result[n] = @[ast]
|
||||
else:
|
||||
result[n].add(ast)
|
||||
|
||||
proc printGrammar*(gState: State, astTable: AstTable) =
|
||||
for name in astTable.keys():
|
||||
for ast in astTable[name]:
|
||||
gecho ast.printAst()
|
||||
59
nimterop/lisp.nim
Normal file
59
nimterop/lisp.nim
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
import "."/[getters, globals]
|
||||
|
||||
var
|
||||
gTokens: seq[string]
|
||||
idx = 0
|
||||
|
||||
proc tokenize(tree: string) =
|
||||
var collect = ""
|
||||
|
||||
gTokens = @[]
|
||||
idx = 0
|
||||
for i in tree:
|
||||
case i:
|
||||
of ' ', '\n', '\r', '(', ')':
|
||||
if collect.nBl:
|
||||
gTokens.add(collect)
|
||||
collect = ""
|
||||
if i in ['(', ')']:
|
||||
gTokens.add($i)
|
||||
else:
|
||||
collect &= $i
|
||||
|
||||
proc readFromTokens(): ref Ast =
|
||||
if idx == gTokens.len:
|
||||
doAssert false, "Bad AST " & $(idx: idx)
|
||||
|
||||
if gTokens[idx] == "(":
|
||||
if gTokens.len - idx < 2:
|
||||
doAssert false, "Corrupt AST " & $(gTokensLen: gTokens.len, idx: idx)
|
||||
result = new(Ast)
|
||||
(result.name, result.kind, result.recursive) = gTokens[idx+1].getNameKind()
|
||||
result.children = @[]
|
||||
if result.recursive:
|
||||
result.children.add(result)
|
||||
idx += 2
|
||||
while gTokens[idx] != ")":
|
||||
var res = readFromTokens()
|
||||
if not res.isNil:
|
||||
result.children.add(res)
|
||||
elif gTokens[idx] == ")":
|
||||
doAssert false, "Poor AST " & $(idx: idx)
|
||||
|
||||
idx += 1
|
||||
|
||||
proc printAst*(node: ref Ast, offset=""): string =
|
||||
result = offset & "(" & (if node.recursive: "^" else: "") & node.name & node.kind.toString()
|
||||
|
||||
if node.children.nBl and not node.recursive:
|
||||
result &= "\n"
|
||||
for child in node.children:
|
||||
result &= printAst(child, offset & " ")
|
||||
result &= offset & ")\n"
|
||||
else:
|
||||
result &= ")\n"
|
||||
|
||||
proc parseLisp*(tree: string): ref Ast =
|
||||
tokenize(tree)
|
||||
|
||||
return readFromTokens()
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
import system except find
|
||||
|
||||
import os
|
||||
|
||||
import cligen
|
||||
|
||||
import strutils except find
|
||||
import regex except find
|
||||
|
||||
proc findRec(dir: string, pattern: string | Regex, recurse: bool) =
|
||||
for kind, path in walkDir(dir):
|
||||
if kind in [pcDir, pcLinkToDir]:
|
||||
if recurse: findRec(path, pattern, recurse)
|
||||
elif pattern in path:
|
||||
echo path.absolutePath()
|
||||
|
||||
proc find(recurse = false, rexp = false, args: seq[string]) =
|
||||
var
|
||||
pat = ""
|
||||
rpat: Regex
|
||||
for arg in args:
|
||||
if not arg.startsWith("-"):
|
||||
if dirExists(arg):
|
||||
if rexp:
|
||||
findRec(arg, rpat, recurse)
|
||||
else:
|
||||
findRec(arg, pat, recurse)
|
||||
else:
|
||||
pat = arg
|
||||
if rexp: rpat = re(arg)
|
||||
|
||||
when isMainModule:
|
||||
dispatchMulti([
|
||||
find, help = {
|
||||
"recurse": "recursive search",
|
||||
"rexp": "patterns are regular expressions",
|
||||
"args": "pattern1 dir1 dir2 pattern2 dir3 ..."
|
||||
}
|
||||
])
|
||||
|
|
@ -1,13 +1,8 @@
|
|||
import json, os, osproc, sets, strformat, strutils
|
||||
|
||||
import ".."/globals
|
||||
import "."/misc
|
||||
import json, macros, os, osproc, sets, strformat, strutils
|
||||
|
||||
when nimvm:
|
||||
when (NimMajor, NimMinor, NimPatch) >= (1, 2, 0):
|
||||
import std/compilesettings
|
||||
else:
|
||||
import macros
|
||||
else:
|
||||
discard
|
||||
|
||||
|
|
@ -21,12 +16,11 @@ type
|
|||
paths*: OrderedSet[string]
|
||||
nimblePaths*: OrderedSet[string]
|
||||
nimcacheDir*: string
|
||||
outDir*: string
|
||||
|
||||
proc getJson(projectDir: string): JsonNode =
|
||||
# Get `nim dump` json value for `projectDir`
|
||||
var
|
||||
cmd = &"{getCurrentNimCompiler()} --hints:off --dump.format:json dump dummy"
|
||||
cmd = "nim --hints:off --dump.format:json dump dummy"
|
||||
dump = ""
|
||||
ret = 0
|
||||
|
||||
|
|
@ -43,7 +37,7 @@ proc getJson(projectDir: string): JsonNode =
|
|||
try:
|
||||
result = parseJson(dump)
|
||||
except JsonParsingError as e:
|
||||
gecho "# Failed to parse `nim dump` output: " & e.msg
|
||||
echo "# Failed to parse `nim dump` output: " & e.msg
|
||||
|
||||
proc getOsCacheDir(): string =
|
||||
# OS default cache directory
|
||||
|
|
@ -64,14 +58,36 @@ proc getProjectDir*(): string =
|
|||
else:
|
||||
discard
|
||||
|
||||
proc stripName(path, projectName: string): string =
|
||||
# Remove `pname_d|r` tail from path
|
||||
let
|
||||
(head, tail) = path.splitPath()
|
||||
if projectName in tail:
|
||||
result = head
|
||||
proc getNimcacheDir*(projectDir = ""): string =
|
||||
## Get nimcache directory for current compilation or specified `projectDir`
|
||||
when nimvm:
|
||||
when (NimMajor, NimMinor, NimPatch) >= (1, 2, 0):
|
||||
# Get value at compile time from `std/compilesettings`
|
||||
result = querySetting(SingleValueSetting.nimcacheDir)
|
||||
else:
|
||||
result = path
|
||||
discard
|
||||
|
||||
# Not Nim v1.2.0+ or runtime
|
||||
if result.len == 0:
|
||||
let
|
||||
# Get project directory for < v1.2.0 at compile time
|
||||
projectDir = if projectDir.len != 0: projectDir else: getProjectDir()
|
||||
|
||||
# Use `nim dump` to figure out nimcache for `projectDir`
|
||||
let
|
||||
dumpJson = getJson(projectDir)
|
||||
|
||||
if dumpJson != nil and dumpJson.hasKey("nimcache"):
|
||||
result = dumpJson["nimcache"].getStr()
|
||||
let
|
||||
(head, tail) = result.splitPath()
|
||||
if "dummy" in tail:
|
||||
# Remove `dummy_d` subdir when default nimcache
|
||||
result = head
|
||||
|
||||
# Set to OS defaults if not detectable
|
||||
if result.len == 0:
|
||||
result = getOsCacheDir()
|
||||
|
||||
proc jsonToSeq(node: JsonNode, key: string): seq[string] =
|
||||
# Convert JsonArray to seq[string] for specified `key`
|
||||
|
|
@ -79,13 +95,6 @@ proc jsonToSeq(node: JsonNode, key: string): seq[string] =
|
|||
for elem in node[key].getElems():
|
||||
result.add elem.getStr()
|
||||
|
||||
proc getAbsoluteDir(projectDir, path: string): string =
|
||||
# Path is relative to `projectDir` if not absolute
|
||||
if path.isAbsolute():
|
||||
result = path
|
||||
else:
|
||||
result = (projectDir / path).normalizedPath()
|
||||
|
||||
proc getNimConfig*(projectDir = ""): Config =
|
||||
# Get `paths` - list of paths to be forwarded to Nim
|
||||
result = new(Config)
|
||||
|
|
@ -103,11 +112,7 @@ proc getNimConfig*(projectDir = ""): Config =
|
|||
libPath = getCurrentCompilerExe().parentDir().parentDir() / "lib"
|
||||
lazyPaths = querySettingSeq(MultipleValueSetting.lazyPaths)
|
||||
searchPaths = querySettingSeq(MultipleValueSetting.searchPaths)
|
||||
result.nimcacheDir = stripName(
|
||||
querySetting(SingleValueSetting.nimcacheDir),
|
||||
querySetting(SingleValueSetting.projectName)
|
||||
)
|
||||
result.outDir = querySetting(SingleValueSetting.outDir)
|
||||
result.nimcacheDir = querySetting(SingleValueSetting.nimcacheDir)
|
||||
else:
|
||||
discard
|
||||
|
||||
|
|
@ -131,11 +136,6 @@ proc getNimConfig*(projectDir = ""): Config =
|
|||
# Usually `libPath` is last entry in `searchPaths`
|
||||
libPath = searchPaths[^1]
|
||||
|
||||
if dumpJson.hasKey("nimcache"):
|
||||
result.nimcacheDir = stripName(dumpJson["nimcache"].getStr(), "dummy")
|
||||
if dumpJson.hasKey("outdir"):
|
||||
result.outDir = dumpJson["outdir"].getStr()
|
||||
|
||||
# Parse version
|
||||
if version.len != 0:
|
||||
let
|
||||
|
|
@ -146,15 +146,12 @@ proc getNimConfig*(projectDir = ""): Config =
|
|||
|
||||
# Find non standard lib paths added to `searchPath`
|
||||
for path in searchPaths:
|
||||
let
|
||||
path = getAbsoluteDir(projectDir, path)
|
||||
if libPath notin path:
|
||||
result.paths.incl path
|
||||
|
||||
# Find `nimblePaths` in `lazyPaths`
|
||||
for path in lazyPaths:
|
||||
let
|
||||
path = getAbsoluteDir(projectDir, path)
|
||||
(_, tail) = path.strip(leading = false, chars = {'/', '\\'}).splitPath()
|
||||
if tail == "pkgs":
|
||||
# Nimble path probably
|
||||
|
|
@ -164,8 +161,6 @@ proc getNimConfig*(projectDir = ""): Config =
|
|||
# Have to do this separately since `nimblePaths` could be after
|
||||
# packages in `lazyPaths`
|
||||
for path in lazyPaths:
|
||||
let
|
||||
path = getAbsoluteDir(projectDir, path)
|
||||
var skip = false
|
||||
for npath in result.nimblePaths:
|
||||
if npath in path:
|
||||
|
|
@ -174,11 +169,7 @@ proc getNimConfig*(projectDir = ""): Config =
|
|||
if not skip:
|
||||
result.paths.incl path
|
||||
|
||||
if result.nimcacheDir.len == 0:
|
||||
result.nimcacheDir = getOsCacheDir()
|
||||
|
||||
if result.outDir.len == 0:
|
||||
result.outDir = projectDir
|
||||
result.nimcacheDir = getNimcacheDir(projectDir)
|
||||
|
||||
proc getNimConfigFlags(cfg: Config): string =
|
||||
# Convert configuration into Nim flags for cfg file or command line
|
||||
|
|
@ -217,24 +208,4 @@ proc writeNimConfig*(cfgFile: string, projectDir = "") =
|
|||
let
|
||||
cfg = getNimConfig(projectDir)
|
||||
cfgOut = getNimConfigFlags(cfg)
|
||||
writeFile(cfgFile, cfgOut)
|
||||
|
||||
proc getNimcacheDir*(projectDir = ""): string =
|
||||
## Get nimcache directory for current compilation or specified `projectDir`
|
||||
let
|
||||
cfg = getNimConfig(projectDir)
|
||||
result = cfg.nimcacheDir
|
||||
|
||||
proc getOutDir*(projectDir = ""): string =
|
||||
## Get output directory for current compilation or specified `projectDir`
|
||||
let
|
||||
cfg = getNimConfig(projectDir)
|
||||
result = cfg.outDir
|
||||
|
||||
proc getNimteropCacheDir*(): string =
|
||||
## Get location to cache all nimterop artifacts
|
||||
result = getNimcacheDir() / "nimterop"
|
||||
|
||||
proc fixRelPath*(path: string): string =
|
||||
## If `path` is relative, consider relative to `projectPath`
|
||||
if path.isAbsolute: path else: getProjectDir() / path
|
||||
writeFile(cfgFile, cfgOut)
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
import os
|
||||
|
||||
import "."/build/shell
|
||||
import "."/build
|
||||
|
||||
const
|
||||
cacheDir* = getProjectCacheDir("nimterop", forceClean = false)
|
||||
|
|
|
|||
|
|
@ -1,13 +1,17 @@
|
|||
import os, strutils
|
||||
|
||||
import "."/[paths]
|
||||
import "."/build/[shell]
|
||||
import "."/[build, paths]
|
||||
|
||||
proc treesitterSetup*() =
|
||||
gitPull("https://github.com/tree-sitter/tree-sitter", cacheDir / "treesitter", """
|
||||
lib/include/*
|
||||
lib/src/*
|
||||
""", "0.16.8")
|
||||
""", "0.15.5")
|
||||
|
||||
gitPull("https://github.com/JuliaStrings/utf8proc", cacheDir / "utf8proc", """
|
||||
*.c
|
||||
*.h
|
||||
""")
|
||||
|
||||
let
|
||||
tbase = cacheDir / "treesitter" / "lib"
|
||||
|
|
@ -32,7 +36,11 @@ src/*.h
|
|||
src/*.c
|
||||
src/*.cc
|
||||
src/tree_sitter/parser.h
|
||||
""", "0.16.1")
|
||||
""", "v0.15.0")
|
||||
|
||||
writeFile(cacheDir / "treesitter_c" / "src" / "api.h", """
|
||||
const TSLanguage *tree_sitter_c();
|
||||
""")
|
||||
|
||||
proc treesitterCppSetup*() =
|
||||
gitPull("https://github.com/tree-sitter/tree-sitter-cpp", cacheDir / "treesitter_cpp", """
|
||||
|
|
@ -40,4 +48,8 @@ src/*.h
|
|||
src/*.c
|
||||
src/*.cc
|
||||
src/tree_sitter/parser.h
|
||||
""", "v0.16.0")
|
||||
""", "v0.15.0")
|
||||
|
||||
writeFile(cacheDir / "treesitter_cpp" / "src" / "api.h", """
|
||||
const TSLanguage *tree_sitter_cpp();
|
||||
""")
|
||||
|
|
|
|||
111
nimterop/template.nim
Normal file
111
nimterop/template.nim
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
import os, strutils
|
||||
|
||||
import nimterop/[cimport, build, paths]
|
||||
|
||||
# Documentation:
|
||||
# https://github.com/nimterop/nimterop
|
||||
# https://nimterop.github.io/nimterop/cimport.html
|
||||
|
||||
const
|
||||
# Location where any sources should get downloaded. Adjust depending on
|
||||
# actual location of wrapper file relative to project.
|
||||
baseDir = currentSourcePath.parentDir()/"build"
|
||||
|
||||
# All files and dirs should be inside to baseDir
|
||||
srcDir = baseDir/"project"
|
||||
|
||||
static:
|
||||
# Print generated Nim to output
|
||||
cDebug()
|
||||
|
||||
# Disable caching so that wrapper is generated every time. Useful during
|
||||
# development. Remove once wrapper is working as expected.
|
||||
cDisableCaching()
|
||||
|
||||
# Download C/C++ source code from a git repository
|
||||
gitPull("https://github.com/user/project", outdir = srcDir, plist = """
|
||||
include/*.h
|
||||
src/*.c
|
||||
""", checkout = "tag/branch/hash")
|
||||
|
||||
# Download source from the web - zip files are auto extracted
|
||||
downloadUrl("https://hostname.com/file.h", outdir = srcDir)
|
||||
|
||||
# Run GNU configure on the source
|
||||
when defined(posix):
|
||||
configure(srcDir, fileThatShouldGetGenerated, flagsToConfigure)
|
||||
|
||||
# Run cmake on the source
|
||||
cmake(srcDir/"build", fileThatShouldGetGenerated, flagsToCmake)
|
||||
|
||||
# Run standard file/directory operations with mkDir(), cpFile(), mvFile()
|
||||
|
||||
# Edit file contents if required with readFile(), writeFile() and standard
|
||||
# string operations
|
||||
|
||||
# Run any other external commands with execAction()
|
||||
|
||||
# Skip any symbols from being wrapped
|
||||
cSkipSymbol(@["type1", "proc2"])
|
||||
|
||||
# Manually wrap any symbols since nimterop cannot or incorrectly wraps them
|
||||
cOverride:
|
||||
# Standard Nim code to wrap types, consts, procs, etc.
|
||||
type
|
||||
symbol = object
|
||||
|
||||
# Specify include directories for gcc and Nim
|
||||
cIncludeDir(srcDir/"include")
|
||||
|
||||
# Define global symbols
|
||||
cDefine("SYMBOL", "value")
|
||||
|
||||
# Any global compiler options
|
||||
{.passC: "flags".}
|
||||
|
||||
# Any global linker options
|
||||
{.passL: "flags".}
|
||||
|
||||
# Compile in any common source code
|
||||
cCompile(srcDir/"file.c")
|
||||
|
||||
# Perform OS specific tasks
|
||||
when defined(Windows):
|
||||
# Windows specific symbols, options and files
|
||||
|
||||
# Dynamic library to link against
|
||||
const dynlibFile =
|
||||
when defined(cpu64):
|
||||
"xyz64.dll"
|
||||
else:
|
||||
"xyz32.dll"
|
||||
elif defined(posix):
|
||||
# Common posix symbols, options and files
|
||||
|
||||
when defined(linux):
|
||||
# Linux specific
|
||||
const dynlibFile = "libxyz.so(.2|.1|)"
|
||||
elif defined(osx):
|
||||
# MacOSX specific
|
||||
const dynlibFile = "libxyz(.2|.1|).dylib"
|
||||
else:
|
||||
static: doAssert false
|
||||
else:
|
||||
static: doAssert false
|
||||
|
||||
# Use cPlugin() to make any symbol changes
|
||||
cPlugin:
|
||||
import strutils
|
||||
|
||||
# Symbol renaming examples
|
||||
proc onSymbol*(sym: var Symbol) {.exportc, dynlib.} =
|
||||
# Get rid of leading and trailing underscores
|
||||
sym.name = sym.name.strip(chars = {'_'})
|
||||
|
||||
# Remove prefixes or suffixes from procs
|
||||
if sym.kind == nskProc and sym.name.contains("SDL_"):
|
||||
sym.name = sym.name.replace("SDL_", "")
|
||||
|
||||
# Finally import wrapped header file. Recurse if #include files should also
|
||||
# be wrapped. Set dynlib if binding to dynamic library.
|
||||
cImport(srcDir/"include/file.h", recurse = true, dynlib="dynlibFile")
|
||||
36
nimterop/templite.nim
Normal file
36
nimterop/templite.nim
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
import os, strutils
|
||||
|
||||
import nimterop/[cimport, build, paths]
|
||||
|
||||
const
|
||||
baseDir = currentSourcePath.parentDir()/"build"
|
||||
|
||||
srcDir = baseDir/"project"
|
||||
|
||||
static:
|
||||
cDebug()
|
||||
cDisableCaching()
|
||||
|
||||
gitPull("https://github.com/user/project", outdir = srcDir, plist = """
|
||||
include/*.h
|
||||
src/*.c
|
||||
""", checkout = "tag/branch/hash")
|
||||
|
||||
downloadUrl("https://hostname.com/file.h", outdir = srcDir)
|
||||
|
||||
cIncludeDir(srcDir/"include")
|
||||
|
||||
cDefine("SYMBOL", "value")
|
||||
|
||||
{.passC: "flags".}
|
||||
{.passL: "flags".}
|
||||
|
||||
cCompile(srcDir/"file.c")
|
||||
|
||||
cPlugin:
|
||||
import strutils
|
||||
|
||||
proc onSymbol*(sym: var Symbol) {.exportc, dynlib.} =
|
||||
sym.name = sym.name.strip(chars = {'_'})
|
||||
|
||||
cImport(srcDir/"include/file.h", recurse = true)
|
||||
|
|
@ -1,25 +1,17 @@
|
|||
import os, osproc, sets, strformat, strutils, tables, times
|
||||
import os, osproc, strformat, strutils, tables, times
|
||||
|
||||
import "."/treesitter/[api, c, cpp]
|
||||
|
||||
import "."/[globals]
|
||||
import "."/[ast, ast2, build, globals, getters, grammar, tshelp]
|
||||
|
||||
import "."/toastlib/[ast2, getters, tshelp]
|
||||
|
||||
import "."/build/[ccompiler, misc]
|
||||
|
||||
var
|
||||
# Output generated before main() is called
|
||||
preMainOut = ""
|
||||
|
||||
proc process(gState: State, path: string) =
|
||||
doAssert fileExists(path), &"Invalid path {path}"
|
||||
proc process(gState: State, path: string, astTable: AstTable) =
|
||||
doAssert existsFile(path), &"Invalid path {path}"
|
||||
|
||||
if gState.mode.Bl:
|
||||
gState.mode = getCompilerMode(path)
|
||||
|
||||
if gState.preprocess:
|
||||
gState.getPreprocessor(path)
|
||||
gState.code = gState.getPreprocessor(path)
|
||||
else:
|
||||
gState.code = readFile(path)
|
||||
|
||||
|
|
@ -27,29 +19,29 @@ proc process(gState: State, path: string) =
|
|||
if gState.past:
|
||||
gecho gState.printLisp(root)
|
||||
elif gState.pnim:
|
||||
parseNim(gState, path, root)
|
||||
if Feature.ast2 in gState.feature:
|
||||
ast2.parseNim(gState, path, root)
|
||||
elif Feature.ast1 in gState.feature:
|
||||
ast.parseNim(gState, path, root, astTable)
|
||||
elif gState.preprocess:
|
||||
gecho gState.code
|
||||
|
||||
# CLI processing with default values
|
||||
proc main(
|
||||
check = false,
|
||||
compile: seq[string] = @[],
|
||||
convention = "cdecl",
|
||||
debug = false,
|
||||
defines: seq[string] = @[],
|
||||
dynlib: string = "",
|
||||
exclude: seq[string] = @[],
|
||||
feature: seq[Feature] = @[],
|
||||
feature: seq[Feature] = @[Feature.ast1],
|
||||
includeHeader = false,
|
||||
includeDirs: seq[string] = @[],
|
||||
mode = "",
|
||||
nim: string = "nim",
|
||||
noComments = false,
|
||||
noHeader = false,
|
||||
nocomments = false,
|
||||
output = "",
|
||||
passC: seq[string] = @[],
|
||||
passL: seq[string] = @[],
|
||||
past = false,
|
||||
pgrammar = false,
|
||||
pluginSourcePath: string = "",
|
||||
pnim = false,
|
||||
prefix: seq[string] = @[],
|
||||
|
|
@ -59,26 +51,21 @@ proc main(
|
|||
stub = false,
|
||||
suffix: seq[string] = @[],
|
||||
symOverride: seq[string] = @[],
|
||||
typeMap: seq[string] = @[],
|
||||
source: seq[string]
|
||||
) =
|
||||
|
||||
# Setup global state with arguments
|
||||
gState = State(
|
||||
compile: compile,
|
||||
var gState = State(
|
||||
convention: convention,
|
||||
debug: debug,
|
||||
defines: defines,
|
||||
dynlib: dynlib,
|
||||
exclude: exclude,
|
||||
feature: feature,
|
||||
includeHeader: includeHeader,
|
||||
includeDirs: includeDirs,
|
||||
mode: mode,
|
||||
nim: nim.sanitizePath,
|
||||
noComments: noComments,
|
||||
noHeader: noHeader,
|
||||
passC: passC,
|
||||
passL: passL,
|
||||
nim: nim,
|
||||
nocomments: nocomments,
|
||||
past: past,
|
||||
pluginSourcePath: pluginSourcePath,
|
||||
pnim: pnim,
|
||||
|
|
@ -90,6 +77,13 @@ proc main(
|
|||
symOverride: symOverride
|
||||
)
|
||||
|
||||
# Fail if both includeHeader and dynlib
|
||||
doAssert not (includeHeader == true and dynlib.nBl),
|
||||
"`includeHeader` and `dynlib` cannot be used simultaneously"
|
||||
|
||||
# Set gDebug in build.nim
|
||||
build.gDebug = debug
|
||||
|
||||
# Split some arguments with ,
|
||||
gState.symOverride = gState.symOverride.getSplitComma()
|
||||
gState.prefix = gState.prefix.getSplitComma()
|
||||
|
|
@ -103,14 +97,6 @@ proc main(
|
|||
value = if nv.len == 2: nv[1] else: ""
|
||||
gState.replace[name] = value
|
||||
|
||||
# typeMap => getters.gTypeMap
|
||||
for i in typeMap.getSplitComma():
|
||||
let
|
||||
nv = i.split("=", maxsplit = 1)
|
||||
doAssert nv.len == 2, "`--typeMap` requires X=Y format"
|
||||
gTypeMap[nv[0]] = nv[1]
|
||||
gTypeMapValues.incl nv[1]
|
||||
|
||||
if pluginSourcePath.nBl:
|
||||
gState.loadPlugin(pluginSourcePath)
|
||||
|
||||
|
|
@ -127,30 +113,37 @@ proc main(
|
|||
if check and outputFile.len == 0:
|
||||
outputFile = getTempDir() / "toast_" & ($getTime().toUnix()).addFileExt("nim")
|
||||
|
||||
# Recurse implies preprocess
|
||||
if gState.recurse:
|
||||
gState.preprocess = true
|
||||
|
||||
# Redirect output to file
|
||||
if outputFile.len != 0:
|
||||
doAssert gState.outputHandle.open(outputFile, fmWrite),
|
||||
&"Failed to write to {outputFile}"
|
||||
|
||||
decho &"# Writing output to {outputFile}\n"
|
||||
# Process grammar into AST
|
||||
let
|
||||
astTable =
|
||||
if Feature.ast1 in gState.feature:
|
||||
parseGrammar()
|
||||
else:
|
||||
nil
|
||||
|
||||
if source.nBl:
|
||||
if pgrammar:
|
||||
if Feature.ast1 in gState.feature:
|
||||
# Print AST of grammar
|
||||
gState.printGrammar(astTable)
|
||||
elif source.nBl:
|
||||
# Print source after preprocess or Nim output
|
||||
if gState.pnim:
|
||||
gecho preMainOut
|
||||
gState.printNimHeader()
|
||||
gState.initNim()
|
||||
for src in source:
|
||||
let
|
||||
src = src.expandSymlinkAbs()
|
||||
if src notin gState.headersProcessed:
|
||||
gState.process(src)
|
||||
gState.headersProcessed.incl src
|
||||
gState.process(src.expandSymlinkAbs(), astTable)
|
||||
if gState.pnim:
|
||||
printNim(gState)
|
||||
if Feature.ast2 in gState.feature:
|
||||
ast2.printNim(gState)
|
||||
elif Feature.ast1 in gState.feature:
|
||||
ast.printNim(gState)
|
||||
gecho """{.hint: "The legacy wrapper generation algorithm is deprecated and will be removed in the next release of Nimterop.".}"""
|
||||
gecho """{.hint: "Refer to CHANGES.md for details on migrating to the new backend.".}"""
|
||||
|
||||
# Close outputFile
|
||||
if outputFile.len != 0:
|
||||
|
|
@ -198,74 +191,47 @@ proc main(
|
|||
if check and output.len == 0:
|
||||
stdout.write outputFile.readFile()
|
||||
|
||||
proc mergeParams(cmdNames: seq[string], cmdLine = commandLineParams()): seq[string] =
|
||||
# Load command-line params from `source` if it is a .cfg file
|
||||
if cmdNames.len != 0:
|
||||
# https://github.com/c-blake/cligen/issues/149
|
||||
for param in cmdLine:
|
||||
if param.fileExists() and param.splitFile().ext == ".cfg":
|
||||
preMainOut &= &"# Loading flags from '{param}'\n"
|
||||
for line in param.readFile().splitLines():
|
||||
let
|
||||
line = line.strip()
|
||||
if line.len > 1 and line[0] != '#':
|
||||
result.add line.parseCmdLine()
|
||||
else:
|
||||
result.add param
|
||||
|
||||
if result.len != 0 and "-h" notin result and "--help" notin result:
|
||||
preMainOut &= &"""# Generated @ {$now()}
|
||||
# Command line:
|
||||
# {getAppFilename()} {result.join(" ")}
|
||||
"""
|
||||
else:
|
||||
result = cmdLine
|
||||
|
||||
when isMainModule:
|
||||
# Setup cligen command line help and short flags
|
||||
import cligen
|
||||
dispatch(main, help = {
|
||||
"check": "check generated wrapper with compiler",
|
||||
"compile": "create {.compile.} entries in generated wrapper",
|
||||
"convention": "calling convention for wrapped procs",
|
||||
"convention": "calling convention for wrapped procs - default: cdecl",
|
||||
"debug": "enable debug output",
|
||||
"defines": "definitions to pass to preprocessor",
|
||||
"dynlib": "{.dynlib.} pragma to import symbols - Nim const string or file path",
|
||||
"exclude": "files or directories to exclude from the wrapped output",
|
||||
"dynlib": "import symbols from library in specified Nim string",
|
||||
"feature": "flags to enable experimental features",
|
||||
"includeHeader": "add {.header.} pragma to wrapper",
|
||||
"includeDirs": "include directory to pass to preprocessor",
|
||||
"mode": "language parser: c or cpp",
|
||||
"nim": "use a particular Nim executable",
|
||||
"noComments": "exclude top-level comments from output",
|
||||
"noHeader": "skip {.header.} pragma in wrapper",
|
||||
"nim": "use a particular Nim executable - default: $PATH/nim",
|
||||
"nocomments": "exclude top-level comments from output",
|
||||
"output": "file to output content - default: stdout",
|
||||
"passC": "create {.passC.} entries in generated wrapper",
|
||||
"passL": "create {.passL.} entries in generated wrapper",
|
||||
"past": "print AST output",
|
||||
"pgrammar": "print grammar",
|
||||
"pluginSourcePath": "nim file to build and load as a plugin",
|
||||
"pnim": "print Nim output",
|
||||
"prefix": "strip prefix from identifiers",
|
||||
"preprocess": "run preprocessor on header",
|
||||
"recurse": "process #include files - implies --preprocess",
|
||||
"recurse": "process #include files",
|
||||
"replace": "replace X with Y in identifiers, X1=Y1,X2=Y2, @X for regex",
|
||||
"source" : "C/C++ source/header(s) and command line file(s)",
|
||||
"source" : "C/C++ source/header",
|
||||
"stub": "stub out undefined type references as objects",
|
||||
"suffix": "strip suffix from identifiers",
|
||||
"symOverride": "skip generating specified symbols",
|
||||
"typeMap": "map instances of type X to Y - e.g. ABC=cint"
|
||||
"symOverride": "skip generating specified symbols"
|
||||
}, short = {
|
||||
"check": 'k',
|
||||
"convention": 'C',
|
||||
"debug": 'd',
|
||||
"defines": 'D',
|
||||
"dynlib": 'l',
|
||||
"exclude": 'X',
|
||||
"feature": 'f',
|
||||
"includeHeader": 'H',
|
||||
"includeDirs": 'I',
|
||||
"noComments": 'c',
|
||||
"noHeader": 'H',
|
||||
"nocomments": 'c',
|
||||
"output": 'o',
|
||||
"past": 'a',
|
||||
"pgrammar": 'g',
|
||||
"pnim": 'n',
|
||||
"prefix": 'E',
|
||||
"preprocess": 'p',
|
||||
|
|
@ -273,6 +239,5 @@ when isMainModule:
|
|||
"replace": 'G',
|
||||
"stub": 's',
|
||||
"suffix": 'F',
|
||||
"symOverride": 'O',
|
||||
"typeMap": 'T'
|
||||
"symOverride": 'O'
|
||||
})
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
import os
|
||||
|
||||
# Workaround for C++ scanner.cc causing link error with other C obj files
|
||||
switch("clang.linkerexe", "clang++")
|
||||
switch("gcc.linkerexe", "g++")
|
||||
when defined(MacOSX):
|
||||
switch("clang.linkerexe", "g++")
|
||||
else:
|
||||
switch("gcc.linkerexe", "g++")
|
||||
|
||||
# Workaround for NilAccessError crash on Windows #98
|
||||
# Could also help for OSX/Linux crash
|
||||
|
|
@ -23,8 +25,4 @@ when not defined(danger):
|
|||
switch("out", currentSourcePath.parentDir() / "toast".addFileExt(ExeExt))
|
||||
|
||||
# Define TOAST for globals.nim
|
||||
switch("define", "TOAST")
|
||||
|
||||
# Static for Windows - #248
|
||||
when defined(Windows):
|
||||
switch("passL", "-static")
|
||||
switch("define", "TOAST")
|
||||
|
|
@ -1,147 +0,0 @@
|
|||
import macros, strutils
|
||||
|
||||
import compiler/[ast, idents, lineinfos, msgs, options, parser, pathutils, renderer]
|
||||
|
||||
import ".."/[globals, treesitter/api]
|
||||
import "."/[getters, tshelp]
|
||||
|
||||
proc handleError*(conf: ConfigRef, info: TLineInfo, msg: TMsgKind, arg: string) =
|
||||
# Raise exception in parseString() instead of exiting for errors
|
||||
if msg < warnMin:
|
||||
raise newException(Exception, msgKindToString(msg))
|
||||
|
||||
proc parseString*(gState: State, str: string): PNode =
|
||||
# Parse a string into Nim AST - use custom error handler that raises
|
||||
# an exception rather than exiting on failure
|
||||
try:
|
||||
result = parseString(
|
||||
str, gState.identCache, gState.config, errorHandler = handleError
|
||||
)
|
||||
except:
|
||||
decho getCurrentExceptionMsg()
|
||||
|
||||
proc printTree*(gState: State, pnode: PNode, offset = ""): string =
|
||||
if not pnode.isNil and gState.debug and pnode.kind != nkNone:
|
||||
result &= "\n# " & offset & $pnode.kind & "("
|
||||
case pnode.kind
|
||||
of nkCharLit:
|
||||
result &= ($pnode.intVal.char).escape & ")"
|
||||
of nkIntLit..nkUInt64Lit:
|
||||
result &= $pnode.intVal & ")"
|
||||
of nkFloatLit..nkFloat128Lit:
|
||||
result &= $pnode.floatVal & ")"
|
||||
of nkStrLit..nkTripleStrLit:
|
||||
result &= pnode.strVal.escape & ")"
|
||||
of nkSym:
|
||||
result &= $pnode.sym & ")"
|
||||
of nkIdent:
|
||||
result &= "\"" & $pnode.ident.s & "\")"
|
||||
else:
|
||||
if pnode.sons.len != 0:
|
||||
for i in 0 ..< pnode.sons.len:
|
||||
result &= gState.printTree(pnode.sons[i], offset & " ")
|
||||
if i != pnode.sons.len - 1:
|
||||
result &= ","
|
||||
result &= "\n# " & offset & ")"
|
||||
else:
|
||||
result &= ")"
|
||||
if offset.len == 0:
|
||||
result &= "\n"
|
||||
|
||||
proc printDebug*(gState: State, pnode: PNode) =
|
||||
if gState.debug and pnode.kind != nkNone:
|
||||
gecho ("Output => " & $pnode).getCommented()
|
||||
gecho gState.printTree(pnode)
|
||||
|
||||
proc getDefaultLineInfo*(gState: State): TLineInfo =
|
||||
result = newLineInfo(gState.config, gState.sourceFile.AbsoluteFile, 0, 0)
|
||||
|
||||
proc getLineInfo*(gState: State, node: TSNode): TLineInfo =
|
||||
# Get Nim equivalent line:col info from node
|
||||
let
|
||||
(line, col) = gState.getLineCol(node)
|
||||
|
||||
result = newLineInfo(gState.config, gState.sourceFile.AbsoluteFile, line, col)
|
||||
|
||||
proc getIdent*(gState: State, name: string, info: TLineInfo, exported = true): PNode =
|
||||
if name.nBl:
|
||||
# Get ident PNode for name + info
|
||||
let
|
||||
exp = getIdent(gState.identCache, "*")
|
||||
ident = getIdent(gState.identCache, name)
|
||||
|
||||
if exported:
|
||||
result = newNode(nkPostfix)
|
||||
result.add newIdentNode(exp, info)
|
||||
result.add newIdentNode(ident, info)
|
||||
else:
|
||||
result = newIdentNode(ident, info)
|
||||
|
||||
proc getIdent*(gState: State, name: string): PNode =
|
||||
gState.getIdent(name, gState.getDefaultLineInfo(), exported = false)
|
||||
|
||||
proc getIdentName*(node: PNode): string =
|
||||
if not node.isNil:
|
||||
for i in 0 ..< node.len:
|
||||
if node[i].kind == nkIdent and $node[i] != "*":
|
||||
result = $node[i]
|
||||
if result.Bl and node.len > 0:
|
||||
result = node[0].getIdentName()
|
||||
|
||||
proc getNameInfo*(gState: State, node: TSNode, kind: NimSymKind, parent = ""):
|
||||
tuple[name, origname: string, info: TLineInfo] =
|
||||
# Shortcut to get identifier name and info (node value and line:col)
|
||||
result.origname = gState.getNodeVal(node)
|
||||
result.name = gState.getIdentifier(result.origname, kind, parent)
|
||||
if result.name.nBl:
|
||||
if kind == nskType:
|
||||
result.name = gState.getType(result.name, parent)
|
||||
result.info = gState.getLineInfo(node)
|
||||
|
||||
proc getPtrType*(str: string): string =
|
||||
result = case str:
|
||||
of "cchar":
|
||||
"cstring"
|
||||
of "object":
|
||||
"pointer"
|
||||
of "FILE":
|
||||
"File"
|
||||
else:
|
||||
str
|
||||
|
||||
proc newPtrTree*(gState: State, count: int, typ: PNode): PNode =
|
||||
# Create nkPtrTy tree depending on count
|
||||
#
|
||||
# Reduce by 1 if Nim type available for ptr X - e.g. ptr cchar = cstring
|
||||
result = typ
|
||||
var
|
||||
count = count
|
||||
if typ.kind == nkIdent:
|
||||
let
|
||||
tname = typ.ident.s
|
||||
ptname = getPtrType(tname)
|
||||
if tname != ptname:
|
||||
# If Nim type available, use that ident
|
||||
result = gState.getIdent(ptname, typ.info, exported = false)
|
||||
# One ptr reduced
|
||||
count -= 1
|
||||
if count > 0:
|
||||
# Nested nkPtrTy(typ) depending on count
|
||||
#
|
||||
# [ptr ...] typ
|
||||
#
|
||||
# nkPtrTy(
|
||||
# nkPtrTy(
|
||||
# typ
|
||||
# )
|
||||
# )
|
||||
var
|
||||
nresult = newNode(nkPtrTy)
|
||||
parent = nresult
|
||||
child: PNode
|
||||
for i in 1 ..< count:
|
||||
child = newNode(nkPtrTy)
|
||||
parent.add child
|
||||
parent = child
|
||||
parent.add result
|
||||
result = nresult
|
||||
|
|
@ -1,421 +0,0 @@
|
|||
import dynlib, macros, os, osproc, sequtils, sets, streams, strformat, strutils, tables, times
|
||||
|
||||
import regex
|
||||
|
||||
import ".."/[globals, plugin]
|
||||
import ".."/build/[ccompiler, misc, nimconf, shell]
|
||||
|
||||
const gReserved = """
|
||||
addr and as asm
|
||||
bind block break
|
||||
case cast concept const continue converter
|
||||
defer discard distinct div do
|
||||
elif else end enum except export
|
||||
finally for from func
|
||||
if import in include interface is isnot iterator
|
||||
let
|
||||
macro method mixin mod
|
||||
nil not notin
|
||||
of or out
|
||||
proc ptr
|
||||
raise ref return
|
||||
shl shr static
|
||||
template try tuple type
|
||||
using
|
||||
var
|
||||
when while
|
||||
xor
|
||||
yield""".split(Whitespace).toHashSet()
|
||||
|
||||
# Types related
|
||||
|
||||
const
|
||||
# Enum macro read from file - written into wrapper when required
|
||||
gEnumMacroConst = "import nimterop / enumtypepub"
|
||||
|
||||
var
|
||||
gEnumMacro* = gEnumMacroConst
|
||||
|
||||
gTypeMap* = {
|
||||
# char
|
||||
"char": "cchar",
|
||||
"signed char": "cschar",
|
||||
"unsigned char": "cuchar",
|
||||
|
||||
# short
|
||||
"short": "cshort",
|
||||
"short int": "cshort",
|
||||
"signed short": "cshort",
|
||||
"signed short int": "cshort",
|
||||
"unsigned short": "cushort",
|
||||
"unsigned short int": "cushort",
|
||||
"uShort": "cushort",
|
||||
"u_short": "cushort",
|
||||
|
||||
# int
|
||||
"int": "cint",
|
||||
"signed": "cint",
|
||||
"signed int": "cint",
|
||||
"ssize_t": "int",
|
||||
"unsigned": "cuint",
|
||||
"unsigned int": "cuint",
|
||||
"uInt": "cuint",
|
||||
"u_int": "cuint",
|
||||
"size_t": "uint",
|
||||
|
||||
"int8_t": "int8",
|
||||
"int16_t": "int16",
|
||||
"int32_t": "int32",
|
||||
"int64_t": "int64",
|
||||
|
||||
"intptr_t": "ptr int",
|
||||
|
||||
"Int8": "int8",
|
||||
"Int16": "int16",
|
||||
"Int32": "int32",
|
||||
"Int64": "int64",
|
||||
|
||||
"uint8_t": "uint8",
|
||||
"uint16_t": "uint16",
|
||||
"uint32_t": "uint32",
|
||||
"uint64_t": "uint64",
|
||||
|
||||
"uintptr_t": "ptr uint",
|
||||
|
||||
"Uint8": "uint8",
|
||||
"Uint16": "uint16",
|
||||
"Uint32": "uint32",
|
||||
"Uint64": "uint64",
|
||||
|
||||
# long
|
||||
"long": "clong",
|
||||
"long int": "clong",
|
||||
"signed long": "clong",
|
||||
"signed long int": "clong",
|
||||
"off_t": "clong",
|
||||
"unsigned long": "culong",
|
||||
"unsigned long int": "culong",
|
||||
"uLong": "culong",
|
||||
"u_long": "culong",
|
||||
|
||||
# long long
|
||||
"long long": "clonglong",
|
||||
"long long int": "clonglong",
|
||||
"signed long long": "clonglong",
|
||||
"signed long long int": "clonglong",
|
||||
"off64_t": "clonglong",
|
||||
"unsigned long long": "culonglong",
|
||||
"unsigned long long int": "culonglong",
|
||||
|
||||
# floating point
|
||||
"float": "cfloat",
|
||||
"double": "cdouble",
|
||||
"long double": "clongdouble",
|
||||
|
||||
# Misc Nim types
|
||||
"Bool": "bool",
|
||||
"ptrdiff_t": "ByteAddress"
|
||||
}.toTable()
|
||||
|
||||
# Nim type names that shouldn't need to be wrapped again
|
||||
gTypeMapValues* = toSeq(gTypeMap.values).toHashSet()
|
||||
|
||||
# Types to import from C/Nim if used in wrapper
|
||||
gTypeImport* = {
|
||||
"time_t": """
|
||||
import std/time_t as std_time_t
|
||||
type time_t* = std_time_t.Time
|
||||
""",
|
||||
|
||||
"time64_t": """
|
||||
import std/time_t as std_time64_t
|
||||
type time64_t* = std_time64_t.Time
|
||||
""",
|
||||
|
||||
"wchar_t": """
|
||||
when defined(cpp):
|
||||
# http://www.cplusplus.com/reference/cwchar/wchar_t/
|
||||
# In C++, wchar_t is a distinct fundamental type (and thus it is
|
||||
# not defined in <cwchar> nor any other header).
|
||||
type wchar_t* {.importc.} = object
|
||||
else:
|
||||
type wchar_t* {.importc, header:"stddef.h".} = object
|
||||
""",
|
||||
|
||||
"va_list": """
|
||||
type va_list* {.importc, header:"<stdarg.h>".} = object
|
||||
"""}.toTable()
|
||||
|
||||
proc getType*(gState: State, str, parent: string): string =
|
||||
if str == "void":
|
||||
return "object"
|
||||
|
||||
result = str.strip(chars={'_'}).splitWhitespace().join(" ")
|
||||
|
||||
if gTypeMap.hasKey(result):
|
||||
result = gTypeMap[result]
|
||||
elif parent.nBl and gTypeImport.hasKey(result) and not gState.identifierNodes.hasKey(result):
|
||||
# Include C/Nim type imports once if a field/param and not already declared
|
||||
gState.wrapperHeader &= "\n" & gTypeImport[result]
|
||||
gTypeImport.del result
|
||||
|
||||
# Identifier related
|
||||
|
||||
proc checkIdentifier(name, kind, parent, origName: string) =
|
||||
let
|
||||
parentStr = if parent.nBl: parent & ":" else: ""
|
||||
|
||||
if name.nBl:
|
||||
let
|
||||
origStr = if name != origName: &", originally '{origName}' before 'cPlugin:onSymbol()'," else: ""
|
||||
errmsg = &"Identifier '{parentStr}{name}' ({kind}){origStr} $1 " &
|
||||
"which Nim does not allow. Use toast flag '$2' or 'cPlugin()' to modify."
|
||||
|
||||
doAssert name[0] != '_' and name[^1] != '_', errmsg % ["has leading/trailing underscores '_'", "--prefix or --suffix"]
|
||||
|
||||
doAssert (not name.contains("__")): errmsg % ["has consecutive underscores '_'", "--replace"]
|
||||
|
||||
doAssert not name[0].isDigit(), errmsg % [&"starts with a digit '{name[0]}'", "--prefix"]
|
||||
|
||||
# Cannot blank out symbols which are fields or params
|
||||
#
|
||||
# `IgnoreSkipSymbol` is used to `getIdentifier()` even if symbol is in `symOverride` list
|
||||
# so that any prefix/suffix/replace or `onSymbol()` processing can occur. This is only used
|
||||
# for `cOverride()` since it also depends on `symOverride`.
|
||||
if parent.nBl and parent != "IgnoreSkipSymbol":
|
||||
doAssert name.nBl, &"Blank identifier, originally '{parentStr}{origName}' ({kind}), cannot be empty"
|
||||
|
||||
proc getIdentifier*(gState: State, name: string, kind: NimSymKind, parent=""): string =
|
||||
doAssert name.nBl, "Blank identifier error"
|
||||
|
||||
if name notin gState.symOverride or parent.nBl:
|
||||
if gState.onSymbol != nil:
|
||||
# Use onSymbol from plugin provided by user
|
||||
var
|
||||
sym = Symbol(name: name, parent: parent, kind: kind)
|
||||
gState.onSymbol(sym)
|
||||
|
||||
result = sym.name
|
||||
else:
|
||||
result = name
|
||||
|
||||
# Strip out --prefix from CLI if specified
|
||||
for str in gState.prefix:
|
||||
if result.startsWith(str):
|
||||
result = result[str.len .. ^1]
|
||||
|
||||
# Strip out --suffix from CLI if specified
|
||||
for str in gState.suffix:
|
||||
if result.endsWith(str):
|
||||
result = result[0 .. ^(str.len+1)]
|
||||
|
||||
# --replace from CLI if specified
|
||||
for name, value in gState.replace.pairs:
|
||||
if name.len > 1 and name[0] == '@':
|
||||
result = result.replace(re(name[1 .. ^1]), value)
|
||||
else:
|
||||
result = result.replace(name, value)
|
||||
|
||||
checkIdentifier(result, $kind, parent, name)
|
||||
|
||||
if result in gReserved or (result == "object" and kind != nskType):
|
||||
# Enclose in backticks since Nim reserved word
|
||||
result = &"`{result}`"
|
||||
else:
|
||||
# Skip identifier since in symOverride
|
||||
result = ""
|
||||
|
||||
proc getUniqueIdentifier*(gState: State, prefix = ""): string =
|
||||
var
|
||||
name = prefix & "_" & gState.sourceFile.extractFilename().multiReplace([(".", ""), ("-", "")])
|
||||
nimName = name[0] & name[1 .. ^1].replace("_", "").toLowerAscii
|
||||
count = 1
|
||||
|
||||
while (nimName & $count) in gState.identifiers:
|
||||
count += 1
|
||||
|
||||
return name & $count
|
||||
|
||||
proc addNewIdentifer*(gState: State, name: string, override = false): bool =
|
||||
if override or name notin gState.symOverride:
|
||||
let
|
||||
nimName = name[0] & name[1 .. ^1].replace("_", "").toLowerAscii
|
||||
|
||||
if gState.identifiers.hasKey(nimName):
|
||||
doAssert name == gState.identifiers[nimName],
|
||||
&"Identifier '{name}' is a stylistic duplicate of identifier " &
|
||||
&"'{gState.identifiers[nimName]}', use 'cPlugin:onSymbol()' to rename"
|
||||
result = false
|
||||
else:
|
||||
gState.identifiers[nimName] = name
|
||||
result = true
|
||||
|
||||
# Overrides related
|
||||
|
||||
proc getOverride*(gState: State, name: string, kind: NimSymKind): string =
|
||||
# Get cOverride for identifier `name` of `kind` if defined
|
||||
doAssert name.nBl, "Blank identifier error"
|
||||
|
||||
if gState.onSymbolOverride != nil:
|
||||
var
|
||||
nname = gState.getIdentifier(name, kind, "IgnoreSkipSymbol")
|
||||
sym = Symbol(name: nname, kind: kind)
|
||||
if nname.nBl:
|
||||
gState.onSymbolOverride(sym)
|
||||
|
||||
if sym.override.nBl and gState.addNewIdentifer(nname, override = true):
|
||||
result = sym.override
|
||||
|
||||
if kind != nskProc:
|
||||
result = " " & result.replace("\n", "\n ")
|
||||
|
||||
proc getOverrideFinal*(gState: State, kind: NimSymKind): string =
|
||||
# Get all unused cOverride symbols of `kind`
|
||||
let
|
||||
typ = $kind
|
||||
|
||||
if gState.onSymbolOverrideFinal != nil:
|
||||
for i in gState.onSymbolOverrideFinal(typ):
|
||||
result &= "\n" & gState.getOverride(i, kind)
|
||||
|
||||
proc getKeyword*(kind: NimSymKind): string =
|
||||
# Convert `kind` into a Nim keyword
|
||||
# cOverride procs already include `proc` keyword
|
||||
result = ($kind).replace("nsk", "").toLowerAscii()
|
||||
|
||||
proc getCurrentHeader*(fullpath: string): string =
|
||||
("header" & fullpath.splitFile().name.multiReplace([(".", ""), ("-", "")]))
|
||||
|
||||
proc isIncluded(gState: State, file: string): bool {.inline.} =
|
||||
# Check if the specified file should be excluded from wrapped output
|
||||
if gState.exclude.nBl:
|
||||
for excl in gState.exclude:
|
||||
if file.startsWith(excl):
|
||||
return
|
||||
result = true
|
||||
|
||||
proc getPreprocessor*(gState: State, fullpath: string) =
|
||||
# Get preprocessed output from the C/C++ compiler
|
||||
var
|
||||
args: seq[string]
|
||||
start = false
|
||||
sfile = fullpath.sanitizePath(noQuote = true)
|
||||
|
||||
sfileName = sfile.extractFilename()
|
||||
pDir = sfile.expandFilename().parentDir()
|
||||
includeDirs: seq[string]
|
||||
|
||||
args.add @["-E", "-dD", getGccModeArg(gState.mode), "-w"]
|
||||
if not gState.noComments:
|
||||
args.add "-CC"
|
||||
|
||||
for inc in gState.includeDirs:
|
||||
args.add &"-I{inc.sanitizePath}"
|
||||
includeDirs.add inc.absolutePath().sanitizePath(noQuote = true)
|
||||
|
||||
for def in gState.defines:
|
||||
args.add &"-D{def}"
|
||||
|
||||
# Remove gcc special calls
|
||||
# https://github.com/tree-sitter/tree-sitter-c/issues/43
|
||||
args.add @["-D__attribute__(x)=", "-D__restrict=", "-D__restrict__=", "-D__extension__=", "-D__inline__=inline",
|
||||
"-D__inline=inline", "-D_Noreturn=", &"{fullpath.sanitizePath}"]
|
||||
|
||||
# Include content only from file
|
||||
var
|
||||
p = startProcess(getCompiler(), args = args, options = {poStdErrToStdOut, poUsePath})
|
||||
outp = p.outputStream()
|
||||
line = ""
|
||||
newHeaders: HashSet[string]
|
||||
|
||||
# Include content only from file
|
||||
gState.code = ""
|
||||
while true:
|
||||
if outp.readLine(line):
|
||||
# We want to keep blank lines here for comment processing
|
||||
if line.len > 10 and line[0] == '#' and line[1] == ' ' and line.contains('"'):
|
||||
# # 1 "path/to/file.h" 1
|
||||
start = false
|
||||
line = line.split('"')[1].sanitizePath(noQuote = true)
|
||||
if sfile == line or
|
||||
(DirSep notin line and sfileName == line):
|
||||
start = true
|
||||
elif gState.recurse:
|
||||
if (pDir.Bl or pDir in line) and line notin gState.headersProcessed:
|
||||
newHeaders.incl line
|
||||
start = gState.isIncluded(line)
|
||||
else:
|
||||
for inc in includeDirs:
|
||||
if line.startsWith(inc) and line notin gState.headersProcessed:
|
||||
newHeaders.incl line
|
||||
start = gState.isIncluded(line)
|
||||
if start:
|
||||
break
|
||||
elif ": fatal error:" in line:
|
||||
doAssert false,
|
||||
"\n\nFailed in preprocessing, check if `cIncludeDir()` is needed or compiler `mode` is correct (c/cpp)" &
|
||||
"\n\nERROR:$1\n" % line.split(": fatal error:")[1]
|
||||
else:
|
||||
if start:
|
||||
if "#undef" in line:
|
||||
continue
|
||||
gState.code.add line & "\n"
|
||||
elif not p.running(): break
|
||||
p.close()
|
||||
assert p.peekExitCode() == 0,
|
||||
gState.code & "\n\nFailed in preprocessing:\n " &
|
||||
getCompiler() & " " & args.join(" ")
|
||||
gState.headersProcessed.incl newHeaders
|
||||
|
||||
# Plugin related
|
||||
|
||||
proc dll*(path: string): string =
|
||||
let
|
||||
(dir, name, _) = path.splitFile()
|
||||
|
||||
result = dir / (DynlibFormat % name)
|
||||
|
||||
proc loadPlugin*(gState: State, sourcePath: string) =
|
||||
doAssert fileExists(sourcePath), "Plugin file does not exist: " & sourcePath
|
||||
|
||||
let
|
||||
pdll = sourcePath.dll
|
||||
if not fileExists(pdll) or
|
||||
sourcePath.getLastModificationTime() > pdll.getLastModificationTime():
|
||||
let
|
||||
# Get Nim configuration flags if not already specified in a .cfg file
|
||||
flags =
|
||||
if fileExists(sourcePath & ".cfg"): ""
|
||||
else: getNimConfigFlags(getCurrentDir())
|
||||
|
||||
# Always set output to same directory as source, prevents override
|
||||
outflags = &"--out:\"{pdll}\""
|
||||
|
||||
# Compile plugin as library with `markAndSweep` GC
|
||||
cmd = &"{gState.nim} c --app:lib --gc:markAndSweep {flags} {outflags} {sourcePath.sanitizePath}"
|
||||
|
||||
(output, ret) = execAction(cmd, die = false)
|
||||
doAssert ret == 0, output & "\nFailed to compile cPlugin()\n\ncmd: " & cmd
|
||||
doAssert fileExists(pdll), "No plugin binary generated for " & sourcePath
|
||||
|
||||
let lib = loadLib(pdll)
|
||||
doAssert lib != nil, "Plugin $1 compiled to $2 failed to load" % [sourcePath, pdll]
|
||||
|
||||
gState.onSymbol = cast[OnSymbol](lib.symAddr("onSymbol"))
|
||||
|
||||
gState.onSymbolOverride = cast[OnSymbol](lib.symAddr("onSymbolOverride"))
|
||||
|
||||
gState.onSymbolOverrideFinal = cast[OnSymbolOverrideFinal](lib.symAddr("onSymbolOverrideFinal"))
|
||||
|
||||
# Misc toast helpers
|
||||
|
||||
proc getSplitComma*(joined: seq[string]): seq[string] =
|
||||
for i in joined:
|
||||
result = result.concat(i.split(","))
|
||||
|
||||
proc expandSymlinkAbs*(path: string): string =
|
||||
try:
|
||||
result = path.expandFilename().normalizedPath()
|
||||
except:
|
||||
result = path
|
||||
result = result.sanitizePath(noQuote = true)
|
||||
|
|
@ -1,410 +0,0 @@
|
|||
import sets, strformat, strutils
|
||||
|
||||
import ".."/treesitter/[api, c, cpp]
|
||||
import ".."/globals
|
||||
|
||||
template withCodeAst*(code: string, mode: string, body: untyped): untyped =
|
||||
## A simple template to inject the TSNode into a body of code
|
||||
mixin treeSitterC
|
||||
mixin treeSitterCpp
|
||||
|
||||
var parser = tsParserNew()
|
||||
defer:
|
||||
parser.tsParserDelete()
|
||||
|
||||
doAssert code.nBl, "Empty code or preprocessor error"
|
||||
|
||||
if mode == "c":
|
||||
doAssert parser.tsParserSetLanguage(treeSitterC()), "Failed to load C parser"
|
||||
elif mode == "cpp":
|
||||
doAssert parser.tsParserSetLanguage(treeSitterCpp()), "Failed to load C++ parser"
|
||||
else:
|
||||
doAssert false, "Invalid parser " & mode
|
||||
|
||||
var
|
||||
tree = parser.tsParserParseString(nil, code.cstring, code.len.uint32)
|
||||
root {.inject.} = tree.tsTreeRootNode()
|
||||
|
||||
body
|
||||
|
||||
defer:
|
||||
tree.tsTreeDelete()
|
||||
|
||||
proc isNil*(node: TSNode): bool =
|
||||
node.tsNodeIsNull()
|
||||
|
||||
proc len*(node: TSNode): int =
|
||||
if not node.isNil:
|
||||
result = node.tsNodeNamedChildCount().int
|
||||
|
||||
proc `[]`*(node: TSNode, i: SomeInteger): TSNode =
|
||||
if i < type(i)(node.len()):
|
||||
result = node.tsNodeNamedChild(i.uint32)
|
||||
|
||||
proc getName*(node: TSNode): string {.inline.} =
|
||||
if not node.isNil:
|
||||
return $node.tsNodeType()
|
||||
|
||||
proc getNodeVal*(code: var string, node: TSNode): string =
|
||||
if not node.isNil:
|
||||
return code[node.tsNodeStartByte() .. node.tsNodeEndByte()-1]
|
||||
|
||||
proc getNodeVal*(gState: State, node: TSNode): string =
|
||||
gState.code.getNodeVal(node)
|
||||
|
||||
proc getAtom*(node: TSNode): TSNode =
|
||||
if not node.isNil:
|
||||
# Get child node which is topmost atom
|
||||
if node.getName() in gAtoms:
|
||||
return node
|
||||
elif node.len != 0:
|
||||
if node[0].getName() in ["type_qualifier", "comment"]:
|
||||
# Skip const, volatile
|
||||
if node.len > 1:
|
||||
return node[1].getAtom()
|
||||
else:
|
||||
return
|
||||
else:
|
||||
return node[0].getAtom()
|
||||
|
||||
proc getStartAtom*(node: TSNode): int =
|
||||
if not node.isNil:
|
||||
# Skip const, volatile and other type qualifiers
|
||||
for i in 0 .. node.len - 1:
|
||||
if node[i].getAtom().getName() notin gAtoms:
|
||||
result += 1
|
||||
else:
|
||||
break
|
||||
|
||||
proc getConstQualifier*(gState: State, node: TSNode): bool =
|
||||
# Check if node siblings have type_qualifier = `const`
|
||||
var
|
||||
curr = node.tsNodePrevNamedSibling()
|
||||
while not curr.isNil:
|
||||
# Check previous siblings
|
||||
if curr.getName() == "type_qualifier" and
|
||||
gState.getNodeVal(curr) == "const":
|
||||
return true
|
||||
curr = curr.tsNodePrevNamedSibling()
|
||||
|
||||
# Check immediate next sibling
|
||||
curr = node.tsNodePrevNamedSibling()
|
||||
if curr.getName() == "type_qualifier" and
|
||||
gState.getNodeVal(curr) == "const":
|
||||
return true
|
||||
|
||||
proc getXCount*(node: TSNode, ntype: string, reverse = false): int =
|
||||
if not node.isNil:
|
||||
# Get number of ntype nodes nested in tree
|
||||
var
|
||||
cnode = node
|
||||
while ntype in cnode.getName():
|
||||
result += 1
|
||||
if reverse:
|
||||
cnode = cnode.tsNodeParent()
|
||||
else:
|
||||
if cnode.len != 0:
|
||||
if cnode[0].getName() == "type_qualifier":
|
||||
# Skip const, volatile
|
||||
if cnode.len > 1:
|
||||
cnode = cnode[1]
|
||||
else:
|
||||
break
|
||||
else:
|
||||
cnode = cnode[0]
|
||||
else:
|
||||
break
|
||||
|
||||
proc getPtrCount*(node: TSNode, reverse = false): int =
|
||||
node.getXCount("pointer_declarator", reverse)
|
||||
|
||||
proc getArrayCount*(node: TSNode, reverse = false): int =
|
||||
node.getXCount("array_declarator")
|
||||
|
||||
proc getDeclarator*(node: TSNode): TSNode =
|
||||
if not node.isNil:
|
||||
# Return if child is a function or array declarator
|
||||
if node.getName() in ["function_declarator", "array_declarator"]:
|
||||
return node
|
||||
elif node.len != 0:
|
||||
return node[0].getDeclarator()
|
||||
|
||||
proc getVarargs*(node: TSNode): bool =
|
||||
# Detect ... and add {.varargs.}
|
||||
#
|
||||
# `node` is the param list
|
||||
#
|
||||
# ... is an unnamed node, second last node and ) is last node
|
||||
let
|
||||
nlen = node.tsNodeChildCount()
|
||||
if nlen > 1.uint32:
|
||||
let
|
||||
nval = node.tsNodeChild(nlen - 2.uint32).getName()
|
||||
if nval == "...":
|
||||
result = true
|
||||
|
||||
proc firstChildInTree*(node: TSNode, ntype: string): TSNode =
|
||||
# Search for node type in tree - first children
|
||||
var
|
||||
cnode = node
|
||||
while not cnode.isNil:
|
||||
if cnode.getName() == ntype:
|
||||
return cnode
|
||||
if cnode.len != 0:
|
||||
for i in 0 ..< cnode.len:
|
||||
if cnode[i].getName() != "comment":
|
||||
cnode = cnode[i]
|
||||
break
|
||||
else:
|
||||
cnode = cnode[0]
|
||||
|
||||
proc anyChildInTree*(node: TSNode, ntype: string): TSNode =
|
||||
# Search for node type anywhere in tree - depth first
|
||||
var
|
||||
cnode = node
|
||||
while not cnode.isNil:
|
||||
if cnode.getName() == ntype:
|
||||
return cnode
|
||||
for i in 0 ..< cnode.len:
|
||||
let
|
||||
ccnode = cnode[i].anyChildInTree(ntype)
|
||||
if not ccnode.isNil:
|
||||
return ccnode
|
||||
if cnode != node:
|
||||
cnode = cnode.tsNodeNextNamedSibling()
|
||||
else:
|
||||
break
|
||||
|
||||
proc mostNestedChildInTree*(node: TSNode): TSNode =
|
||||
# Search for the most nested child of node's type in tree
|
||||
var
|
||||
cnode = node
|
||||
ntype = cnode.getName()
|
||||
while not cnode.isNil and cnode.len != 0 and cnode[0].getName() == ntype:
|
||||
cnode = cnode[0]
|
||||
result = cnode
|
||||
|
||||
proc inChildren*(node: TSNode, ntype: string): bool =
|
||||
# Search for node type in immediate children
|
||||
result = false
|
||||
for i in 0 ..< node.len:
|
||||
if (node[i]).getName() == ntype:
|
||||
result = true
|
||||
break
|
||||
|
||||
proc getLineCol*(code: var string, node: TSNode): tuple[line, col: int] =
|
||||
# Get line number and column info for node
|
||||
let
|
||||
point = node.tsNodeStartPoint()
|
||||
result.line = point.row.int + 1
|
||||
result.col = point.column.int + 1
|
||||
|
||||
proc getLineCol*(gState: State, node: TSNode): tuple[line, col: int] =
|
||||
getLineCol(gState.code, node)
|
||||
|
||||
proc getEndLineCol*(code: var string, node: TSNode): tuple[line, col: int] =
|
||||
# Get line number and column info for node
|
||||
let
|
||||
point = node.tsNodeEndPoint()
|
||||
result.line = point.row.int + 1
|
||||
result.col = point.column.int + 1
|
||||
|
||||
proc getEndLineCol*(gState: State, node: TSNode): tuple[line, col: int] =
|
||||
getEndLineCol(gState.code, node)
|
||||
|
||||
proc getTSNodeNamedChildCountSansComments*(node: TSNode): int =
|
||||
for i in 0 ..< node.len:
|
||||
if node.getName() != "comment":
|
||||
result += 1
|
||||
|
||||
proc getPxName*(node: TSNode, offset: int): string =
|
||||
# Get the xth (grand)parent of the node
|
||||
var
|
||||
np = node
|
||||
count = 0
|
||||
|
||||
while not np.isNil and count < offset:
|
||||
np = np.tsNodeParent()
|
||||
count += 1
|
||||
|
||||
if count == offset and not np.isNil:
|
||||
return np.getName()
|
||||
|
||||
proc printLisp*(code: var string, root: TSNode): string =
|
||||
var
|
||||
node = root
|
||||
nextnode: TSNode
|
||||
depth = 0
|
||||
|
||||
while true:
|
||||
if not node.isNil and depth > -1:
|
||||
result &= spaces(depth)
|
||||
let
|
||||
(line, col) = code.getLineCol(node)
|
||||
result &= &"({$node.tsNodeType()} {line} {col} {node.tsNodeEndByte() - node.tsNodeStartByte()}"
|
||||
let
|
||||
val = code.getNodeVal(node)
|
||||
if "\n" notin val and " " notin val:
|
||||
result &= &" \"{val}\""
|
||||
else:
|
||||
break
|
||||
|
||||
if node.len() != 0:
|
||||
result &= "\n"
|
||||
nextnode = node[0]
|
||||
depth += 1
|
||||
else:
|
||||
result &= ")\n"
|
||||
nextnode = node.tsNodeNextNamedSibling()
|
||||
|
||||
if nextnode.isNil:
|
||||
while true:
|
||||
node = node.tsNodeParent()
|
||||
depth -= 1
|
||||
if depth == -1:
|
||||
break
|
||||
result &= spaces(depth) & ")\n"
|
||||
if node == root:
|
||||
break
|
||||
if not node.tsNodeNextNamedSibling().isNil:
|
||||
node = node.tsNodeNextNamedSibling()
|
||||
break
|
||||
else:
|
||||
node = nextnode
|
||||
|
||||
if node == root:
|
||||
break
|
||||
|
||||
proc printLisp*(gState: State, root: TSNode): string =
|
||||
printLisp(gState.code, root)
|
||||
|
||||
proc printDebug*(gState: State, node: TSNode) =
|
||||
if gState.debug:
|
||||
gecho ("Input => " & gState.getNodeVal(node)).getCommented()
|
||||
gecho gState.printLisp(node).getCommented()
|
||||
|
||||
proc getCommentsStr*(gState: State, commentNodes: seq[TSNode]): string =
|
||||
## Generate a comment from a set of comment nodes. Comment is guaranteed
|
||||
## to be able to be rendered using nim doc
|
||||
if commentNodes.len > 0:
|
||||
for commentNode in commentNodes:
|
||||
result &= "\n " & gState.getNodeVal(commentNode).strip()
|
||||
|
||||
result = "```\n " & result.multiReplace(
|
||||
{
|
||||
"/**": "", "**/": "", "/*": "",
|
||||
"*/": "", "/*": "", "//": "",
|
||||
"\n": "\n ", "`": ""
|
||||
}
|
||||
# need to replace this last otherwise it supercedes other replacements
|
||||
).replace(" *", "").strip() & "\n```"
|
||||
|
||||
proc getCommentNodes*(gState: State, node: TSNode, maxSearch=1): seq[TSNode] =
|
||||
## Get a set of comment nodes in order of priority. Will search up to ``maxSearch``
|
||||
## nodes before and after the current node
|
||||
##
|
||||
## Priority is (closest line number) > comment before > comment after.
|
||||
## This priority might need to be changed based on the project, but
|
||||
## for now it is good enough
|
||||
|
||||
# Skip this if we don't want comments
|
||||
if gState.noComments:
|
||||
return
|
||||
|
||||
let (line, _) = gState.getLineCol(node)
|
||||
|
||||
# Keep track of both directions from a node
|
||||
var
|
||||
prevSibling = node.tsNodePrevNamedSibling()
|
||||
nextSibling = node.tsNodeNextNamedSibling()
|
||||
nilNode: TSNode
|
||||
|
||||
var
|
||||
i = 0
|
||||
prevSiblingDistance, nextSiblingDistance: int = int.high
|
||||
lowestDistance: int
|
||||
commentsFound = false
|
||||
|
||||
while not commentsFound and i < maxSearch:
|
||||
# Distance from the current node will tell us approximately if the
|
||||
# comment belongs to the node. The closer it is in terms of line
|
||||
# numbers, the more we can be sure it's the comment we want
|
||||
if not prevSibling.isNil:
|
||||
if prevSibling.getName() == "comment":
|
||||
prevSiblingDistance = abs(gState.getEndLineCol(prevSibling)[0] - line)
|
||||
else:
|
||||
prevSiblingDistance = int.high
|
||||
if not nextSibling.isNil:
|
||||
if nextSibling.getName() == "comment":
|
||||
nextSiblingDistance = abs(gState.getLineCol(nextSibling)[0] - line)
|
||||
else:
|
||||
nextSiblingDistance = int.high
|
||||
|
||||
lowestDistance = min(prevSiblingDistance, nextSiblingDistance)
|
||||
|
||||
if prevSiblingDistance > maxSearch:
|
||||
# If the line is out of range, skip searching
|
||||
prevSibling = nilNode # Can't do `= nil`
|
||||
|
||||
if nextSiblingDistance > maxSearch:
|
||||
# If the line is out of range, skip searching
|
||||
nextSibling = nilNode
|
||||
|
||||
# Search above the current line for comments. When one is found
|
||||
# keep going to retrieve successive comments for cases with multiple
|
||||
# `//` style comments
|
||||
while (
|
||||
not prevSibling.isNil and
|
||||
prevSibling.getName() == "comment" and
|
||||
prevSiblingDistance == lowestDistance
|
||||
):
|
||||
# Put the previous nodes in reverse order so the comments
|
||||
# make logical sense
|
||||
result.insert(prevSibling, 0)
|
||||
prevSibling = prevSibling.tsNodePrevNamedSibling()
|
||||
commentsFound = true
|
||||
|
||||
# If we've already found comments above the current line, quit
|
||||
if commentsFound:
|
||||
break
|
||||
|
||||
# Search below or at the current line for comments. When one is found
|
||||
# keep going to retrieve successive comments for cases with multiple
|
||||
# `//` style comments
|
||||
while (
|
||||
not nextSibling.isNil and
|
||||
nextSibling.getName() == "comment" and
|
||||
nextSiblingDistance == lowestDistance
|
||||
):
|
||||
result.add(nextSibling)
|
||||
nextSibling = nextSibling.tsNodeNextNamedSibling()
|
||||
commentsFound = true
|
||||
|
||||
if commentsFound:
|
||||
break
|
||||
|
||||
# Go to next sibling pair
|
||||
if not prevSibling.isNil:
|
||||
prevSibling = prevSibling.tsNodePrevNamedSibling()
|
||||
if not nextSibling.isNil:
|
||||
nextSibling = nextSibling.tsNodeNextNamedSibling()
|
||||
|
||||
i += 1
|
||||
|
||||
proc getTSNodeNamedChildNames*(node: TSNode): seq[string] =
|
||||
if node.tsNodeNamedChildCount() != 0:
|
||||
for i in 0 .. node.tsNodeNamedChildCount()-1:
|
||||
let
|
||||
name = $node.tsNodeNamedChild(i).tsNodeType()
|
||||
|
||||
if name != "comment":
|
||||
result.add(name)
|
||||
|
||||
proc getNodeError*(gState: State, node: TSNode): bool =
|
||||
let
|
||||
err = node.anyChildInTree("ERROR")
|
||||
if not err.isNil:
|
||||
# Bail on errors
|
||||
gState.printDebug(node)
|
||||
gecho &"# tree-sitter parse error: '{gState.getNodeVal(node).splitLines()[0]}', skipped"
|
||||
result = true
|
||||
|
|
@ -2,81 +2,66 @@
|
|||
|
||||
import strutils, os
|
||||
|
||||
include ".."/enumtype
|
||||
import ".."/[paths, setup]
|
||||
import ".."/[setup, paths, types]
|
||||
|
||||
static:
|
||||
treesitterSetup()
|
||||
|
||||
const sourcePath = cacheDir / "treesitter" / "lib"
|
||||
|
||||
when defined(Linux) and defined(gcc):
|
||||
when defined(Linux):
|
||||
{.passC: "-std=c11".}
|
||||
|
||||
{.passC: "-DUTF8PROC_STATIC".}
|
||||
{.passC: "-I$1" % (sourcePath / "include").}
|
||||
{.passC: "-I$1" % (sourcePath / "src").}
|
||||
{.passC: "-I$1" % (sourcePath / ".." / ".." / "utf8proc").}
|
||||
|
||||
{.compile: sourcePath / "src" / "lib.c".}
|
||||
|
||||
### Generated below
|
||||
|
||||
{.push hint[ConvFromXtoItselfNotNeeded]: off.}
|
||||
{.pragma: impapiHdr, header: sourcePath / "include" / "tree_sitter" / "api.h".}
|
||||
{.hint[ConvFromXtoItselfNotNeeded]: off.}
|
||||
|
||||
defineEnum(TSInputEncoding)
|
||||
defineEnum(TSSymbolType)
|
||||
defineEnum(TSLogType)
|
||||
defineEnum(TSQueryPredicateStepType)
|
||||
defineEnum(TSQueryError)
|
||||
|
||||
const
|
||||
TREE_SITTER_LANGUAGE_VERSION* = 11
|
||||
TREE_SITTER_MIN_COMPATIBLE_LANGUAGE_VERSION* = 9
|
||||
TSInputEncodingUTF8* = (0).TSInputEncoding
|
||||
TSInputEncodingUTF16* = (TSInputEncodingUTF8 + 1).TSInputEncoding
|
||||
TSSymbolTypeRegular* = (0).TSSymbolType
|
||||
TSSymbolTypeAnonymous* = (TSSymbolTypeRegular + 1).TSSymbolType
|
||||
TSSymbolTypeAuxiliary* = (TSSymbolTypeAnonymous + 1).TSSymbolType
|
||||
TSLogTypeParse* = (0).TSLogType
|
||||
TSLogTypeLex* = (TSLogTypeParse + 1).TSLogType
|
||||
TSQueryPredicateStepTypeDone* = (0).TSQueryPredicateStepType
|
||||
TSQueryPredicateStepTypeCapture* = (TSQueryPredicateStepTypeDone + 1).TSQueryPredicateStepType
|
||||
TSQueryPredicateStepTypeString* = (TSQueryPredicateStepTypeCapture + 1).TSQueryPredicateStepType
|
||||
TSQueryErrorNone* = (0).TSQueryError
|
||||
TSQueryErrorSyntax* = (TSQueryErrorNone + 1).TSQueryError
|
||||
TSQueryErrorNodeType* = (TSQueryErrorSyntax + 1).TSQueryError
|
||||
TSQueryErrorField* = (TSQueryErrorNodeType + 1).TSQueryError
|
||||
TSQueryErrorCapture* = (TSQueryErrorField + 1).TSQueryError
|
||||
|
||||
headerapi {.used.} = sourcePath / "include" / "tree_sitter" / "api.h"
|
||||
TREE_SITTER_LANGUAGE_VERSION* = 9
|
||||
TSInputEncodingUTF8* = 0.TSInputEncoding
|
||||
TSInputEncodingUTF16* = 1.TSInputEncoding
|
||||
TSSymbolTypeRegular* = 0.TSSymbolType
|
||||
TSSymbolTypeAnonymous* = 1.TSSymbolType
|
||||
TSSymbolTypeAuxiliary* = 2.TSSymbolType
|
||||
TSLogTypeParse* = 0.TSLogType
|
||||
TSLogTypeLex* = 1.TSLogType
|
||||
type
|
||||
TSSymbol* {.importc, impapiHdr.} = uint16
|
||||
TSFieldId* {.importc, impapiHdr.} = uint16
|
||||
TSLanguage* {.importc, impapiHdr, incompleteStruct.} = object
|
||||
TSParser* {.importc, impapiHdr, incompleteStruct.} = object
|
||||
TSTree* {.importc, impapiHdr, incompleteStruct.} = object
|
||||
TSQuery* {.importc, impapiHdr, incompleteStruct.} = object
|
||||
TSQueryCursor* {.importc, impapiHdr, incompleteStruct.} = object
|
||||
TSPoint* {.bycopy, importc, impapiHdr.} = object
|
||||
TSSymbol* = uint16
|
||||
TSLanguage* = object
|
||||
TSParser* = object
|
||||
TSTree* = object
|
||||
TSPoint* {.importc, header: headerapi, bycopy.} = object
|
||||
row*: uint32
|
||||
column*: uint32
|
||||
|
||||
TSRange* {.bycopy, importc, impapiHdr.} = object
|
||||
TSRange* {.importc, header: headerapi, bycopy.} = object
|
||||
start_point*: TSPoint
|
||||
end_point*: TSPoint
|
||||
start_byte*: uint32
|
||||
end_byte*: uint32
|
||||
|
||||
TSInput* {.bycopy, importc, impapiHdr.} = object
|
||||
TSInput* {.importc, header: headerapi, bycopy.} = object
|
||||
payload*: pointer
|
||||
read*: proc (payload: pointer; byte_index: uint32; position: TSPoint;
|
||||
bytes_read: ptr uint32): cstring {.cdecl.}
|
||||
bytes_read: ptr uint32): cstring {.nimcall.}
|
||||
encoding*: TSInputEncoding
|
||||
|
||||
TSLogger* {.bycopy, importc, impapiHdr.} = object
|
||||
TSLogger* {.importc, header: headerapi, bycopy.} = object
|
||||
payload*: pointer
|
||||
log*: proc (payload: pointer; a2: TSLogType; a3: cstring) {.cdecl.}
|
||||
log*: proc (payload: pointer; a1: TSLogType; a2: cstring) {.nimcall.}
|
||||
|
||||
TSInputEdit* {.bycopy, importc, impapiHdr.} = object
|
||||
TSInputEdit* {.importc, header: headerapi, bycopy.} = object
|
||||
start_byte*: uint32
|
||||
old_end_byte*: uint32
|
||||
new_end_byte*: uint32
|
||||
|
|
@ -84,176 +69,106 @@ type
|
|||
old_end_point*: TSPoint
|
||||
new_end_point*: TSPoint
|
||||
|
||||
TSNode* {.bycopy, importc, impapiHdr.} = object
|
||||
TSNode* {.importc, header: headerapi, bycopy.} = object
|
||||
context*: array[4, uint32]
|
||||
id*: pointer
|
||||
tree*: ptr TSTree
|
||||
|
||||
TSTreeCursor* {.bycopy, importc, impapiHdr.} = object
|
||||
TSTreeCursor* {.importc, header: headerapi, bycopy.} = object
|
||||
tree*: pointer
|
||||
id*: pointer
|
||||
context*: array[2, uint32]
|
||||
|
||||
TSQueryCapture* {.bycopy, importc, impapiHdr.} = object
|
||||
node*: TSNode
|
||||
index*: uint32
|
||||
|
||||
TSQueryMatch* {.bycopy, importc, impapiHdr.} = object
|
||||
id*: uint32
|
||||
pattern_index*: uint16
|
||||
capture_count*: uint16
|
||||
captures*: ptr TSQueryCapture
|
||||
|
||||
TSQueryPredicateStep* {.bycopy, importc, impapiHdr.} = object
|
||||
`type`*: TSQueryPredicateStepType
|
||||
value_id*: uint32
|
||||
|
||||
proc ts_parser_new*(): ptr TSParser {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_delete*(parser: ptr TSParser) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_set_language*(self: ptr TSParser; language: ptr TSLanguage): bool {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_language*(self: ptr TSParser): ptr TSLanguage {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_set_included_ranges*(self: ptr TSParser; ranges: ptr TSRange;
|
||||
length: uint32) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_included_ranges*(self: ptr TSParser; length: ptr uint32): ptr TSRange {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_parse*(self: ptr TSParser; old_tree: ptr TSTree; input: TSInput): ptr TSTree {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_parse_string*(self: ptr TSParser; old_tree: ptr TSTree; string: cstring;
|
||||
length: uint32): ptr TSTree {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_parse_string_encoding*(self: ptr TSParser; old_tree: ptr TSTree;
|
||||
string: cstring; length: uint32;
|
||||
encoding: TSInputEncoding): ptr TSTree {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_reset*(self: ptr TSParser) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_set_timeout_micros*(self: ptr TSParser; timeout: uint64) {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_parser_timeout_micros*(self: ptr TSParser): uint64 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_set_cancellation_flag*(self: ptr TSParser; flag: ptr uint) {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_parser_cancellation_flag*(self: ptr TSParser): ptr uint {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_parser_set_logger*(self: ptr TSParser; logger: TSLogger) {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_parser_logger*(self: ptr TSParser): TSLogger {.importc, cdecl, impapiHdr.}
|
||||
proc ts_parser_print_dot_graphs*(self: ptr TSParser; file: cint) {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_parser_halt_on_error*(self: ptr TSParser; halt: bool) {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_tree_copy*(self: ptr TSTree): ptr TSTree {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_delete*(self: ptr TSTree) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_root_node*(self: ptr TSTree): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_language*(a1: ptr TSTree): ptr TSLanguage {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_edit*(self: ptr TSTree; edit: ptr TSInputEdit) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_get_changed_ranges*(old_tree: ptr TSTree; new_tree: ptr TSTree;
|
||||
length: ptr uint32): ptr TSRange {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_tree_print_dot_graph*(a1: ptr TSTree; a2: File) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_type*(a1: TSNode): cstring {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_symbol*(a1: TSNode): TSSymbol {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_start_byte*(a1: TSNode): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_start_point*(a1: TSNode): TSPoint {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_end_byte*(a1: TSNode): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_end_point*(a1: TSNode): TSPoint {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_string*(a1: TSNode): cstring {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_is_null*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_is_named*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_is_missing*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_is_extra*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_has_changes*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_has_error*(a1: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_parent*(a1: TSNode): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_child*(a1: TSNode; a2: uint32): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_child_count*(a1: TSNode): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_named_child*(a1: TSNode; a2: uint32): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_named_child_count*(a1: TSNode): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_child_by_field_name*(self: TSNode; field_name: cstring;
|
||||
field_name_length: uint32): TSNode {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_node_child_by_field_id*(a1: TSNode; a2: TSFieldId): TSNode {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_node_next_sibling*(a1: TSNode): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_prev_sibling*(a1: TSNode): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_next_named_sibling*(a1: TSNode): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_prev_named_sibling*(a1: TSNode): TSNode {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_first_child_for_byte*(a1: TSNode; a2: uint32): TSNode {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_parser_new*(): ptr TSParser {.importc, header: headerapi.}
|
||||
proc ts_parser_delete*(a1: ptr TSParser) {.importc, header: headerapi.}
|
||||
proc ts_parser_language*(a1: ptr TSParser): ptr TSLanguage {.importc, header: headerapi.}
|
||||
proc ts_parser_set_language*(a1: ptr TSParser; a2: ptr TSLanguage): bool {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_parser_logger*(a1: ptr TSParser): TSLogger {.importc, header: headerapi.}
|
||||
proc ts_parser_set_logger*(a1: ptr TSParser; a2: TSLogger) {.importc, header: headerapi.}
|
||||
proc ts_parser_print_dot_graphs*(a1: ptr TSParser; a2: cint) {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_parser_halt_on_error*(a1: ptr TSParser; a2: bool) {.importc, header: headerapi.}
|
||||
proc ts_parser_parse*(a1: ptr TSParser; a2: ptr TSTree; a3: TSInput): ptr TSTree {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_parser_parse_string*(a1: ptr TSParser; a2: ptr TSTree; a3: cstring; a4: uint32): ptr TSTree {.
|
||||
importc, header: headerapi.}
|
||||
proc ts_parser_parse_string_encoding*(a1: ptr TSParser; a2: ptr TSTree; a3: cstring;
|
||||
a4: uint32; a5: TSInputEncoding): ptr TSTree {.
|
||||
importc, header: headerapi.}
|
||||
proc ts_parser_enabled*(a1: ptr TSParser): bool {.importc, header: headerapi.}
|
||||
proc ts_parser_set_enabled*(a1: ptr TSParser; a2: bool) {.importc, header: headerapi.}
|
||||
proc ts_parser_operation_limit*(a1: ptr TSParser): cuint {.importc, header: headerapi.}
|
||||
proc ts_parser_set_operation_limit*(a1: ptr TSParser; a2: cuint) {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_parser_reset*(a1: ptr TSParser) {.importc, header: headerapi.}
|
||||
proc ts_parser_set_included_ranges*(a1: ptr TSParser; a2: ptr TSRange; a3: uint32) {.
|
||||
importc, header: headerapi.}
|
||||
proc ts_parser_included_ranges*(a1: ptr TSParser; a2: ptr uint32): ptr TSRange {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_copy*(a1: ptr TSTree): ptr TSTree {.importc, header: headerapi.}
|
||||
proc ts_tree_delete*(a1: ptr TSTree) {.importc, header: headerapi.}
|
||||
proc ts_tree_root_node*(a1: ptr TSTree): TSNode {.importc, header: headerapi.}
|
||||
proc ts_tree_edit*(a1: ptr TSTree; a2: ptr TSInputEdit) {.importc, header: headerapi.}
|
||||
proc ts_tree_get_changed_ranges*(a1: ptr TSTree; a2: ptr TSTree; a3: ptr uint32): ptr TSRange {.
|
||||
importc, header: headerapi.}
|
||||
proc ts_tree_print_dot_graph*(a1: ptr TSTree; a2: ptr FILE) {.importc, header: headerapi.}
|
||||
proc ts_tree_language*(a1: ptr TSTree): ptr TSLanguage {.importc, header: headerapi.}
|
||||
proc ts_node_start_byte*(a1: TSNode): uint32 {.importc, header: headerapi.}
|
||||
proc ts_node_start_point*(a1: TSNode): TSPoint {.importc, header: headerapi.}
|
||||
proc ts_node_end_byte*(a1: TSNode): uint32 {.importc, header: headerapi.}
|
||||
proc ts_node_end_point*(a1: TSNode): TSPoint {.importc, header: headerapi.}
|
||||
proc ts_node_symbol*(a1: TSNode): TSSymbol {.importc, header: headerapi.}
|
||||
proc ts_node_type*(a1: TSNode): cstring {.importc, header: headerapi.}
|
||||
proc ts_node_string*(a1: TSNode): cstring {.importc, header: headerapi.}
|
||||
proc ts_node_eq*(a1: TSNode; a2: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_is_null*(a1: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_is_named*(a1: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_is_missing*(a1: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_has_changes*(a1: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_has_error*(a1: TSNode): bool {.importc, header: headerapi.}
|
||||
proc ts_node_parent*(a1: TSNode): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_child*(a1: TSNode; a2: uint32): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_named_child*(a1: TSNode; a2: uint32): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_child_count*(a1: TSNode): uint32 {.importc, header: headerapi.}
|
||||
proc ts_node_named_child_count*(a1: TSNode): uint32 {.importc, header: headerapi.}
|
||||
proc ts_node_next_sibling*(a1: TSNode): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_next_named_sibling*(a1: TSNode): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_prev_sibling*(a1: TSNode): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_prev_named_sibling*(a1: TSNode): TSNode {.importc, header: headerapi.}
|
||||
proc ts_node_first_child_for_byte*(a1: TSNode; a2: uint32): TSNode {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_node_first_named_child_for_byte*(a1: TSNode; a2: uint32): TSNode {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
header: headerapi.}
|
||||
proc ts_node_descendant_for_byte_range*(a1: TSNode; a2: uint32; a3: uint32): TSNode {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_node_descendant_for_point_range*(a1: TSNode; a2: TSPoint; a3: TSPoint): TSNode {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
importc, header: headerapi.}
|
||||
proc ts_node_named_descendant_for_byte_range*(a1: TSNode; a2: uint32; a3: uint32): TSNode {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
importc, header: headerapi.}
|
||||
proc ts_node_descendant_for_point_range*(a1: TSNode; a2: TSPoint; a3: TSPoint): TSNode {.
|
||||
importc, header: headerapi.}
|
||||
proc ts_node_named_descendant_for_point_range*(a1: TSNode; a2: TSPoint; a3: TSPoint): TSNode {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_node_edit*(a1: ptr TSNode; a2: ptr TSInputEdit) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_node_eq*(a1: TSNode; a2: TSNode): bool {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_new*(a1: TSNode): TSTreeCursor {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_delete*(a1: ptr TSTreeCursor) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_reset*(a1: ptr TSTreeCursor; a2: TSNode) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_current_node*(a1: ptr TSTreeCursor): TSNode {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_tree_cursor_current_field_name*(a1: ptr TSTreeCursor): cstring {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_current_field_id*(a1: ptr TSTreeCursor): TSFieldId {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_goto_parent*(a1: ptr TSTreeCursor): bool {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_tree_cursor_goto_next_sibling*(a1: ptr TSTreeCursor): bool {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_tree_cursor_goto_first_child*(a1: ptr TSTreeCursor): bool {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
importc, header: headerapi.}
|
||||
proc ts_node_edit*(a1: ptr TSNode; a2: ptr TSInputEdit) {.importc, header: headerapi.}
|
||||
proc ts_tree_cursor_new*(a1: TSNode): TSTreeCursor {.importc, header: headerapi.}
|
||||
proc ts_tree_cursor_delete*(a1: ptr TSTreeCursor) {.importc, header: headerapi.}
|
||||
proc ts_tree_cursor_reset*(a1: ptr TSTreeCursor; a2: TSNode) {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_cursor_current_node*(a1: ptr TSTreeCursor): TSNode {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_cursor_goto_parent*(a1: ptr TSTreeCursor): bool {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_cursor_goto_next_sibling*(a1: ptr TSTreeCursor): bool {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_cursor_goto_first_child*(a1: ptr TSTreeCursor): bool {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_tree_cursor_goto_first_child_for_byte*(a1: ptr TSTreeCursor; a2: uint32): int64 {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_tree_cursor_copy*(a1: ptr TSTreeCursor): TSTreeCursor {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_query_new*(language: ptr TSLanguage; source: cstring; source_len: uint32;
|
||||
error_offset: ptr uint32; error_type: ptr TSQueryError): ptr TSQuery {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_delete*(a1: ptr TSQuery) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_pattern_count*(a1: ptr TSQuery): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_capture_count*(a1: ptr TSQuery): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_string_count*(a1: ptr TSQuery): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_start_byte_for_pattern*(a1: ptr TSQuery; a2: uint32): uint32 {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_query_predicates_for_pattern*(self: ptr TSQuery; pattern_index: uint32;
|
||||
length: ptr uint32): ptr TSQueryPredicateStep {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_capture_name_for_id*(a1: ptr TSQuery; id: uint32; length: ptr uint32): cstring {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_string_value_for_id*(a1: ptr TSQuery; id: uint32; length: ptr uint32): cstring {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_disable_capture*(a1: ptr TSQuery; a2: cstring; a3: uint32) {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_new*(): ptr TSQueryCursor {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_delete*(a1: ptr TSQueryCursor) {.importc, cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_exec*(a1: ptr TSQueryCursor; a2: ptr TSQuery; a3: TSNode) {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_set_byte_range*(a1: ptr TSQueryCursor; a2: uint32; a3: uint32) {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_set_point_range*(a1: ptr TSQueryCursor; a2: TSPoint; a3: TSPoint) {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_next_match*(a1: ptr TSQueryCursor; match: ptr TSQueryMatch): bool {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_query_cursor_remove_match*(a1: ptr TSQueryCursor; id: uint32) {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_query_cursor_next_capture*(a1: ptr TSQueryCursor; match: ptr TSQueryMatch;
|
||||
capture_index: ptr uint32): bool {.importc, cdecl,
|
||||
impapiHdr.}
|
||||
proc ts_language_symbol_count*(a1: ptr TSLanguage): uint32 {.importc, cdecl, impapiHdr.}
|
||||
importc, header: headerapi.}
|
||||
proc ts_language_symbol_count*(a1: ptr TSLanguage): uint32 {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_language_symbol_name*(a1: ptr TSLanguage; a2: TSSymbol): cstring {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_language_symbol_for_name*(self: ptr TSLanguage; string: cstring;
|
||||
length: uint32; is_named: bool): TSSymbol {.importc,
|
||||
cdecl, impapiHdr.}
|
||||
proc ts_language_field_count*(a1: ptr TSLanguage): uint32 {.importc, cdecl, impapiHdr.}
|
||||
proc ts_language_field_name_for_id*(a1: ptr TSLanguage; a2: TSFieldId): cstring {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_language_field_id_for_name*(a1: ptr TSLanguage; a2: cstring; a3: uint32): TSFieldId {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
header: headerapi.}
|
||||
proc ts_language_symbol_for_name*(a1: ptr TSLanguage; a2: cstring): TSSymbol {.importc,
|
||||
header: headerapi.}
|
||||
proc ts_language_symbol_type*(a1: ptr TSLanguage; a2: TSSymbol): TSSymbolType {.
|
||||
importc, cdecl, impapiHdr.}
|
||||
proc ts_language_version*(a1: ptr TSLanguage): uint32 {.importc, cdecl, impapiHdr.}
|
||||
{.pop.}
|
||||
importc, header: headerapi.}
|
||||
proc ts_language_version*(a1: ptr TSLanguage): uint32 {.importc, header: headerapi.}
|
||||
|
|
|
|||
|
|
@ -13,4 +13,4 @@ import "."/api
|
|||
|
||||
{.compile: srcDir / "parser.c".}
|
||||
|
||||
proc treeSitterC*(): ptr TSLanguage {.importc: "tree_sitter_c".}
|
||||
proc treeSitterC*(): ptr TSLanguage {.importc: "tree_sitter_c", header: srcDir / "api.h".}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
import strutils, os
|
||||
|
||||
import ".."/[setup, paths]
|
||||
import ".."/build/shell
|
||||
import ".."/[build, setup, paths]
|
||||
|
||||
static:
|
||||
treesitterCppSetup()
|
||||
|
|
@ -18,4 +17,4 @@ static:
|
|||
{.compile: srcDir / "parser_cpp.c".}
|
||||
{.compile: srcDir / "scanner.cc".}
|
||||
|
||||
proc treeSitterCpp*(): ptr TSLanguage {.importc: "tree_sitter_cpp".}
|
||||
proc treeSitterCpp*(): ptr TSLanguage {.importc: "tree_sitter_cpp", header: srcDir / "api.h".}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,14 @@ import os
|
|||
|
||||
import nimterop/[cimport, paths]
|
||||
|
||||
cPlugin:
|
||||
import strutils
|
||||
|
||||
proc onSymbol*(sym: var Symbol) {.exportc, dynlib.} =
|
||||
if "_CRT" in sym.name:
|
||||
sym.name = sym.name.strip(chars={'_'})
|
||||
|
||||
static:
|
||||
cDebug()
|
||||
|
||||
cImport(cacheDir / "treesitter" / "lib" / "include" / "tree_sitter" / "api.h", flags = "-E_ -c")
|
||||
cImport(cacheDir / "treesitter" /"lib" / "include" / "tree_sitter" / "api.h")
|
||||
|
|
|
|||
28
nimterop/tshelp.nim
Normal file
28
nimterop/tshelp.nim
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
import "."/treesitter/[c, cpp]
|
||||
|
||||
template withCodeAst*(code: string, mode: string, body: untyped): untyped =
|
||||
## A simple template to inject the TSNode into a body of code
|
||||
mixin treeSitterC
|
||||
mixin treeSitterCpp
|
||||
|
||||
var parser = tsParserNew()
|
||||
defer:
|
||||
parser.tsParserDelete()
|
||||
|
||||
doAssert code.nBl, "Empty code or preprocessor error"
|
||||
|
||||
if mode == "c":
|
||||
doAssert parser.tsParserSetLanguage(treeSitterC()), "Failed to load C parser"
|
||||
elif mode == "cpp":
|
||||
doAssert parser.tsParserSetLanguage(treeSitterCpp()), "Failed to load C++ parser"
|
||||
else:
|
||||
doAssert false, "Invalid parser " & mode
|
||||
|
||||
var
|
||||
tree = parser.tsParserParseString(nil, code.cstring, code.len.uint32)
|
||||
root {.inject.} = tree.tsTreeRootNode()
|
||||
|
||||
body
|
||||
|
||||
defer:
|
||||
tree.tsTreeDelete()
|
||||
74
nimterop/types.nim
Normal file
74
nimterop/types.nim
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
# see https://github.com/nimterop/nimterop/issues/79
|
||||
|
||||
import std/time_t as time_t_temp
|
||||
type
|
||||
time_t* = time_t_temp.Time
|
||||
time64_t* = time_t_temp.Time
|
||||
|
||||
when defined(cpp):
|
||||
# http://www.cplusplus.com/reference/cwchar/wchar_t/
|
||||
# In C++, wchar_t is a distinct fundamental type (and thus it is
|
||||
# not defined in <cwchar> nor any other header).
|
||||
type
|
||||
wchar_t* {.importc.} = object
|
||||
else:
|
||||
type
|
||||
wchar_t* {.importc, header:"<cwchar>".} = object
|
||||
|
||||
type
|
||||
ptrdiff_t* = ByteAddress
|
||||
|
||||
type
|
||||
va_list* {.importc, header:"<stdarg.h>".} = object
|
||||
|
||||
template enumOp*(op, typ, typout) =
|
||||
proc op*(x: typ, y: cint): typout {.borrow.}
|
||||
proc op*(x: cint, y: typ): typout {.borrow.}
|
||||
proc op*(x, y: typ): typout {.borrow.}
|
||||
|
||||
proc op*(x: typ, y: int): typout = op(x, y.cint)
|
||||
proc op*(x: int, y: typ): typout = op(x.cint, y)
|
||||
|
||||
template defineEnum*(typ) =
|
||||
# Create a `distinct cint` type for C enums since Nim enums
|
||||
# need to be in order and cannot have duplicates.
|
||||
type
|
||||
typ* = distinct cint
|
||||
|
||||
# Enum operations allowed
|
||||
enumOp(`+`, typ, typ)
|
||||
enumOp(`-`, typ, typ)
|
||||
enumOp(`*`, typ, typ)
|
||||
enumOp(`<`, typ, bool)
|
||||
enumOp(`<=`, typ, bool)
|
||||
enumOp(`==`, typ, bool)
|
||||
enumOp(`div`, typ, typ)
|
||||
enumOp(`mod`, typ, typ)
|
||||
|
||||
# These don't work with `enumOp()` for some reason
|
||||
proc `shl`*(x: typ, y: cint): typ {.borrow.}
|
||||
proc `shl`*(x: cint, y: typ): typ {.borrow.}
|
||||
proc `shl`*(x, y: typ): typ {.borrow.}
|
||||
|
||||
proc `shr`*(x: typ, y: cint): typ {.borrow.}
|
||||
proc `shr`*(x: cint, y: typ): typ {.borrow.}
|
||||
proc `shr`*(x, y: typ): typ {.borrow.}
|
||||
|
||||
proc `or`*(x: typ, y: cint): typ {.borrow.}
|
||||
proc `or`*(x: cint, y: typ): typ {.borrow.}
|
||||
proc `or`*(x, y: typ): typ {.borrow.}
|
||||
|
||||
proc `and`*(x: typ, y: cint): typ {.borrow.}
|
||||
proc `and`*(x: cint, y: typ): typ {.borrow.}
|
||||
proc `and`*(x, y: typ): typ {.borrow.}
|
||||
|
||||
proc `xor`*(x: typ, y: cint): typ {.borrow.}
|
||||
proc `xor`*(x: cint, y: typ): typ {.borrow.}
|
||||
proc `xor`*(x, y: typ): typ {.borrow.}
|
||||
|
||||
proc `/`*(x, y: typ): typ =
|
||||
return (x.float / y.float).cint.typ
|
||||
proc `/`*(x: typ, y: cint): typ = `/`(x, y.typ)
|
||||
proc `/`*(x: cint, y: typ): typ = `/`(x.typ, y)
|
||||
|
||||
proc `$`*(x: typ): string {.borrow.}
|
||||
|
|
@ -14,19 +14,13 @@ proc testCall(cmd, output: string, exitCode: int, delete = true) =
|
|||
doAssert outp.contains(output), outp
|
||||
|
||||
var
|
||||
cmd = "nim c -f --hints:off -d:checkAbi"
|
||||
cmd = "nim c -f --hints:off -d:FLAGS=\"-f:ast2\""
|
||||
lrcmd = " -r lzma.nim"
|
||||
zrcmd = " -r zlib.nim"
|
||||
sshcmd = " -r libssh2.nim"
|
||||
lexp = "liblzma version = "
|
||||
zexp = "zlib version = "
|
||||
|
||||
when (NimMajor, NimMinor, NimPatch) >= (1, 2, 0):
|
||||
cmd &= " --gc:arc"
|
||||
|
||||
testCall(cmd & lrcmd, "No build files found", 1)
|
||||
testCall(cmd & " -d:libssh2Conan" & sshcmd, "Need version for Conan.io uri", 1)
|
||||
testCall(cmd & " -d:libssh2JBB -d:libssh2SetVer=1.9.0" & sshcmd, "Version in both uri", 1)
|
||||
|
||||
when defined(posix):
|
||||
# stdlib
|
||||
|
|
@ -41,17 +35,10 @@ when defined(posix):
|
|||
testCall(cmd & " -d:lzmaGit -d:lzmaSetVer=v5.2.0" & lrcmd, lexp & "5.2.0", 0)
|
||||
testCall(cmd & " -d:lzmaGit -d:lzmaStatic -d:lzmaSetVer=v5.2.0" & lrcmd, lexp & "5.2.0", 0, delete = false)
|
||||
|
||||
# conan static
|
||||
testCall(cmd & " -d:libssh2Conan -d:libssh2SetVer=1.9.0 -d:libssh2Static" & sshcmd, zexp, 0)
|
||||
else:
|
||||
# conan static for Windows
|
||||
testCall(cmd & " -d:zlibConan -d:zlibSetVer=1.2.11 -d:zlibStatic" & zrcmd, zexp, 0)
|
||||
|
||||
# JBB
|
||||
testCall(cmd & " -d:libssh2JBB" & sshcmd, zexp, 0)
|
||||
testCall(cmd & " -d:zlibJBB -d:zlibSetVer=1.2.11" & zrcmd, zexp, 0)
|
||||
testCall(cmd & " -d:zlibJBB -d:zlibSetVer=1.2.11 -d:zlibStatic" & zrcmd, zexp, 0, delete = false)
|
||||
testCall(cmd & " -d:lzmaJBB -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0)
|
||||
# dl - remove from Windows to save some time
|
||||
testCall(cmd & " -d:lzmaDL" & lrcmd, "Need version", 1)
|
||||
testCall(cmd & " -d:lzmaDL -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0)
|
||||
testCall(cmd & " -d:lzmaDL -d:lzmaStatic -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0, delete = false)
|
||||
|
||||
# git
|
||||
testCall(cmd & " -d:envTest" & zrcmd, zexp, 0)
|
||||
|
|
@ -61,15 +48,6 @@ testCall(cmd & " -d:envTestStatic" & zrcmd, zexp, 0, delete = false)
|
|||
testCall(cmd & " -d:zlibGit -d:zlibSetVer=v1.2.10" & zrcmd, zexp & "1.2.10", 0)
|
||||
testCall(cmd & " -d:zlibGit -d:zlibStatic -d:zlibSetVer=v1.2.10" & zrcmd, zexp & "1.2.10", 0, delete = false)
|
||||
|
||||
# dl
|
||||
testCall(cmd & " -d:lzmaDL" & lrcmd, "Need version", 1)
|
||||
testCall(cmd & " -d:lzmaDL -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0)
|
||||
testCall(cmd & " -d:lzmaDL -d:lzmaStatic -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0, delete = false)
|
||||
|
||||
# dl
|
||||
testCall(cmd & " -d:zlibDL -d:zlibSetVer=1.2.11" & zrcmd, zexp & "1.2.11", 0)
|
||||
testCall(cmd & " -d:zlibDL -d:zlibStatic -d:zlibSetVer=1.2.11" & zrcmd, zexp & "1.2.11", 0, delete = false)
|
||||
|
||||
# conan
|
||||
testCall(cmd & " -d:libssh2Conan -d:libssh2SetVer=1.9.0" & sshcmd, zexp, 0)
|
||||
testCall(cmd & " -d:lzmaConan -d:lzmaSetVer=5.2.4" & lrcmd, lexp & "5.2.4", 0)
|
||||
|
|
@ -7,7 +7,6 @@ extern "C" {
|
|||
#define C 0x10
|
||||
#define D "hello"
|
||||
#define E 'c'
|
||||
#define F 01234
|
||||
|
||||
#define UEXPR (1234u << 1)
|
||||
#define ULEXPR (1234ul << 2)
|
||||
|
|
@ -21,8 +20,6 @@ extern "C" {
|
|||
#define COERCE 645635634896ull + 35436
|
||||
#define COERCE2 645635634896 + 35436ul
|
||||
#define BINEXPR ~(-(1u << !-1)) ^ (10 >> 1)
|
||||
#define POINTEREXPR (int*)0
|
||||
#define POINTERPOINTERPOINTEREXPR (int***)0
|
||||
#define BOOL true
|
||||
#define MATHEXPR (1 + 2/3*20 - 100)
|
||||
#define ANDEXPR (100 & 11000)
|
||||
|
|
@ -35,7 +32,6 @@ extern "C" {
|
|||
#define EQ4 AVAL < BVAL
|
||||
#define EQ5 AVAL != BVAL
|
||||
#define EQ6 AVAL == BVAL
|
||||
#define SX_NEAR_ZERO (1.0f / (1 << 28))
|
||||
|
||||
// testing integer out of long int range
|
||||
#define INT_FAST16_MIN (-9223372036854775807L-1)
|
||||
|
|
@ -44,30 +40,13 @@ extern "C" {
|
|||
#define REG_STR "regular string"
|
||||
#define NOTSUPPORTEDSTR "not a " REG_STR
|
||||
|
||||
#define NULLCHAR '\0'
|
||||
#define OCTCHAR '\012'
|
||||
#define NULLCHAR '\0'/* comments should not break things*/
|
||||
#define OCTCHAR '\012' // nor should this comment
|
||||
#define HEXCHAR '\xFE'
|
||||
#define TRICKYSTR "\x4E\034\nfoo\0\'\"\r\v\a\b\e\f\t\\\?bar"
|
||||
|
||||
#define ALLSHL (SHL1 | SHL2 | SHL3)
|
||||
|
||||
#ifdef NIMTEROP
|
||||
#define SOME_CONST 8
|
||||
#endif
|
||||
|
||||
struct some_struct_s
|
||||
{
|
||||
int x;
|
||||
};
|
||||
|
||||
struct parent_struct_s
|
||||
{
|
||||
/* Random comment */
|
||||
struct some_struct_s s[SOME_CONST];
|
||||
};
|
||||
|
||||
typedef struct some_struct_s SOME_ARRAY[SOME_CONST];
|
||||
|
||||
struct A0;
|
||||
struct A1 {};
|
||||
typedef struct A2;
|
||||
|
|
@ -93,10 +72,10 @@ typedef char *(*A11)[3];
|
|||
typedef struct A0 *A111[12];
|
||||
|
||||
typedef int
|
||||
**(*A12)(int, int b, int *c, int *, int /*out*/ *count[4], int (*func)(int, int)),
|
||||
**(*A12)(int, int b, int *c, int *, int *count[4], int (*func)(int, int)),
|
||||
**(*A121)(float, float b, float *c, float *, float *count[4], float (*func)(float, float)),
|
||||
**(*A122)(char, char b, char *c, char *, char *count[4], char (*func)(char, char));
|
||||
typedef int (*A13)(int, int, void (*func)(void));
|
||||
typedef int A13(int, int, void (*func)(void));
|
||||
|
||||
struct A14 { volatile char a1; };
|
||||
struct A15 { char *a1; const int *a2[1]; };
|
||||
|
|
@ -110,9 +89,6 @@ typedef struct A20 { char a1; } A20, A21, *A21p;
|
|||
//Expression
|
||||
typedef struct A22 { const int **f1; int *f2[123+132]; } A22;
|
||||
|
||||
// #231
|
||||
typedef const char *(*A23)();
|
||||
|
||||
//Unions
|
||||
union U1 {int f1; float f2; };
|
||||
typedef union U2 { const int **f1; int abc[123+132]; } U2;
|
||||
|
|
@ -244,7 +220,6 @@ typedef struct {
|
|||
struct { int f1; } f2;
|
||||
|
||||
struct NT3 {
|
||||
/* Random comment */
|
||||
struct {
|
||||
int f1;
|
||||
union NU1 {
|
||||
|
|
@ -266,106 +241,17 @@ static inline int sitest1(int f1) {
|
|||
return f1 * 2;
|
||||
}
|
||||
|
||||
// Issue #196
|
||||
typedef int MyInt;
|
||||
struct TestMyInt {
|
||||
MyInt f1;
|
||||
};
|
||||
|
||||
// Issue #237
|
||||
typedef union sx_ivec3 {
|
||||
struct {
|
||||
int x;
|
||||
int y;
|
||||
struct {
|
||||
int z;
|
||||
};
|
||||
};
|
||||
|
||||
int n[3];
|
||||
} sx_ivec3;
|
||||
|
||||
// Issue #236
|
||||
enum {
|
||||
SG_INVALID_ID = 0,
|
||||
SG_NUM_SHADER_STAGES = 2,
|
||||
SG_MAX_MIPMAPS = 16,
|
||||
SG_MAX_TEXTUREARRAY_LAYERS = 128
|
||||
};
|
||||
|
||||
struct parenpoin {
|
||||
void (*gtk_reserved1);
|
||||
};
|
||||
|
||||
|
||||
// DUPLICATES
|
||||
|
||||
#ifdef NOHEADER
|
||||
#ifndef HEADER
|
||||
|
||||
#define A 1
|
||||
#define B 1.0
|
||||
#define C 0x10
|
||||
#define D "hello"
|
||||
#define E 'c'
|
||||
#define F 01234
|
||||
|
||||
#define UEXPR (1234u << 1)
|
||||
#define ULEXPR (1234ul << 2)
|
||||
#define ULLEXPR (1234ull << 3)
|
||||
#define LEXPR (1234l << 4)
|
||||
#define LLEXPR (1234ll << 5)
|
||||
|
||||
#define SHL1 (1u << 1)
|
||||
#define SHL2 (1u << 2)
|
||||
#define SHL3 (1u << 3)
|
||||
#define COERCE 645635634896ull + 35436
|
||||
#define COERCE2 645635634896 + 35436ul
|
||||
#define BINEXPR ~(-(1u << !-1)) ^ (10 >> 1)
|
||||
#define POINTEREXPR (int*)0
|
||||
#define POINTERPOINTERPOINTEREXPR (int***)0
|
||||
#define BOOL true
|
||||
#define MATHEXPR (1 + 2/3*20 - 100)
|
||||
#define ANDEXPR (100 & 11000)
|
||||
#define CASTEXPR (char) 34
|
||||
#define AVAL 100
|
||||
#define BVAL 200
|
||||
#define EQ1 AVAL <= BVAL
|
||||
#define EQ2 AVAL >= BVAL
|
||||
#define EQ3 AVAL > BVAL
|
||||
#define EQ4 AVAL < BVAL
|
||||
#define EQ5 AVAL != BVAL
|
||||
#define EQ6 AVAL == BVAL
|
||||
|
||||
// testing integer out of long int range
|
||||
#define INT_FAST16_MIN (-9223372036854775807L-1)
|
||||
|
||||
#define SIZEOF sizeof(char)
|
||||
#define REG_STR "regular string"
|
||||
#define NOTSUPPORTEDSTR "not a " REG_STR
|
||||
|
||||
#define NULLCHAR '\0'
|
||||
#define OCTCHAR '\012'
|
||||
#define HEXCHAR '\xFE'
|
||||
#define TRICKYSTR "\x4E\034\nfoo\0\'\"\r\v\a\b\e\f\t\\\?bar"
|
||||
|
||||
#define ALLSHL (SHL1 | SHL2 | SHL3)
|
||||
|
||||
#ifdef NIMTEROP
|
||||
#define SOME_CONST 8
|
||||
#endif
|
||||
|
||||
struct some_struct_s
|
||||
{
|
||||
int x;
|
||||
};
|
||||
|
||||
struct parent_struct_s
|
||||
{
|
||||
/* Random comment */
|
||||
struct some_struct_s s[SOME_CONST];
|
||||
};
|
||||
|
||||
typedef struct some_struct_s SOME_ARRAY[SOME_CONST];
|
||||
|
||||
struct A0;
|
||||
struct A1 {};
|
||||
|
|
@ -392,10 +278,10 @@ typedef char *(*A11)[3];
|
|||
typedef struct A0 *A111[12];
|
||||
|
||||
typedef int
|
||||
**(*A12)(int, int b, int *c, int *, int /*out*/ *count[4], int (*func)(int, int)),
|
||||
**(*A12)(int, int b, int *c, int *, int *count[4], int (*func)(int, int)),
|
||||
**(*A121)(float, float b, float *c, float *, float *count[4], float (*func)(float, float)),
|
||||
**(*A122)(char, char b, char *c, char *, char *count[4], char (*func)(char, char));
|
||||
typedef int (*A13)(int, int, void (*func)(void));
|
||||
typedef int A13(int, int, void (*func)(void));
|
||||
|
||||
struct A14 { volatile char a1; };
|
||||
struct A15 { char *a1; const int *a2[1]; };
|
||||
|
|
@ -409,9 +295,6 @@ typedef struct A20 { char a1; } A20, A21, *A21p;
|
|||
//Expression
|
||||
typedef struct A22 { const int **f1; int *f2[123+132]; } A22;
|
||||
|
||||
// #231
|
||||
typedef const char *(*A23)();
|
||||
|
||||
//Unions
|
||||
union U1 {int f1; float f2; };
|
||||
typedef union U2 { const int **f1; int abc[123+132]; } U2;
|
||||
|
|
@ -495,16 +378,6 @@ void
|
|||
|
||||
int sqlite3_bind_blob(struct A1*, int, const void*, int n, void(*)(void*));
|
||||
|
||||
// Issue #174 - type name[] => UncheckedArray[type]
|
||||
int ucArrFunc1(int text[]);
|
||||
int ucArrFunc2(int text[][5], int (*func)(int text[]));
|
||||
|
||||
typedef int ucArrType1[][5];
|
||||
struct ucArrType2 {
|
||||
float f1[5][5];
|
||||
int *f2[][5];
|
||||
};
|
||||
|
||||
typedef struct fieldfuncfunc {
|
||||
int *(*func1)(int f1, int *(*sfunc1)(int f1, int *(*ssfunc1)(int f1, ...)));
|
||||
};
|
||||
|
|
@ -543,7 +416,6 @@ typedef struct {
|
|||
struct { int f1; } f2;
|
||||
|
||||
struct NT3 {
|
||||
/* Random comment */
|
||||
struct {
|
||||
int f1;
|
||||
union NU1 {
|
||||
|
|
@ -565,36 +437,7 @@ static inline int sitest1(int f1) {
|
|||
return f1 * 2;
|
||||
}
|
||||
|
||||
// Issue #196
|
||||
typedef int MyInt;
|
||||
struct TestMyInt {
|
||||
MyInt f1;
|
||||
};
|
||||
|
||||
// Issue #237
|
||||
typedef union sx_ivec3 {
|
||||
struct {
|
||||
int x;
|
||||
int y;
|
||||
struct z {
|
||||
int z;
|
||||
};
|
||||
};
|
||||
|
||||
int n[3];
|
||||
} sx_ivec3;
|
||||
|
||||
// Issue #236
|
||||
enum {
|
||||
SG_INVALID_ID = 0,
|
||||
SG_NUM_SHADER_STAGES = 2,
|
||||
SG_MAX_MIPMAPS = 16,
|
||||
SG_MAX_TEXTUREARRAY_LAYERS = 128
|
||||
};
|
||||
|
||||
struct parenpoin {
|
||||
void (*__gtk_reserved1);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -1,47 +0,0 @@
|
|||
import nimterop/[build, cimport]
|
||||
|
||||
const
|
||||
outdir = getProjectCacheDir("libssh2")
|
||||
|
||||
getHeader(
|
||||
header = "libssh2.h",
|
||||
conanuri = "libssh2/$1",
|
||||
jbburi = "libssh2/1.9.0",
|
||||
outdir = outdir
|
||||
)
|
||||
|
||||
cOverride:
|
||||
type
|
||||
stat = object
|
||||
stat64 = object
|
||||
SOCKET = object
|
||||
|
||||
when not libssh2Static:
|
||||
cImport(libssh2Path, recurse = true, dynlib = "libssh2LPath", flags = "-c -E_ -F_")
|
||||
|
||||
when not defined(Windows) and not isDefined(libssh2JBB):
|
||||
proc zlibVersion(): cstring {.importc, dynlib: libssh2LPath.}
|
||||
else:
|
||||
cPassL("-lpthread")
|
||||
|
||||
cImport(libssh2Path, recurse = true, flags = "-c -E_ -F_")
|
||||
|
||||
when not defined(Windows) and not isDefined(libssh2JBB):
|
||||
proc zlibVersion(): cstring {.importc.}
|
||||
|
||||
assert libssh2_init(0) == 0
|
||||
|
||||
let
|
||||
session = libssh2_session_init_ex(nil, nil, nil, nil)
|
||||
|
||||
if session == nil:
|
||||
quit(1)
|
||||
|
||||
libssh2_session_set_blocking(session, 0.cint)
|
||||
|
||||
echo "zlib version = " & (block:
|
||||
when not defined(Windows) and not isDefined(libssh2JBB):
|
||||
$zlibVersion()
|
||||
else:
|
||||
""
|
||||
)
|
||||
|
|
@ -24,8 +24,6 @@ getHeader(
|
|||
"lzma.h",
|
||||
giturl = "https://github.com/xz-mirror/xz",
|
||||
dlurl = "https://tukaani.org/xz/xz-$1.tar.gz",
|
||||
conanuri = "xz_utils",
|
||||
jbburi = "xz",
|
||||
outdir = baseDir,
|
||||
conFlags = "--disable-xz --disable-xzdec --disable-lzmadec --disable-lzmainfo"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -35,13 +35,12 @@ cPlugin:
|
|||
cOverride:
|
||||
proc OPENSSL_die*(assertion: cstring; file: cstring; line: cint) {.importc.}
|
||||
|
||||
cPassL(cryptoLPath)
|
||||
|
||||
# Skip comments for https://github.com/tree-sitter/tree-sitter-c/issues/44
|
||||
cImport(@[
|
||||
basePath / "rsa.h",
|
||||
basePath / "err.h",
|
||||
], recurse = true, flags = "-s -c " & FLAGS)
|
||||
], recurse = true, flags = "-f:ast2 -s " & FLAGS)
|
||||
|
||||
{.passL: cryptoLPath.}
|
||||
|
||||
OpensslInit()
|
||||
echo $OPENSSL_VERSION_TEXT
|
||||
|
|
|
|||
|
|
@ -2,8 +2,6 @@ import macros, os, sets, strutils
|
|||
|
||||
import nimterop/[cimport]
|
||||
|
||||
cPassC("-DNIMTEROP")
|
||||
|
||||
static:
|
||||
# Skip casting on lower nim compilers because
|
||||
# the VM does not support it
|
||||
|
|
@ -13,17 +11,17 @@ static:
|
|||
const
|
||||
path = currentSourcePath.parentDir() / "include" / "tast2.h"
|
||||
|
||||
when defined(NOHEADER):
|
||||
cDefine("NOHEADER")
|
||||
when defined(HEADER):
|
||||
cDefine("HEADER")
|
||||
const
|
||||
flags = " -H"
|
||||
pHeader: seq[string] = @[]
|
||||
pHeaderImp: seq[string] = @[]
|
||||
pHeader = @["header:" & path.replace("\\", "/")]
|
||||
pHeaderImp = @["importc"] & pHeader
|
||||
else:
|
||||
const
|
||||
flags = ""
|
||||
pHeader = @["header:" & path.replace("\\", "/")]
|
||||
pHeaderImp = @["importc"] & pHeader
|
||||
pHeader: seq[string] = @[]
|
||||
pHeaderImp: seq[string] = @[]
|
||||
|
||||
const
|
||||
pHeaderImpBy = @["bycopy"] & pHeaderImp
|
||||
|
|
@ -37,12 +35,7 @@ cOverride:
|
|||
type
|
||||
A1* = A0
|
||||
|
||||
cDefine("SOME_CONST=100")
|
||||
|
||||
when not defined(WRAPPED):
|
||||
cImport(path, flags="-f:ast2 -ENK_,SDL_ -GVICE=SLICE -TMyInt=cint" & flags, nimFile = "tast2wrapped.nim")
|
||||
else:
|
||||
import tast2wrapped
|
||||
cImport(path, flags="-f:ast2 -ENK_,SDL_ -GVICE=SLICE" & flags)
|
||||
|
||||
proc getPragmas(n: NimNode): HashSet[string] =
|
||||
# Find all pragmas in AST, return as "name" or "name:value" in set
|
||||
|
|
@ -75,7 +68,7 @@ macro checkPragmas(t: typed, pragmas: static[seq[string]], istype: static[bool]
|
|||
ast = t.getImpl()
|
||||
prag = ast.getPragmas()
|
||||
exprag = pragmas.toHashSet()
|
||||
when not defined(NOHEADER):
|
||||
when defined(HEADER):
|
||||
if not istype:
|
||||
if "union" in exprag:
|
||||
exprag.incl "importc:union " & $t
|
||||
|
|
@ -114,7 +107,6 @@ assert B == 1.0
|
|||
assert C == 0x10
|
||||
assert D == "hello"
|
||||
assert E == 'c'
|
||||
assert F == 0o1234
|
||||
|
||||
assert not defined(NOTSUPPORTEDSTR)
|
||||
|
||||
|
|
@ -134,8 +126,6 @@ assert EQ4 == (AVAL < BVAL)
|
|||
assert EQ5 == (AVAL != BVAL)
|
||||
assert EQ6 == (AVAL == BVAL)
|
||||
|
||||
assert SX_NEAR_ZERO == 3.725290298461914e-09
|
||||
|
||||
assert SIZEOF == 1
|
||||
|
||||
assert COERCE == 645635670332'u64
|
||||
|
|
@ -160,14 +150,8 @@ assert SHL1 == (1.uint shl 1)
|
|||
assert SHL2 == (1.uint shl 2)
|
||||
assert SHL3 == (1.uint shl 3)
|
||||
|
||||
assert typeof(POINTEREXPR) is (ptr cint)
|
||||
assert typeof(POINTERPOINTERPOINTEREXPR) is (ptr ptr ptr cint)
|
||||
|
||||
assert ALLSHL == (SHL1 or SHL2 or SHL3)
|
||||
|
||||
assert typeof(parent_struct_s().s) is array[100, some_struct_s]
|
||||
assert typeof(SOME_ARRAY) is array[100, some_struct_s]
|
||||
|
||||
assert A0 is object
|
||||
testFields(A0, "f1!cint")
|
||||
checkPragmas(A0, pHeaderBy, istype = false)
|
||||
|
|
@ -182,7 +166,7 @@ a1.f1 = 2
|
|||
assert A2 is object
|
||||
testFields(A2)
|
||||
checkPragmas(A2, pHeaderInc, istype = false)
|
||||
when defined(NOHEADER):
|
||||
when not defined(HEADER):
|
||||
# typedef struct X; is invalid
|
||||
var a2: A2
|
||||
|
||||
|
|
@ -339,10 +323,6 @@ checkPragmas(A22, pHeaderBy, istype = false)
|
|||
var a22: A22
|
||||
a22.f1 = addr a15.a2[0]
|
||||
|
||||
assert A23 is proc(): cstring {.cdecl.}
|
||||
checkPragmas(A23, pHeaderImp & "cdecl")
|
||||
var a23: A23
|
||||
|
||||
assert U1 is object
|
||||
assert sizeof(U1) == sizeof(cfloat)
|
||||
checkPragmas(U1, pHeaderBy & @["union"], istype = false)
|
||||
|
|
@ -355,7 +335,6 @@ checkPragmas(U2, pHeaderBy & @["union"], istype = false)
|
|||
var u2: U2
|
||||
u2.f1 = addr a15.a2[0]
|
||||
|
||||
assert PANEL_WINDOW is nk_panel_type
|
||||
assert PANEL_WINDOW == 1
|
||||
assert PANEL_GROUP == 2
|
||||
assert PANEL_POPUP == 4
|
||||
|
|
@ -493,32 +472,6 @@ assert nested is object
|
|||
testFields(nested, "f1|f2|f3|f4|f5|f6|f7|f8!NT1|Type_tast2h1|NT3|Type_tast2h3|NU2|Union_tast2h1|NE1|Enum_tast2h2")
|
||||
checkPragmas(nested, pHeaderImpBy)
|
||||
|
||||
when not defined(NOHEADER):
|
||||
when defined(HEADER):
|
||||
assert sitest1(5) == 10
|
||||
assert sitest1(10) == 20
|
||||
|
||||
when declared(MyInt):
|
||||
assert false, "MyInt is defined!"
|
||||
testFields(TestMyInt, "f1!cint")
|
||||
checkPragmas(TestMyInt, pHeaderBy, isType = false)
|
||||
|
||||
# #237
|
||||
assert sx_ivec3 is object
|
||||
testFields(sx_ivec3, "x|y|z|n!cint|cint|cint|array[3, cint]")
|
||||
checkPragmas(sx_ivec3, pHeaderBy & @["union"], istype = false)
|
||||
var sx: sx_ivec3
|
||||
sx.x = 5
|
||||
assert sx.n[0] == 5
|
||||
when not defined(NOHEADER):
|
||||
# Nim doesn't know of the anonymous nested struct so when the header
|
||||
# isn't present, the test below breaks
|
||||
sx.n[1] = 4
|
||||
assert sx.y == 4
|
||||
|
||||
# #236
|
||||
assert SG_MAX_MIPMAPS is cint
|
||||
assert SG_MAX_MIPMAPS == 16
|
||||
|
||||
assert parenpoin is object
|
||||
var pp: parenpoin
|
||||
assert pp.gtk_reserved1 is pointer
|
||||
|
|
@ -70,7 +70,7 @@ var
|
|||
|
||||
e: ENUM
|
||||
e2: ENUM2 = enum5
|
||||
e3 = enum7
|
||||
e3: Enum_testh1 = enum7
|
||||
e4: ENUM4 = enum11
|
||||
|
||||
vptr: VOIDPTR
|
||||
|
|
|
|||
|
|
@ -1,2 +0,0 @@
|
|||
--preprocess
|
||||
-nk -E=_
|
||||
|
|
@ -28,8 +28,6 @@ cPlugin:
|
|||
|
||||
proc onSymbol*(sym: var Symbol) {.exportc, dynlib.} =
|
||||
sym.name = sym.name.replace("pcre_", "")
|
||||
if sym.name.startsWith("pcre16_") or sym.name.startsWith("pcre32_"):
|
||||
sym.name = ""
|
||||
|
||||
const FLAGS {.strdefine.} = ""
|
||||
cImport(pcreH, dynlib="dynpcre", flags="--mode=c " & FLAGS)
|
||||
|
|
|
|||
|
|
@ -24,15 +24,15 @@ cIncludeDir(incl)
|
|||
|
||||
when defined(osx):
|
||||
cDefine("WITH_COREAUDIO")
|
||||
cPassL("-framework CoreAudio -framework AudioToolbox")
|
||||
{.passL: "-framework CoreAudio -framework AudioToolbox".}
|
||||
cCompile(src/"backend/coreaudio/*.cpp")
|
||||
elif defined(Linux):
|
||||
cPassL("-lpthread")
|
||||
{.passL: "-lpthread".}
|
||||
cDefine("WITH_OSS")
|
||||
cCompile(src/"backend/oss/*.cpp")
|
||||
elif defined(Windows):
|
||||
cPassC("-msse")
|
||||
cPassL("-lwinmm")
|
||||
{.passC: "-msse".}
|
||||
{.passL: "-lwinmm".}
|
||||
cDefine("WITH_WINMM")
|
||||
cCompile(src/"backend/winmm/*.cpp")
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -65,12 +65,9 @@ when zlibGit or zlibDL:
|
|||
when dirExists(baseDir / "buildcache"):
|
||||
cIncludeDir(baseDir / "buildcache")
|
||||
|
||||
when not isDefined(zlibStatic):
|
||||
when not zlibStatic:
|
||||
cImport(zlibPath, recurse = true, dynlib = "zlibLPath", flags = FLAGS)
|
||||
else:
|
||||
when isDefined(zlibJBB):
|
||||
cPassL("-no-pie")
|
||||
|
||||
cImport(zlibPath, recurse = true, flags = FLAGS)
|
||||
|
||||
echo "zlib version = " & $zlibVersion()
|
||||
echo "zlib version = " & $zlibVersion()
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue