From 7d5b7fcefc67cde3fd934d924bcf9aeaf29fb6e0 Mon Sep 17 00:00:00 2001 From: JIankai Xing Date: Wed, 17 Jan 2024 14:38:18 +0800 Subject: [PATCH] add python api --- .gitmodules | 6 + CMakeLists.txt | 18 ++ pyproject.toml | 89 ++++++++ src/CMakeLists.txt | 13 ++ src/apps/cli.cpp | 2 +- src/base/CMakeLists.txt | 5 + src/base/film.h | 1 + src/base/integrator.cpp | 16 ++ src/base/integrator.h | 5 + src/base/pipeline.cpp | 31 +++ src/base/pipeline.h | 4 + src/base/texture.h | 6 +- src/compute | 2 +- src/ext/CMakeLists.txt | 22 ++ src/ext/dlpack | 1 + src/ext/pybind11 | 1 + src/films/color.cpp | 10 +- src/python/CMakeLists.txt | 22 ++ src/python/bak.rb | 22 ++ src/python/dlpack.h | 318 +++++++++++++++++++++++++++++ src/python/lrapi.cpp | 233 +++++++++++++++++++++ src/python/luisarender/__init__.py | 5 + src/python/render_python.cpp | 110 ++++++++++ src/python/test.cpp | 43 ++++ src/sdl/CMakeLists.txt | 4 + src/sdl/scene_parser_json.cpp | 20 ++ src/sdl/scene_parser_json.h | 6 + src/shapes/mesh.cpp | 5 + src/tests/test_ad_torch.py | 187 +++++++++++++++++ src/textures/constant.cpp | 7 +- src/util/CMakeLists.txt | 4 + src/util/loop_subdiv.cpp | 4 +- 32 files changed, 1214 insertions(+), 8 deletions(-) create mode 100644 pyproject.toml create mode 160000 src/ext/dlpack create mode 160000 src/ext/pybind11 create mode 100644 src/python/CMakeLists.txt create mode 100644 src/python/bak.rb create mode 100644 src/python/dlpack.h create mode 100644 src/python/lrapi.cpp create mode 100644 src/python/luisarender/__init__.py create mode 100644 src/python/render_python.cpp create mode 100644 src/python/test.cpp create mode 100644 src/tests/test_ad_torch.py diff --git a/.gitmodules b/.gitmodules index 52446c6f..efbdf172 100644 --- a/.gitmodules +++ b/.gitmodules @@ -17,3 +17,9 @@ [submodule "src/ext/json"] path = src/ext/json url = https://github.com/nlohmann/json.git +[submodule "src/ext/pybind11"] + path = src/ext/pybind11 + url = https://github.com/pybind/pybind11.git +[submodule "src/ext/dlpack"] + path = src/ext/dlpack + url = https://github.com/dmlc/dlpack.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e89d5aa..6bd80b56 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -58,4 +58,22 @@ endif () set(CMAKE_FIND_PACKAGE_SORT_ORDER NATURAL) set(CMAKE_FIND_PACKAGE_SORT_DIRECTION DEC) +if (SKBUILD) + set(CMAKE_INSTALL_INCLUDEDIR ${SKBUILD_NULL_DIR}) + set(CMAKE_INSTALL_DOCDIR ${SKBUILD_NULL_DIR}) + if (WIN32) + set(CMAKE_INSTALL_LIBDIR ${SKBUILD_NULL_DIR}) + else () + set(CMAKE_INSTALL_LIBDIR ${SKBUILD_PLATLIB_DIR}/luisarender/dylibs) + endif () + set(CMAKE_INSTALL_BINDIR ${SKBUILD_PLATLIB_DIR}/luisarender/dylibs) +else () + include(GNUInstallDirs) +endif () + +if (CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + add_compile_options("/wd4996") +endif () + add_subdirectory(src) + diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..5e13fb79 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,89 @@ +[build-system] +requires = ["pybind11", "scikit-build-core>=0.3.2", "ninja"] +build-backend = "scikit_build_core.build" + +[project] +name = "luisarender-python" +version = "0.0.2" +description = "A High-Performance Rendering Framework with Layered and Unified Interfaces on Stream Architectures" +readme = "README.md" +authors = [ + { name = "LuisaGroup" }, +] +homepage = "luisa-render.com" +repository = "https://github.com/LuisaGroup/LuisaRender" +requires-python = ">=3.10" +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: BSD License", + "Programming Language :: 
Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] + +license = { file = "LICENSE" } + +[tool.scikit-build] +cmake.minimum-version = "3.18" +cmake.build-type = "Release" +cmake.args = ["-GNinja","-Wno-dev"] +sdist.reproducible = false +wheel.expand-macos-universal-tags = true +wheel.packages = [] +build-dir = "build/{wheel_tag}" + +[project.optional-dependencies] +test = ["pytest"] + +[tool.pytest.ini_options] +minversion = "6.0" +addopts = ["-ra", "--showlocals", "--strict-markers", "--strict-config"] +xfail_strict = true +log_cli_level = "INFO" +filterwarnings = [ + "error", +] +testpaths = ["tests"] + +[tool.cibuildwheel] +test-command = "pytest {project}/tests" +test-extras = ["test"] +test-skip = ["*universal2:arm64"] +build-verbosity = 1 + +[tool.ruff] +src = ["src"] + +[tool.ruff.lint] +extend-select = [ + "B", # flake8-bugbear + "I", # isort + "ARG", # flake8-unused-arguments + "C4", # flake8-comprehensions + "EM", # flake8-errmsg + "ICN", # flake8-import-conventions + "G", # flake8-logging-format + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # pylint + "PT", # flake8-pytest-style + "PTH", # flake8-use-pathlib + "RET", # flake8-return + "RUF", # Ruff-specific + "SIM", # flake8-simplify + "T20", # flake8-print + "UP", # pyupgrade + "YTT", # flake8-2020 + "EXE", # flake8-executable + "NPY", # NumPy specific rules + "PD", # pandas-vet +] +ignore = [ + "PLR09", # Too many X + "PLR2004", # Magic comparison +] +isort.required-imports = ["from __future__ import annotations"] + +[tool.ruff.lint.per-file-ignores] +"tests/**" = ["T20"] diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 652d503a..5a981410 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -31,7 +31,12 @@ endif () set(LUISA_COMPUTE_ENABLE_LTO OFF CACHE BOOL "" FORCE) set(LUISA_COMPUTE_ENABLE_PYTHON OFF CACHE BOOL "" FORCE) set(LUISA_COMPUTE_ENABLE_UNITY_BUILD ${LUISA_RENDER_ENABLE_UNITY_BUILD} CACHE BOOL "" FORCE) + +set(SKBUILD_OLD ${SKBUILD}) +set(SKBUILD OFF) add_subdirectory(compute) +set(SKBUILD ${SKBUILD_OLD}) + target_include_directories(luisa-render-include INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/compute/include/luisa) add_subdirectory(sdl) @@ -55,6 +60,10 @@ function(luisa_render_add_plugin name) set_target_properties(${lib_name} PROPERTIES UNITY_BUILD OFF DEBUG_POSTFIX "") + install(TARGETS ${lib_name} + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) endfunction() add_subdirectory(films) @@ -103,3 +112,7 @@ add_subdirectory(apps) if (LUISA_RENDER_BUILD_TESTS) add_subdirectory(tests) endif () + +if (SKBUILD) + add_subdirectory(python) +endif () diff --git a/src/apps/cli.cpp b/src/apps/cli.cpp index 1d5f7c32..99621a2a 100644 --- a/src/apps/cli.cpp +++ b/src/apps/cli.cpp @@ -110,7 +110,7 @@ using namespace luisa::render; int main(int argc, char *argv[]) { - log_level_info(); + log_level_verbose(); luisa::compute::Context context{argv[0]}; auto macros = parse_cli_macros(argc, argv); for (auto &&[k, v] : macros) { diff --git a/src/base/CMakeLists.txt b/src/base/CMakeLists.txt index 87977da2..d64eba96 100644 --- a/src/base/CMakeLists.txt +++ b/src/base/CMakeLists.txt @@ -30,3 +30,8 @@ target_link_libraries(luisa-render-base PUBLIC set_target_properties(luisa-render-base PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS ON UNITY_BUILD ${LUISA_RENDER_ENABLE_UNITY_BUILD}) + +install(TARGETS luisa-render-base + LIBRARY 
DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) diff --git a/src/base/film.h b/src/base/film.h index 9da0f672..3695d81d 100644 --- a/src/base/film.h +++ b/src/base/film.h @@ -42,6 +42,7 @@ class Film : public SceneNode { virtual void clear(CommandBuffer &command_buffer) noexcept = 0; virtual void download(CommandBuffer &command_buffer, float4 *framebuffer) const noexcept = 0; virtual bool show(CommandBuffer &command_buffer) const noexcept { return false; } + virtual void *export_image(CommandBuffer &command_buffer) { return nullptr; } virtual void release() noexcept = 0; }; diff --git a/src/base/integrator.cpp b/src/base/integrator.cpp index 3894fdab..b9598628 100644 --- a/src/base/integrator.cpp +++ b/src/base/integrator.cpp @@ -48,6 +48,22 @@ void ProgressiveIntegrator::Instance::render(Stream &stream) noexcept { } } +luisa::vector ProgressiveIntegrator::Instance::render_with_return(Stream &stream) noexcept { + CommandBuffer command_buffer{&stream}; + luisa::vector result; + for (auto i = 0u; i < pipeline().camera_count(); i++) { + auto camera = pipeline().camera(i); + auto resolution = camera->film()->node()->resolution(); + auto pixel_count = resolution.x * resolution.y; + camera->film()->prepare(command_buffer); + _render_one_camera(command_buffer, camera); + command_buffer << compute::synchronize(); + result.push_back(camera->film()->export_image(command_buffer)); + } + return result; +} + + void ProgressiveIntegrator::Instance::_render_one_camera( CommandBuffer &command_buffer, Camera::Instance *camera) noexcept { diff --git a/src/base/integrator.h b/src/base/integrator.h index 9a826649..e8880d44 100644 --- a/src/base/integrator.h +++ b/src/base/integrator.h @@ -42,6 +42,10 @@ class Integrator : public SceneNode { [[nodiscard]] auto light_sampler() noexcept { return _light_sampler.get(); } [[nodiscard]] auto light_sampler() const noexcept { return _light_sampler.get(); } virtual void render(Stream &stream) noexcept = 0; + virtual luisa::vector render_with_return(Stream &stream) { + LUISA_INFO("Not implemented!"); + return luisa::vector{}; + } }; private: @@ -72,6 +76,7 @@ class ProgressiveIntegrator : public Integrator { const ProgressiveIntegrator *node) noexcept; ~Instance() noexcept override; void render(Stream &stream) noexcept override; + luisa::vector render_with_return(Stream &stream) noexcept override; }; public: diff --git a/src/base/pipeline.cpp b/src/base/pipeline.cpp index 2a32bdaf..160d5786 100644 --- a/src/base/pipeline.cpp +++ b/src/base/pipeline.cpp @@ -112,10 +112,29 @@ bool Pipeline::update(CommandBuffer &command_buffer, float time) noexcept { return updated; } + +void Pipeline::update_texture(Stream &stream, uint texture_id, float4 new_texture) noexcept { + auto device_float = make_float4(new_texture); + LUISA_INFO("LuisaRender Create device_buffer"); + _textures.begin()->second->update_by_buffer(stream, device_float); +} + +void Pipeline::update_mesh(uint mesh_id, uint64_t vertex_buffer) noexcept { + // if (auto iter = _geometry->instances().find(mesh_id); iter != _geometry->instances().end()) { + // iter->second->update(vertex_buffer); + // //return true; + // } + //return false; +} + void Pipeline::render(Stream &stream) noexcept { _integrator->render(stream); } +luisa::vector Pipeline::render_with_return(Stream &stream) noexcept { + return _integrator->render_with_return(stream); +} + const Texture::Instance *Pipeline::build_texture(CommandBuffer &command_buffer, const Texture 
*texture) noexcept { if (texture == nullptr) { return nullptr; } if (auto iter = _textures.find(texture); iter != _textures.end()) { @@ -186,4 +205,16 @@ Float4 Pipeline::constant(Expr index) const noexcept { return _constant_buffer->read(index); } +void Pipeline::update_constant(Stream &stream, uint index, float4 new_value) const noexcept { + if(!_constant_buffer) { + LUISA_INFO("Pipeline::update_constant: constant_buffer is nullptr"); + return; + } + LUISA_INFO("{} {} {}",index, new_value, _constant_buffer.size()); + stream << _constant_buffer.view(index, 1u).copy_from(&new_value) << compute::commit(); + LUISA_INFO("Pipeline::update_constant"); +} + + + }// namespace luisa::render diff --git a/src/base/pipeline.h b/src/base/pipeline.h index fd8d4bdc..16cd7f93 100644 --- a/src/base/pipeline.h +++ b/src/base/pipeline.h @@ -211,7 +211,10 @@ class Pipeline { [[nodiscard]] const Filter::Instance *build_filter(CommandBuffer &command_buffer, const Filter *filter) noexcept; [[nodiscard]] const PhaseFunction::Instance *build_phasefunction(CommandBuffer &command_buffer, const PhaseFunction *phasefunction) noexcept; bool update(CommandBuffer &command_buffer, float time) noexcept; + void update_texture(Stream &stream, uint texture_id, float4 new_value) noexcept; + void update_mesh(uint mesh_id, uint64_t vertex_buffer) noexcept; void render(Stream &stream) noexcept; + luisa::vector render_with_return(Stream &stream) noexcept; [[nodiscard]] auto &printer() noexcept { return *_printer; } [[nodiscard]] auto &printer() const noexcept { return *_printer; } [[nodiscard]] uint named_id(luisa::string_view name) const noexcept; @@ -234,6 +237,7 @@ class Pipeline { [[nodiscard]] Float4x4 transform(const Transform *transform) const noexcept; [[nodiscard]] Float4 constant(Expr index) const noexcept; + void update_constant(Stream &stream, uint index, float4 new_value) const noexcept; template [[nodiscard]] auto shader(luisa::string_view name, CallArgs &&...call_args) const noexcept { diff --git a/src/base/texture.h b/src/base/texture.h index 0c795488..788256ff 100644 --- a/src/base/texture.h +++ b/src/base/texture.h @@ -30,8 +30,8 @@ class Texture : public SceneNode { class Instance { private: - const Pipeline &_pipeline; const Texture *_texture; + const Pipeline &_pipeline; protected: [[nodiscard]] Spectrum::Decode _evaluate_static_albedo_spectrum( @@ -57,6 +57,10 @@ class Texture : public SceneNode { const Interaction &it, const SampledWavelengths &swl, Expr time) const noexcept; [[nodiscard]] virtual Spectrum::Decode evaluate_illuminant_spectrum( const Interaction &it, const SampledWavelengths &swl, Expr time) const noexcept; + virtual void update_by_buffer(Stream &stream, float4 texture_buffer){ + LUISA_WARNING_WITH_LOCATION( + "Texture::update_by_buffer() is not implemented for this texture type."); + } }; public: diff --git a/src/compute b/src/compute index b854470c..16318d30 160000 --- a/src/compute +++ b/src/compute @@ -1 +1 @@ -Subproject commit b854470cdac1d3bf2a931e1e96cb8ceab3fbdb70 +Subproject commit 16318d3003a37a852a43a9638f7bd1a54de9b471 diff --git a/src/ext/CMakeLists.txt b/src/ext/CMakeLists.txt index 76ade159..9fc59884 100644 --- a/src/ext/CMakeLists.txt +++ b/src/ext/CMakeLists.txt @@ -7,6 +7,11 @@ add_library(fast_float INTERFACE) target_include_directories(fast_float INTERFACE fast_float/include) target_link_libraries(luisa-render-ext INTERFACE fast_float) +install(TARGETS luisa-render-ext + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME 
DESTINATION ${CMAKE_INSTALL_BINDIR}) + # shared by assimp and tinyexr find_package(ZLIB) @@ -23,6 +28,8 @@ if (ZLIB_FOUND) else () set(ASSIMP_BUILD_ZLIB ON CACHE BOOL "" FORCE) endif () + +set(ASSIMP_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR}) set(ASSIMP_BUILD_ASSIMP_TOOLS OFF CACHE BOOL "" FORCE) set(ASSIMP_BUILD_TESTS OFF CACHE BOOL "" FORCE) set(ASSIMP_INSTALL ON CACHE BOOL "" FORCE) @@ -52,6 +59,11 @@ else () target_compile_definitions(tinyexr PUBLIC TINYEXR_USE_MINIZ=1) target_include_directories(tinyexr PRIVATE tinyexr/deps/miniz) endif () + +install(TARGETS tinyexr + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) target_include_directories(tinyexr PUBLIC tinyexr) target_link_libraries(tinyexr PUBLIC ${CMAKE_DL_LIBS}) set_target_properties(tinyexr PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS ON) @@ -61,3 +73,13 @@ target_link_libraries(luisa-render-ext INTERFACE tinyexr) add_library(nlohmann_json INTERFACE) target_include_directories(nlohmann_json INTERFACE json/single_include) target_link_libraries(luisa-render-ext INTERFACE nlohmann_json) + + +install(TARGETS nlohmann_json + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) + +if (SKBUILD OR LUISA_COMPUTE_FORCE_PYTHON_BINDINGS) + add_subdirectory(pybind11) +endif () \ No newline at end of file diff --git a/src/ext/dlpack b/src/ext/dlpack new file mode 160000 index 00000000..2a7e9f12 --- /dev/null +++ b/src/ext/dlpack @@ -0,0 +1 @@ +Subproject commit 2a7e9f1256ddc48186c86dff7a00e189b47e5310 diff --git a/src/ext/pybind11 b/src/ext/pybind11 new file mode 160000 index 00000000..f29def9e --- /dev/null +++ b/src/ext/pybind11 @@ -0,0 +1 @@ +Subproject commit f29def9ea467c7fde754440733047953fa4b990c diff --git a/src/films/color.cpp b/src/films/color.cpp index 07a95cb8..d3556cda 100644 --- a/src/films/color.cpp +++ b/src/films/color.cpp @@ -65,6 +65,14 @@ class ColorFilmInstance final : public Film::Instance { public: ColorFilmInstance(Device &device, Pipeline &pipeline, const ColorFilm *film) noexcept; void prepare(CommandBuffer &command_buffer) noexcept override; + void* export_image(CommandBuffer &command_buffer) override { + _check_prepared(); + auto resolution = node()->resolution(); + auto pixel_count = resolution.x * resolution.y; + command_buffer << _convert_image.get()(_image, _converted).dispatch(pixel_count); + command_buffer << compute::synchronize(); + return _converted.native_handle(); + } void download(CommandBuffer &command_buffer, float4 *framebuffer) const noexcept override; [[nodiscard]] Film::Accumulation read(Expr pixel) const noexcept override; void release() noexcept override; @@ -76,14 +84,12 @@ class ColorFilmInstance final : public Film::Instance { ColorFilmInstance::ColorFilmInstance(Device &device, Pipeline &pipeline, const ColorFilm *film) noexcept : Film::Instance{pipeline, film} { - Kernel1D clear_image_kernel = [](BufferFloat4 image) noexcept { image.write(dispatch_x(), make_float4(0.f)); }; _clear_image = global_thread_pool().async([&device, clear_image_kernel] { return device.compile(clear_image_kernel); }); - Kernel1D convert_image_kernel = [this](BufferFloat4 accum, BufferFloat4 output) noexcept { auto i = dispatch_x(); auto c = accum.read(i); diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt new file mode 100644 index 00000000..83ee5385 --- /dev/null +++ b/src/python/CMakeLists.txt @@ -0,0 +1,22 @@ 
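+# Build the _lrapi pybind11 extension that exposes LuisaRender to Python. When packaged
+# with scikit-build (SKBUILD), it is installed into luisarender/dylibs so that
+# luisarender/__init__.py can import it as .dylibs._lrapi.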
+set(LUISARENDER_PYTHON_SOURCES + lrapi.cpp) + +#find_package(Python REQUIRED COMPONENTS Interpreter Development.Module) +#find_package(pybind11 CONFIG REQUIRED) + +#python_add_library(_lrapi MODULE ${LUISARENDER_PYTHON_SOURCES} WITH_SOABI) +pybind11_add_module(_lrapi ${LUISARENDER_PYTHON_SOURCES}) + +if (WIN32 AND CMAKE_CXX_COMPILER_ID MATCHES "GNU") + target_compile_options(_lrapi PRIVATE -Wa,-mbig-obj) +endif () +target_link_libraries(_lrapi PRIVATE luisa::render) +target_link_libraries(_lrapi PRIVATE pybind11::headers) + +if (SKBUILD) + install(DIRECTORY luisarender + DESTINATION ${SKBUILD_PLATLIB_DIR}) + install(TARGETS _lrapi + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) +endif () diff --git a/src/python/bak.rb b/src/python/bak.rb new file mode 100644 index 00000000..30f3e89d --- /dev/null +++ b/src/python/bak.rb @@ -0,0 +1,22 @@ + + py:class_ + py:class_(m,"SceneDesc") + .def(py::init<>()) + py:class_(m, "Scene") + .def("load_node", &Scene::load_node) + .def("load_camera", &Scene::load_camera) + .def("load_film", &Scene::load_film) + .def("load_filter", &Scene::load_filter) + .def("load_integrator", &Scene::load_integrator) + .def("load_surface", &Scene::load_surface) + .def("load_light", &Scene::load_light) + .def("load_sampler", &Scene::load_sampler) + .def("load_shape", &Scene::load_shape) + .def("load_transform", &Scene::load_transform) + .def("load_light_sampler", &Scene::load_light_sampler) + .def("load_environment", &Scene::load_environment) + .def("load_texture", &Scene::load_texture) + .def("load_texture_mapping", &Scene::load_texture_mapping) + .def("load_spectrum", &Scene::load_spectrum) + .def("load_medium", &Scene::load_medium) + .def("load_phase_function", &Scene::load_phase_function) \ No newline at end of file diff --git a/src/python/dlpack.h b/src/python/dlpack.h new file mode 100644 index 00000000..0bdc1fe0 --- /dev/null +++ b/src/python/dlpack.h @@ -0,0 +1,318 @@ +/*! + * Copyright (c) 2017 by Contributors + * \file dlpack.h + * \brief The common header of DLPack. + */ +#ifndef DLPACK_DLPACK_H_ +#define DLPACK_DLPACK_H_ + +/** + * \brief Compatibility with C++ + */ +#ifdef __cplusplus +#define DLPACK_EXTERN_C extern "C" +#else +#define DLPACK_EXTERN_C +#endif + +/*! \brief The current major version of dlpack */ +#define DLPACK_MAJOR_VERSION 1 + +/*! \brief The current minor version of dlpack */ +#define DLPACK_MINOR_VERSION 0 + +/*! \brief DLPACK_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef DLPACK_EXPORTS +#define DLPACK_DLL __declspec(dllexport) +#else +#define DLPACK_DLL __declspec(dllimport) +#endif +#else +#define DLPACK_DLL +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \brief The DLPack version. + * + * A change in major version indicates that we have changed the + * data layout of the ABI - DLManagedTensorVersioned. + * + * A change in minor version indicates that we have added new + * code, such as a new device type, but the ABI is kept the same. + * + * If an obtained DLPack tensor has a major version that disagrees + * with the version number specified in this header file + * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter + * (and it is safe to do so). It is not safe to access any other fields + * as the memory layout will have changed. + * + * In the case of a minor version mismatch, the tensor can be safely used as + * long as the consumer knows how to interpret all fields. 
Minor version + * updates indicate the addition of enumeration values. + */ +typedef struct { + /*! \brief DLPack major version. */ + uint32_t major; + /*! \brief DLPack minor version. */ + uint32_t minor; +} DLPackVersion; + +/*! + * \brief The device type in DLDevice. + */ +#ifdef __cplusplus +typedef enum : int32_t { +#else +typedef enum { +#endif + /*! \brief CPU device */ + kDLCPU = 1, + /*! \brief CUDA GPU device */ + kDLCUDA = 2, + /*! + * \brief Pinned CUDA CPU memory by cudaMallocHost + */ + kDLCUDAHost = 3, + /*! \brief OpenCL devices. */ + kDLOpenCL = 4, + /*! \brief Vulkan buffer for next generation graphics. */ + kDLVulkan = 7, + /*! \brief Metal for Apple GPU. */ + kDLMetal = 8, + /*! \brief Verilog simulator buffer */ + kDLVPI = 9, + /*! \brief ROCm GPUs for AMD GPUs */ + kDLROCM = 10, + /*! + * \brief Pinned ROCm CPU memory allocated by hipMallocHost + */ + kDLROCMHost = 11, + /*! + * \brief Reserved extension device type, + * used for quickly test extension device + * The semantics can differ depending on the implementation. + */ + kDLExtDev = 12, + /*! + * \brief CUDA managed/unified memory allocated by cudaMallocManaged + */ + kDLCUDAManaged = 13, + /*! + * \brief Unified shared memory allocated on a oneAPI non-partititioned + * device. Call to oneAPI runtime is required to determine the device + * type, the USM allocation type and the sycl context it is bound to. + * + */ + kDLOneAPI = 14, + /*! \brief GPU support for next generation WebGPU standard. */ + kDLWebGPU = 15, + /*! \brief Qualcomm Hexagon DSP */ + kDLHexagon = 16, +} DLDeviceType; + +/*! + * \brief A Device for Tensor and operator. + */ +typedef struct { + /*! \brief The device type used in the device. */ + DLDeviceType device_type; + /*! + * \brief The device index. + * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. + */ + int32_t device_id; +} DLDevice; + +/*! + * \brief The type code options DLDataType. + */ +typedef enum { + /*! \brief signed integer */ + kDLInt = 0U, + /*! \brief unsigned integer */ + kDLUInt = 1U, + /*! \brief IEEE floating point */ + kDLFloat = 2U, + /*! + * \brief Opaque handle type, reserved for testing purposes. + * Frameworks need to agree on the handle data type for the exchange to be well-defined. + */ + kDLOpaqueHandle = 3U, + /*! \brief bfloat16 */ + kDLBfloat = 4U, + /*! + * \brief complex number + * (C/C++/Python layout: compact struct per complex number) + */ + kDLComplex = 5U, + /*! \brief boolean */ + kDLBool = 6U, +} DLDataTypeCode; + +/*! + * \brief The data type the tensor can hold. The data type is assumed to follow the + * native endian-ness. An explicit error message should be raised when attempting to + * export an array with non-native endianness + * + * Examples + * - float: type_code = 2, bits = 32, lanes = 1 + * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4 + * - int8: type_code = 0, bits = 8, lanes = 1 + * - std::complex: type_code = 5, bits = 64, lanes = 1 + * - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention, the underlying storage size of bool is 8 bits) + */ +typedef struct { + /*! + * \brief Type code of base types. + * We keep it uint8_t instead of DLDataTypeCode for minimal memory + * footprint, but the value should be one of DLDataTypeCode enum values. + * */ + uint8_t code; + /*! + * \brief Number of bits, common choices are 8, 16, 32. + */ + uint8_t bits; + /*! \brief Number of lanes in the type, used for vector types. */ + uint16_t lanes; +} DLDataType; + +/*! 
+ * \brief Plain C Tensor object, does not manage memory. + */ +typedef struct { + /*! + * \brief The data pointer points to the allocated data. This will be CUDA + * device pointer or cl_mem handle in OpenCL. It may be opaque on some device + * types. This pointer is always aligned to 256 bytes as in CUDA. The + * `byte_offset` field should be used to point to the beginning of the data. + * + * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, + * TVM, perhaps others) do not adhere to this 256 byte aligment requirement + * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed + * (after which this note will be updated); at the moment it is recommended + * to not rely on the data pointer being correctly aligned. + * + * For given DLTensor, the size of memory required to store the contents of + * data is calculated as follows: + * + * \code{.c} + * static inline size_t GetDataSize(const DLTensor* t) { + * size_t size = 1; + * for (tvm_index_t i = 0; i < t->ndim; ++i) { + * size *= t->shape[i]; + * } + * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8; + * return size; + * } + * \endcode + */ + void* data; + /*! \brief The device of the tensor */ + DLDevice device; + /*! \brief Number of dimensions */ + int32_t ndim; + /*! \brief The data type of the pointer*/ + DLDataType dtype; + /*! \brief The shape of the tensor */ + int64_t* shape; + /*! + * \brief strides of the tensor (in number of elements, not bytes) + * can be NULL, indicating tensor is compact and row-majored. + */ + int64_t* strides; + /*! \brief The offset in bytes to the beginning pointer to data */ + uint64_t byte_offset; +} DLTensor; + +/*! + * \brief C Tensor object, manage memory of DLTensor. This data structure is + * intended to facilitate the borrowing of DLTensor by another framework. It is + * not meant to transfer the tensor. When the borrowing framework doesn't need + * the tensor, it should call the deleter to notify the host that the resource + * is no longer needed. + * + * \note This data structure is used as Legacy DLManagedTensor + * in DLPack exchange and is deprecated after DLPack v0.8 + * Use DLManagedTensorVersioned instead. + * This data structure may get renamed or deleted in future versions. + * + * \sa DLManagedTensorVersioned + */ +typedef struct DLManagedTensor { + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; + /*! \brief the context of the original host framework of DLManagedTensor in + * which DLManagedTensor is used in the framework. It can also be NULL. + */ + void * manager_ctx; + /*! + * \brief Destructor - this should be called + * to destruct the manager_ctx which backs the DLManagedTensor. It can be + * NULL if there is no way for the caller to provide a reasonable destructor. + * The destructors deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensor * self); +} DLManagedTensor; + +// bit masks used in in the DLManagedTensorVersioned + +/*! \brief bit mask to indicate that the tensor is read only. */ +#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) + +/*! + * \brief A versioned and managed C Tensor object, manage memory of DLTensor. + * + * This data structure is intended to facilitate the borrowing of DLTensor by + * another framework. It is not meant to transfer the tensor. When the borrowing + * framework doesn't need the tensor, it should call the deleter to notify the + * host that the resource is no longer needed. + * + * \note This is the current standard DLPack exchange data structure. 
+ */ +struct DLManagedTensorVersioned { + /*! + * \brief The API and ABI version of the current managed Tensor + */ + DLPackVersion version; + /*! + * \brief the context of the original host framework. + * + * Stores DLManagedTensorVersioned is used in the + * framework. It can also be NULL. + */ + void *manager_ctx; + /*! + * \brief Destructor. + * + * This should be called to destruct manager_ctx which holds the DLManagedTensorVersioned. + * It can be NULL if there is no way for the caller to provide a reasonable + * destructor. The destructors deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensorVersioned *self); + /*! + * \brief Additional bitmask flags information about the tensor. + * + * By default the flags should be set to 0. + * + * \note Future ABI changes should keep everything until this field + * stable, to ensure that deleter can be correctly called. + * + * \sa DLPACK_FLAG_BITMASK_READ_ONLY + */ + uint64_t flags; + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; +}; + +#ifdef __cplusplus +} // DLPACK_EXTERN_C +#endif +#endif // DLPACK_DLPACK_H_ diff --git a/src/python/lrapi.cpp b/src/python/lrapi.cpp new file mode 100644 index 00000000..a3132d1d --- /dev/null +++ b/src/python/lrapi.cpp @@ -0,0 +1,233 @@ +// This file exports LuisaRender functionalities to a python library using pybind11. + +#include +#include +#include +#include +#include "dlpack.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + + +using namespace luisa; +using namespace luisa::compute; +using namespace luisa::render; + +namespace py = pybind11; + +int add(int i, int j) { + return i + j; +} + +[[nodiscard]] auto parse_cli_options(int argc, const char *const *argv) noexcept { + cxxopts::Options cli{"luisa-render-cli"}; + cli.add_option("", "b", "backend", "Compute backend name", cxxopts::value(), ""); + cli.add_option("", "d", "device", "Compute device index", cxxopts::value()->default_value("-1"), ""); + cli.add_option("", "", "scene", "Path to scene description file", cxxopts::value(), ""); + cli.add_option("", "D", "define", "Parameter definitions to override scene description macros.", + cxxopts::value>()->default_value(""), "="); + cli.add_option("", "h", "help", "Display this help message", cxxopts::value()->default_value("false"), ""); + cli.allow_unrecognised_options(); + cli.positional_help(""); + cli.parse_positional("scene"); + auto options = [&] { + try { + return cli.parse(argc, argv); + } catch (const std::exception &e) { + LUISA_WARNING_WITH_LOCATION( + "Failed to parse command line arguments: {}.", + e.what()); + std::cout << cli.help() << std::endl; + exit(-1); + } + }(); + if (options["help"].as()) { + std::cout << cli.help() << std::endl; + exit(0); + } + if (options["scene"].count() == 0u) [[unlikely]] { + LUISA_WARNING_WITH_LOCATION("Scene file not specified."); + std::cout << cli.help() << std::endl; + exit(-1); + } + if (auto unknown = options.unmatched(); !unknown.empty()) [[unlikely]] { + luisa::string opts{unknown.front()}; + for (auto &&u : luisa::span{unknown}.subspan(1)) { + opts.append("; ").append(u); + } + LUISA_WARNING_WITH_LOCATION( + "Unrecognized options: {}", opts); + } + return options; +} + +[[nodiscard]] auto parse_cli_macros(int &argc, char *argv[]) { + SceneParser::MacroMap macros; + + auto parse_macro = [¯os](luisa::string_view d) noexcept { + if (auto p = d.find('='); p 
== luisa::string::npos) [[unlikely]] { + LUISA_WARNING_WITH_LOCATION( + "Invalid definition: {}", d); + } else { + auto key = d.substr(0, p); + auto value = d.substr(p + 1); + LUISA_VERBOSE_WITH_LOCATION("Parameter definition: {} = '{}'", key, value); + if (auto iter = macros.find(key); iter != macros.end()) { + LUISA_WARNING_WITH_LOCATION( + "Duplicate definition: {} = '{}'. " + "Ignoring the previous one: {} = '{}'.", + key, value, key, iter->second); + iter->second = value; + } else { + macros.emplace(key, value); + } + } + }; + // parse all options starting with '-D' or '--define' + for (int i = 1; i < argc; i++) { + auto arg = luisa::string_view{argv[i]}; + std::cout<<"arg "<(new_end - argv); + return macros; +} + +class ScenePython{ +public: + luisa::unique_ptr _scene; + luisa::unique_ptr _device; + luisa::unique_ptr _pipeline; + luisa::unique_ptr _stream; +}scene_python; + +PYBIND11_MODULE(_lrapi, m) { + m.doc() = "LuisaRender API";// optional module docstring + // log + m.def("log_info_test", [](const char *msg) { LUISA_INFO("{}", msg); }); + // util function for uniform encoding + m.def("add", &add, R"pbdoc( + Add two numbers + Some other explanation about the add function. + )pbdoc"); + m.def("init", []() { + log_level_info(); + LUISA_INFO("LuisaRender API init"); + }); + m.def("load_scene", [](std::vector &argvs){ + int argc = argvs.size(); + LUISA_INFO("Argc: {}", argc); + vector pointerVec(argc); + for(unsigned i = 0; i < argc; ++i) + { + LUISA_INFO("Argv: {} {}", i, argvs[i]); + pointerVec[i] = argvs[i].data(); + } + char** argv = pointerVec.data(); + luisa::compute::Context context{argv[0]}; + auto macros = parse_cli_macros(argc, argv); + for (auto &&[k, v] : macros) { + LUISA_INFO("Found CLI Macro: {} = {}", k, v); + } + auto options = parse_cli_options(argc, argv); + auto backend = options["backend"].as(); + auto index = options["device"].as(); + auto path = options["scene"].as(); + + compute::DeviceConfig config; + config.device_index = index; + config.inqueue_buffer_limit = false; // Do not limit the number of in-queue buffers --- we are doing offline rendering! 
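+        // With the options parsed, create the compute device, parse the scene description,
+        // and build the render pipeline; these objects are kept alive in the module-level
+        // ScenePython singleton so later render()/update_*() calls can reuse them.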
+ auto device = context.create_device(backend, &config); + scene_python._device = luisa::make_unique(std::move(device)); + auto scene_desc = SceneParser::parse(path, macros); + + auto stream = scene_python._device->create_stream(StreamTag::GRAPHICS); + scene_python._scene = Scene::create(context, scene_desc.get()); + scene_python._stream = luisa::make_unique(std::move(stream)); + scene_python._pipeline = Pipeline::create(*scene_python._device, *scene_python._stream, *scene_python._scene); + }); + + m.def("render", []() { + LUISA_INFO("LuisaRender API render_scene"); + auto res = scene_python._pipeline->render_with_return(*scene_python._stream); + scene_python._stream->synchronize(); + std::vector res_vec(res.size()); + for (int i = 0; i < res.size(); i++) { + res_vec[i] = reinterpret_cast(res[i]); + } + LUISA_INFO("res_vec: {}",res_vec[0]); + return res_vec; + }); + + m.def("update_texture", [](uint tex_id, float4 texture_buffer) { + LUISA_INFO("LuisaRender Update Texture"); + scene_python._pipeline->update_texture(*scene_python._stream, tex_id, texture_buffer); + }); + + + m.def("update_mesh", [](uint mesh_id, uint64_t vertex_buffer) { + LUISA_INFO("LuisaRender Update Mesh"); + scene_python._pipeline->update_mesh(mesh_id, vertex_buffer); + }); + + m.def("render_backward" [](uint64_t grad_ptr){ + + }); + + // py::class_(m, "SceneDesc") + // .def(py::init<>()) + // .def("nodes", &SceneDesc::nodes) + // .def("node", &SceneDesc::node) + // .def("root", &SceneDesc::root) + // .def("reference", &SceneDesc::reference) + // .def("define", &SceneDesc::define) + // .def("define_root", &SceneDesc::define_root) + // .def("register_path", &SceneDesc::register_path); + + // py::class_(m, "Pipeline") + // .def(py::init<>()) + // .def("render", &Pipeline::render); + // .def("update_texture", &Pipeline::update_texture); + // .def("update_mesh", &Pipeline::update_mesh); + +} + diff --git a/src/python/luisarender/__init__.py b/src/python/luisarender/__init__.py new file mode 100644 index 00000000..2f960e14 --- /dev/null +++ b/src/python/luisarender/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from .dylibs._lrapi import add, render, load_scene, init, update_mesh, update_texture + +__all__ = ["__doc__", "render", "add", "load_scene", "init", "update_texture", "update_mesh"] \ No newline at end of file diff --git a/src/python/render_python.cpp b/src/python/render_python.cpp new file mode 100644 index 00000000..543dd76b --- /dev/null +++ b/src/python/render_python.cpp @@ -0,0 +1,110 @@ +// +// Created by Mike on 2021/12/7. 
+// + +#include +#include + +#include + +#include +#include +#include +#include +#include + +[[nodiscard]] auto parse_cli_options(int argc, const char *const *argv) noexcept { + cxxopts::Options cli{"luisa-render-cli"}; + cli.add_option("", "b", "backend", "Compute backend name", cxxopts::value(), ""); + cli.add_option("", "d", "device", "Compute device index", cxxopts::value()->default_value("-1"), ""); + cli.add_option("", "", "scene", "Path to scene description file", cxxopts::value(), ""); + cli.add_option("", "D", "define", "Parameter definitions to override scene description macros.", + cxxopts::value>()->default_value(""), "="); + cli.add_option("", "h", "help", "Display this help message", cxxopts::value()->default_value("false"), ""); + cli.allow_unrecognised_options(); + cli.positional_help(""); + cli.parse_positional("scene"); + auto options = [&] { + try { + return cli.parse(argc, argv); + } catch (const std::exception &e) { + LUISA_WARNING_WITH_LOCATION( + "Failed to parse command line arguments: {}.", + e.what()); + std::cout << cli.help() << std::endl; + exit(-1); + } + }(); + if (options["help"].as()) { + std::cout << cli.help() << std::endl; + exit(0); + } + if (options["scene"].count() == 0u) [[unlikely]] { + LUISA_WARNING_WITH_LOCATION("Scene file not specified."); + std::cout << cli.help() << std::endl; + exit(-1); + } + if (auto unknown = options.unmatched(); !unknown.empty()) [[unlikely]] { + luisa::string opts{unknown.front()}; + for (auto &&u : luisa::span{unknown}.subspan(1)) { + opts.append("; ").append(u); + } + LUISA_WARNING_WITH_LOCATION( + "Unrecognized options: {}", opts); + } + return options; +} + +using namespace luisa; +using namespace luisa::compute; +using namespace luisa::render; + +[[nodiscard]] auto parse_cli_macros(int &argc, char *argv[]) { + SceneParser::MacroMap macros; + auto parse_macro = [¯os](luisa::string_view d) noexcept { + if (auto p = d.find('='); p == luisa::string::npos) [[unlikely]] { + LUISA_WARNING_WITH_LOCATION( + "Invalid definition: {}", d); + } else { + auto key = d.substr(0, p); + auto value = d.substr(p + 1); + LUISA_VERBOSE_WITH_LOCATION("Parameter definition: {} = '{}'", key, value); + if (auto iter = macros.find(key); iter != macros.end()) { + LUISA_WARNING_WITH_LOCATION( + "Duplicate definition: {} = '{}'. 
" + "Ignoring the previous one: {} = '{}'.", + key, value, key, iter->second); + iter->second = value; + } else { + macros.emplace(key, value); + } + } + }; + // parse all options starting with '-D' or '--define' + for (int i = 1; i < argc; i++) { + auto arg = luisa::string_view{argv[i]}; + if (arg == "-D" || arg == "--define") { + if (i + 1 == argc) { + LUISA_WARNING_WITH_LOCATION( + "Missing definition after {}.", arg); + // remove the option + argv[i] = nullptr; + } else { + parse_macro(argv[i + 1]); + // remove the option and its argument + argv[i] = nullptr; + argv[++i] = nullptr; + } + } else if (arg.starts_with("-D")) { + parse_macro(arg.substr(2)); + // remove the option + argv[i] = nullptr; + } + } + // remove all nullptrs + auto new_end = std::remove(argv, argv + argc, nullptr); + argc = static_cast(new_end - argv); + return macros; +} + + diff --git a/src/python/test.cpp b/src/python/test.cpp new file mode 100644 index 00000000..f5269d3e --- /dev/null +++ b/src/python/test.cpp @@ -0,0 +1,43 @@ +#include + +#define STRINGIFY(x) #x +#define MACRO_STRINGIFY(x) STRINGIFY(x) + +int add(int i, int j) { + return i + j; +} + +namespace py = pybind11; + +PYBIND11_MODULE(_lrapi, m) { + m.doc() = R"pbdoc( + Pybind11 example plugin + ----------------------- + + .. currentmodule:: scikit_build_example + + .. autosummary:: + :toctree: _generate + + add + subtract + )pbdoc"; + + m.def("add", &add, R"pbdoc( + Add two numbers + + Some other explanation about the add function. + )pbdoc"); + + m.def("log_info_test", [](int i, int j) { return i - j; }, R"pbdoc( + Subtract two numbers + + Some other explanation about the subtract function. + )pbdoc"); + +#ifdef VERSION_INFO + m.attr("__version__") = MACRO_STRINGIFY(VERSION_INFO); +#else + m.attr("__version__") = "dev"; +#endif +} diff --git a/src/sdl/CMakeLists.txt b/src/sdl/CMakeLists.txt index 61bd4424..5e138f27 100644 --- a/src/sdl/CMakeLists.txt +++ b/src/sdl/CMakeLists.txt @@ -13,3 +13,7 @@ target_link_libraries(luisa-render-sdl PUBLIC set_target_properties(luisa-render-sdl PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS ON UNITY_BUILD ${LUISA_RENDER_ENABLE_UNITY_BUILD}) +install(TARGETS luisa-render-sdl + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) \ No newline at end of file diff --git a/src/sdl/scene_parser_json.cpp b/src/sdl/scene_parser_json.cpp index 3636a95c..74f3a8f0 100644 --- a/src/sdl/scene_parser_json.cpp +++ b/src/sdl/scene_parser_json.cpp @@ -19,6 +19,12 @@ SceneParserJSON::SceneParserJSON(SceneDesc &desc, const std::filesystem::path &p : _desc{desc}, _cli_macros{cli_macros}, _location{desc.register_path(std::filesystem::canonical(path))} {} + +// SceneParserJSON::SceneParserJSON(SceneDesc &desc, std::string json_source, +// const MacroMap &cli_macros) noexcept +// : _desc{desc}, _cli_macros{cli_macros}, +// json_source{json_source} {} + void SceneParserJSON::parse() const noexcept { auto root = [this] { std::ifstream ifs{*_location.file()}; @@ -29,6 +35,20 @@ void SceneParserJSON::parse() const noexcept { _parse_root(root); } +// void SceneParserJSON::parse() const noexcept { +// auto root = [this] { +// if(_location.file()== nullptr) { +// return json::parse(json_source, nullptr, false, true); +// } +// std::ifstream ifs{*_location.file()}; +// std::string src{std::istreambuf_iterator{ifs}, +// std::istreambuf_iterator{}}; +// return json::parse(src, nullptr, false, true); +// }(); +// _parse_root(root); +// } + + void 
SceneParserJSON::_parse_node(SceneNodeDesc &desc, const json &node) const noexcept { auto parse_internal = [&desc, &node, this](luisa::string_view key, const json &n) noexcept { diff --git a/src/sdl/scene_parser_json.h b/src/sdl/scene_parser_json.h index 3433d04f..18971610 100644 --- a/src/sdl/scene_parser_json.h +++ b/src/sdl/scene_parser_json.h @@ -30,6 +30,7 @@ class SceneParserJSON { SceneDesc &_desc; const MacroMap &_cli_macros; SceneNodeDesc::SourceLocation _location; + std::string json_source; private: void _parse_root(const json &root) const noexcept; @@ -40,6 +41,11 @@ class SceneParserJSON { public: SceneParserJSON(SceneDesc &desc, const std::filesystem::path &path, const MacroMap &cli_macros) noexcept; + + + // SceneParserJSON(SceneDesc &desc, const std::string json_source, + // const MacroMap &cli_macros) noexcept; + void parse() const noexcept; }; diff --git a/src/shapes/mesh.cpp b/src/shapes/mesh.cpp index 7c1b9af3..294f4572 100644 --- a/src/shapes/mesh.cpp +++ b/src/shapes/mesh.cpp @@ -159,6 +159,11 @@ class Mesh : public Shape { [[nodiscard]] bool is_mesh() const noexcept override { return true; } [[nodiscard]] MeshView mesh() const noexcept override { return _loader.get().mesh(); } [[nodiscard]] uint vertex_properties() const noexcept override { return _loader.get().properties(); } + // [[nodiscard]] void update(SceneNodeDesc *desc ) { + // _loader.update(desc->property_path("vertex_position"), + // desc->property_path("vertex_normal"), + // desc->property_path("vertex_uv")); + // } }; using MeshWrapper = diff --git a/src/tests/test_ad_torch.py b/src/tests/test_ad_torch.py new file mode 100644 index 00000000..4648cb13 --- /dev/null +++ b/src/tests/test_ad_torch.py @@ -0,0 +1,187 @@ + +#import luisarender + +import torch +import cupy +import numpy as np +import luisa +from luisa import * +from luisa.builtin import * +from luisa.types import * +from luisa.util import * +import luisarender + +luisa.init('cuda') +luisarender.init() + +def cu_device_ptr_to_torch_tensor(ptr, shape, dtype=cupy.float32): + """ + Convert a CUdeviceptr to a PyTorch tensor. + + Args: + ptr (ctypes.c_uint64): CUdeviceptr pointing to the GPU memory. + shape (tuple): Shape of the tensor. + dtype (cupy.dtype): Data type of the tensor. Default is cupy.float32. + + Returns: + torch.Tensor: PyTorch tensor. 
+ """ + + size_bytes = cupy.dtype(dtype).itemsize * np.prod(shape) + + # Create an UnownedMemory view of the CUdeviceptr + umem = cupy.cuda.memory.UnownedMemory(int(ptr), size_bytes, owner=None) + memptr = cupy.cuda.memory.MemoryPointer(umem, 0) + + # Convert the MemoryPointer to a CuPy ndarray + array = cupy.ndarray(shape, dtype=dtype, memptr=memptr) + + # Convert the CuPy ndarray to a DLPack tensor and then to a PyTorch tensor + return torch.utils.dlpack.from_dlpack(array.toDlpack()) + +def torch_to_lc_buffer(tensor): + assert tensor.dtype is torch.float32 # TODO + size = np.prod(tensor.shape) + buf = luisa.Buffer.import_external_memory( + tensor.contiguous().data_ptr(), + size, dtype=float) + return buf + +def lc_buffer_to_torch(buf): + assert buf.dtype is float # TODO + shape = (buf.size,) + return cu_device_ptr_to_torch_tensor(buf.native_handle, shape) + +def is_torch_tensor(a): + return getattr(a, '__module__', None) == 'torch' \ + and type(a).__name__ == 'Tensor' + +def torch_ensure_grad_shape(a, b): + if is_torch_tensor(a) and a.dtype in [torch.float, torch.float32, torch.float64]: + return a.reshape(b.shape) + else: + return a + +def torch_to_luisa_scene(args): + return tuple(torch_to_lc_buffer(a) if is_torch_tensor(a) else a for a in args) + +class RenderWithLuisa(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + ctx.args = args + ctx.scene_luisa = torch_to_luisa_scene(args) + #luisa.enable_grad(ctx.args_luisa) + res = luisarender.render(*ctx.scene_luisa) + ctx.res_luisa = (res,) if not isinstance(res, tuple) else res + return lc_buffer_to_torch(res) + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, *grad_output): + luisarender.set_grad(ctx.res_luisa, grad_output) + luisarender.render_backward() + args_grad = luisarender.get_grad(ctx.scene_luisa) + del ctx.scene_luisa, ctx.res_luisa + return args_grad + + + +gt_args = ["C:/Users/jiankai/anaconda3/Lib/site-packages/luisarender/dylibs","-b","cuda", "D:/cbox/cbox.luisa"] +init_args = ["C:/Users/jiankai/anaconda3/Lib/site-packages/luisarender/dylibs","-b","cuda", "C:/Users/jiankai/Downloads/bathroom/scene.luisa"] +differentiable_params_list = [ + #{"type":"mesh","idx":0,"param":"vertex_position"}, + {"type":"texture","idx":0,"param":"base_color"} +] +scene_torch = [#torch.tensor([[-1.01, 0.00, 0.99]]), + torch.tensor([[0.9, 0.9, 0.9]]) + ] + +grad = torch.ones((1024*1024,4),device='cuda') +grad_luisa = torch_to_lc_buffer(grad) +luisarender.render_backward(grad_luisa.native_handle) +exit() +wall = torch.tensor([0.0, 0.0, 1.0, 0.0],device='cuda',requires_grad=True) +wall_lc = torch_to_lc_buffer(wall) +luisarender.load_scene(gt_args) +luisarender.update_texture(0,float4(0.0,0.0,1.0,0.0)) +img = luisarender.render() +print(img[0]) +torch_tensor = cu_device_ptr_to_torch_tensor(img[0], (1024*1024,4)) + +img = torch_tensor.cpu().numpy().reshape((1024,1024,4)) +import matplotlib.pyplot as plt +import cv2 +print(img.shape) +# cv2.imshow('image',img[...,:3]) +# cv2.waitKey(0) +imgplot = plt.imshow(img) +plt.show() +print(torch_tensor) +luisa.synchronize() + +#gt_img = lc_buffer_to_torch(luisarender.render_scene()) +# +#luisarender.load_scene(init_args) +#init_img = lc_buffer_to_torch(luisarender.render_scene()) + +# luisarender.regist_differentiable(differentiable_params_list) + +# optimizer = torch.optim.Adam(scene_torch, lr=0.01) + +# for i in range(1000): +# optimizer.zero_grad() +# image = RenderWithLuisa.apply(scene_torch) +# loss = (gt_img-image)**2 +# loss.backward() +# 
optimizer.step() + + +# class ToTorch(luisa.CustomOp): +# def eval(self, *args): +# self.args = args +# self.argstorch = drjit_totorch(args, enable_grad=True) +# self.restorch = func(*self.argstorch) +# return torch_toluisajit(self.restorch) + +# def forward(self): +# raise TypeError("warp_ad(): forward-mode AD is not supported!") + +# def backward(self): +# grad_outtorch = drjit_totorch(self.grad_out()) +# grad_outtorch = torch_ensure_grad_shape(grad_outtorch, self.restorch) +# def flatten(values): +# """Flatten structure in a consistent arbitrary order""" +# result = [] +# def traverse(values): +# if isinstance(values, _Sequence): +# for v in values: +# traverse(v) +# elif isinstance(values, _Mapping): +# for _, v in sorted(values.items(), key=lambda item: item[0]): +# traverse(v) +# else: +# result.append(values) +# traverse(values) + +# # Single item should not be wrapped into a list +# if not isinstance(values, _Sequence) and not isinstance(values, _Mapping): +# result = result[0] + +# return result + +# torch.autograd.backward(flatten(self.restorch), flatten(grad_outtorch)) + +# def get_grads(args): +# if isinstance(args, _Sequence) and not isinstance(args, str): +# return tuple(get_grads(b) for b in args) +# elif isinstance(args, _Mapping): +# return {k: get_grads(v) for k, v in args.items()} +# elif istorch_tensor(args): +# return getattr(args, 'grad', None) +# else: +# return None + +# args_gradtorch = get_grads(self.argstorch) +# args_grad = torch_toluisajit(args_gradtorch) +# self.set_grad_in('args', args_grad) + diff --git a/src/textures/constant.cpp b/src/textures/constant.cpp index 1c6a3b52..2f52aa8f 100644 --- a/src/textures/constant.cpp +++ b/src/textures/constant.cpp @@ -20,7 +20,7 @@ class ConstantTexture final : public Texture { public: ConstantTexture(Scene *scene, const SceneNodeDesc *desc) noexcept : Texture{scene, desc}, - _should_inline{desc->property_bool_or_default("inline", true)} { + _should_inline{desc->property_bool_or_default("inline", false)} { auto scale = desc->property_float_or_default("scale", 1.f); auto v = desc->property_float_list_or_default("v"); if (v.empty()) [[unlikely]] { @@ -77,6 +77,11 @@ class ConstantTextureInstance final : public Texture::Instance { texture->should_inline()) { return texture->v(); } return pipeline().constant(_constant_slot); } + + [[nodiscard]] void update_by_buffer(Stream &stream, float4 new_value){ + LUISA_INFO("Constant::update_by_buffer {}", _constant_slot); + pipeline().update_constant(stream, _constant_slot, new_value); + } }; luisa::unique_ptr ConstantTexture::build( diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 2e7b1fe0..5f7d2a6d 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -29,3 +29,7 @@ target_link_libraries(luisa-render-util PUBLIC set_target_properties(luisa-render-util PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS ON UNITY_BUILD ${LUISA_RENDER_ENABLE_UNITY_BUILD}) +install(TARGETS luisa-render-util + LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) \ No newline at end of file diff --git a/src/util/loop_subdiv.cpp b/src/util/loop_subdiv.cpp index 0b13ecce..23a46c6f 100644 --- a/src/util/loop_subdiv.cpp +++ b/src/util/loop_subdiv.cpp @@ -143,13 +143,13 @@ SubdivMesh loop_subdivide(luisa::span vertices, luisa::vector vs; luisa::vector faces; // Allocate _LoopSubdiv_ vertices and faces - auto verts = luisa::make_unique(vertices.size()); + auto verts = luisa::vector(vertices.size()); for 
(auto i = 0u; i < vertices.size(); ++i) { verts[i] = SDVertex{vertices[i].position()}; vs.emplace_back(&verts[i]); } auto nFaces = triangles.size(); - auto fs = luisa::make_unique<SDFace[]>(nFaces); + auto fs = luisa::vector<SDFace>(nFaces); for (int i = 0; i < nFaces; ++i) { fs[i].baseTriangle = i; faces.emplace_back(&fs[i]);
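
Usage note (not part of the patch): the snippet below is a minimal sketch, condensed from src/tests/test_ad_torch.py, of how the new Python API is driven end to end. The dylibs path, scene path, and the 1024x1024 film resolution are placeholders; the zero-copy wrapping assumes the "cuda" backend, so the handles returned by render() are CUDA device pointers to float4 framebuffers.

import cupy
import torch
import luisarender

# argv[0] must point at the installed plugin directory; the remaining entries are
# ordinary CLI options (backend and scene path are placeholders here).
args = ["/path/to/site-packages/luisarender/dylibs", "-b", "cuda", "/path/to/scene.luisa"]

luisarender.init()              # set up logging / global state
luisarender.load_scene(args)    # parse options, create the device, build the pipeline
handles = luisarender.render()  # one raw device pointer per camera film

# Wrap the first film buffer into a torch tensor without copying.
width, height = 1024, 1024      # must match the film resolution declared in the scene
num_floats = width * height * 4 # the film exports float4 pixels
mem = cupy.cuda.memory.UnownedMemory(int(handles[0]), num_floats * 4, owner=None)
arr = cupy.ndarray((height, width, 4), dtype=cupy.float32,
                   memptr=cupy.cuda.memory.MemoryPointer(mem, 0))
image = torch.utils.dlpack.from_dlpack(arr.toDlpack())

update_texture() and update_mesh() can then be used to edit scene parameters in place before re-rendering, as exercised in src/tests/test_ad_torch.py. Note that the pointer returned by render() comes from Film::export_image() and aliases the film's internal converted buffer, so the wrapped tensor is overwritten by subsequent render() calls; clone it first if the previous frame must be kept.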