#c++ #windows #pytorch #g++ #libtorch
Question:
I want to build against libtorch on Windows using g++. Is this currently possible? I have downloaded the latest libtorch release and am trying to compile the following code:
#include <torch/torch.h>
#include <iostream>

int main()
{
    torch::Tensor tensor = torch::zeros({2, 2});
    std::cout << tensor << std::endl;
    return 0;
}
I compile with the following command:
g++ torch.cpp -o torch.o -std=c++20 -pthread -MD -MP -D_GLIBCXX_USE_CXX11_ABI=0 -IC:\libtorch\include -IC:\libtorch\include\torch\csrc\api\include -LC:\libtorch\lib -ltorch -ltorch_gpu -lc10 -lgomp
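As an aside, to separate include-path problems from problems with the headers themselves, a stripped-down translation unit that pulls in only the version header can be compiled with the same command. This is just a sketch: it assumes the libtorch distribution ships torch/version.h with TORCH_VERSION_MAJOR/MINOR/PATCH defines under the API include directory, which I have not verified for every release.

#include <torch/version.h>  // small header of #defines only, no ATen/JIT code (assumed to exist in this release)
#include <iostream>

int main()
{
    // Print the version macros assumed to be defined in torch/version.h.
    std::cout << "libtorch "
              << TORCH_VERSION_MAJOR << "."
              << TORCH_VERSION_MINOR << "."
              << TORCH_VERSION_PATCH << std::endl;
    return 0;
}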
With the full program, however, I get these compilation errors (some parts omitted because of the character limit):
> In file included from C:\libtorch\include/c10/core/Device.h:5,
>                  from C:\libtorch\include/c10/core/Allocator.h:6,
>                  from C:\libtorch\include/ATen/ATen.h:7,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/types.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader_options.h:4,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/base.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/stateful.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:8,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/c10/util/Exception.h:351:28: warning: inline function 'const char* c10::detail::torchCheckMsgImpl(const char*)' declared as dllimport: attribute ignored [-Wattributes]
>   351 | inline C10_API const char* torchCheckMsgImpl(const char* msg) {
> C:\libtorch\include/c10/util/Exception.h:355:28: warning: inline function 'const char* c10::detail::torchCheckMsgImpl(const char*, const char*)' declared as dllimport: attribute ignored [-Wattributes]
>   355 | inline C10_API const char* torchCheckMsgImpl(
> C:\libtorch\include/c10/util/Exception.h:401:34: warning: inline function 'void c10::detail::torchInternalAssertFail(const char*, const char*, uint32_t, const char*, c10::detail::CompileTimeEmptyString)' declared as dllimport: attribute ignored [-Wattributes]
>   401 | [[noreturn]] inline C10_API void torchInternalAssertFail(
> In file included from C:\libtorch\include/c10/core/DeviceType.h:8,
>                  from C:\libtorch\include/c10/core/Device.h:3,
>                  from C:\libtorch\include/c10/core/Allocator.h:6,
>                  from C:\libtorch\include/ATen/ATen.h:7,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/types.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader_options.h:4,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/base.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/stateful.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:8,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/c10/util/TypeCast.h: In function 'dest_t c10::fetch_and_cast(c10::ScalarType, const void*) [with dest_t = c10::qint8]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:154:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   154 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);
> C:\libtorch\include/c10/core/ScalarType.h:179:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   179 | _(c10::qint8, QInt8)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'void c10::cast_and_store(c10::ScalarType, void*, src_t) [with src_t = c10::qint8]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:160:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   160 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type);
> C:\libtorch\include/c10/core/ScalarType.h:179:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   179 | _(c10::qint8, QInt8)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'dest_t c10::fetch_and_cast(c10::ScalarType, const void*) [with dest_t = c10::quint8]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:154:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   154 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);
> C:\libtorch\include/c10/core/ScalarType.h:180:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   180 | _(c10::quint8, QUInt8)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'void c10::cast_and_store(c10::ScalarType, void*, src_t) [with src_t = c10::quint8]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:160:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   160 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type);
> C:\libtorch\include/c10/core/ScalarType.h:180:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   180 | _(c10::quint8, QUInt8)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'dest_t c10::fetch_and_cast(c10::ScalarType, const void*) [with dest_t = c10::qint32]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:154:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   154 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);
> C:\libtorch\include/c10/core/ScalarType.h:181:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   181 | _(c10::qint32, QInt32)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'void c10::cast_and_store(c10::ScalarType, void*, src_t) [with src_t = c10::qint32]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:160:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   160 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type);
> C:\libtorch\include/c10/core/ScalarType.h:181:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   181 | _(c10::qint32, QInt32)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'dest_t c10::fetch_and_cast(c10::ScalarType, const void*) [with dest_t = c10::quint4x2]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:154:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   154 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);
> C:\libtorch\include/c10/core/ScalarType.h:182:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   182 | _(c10::quint4x2, QUInt4x2)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro 'AT_FORALL_QINT_TYPES'
>   164 | AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
> C:\libtorch\include/c10/util/TypeCast.h: In function 'void c10::cast_and_store(c10::ScalarType, void*, src_t) [with src_t = c10::quint4x2]':
> C:\libtorch\include/c10/macros/Macros.h:350:5: error: '__assert_fail' was not declared in this scope
>   350 | __assert_fail(
> C:\libtorch\include/c10/util/TypeCast.h:160:5: note: in expansion of macro 'CUDA_KERNEL_ASSERT'
>   160 | CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type);
> C:\libtorch\include/c10/core/ScalarType.h:182:3: note: in expansion of macro 'DEFINE_UNCASTABLE'
>   182 | _(c10::quint4x2, QUInt4x2)
> C:\libtorch\include/c10/util/TypeCast.h:164:1: note: in expansion of macro
> C:\libtorch\include/ATen/core/dispatch/Dispatcher.h:542:13: warning: 'void c10::Dispatcher::callBoxed(const c10::OperatorHandle&, c10::Stack*) const' redeclared without dllimport attribute after being referenced with dll linkage
>   542 | inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
> C:\libtorch\include/ATen/core/dispatch/Dispatcher.h:575:13: warning: 'void c10::Dispatcher::redispatchBoxed(const c10::OperatorHandle&, c10::DispatchKeySet, c10::Stack*) const' redeclared without dllimport attribute after being referenced with dll linkage
>   575 | inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
> In file included from C:\libtorch\include/torch/csrc/jit/api/function_impl.h:4,
>                  from C:\libtorch\include/torch/csrc/jit/api/method.h:5,
>                  from C:\libtorch\include/torch/csrc/jit/api/object.h:6,
>                  from C:\libtorch\include/torch/csrc/jit/frontend/tracer.h:9,
>                  from C:\libtorch\include/torch/csrc/autograd/generated/variable_factories.h:12,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/types.h:7,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader_options.h:4,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/base.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/stateful.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:8,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/torch/csrc/jit/ir/ir.h:271:42: error: function 'std::shared_ptr<torch::jit::Wrap<torch::jit::Value> > torch::jit::Value::wrap()' definition is marked dllimport
>   271 | TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
> In file included from C:\libtorch\include/torch/csrc/jit/api/function_impl.h:4,
>                  from C:\libtorch\include/torch/csrc/jit/api/method.h:5,
>                  from C:\libtorch\include/torch/csrc/jit/api/object.h:6,
>                  from C:\libtorch\include/torch/csrc/jit/frontend/tracer.h:9,
>                  from C:\libtorch\include/torch/csrc/autograd/generated/variable_factories.h:12,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/types.h:7,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader_options.h:4,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/base.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/stateful.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:8,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/torch/csrc/jit/ir/ir.h:1047:42: error: function 'std::shared_ptr<torch::jit::Wrap<torch::jit::Block> > torch::jit::Block::wrap()' definition is marked dllimport
>   1047 | TORCH_API std::shared_ptr<Wrap<Block>> wrap() {
> In file included from C:\libtorch\include/torch/csrc/jit/runtime/graph_executor.h:8,
>                  from C:\libtorch\include/torch/csrc/jit/api/function_impl.h:5,
>                  from C:\libtorch\include/torch/csrc/jit/api/method.h:5,
>                  from C:\libtorch\include/torch/csrc/jit/api/object.h:6,
>                  from C:\libtorch\include/torch/csrc/jit/frontend/tracer.h:9,
>                  from C:\libtorch\include/torch/csrc/autograd/generated/variable_factories.h:12,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/types.h:7,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader_options.h:4,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/base.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader/stateful.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data/dataloader.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/data.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:8,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/torch/csrc/jit/runtime/argument_spec.h:68:10: warning: 'template<class _Tp> struct std::is_pod' is deprecated: use is_standard_layout && is_trivial instead [-Wdeprecated-declarations]
>   68 | std::is_pod<ArgumentInfo>::value,
> In file included from c:\mingw64\include\c++\10.3.0\bits\move.h:57,
>                  from c:\mingw64\include\c++\10.3.0\bits\nested_exception.h:40,
>                  from c:\mingw64\include\c++\10.3.0\exception:148,
>                  from c:\mingw64\include\c++\10.3.0\ios:39,
>                  from c:\mingw64\include\c++\10.3.0\ostream:38,
>                  from c:\mingw64\include\c++\10.3.0\iostream:39,
>                  from torch.cpp:1:
> c:\mingw64\include\c++\10.3.0\type_traits:697:5: note: declared here
>   697 | is_pod
> In file included from C:\libtorch\include\torch\csrc\api\include/torch/utils.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/nn/cloneable.h:5,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/nn.h:3,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/all.h:13,
>                  from C:\libtorch\include\torch\csrc\api\include/torch/torch.h:3,
>                  from torch.cpp:2:
> C:\libtorch\include/ATen/Parallel.h:31:23: warning: inline function 'void at::internal::lazy_init_num_threads()' declared as dllimport: attribute ignored [-Wattributes]
>   31 | inline TORCH_API void lazy_init_num_threads() {
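For context on the repeated -Wattributes warnings: reduced to its essentials, the pattern g++ appears to be objecting to looks like the snippet below, i.e. an inline function whose declaration carries __declspec(dllimport) through a macro such as C10_API/TORCH_API. This is only an illustrative sketch, not code from libtorch, and MY_API is a made-up stand-in for those macros.

#include <iostream>

// Illustrative only: MY_API is a hypothetical stand-in for C10_API / TORCH_API,
// which expand to __declspec(dllimport) when the headers are consumed from a DLL.
#define MY_API __declspec(dllimport)

// On MinGW-w64 g++, dllimport on an inline function is ignored and produces the
// same "declared as dllimport: attribute ignored [-Wattributes]" warning as above.
// (__declspec is Windows-specific; non-Windows g++ will not accept this snippet.)
inline MY_API const char* greet()
{
    return "hello";
}

int main()
{
    std::cout << greet() << std::endl;
    return 0;
}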
Any ideas on how to solve this?