Commit f584ccb (1 parent: 385102f). Showing 23 changed files with 290 additions and 372 deletions.
clang/test/dpct/pytorch/ATen.cu
@@ -1,23 +1,69 @@
// RUN: rm -rf %T/pytorch/ATen
// RUN: mkdir -p %T/pytorch/ATen/src
// RUN: cp %S/ATen.cu %T/pytorch/ATen/src/
// RUN: cp %S/user_defined_rule_pytorch.yaml %T/pytorch/ATen/
// RUN: cp -r %S/pytorch_cuda_inc %T/pytorch/ATen/
// RUN: cp -r %S/pytorch_inc %T/pytorch/ATen/
// RUN: cd %T/pytorch/ATen
// RUN: mkdir dpct_out
// RUN: dpct --out-root dpct_out %T/pytorch/ATen/src/ATen.cu --extra-arg="-I%T/pytorch/ATen/pytorch_cuda_inc" --cuda-include-path="%cuda-path/include" --rule-file=%T/pytorch/ATen/user_defined_rule_pytorch.yaml --analysis-scope-path %T/pytorch/ATen/pytorch_cuda_inc --analysis-scope-path %T/pytorch/ATen/src --in-root %T/pytorch/ATen/src
// RUN: dpct --out-root dpct_out %T/pytorch/ATen/src/ATen.cu --extra-arg="-I%T/pytorch/ATen/pytorch_inc" --cuda-include-path="%cuda-path/include" --rule-file=%S/../../../tools/dpct/DpctOptRules/pytorch_api.yaml --analysis-scope-path %T/pytorch/ATen/pytorch_inc --analysis-scope-path %T/pytorch/ATen/src --in-root %T/pytorch/ATen/src
// RUN: FileCheck --input-file %T/pytorch/ATen/dpct_out/ATen.dp.cpp --match-full-lines %T/pytorch/ATen/src/ATen.cu
// RUN: %if build_lit %{icpx -c -fsycl -DNO_BUILD_TEST %T/pytorch/ATen/dpct_out/ATen.dp.cpp -o %T/pytorch/ATen/dpct_out/ATen.dp.o %}
// RUN: %if build_lit %{icpx -c -fsycl -DBUILD_TEST %T/pytorch/ATen/dpct_out/ATen.dp.cpp -o %T/pytorch/ATen/dpct_out/ATen.dp.o %}

#ifndef NO_BUILD_TEST
// CHECK: #include <c10/xpu/XPUStream.h>
#include <iostream>
// CHECK: #include <ATen/xpu/XPUContext.h>
#include <ATen/cuda/CUDAContext.h>
// CHECK: #include <ATen/core/Tensor.h>
#include <ATen/core/Tensor.h>

// CHECK: #include <ATen/Tensor.h>
// CHECK-NEXT: #include <c10/util/Half.h>
#include <ATen/cuda/CUDATensorMethods.cuh>

#define AT_CUDA_CHECK(stmt) (stmt)

// CHECK: #define BE_AT_CHECK
#define BE_AT_CHECK AT_CUDA_CHECK


__global__ void kernel() {}

void test_CUDAStream_as_arg() {
  dim3 gridSize(2, 2, 1);
  dim3 blockSize(8, 8, 1);
  void *args[] = {nullptr};

  // CHECK: ([&]() {
  // CHECK-NEXT: ((sycl::queue *)(c10::xpu::getCurrentXPUStream()))
  // CHECK-NEXT: ->parallel_for(sycl::nd_range<3>(gridSize * blockSize, blockSize),
  // CHECK-NEXT: [=](sycl::nd_item<3> item_ct1) {
  // CHECK-NEXT: kernel();
  // CHECK-NEXT: });
  // CHECK-NEXT: return 0;
  // CHECK-NEXT: }());
  AT_CUDA_CHECK(cudaLaunchKernel((const void *)kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
}

int main() {
  // CHECK: dpct::queue_ptr st =
  // CHECK-NEXT: &static_cast<sycl::queue &>(c10::xpu::getCurrentXPUStream());
  cudaStream_t st = 0;

  // stream APIs
  at::DeviceIndex devInd = 1;

  // CHECK: auto currentStream = c10::xpu::getCurrentXPUStream();
  auto currentStream = at::cuda::getCurrentCUDAStream();
  // CHECK: auto deviceStream = c10::xpu::getCurrentXPUStream(devInd);
  auto deviceStream = at::cuda::getCurrentCUDAStream(devInd);

  // CHECK: dpct::queue_ptr curr_cuda_st =
  // CHECK-NEXT: &static_cast<sycl::queue &>(c10::xpu::getCurrentXPUStream().queue());
  cudaStream_t curr_cuda_st = at::cuda::getCurrentCUDAStream().stream();
  // CHECK: dpct::queue_ptr dev_cuda_st = &static_cast<sycl::queue &>(
  // CHECK-NEXT: c10::xpu::getCurrentXPUStream(devInd).queue());
  cudaStream_t dev_cuda_st = at::cuda::getCurrentCUDAStream(devInd).stream();

  test_CUDAStream_as_arg();

  return 0;
}
#endif
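Note: the CHECK directives above double as documentation of the expected migration. Assembled by hand, the stream-related part of the migrated ATen.dp.cpp should look roughly like the sketch below; the dpct/SYCL helper includes are assumptions (they are not spelled out in the CHECK lines), while every statement is taken directly from the directives.

// Hand-assembled sketch of the migrated stream code implied by the CHECK
// lines above; not literal dpct output.
#include <sycl/sycl.hpp>   // assumed: usual SYCL include in dpct output
#include <dpct/dpct.hpp>   // assumed: defines dpct::queue_ptr
#include <c10/xpu/XPUStream.h>
#include <ATen/xpu/XPUContext.h>
#include <ATen/core/Tensor.h>

int main() {
  // cudaStream_t st = 0;  ->  pointer to the queue behind the current XPU stream
  dpct::queue_ptr st =
      &static_cast<sycl::queue &>(c10::xpu::getCurrentXPUStream());

  at::DeviceIndex devInd = 1;

  // at::cuda::getCurrentCUDAStream()  ->  c10::xpu::getCurrentXPUStream()
  auto currentStream = c10::xpu::getCurrentXPUStream();
  auto deviceStream = c10::xpu::getCurrentXPUStream(devInd);

  // CUDAStream::stream()  ->  address of the stream's underlying sycl::queue
  dpct::queue_ptr curr_cuda_st =
      &static_cast<sycl::queue &>(c10::xpu::getCurrentXPUStream().queue());
  dpct::queue_ptr dev_cuda_st = &static_cast<sycl::queue &>(
      c10::xpu::getCurrentXPUStream(devInd).queue());

  return 0;
}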
clang/test/dpct/pytorch/pytorch_cuda_inc/ATen/core/Tensor.h
This file was deleted (0 additions, 101 deletions).
clang/test/dpct/pytorch/pytorch_cuda_inc/ATen/cuda/CUDAContext.h
This file was deleted (0 additions, 8 deletions).
clang/test/dpct/pytorch/pytorch_cuda_inc/c10/cuda/CUDAStream.h
This file was deleted (0 additions, 14 deletions).
@@ -0,0 +1 @@
#pragma once
@@ -0,0 +1 @@
#pragma once
@@ -0,0 +1,7 @@
#pragma once

#include <c10/cuda/CUDAStream.h>

namespace at {
using namespace c10;
}
clang/test/dpct/pytorch/pytorch_inc/ATen/cuda/CUDATensorMethods.cuh
New file (8 additions, 0 deletions).
@@ -0,0 +1,8 @@
#pragma once

#include <ATen/Tensor.h>
#include <c10/util/Half.h>

#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
File renamed without changes.
File renamed without changes.
@@ -0,0 +1,5 @@
#define C10_CUDA_BUILD_SHARED_LIBS
#define C10_CUDA_EXPORT
#define C10_CUDA_IMPORT
#define C10_CUDA_API
#define C10_CUDA_BUILD_MAIN_LIB
@@ -0,0 +1,22 @@
#include <cuda_runtime.h>
#include "CUDAFunctions.h"

namespace c10 {
namespace cuda {
class CUDAStream {
public:
  CUDAStream() {}
  cudaStream_t stream() { return 0; }

  operator cudaStream_t() const {
    return stream();
  }
  cudaStream_t stream() const;
};

CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1) {
  return CUDAStream();
}

} // namespace cuda
} // namespace c10
@@ -0,0 +1 @@
#pragma once
@@ -0,0 +1,6 @@
namespace torch {
class Tensor {
public:
  bool is_cuda();
};
} // namespace torch
@@ -0,0 +1,29 @@
// RUN: rm -rf %T/pytorch/torch
// RUN: mkdir -p %T/pytorch/torch/src
// RUN: cp %S/torch.cu %T/pytorch/torch/src/
// RUN: cp -r %S/pytorch_inc %T/pytorch/torch/
// RUN: cd %T/pytorch/torch
// RUN: mkdir dpct_out
// RUN: dpct --out-root dpct_out %T/pytorch/torch/src/torch.cu --extra-arg="-I%T/pytorch/torch/pytorch_inc" --cuda-include-path="%cuda-path/include" --rule-file=%S/../../../tools/dpct/DpctOptRules/pytorch_api.yaml --analysis-scope-path %T/pytorch/torch/pytorch_inc --analysis-scope-path %T/pytorch/torch/src --in-root %T/pytorch/torch/src
// RUN: FileCheck --input-file %T/pytorch/torch/dpct_out/torch.dp.cpp --match-full-lines %T/pytorch/torch/src/torch.cu
// RUN: %if build_lit %{icpx -c -fsycl -DNO_BUILD_TEST %T/pytorch/torch/dpct_out/torch.dp.cpp -o %T/pytorch/torch/dpct_out/torch.dp.o %}

#include <cuda.h>
#include <iostream>
#include <stdexcept>
#include <torch/torch.h>

#define MY_CHECK(condition, message)                                   \
  do {                                                                 \
    if (!(condition)) {                                                \
      throw std::runtime_error("Error: " + std::string(message));      \
    }                                                                  \
  } while (0)

// void foo(torch::Tensor x) {
void foo(torch::Tensor x) {
  // CHECK: MY_CHECK(x.is_xpu(), "x must reside on device");
  MY_CHECK(x.is_cuda(), "x must reside on device");

  return;
}
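For reference, the only rewrite the CHECK line above requires in the migrated torch.dp.cpp is the device-query rename. A minimal sketch of that expected output follows, assuming the includes and the MY_CHECK macro pass through unchanged.

// Minimal sketch of the expected migrated foo(); only the is_cuda() ->
// is_xpu() rename (driven by the pytorch_api.yaml rule file on the RUN
// line) is checked by the test, the rest is assumed to be unchanged.
#include <stdexcept>
#include <string>
#include <torch/torch.h>

#define MY_CHECK(condition, message)                                   \
  do {                                                                 \
    if (!(condition)) {                                                \
      throw std::runtime_error("Error: " + std::string(message));      \
    }                                                                  \
  } while (0)

void foo(torch::Tensor x) {
  MY_CHECK(x.is_xpu(), "x must reside on device");
}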