
Commit 23bc121

jeffdaily authored and facebook-github-bot committed
remove caffe2 from hipify
Summary:
X-link: meta-pytorch/torchcomms#544

Reland of pytorch/pytorch#172796, which replaced pytorch/pytorch#151845 due to an infra issue with the pytorchbot PAT and the ROCm fork where the PR branch was originally hosted, and which was in turn a reland of pytorch/pytorch#137157.

- "MasqueradingAsCUDA" files and classes thinly wrap their corresponding CUDA classes.
- Do not rename "CUDA" classes to "HIP".

cc sunway513 jithunnair-amd pruthvistony ROCmSupport jataylo hongxiayang naromero77amd pragupta jerrymannil xinyazhang voznesenskym penguinwu EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng wenzhe-nrv jiayisunx ipiszy kadeng muchulee8 amjames chauhang aakhundov coconutruben

X-link: pytorch/pytorch#174087

Reviewed By: malfet

Differential Revision: D92063294

Pulled By: atalman
1 parent 5baa7ba commit 23bc121
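
For context on the naming convention the summary describes: under the "MasqueradingAsCUDA" scheme, the stream accessor keeps its CUDA-style at::cuda spelling even on a ROCm build, while the handle it returns is a plain hipStream_t that can be passed to HIP launches. The sketch below is illustrative only, not code from this repository; the kernel, the scale_on_current_stream helper, and the include set are assumptions, and it presumes a ROCm/HIP build of PyTorch where the accessor is spelled as in the hunks below.

// Minimal sketch (assumptions: ROCm/HIP build of PyTorch; the stream accessor
// is made visible by the same headers these .hip files already include).
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>

// Hypothetical kernel, for illustration only.
__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    data[i] *= factor;
  }
}

// Hypothetical helper: launch on the stream PyTorch is currently using,
// fetched through the CUDA-named (masquerading) accessor as in this commit.
void scale_on_current_stream(at::Tensor t, float factor) {
  hipStream_t stream = at::cuda::getCurrentHIPStream().stream();
  const int n = static_cast<int>(t.numel());
  const dim3 block(256);
  const dim3 grid((n + block.x - 1) / block.x);
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream,
                     t.data_ptr<float>(), factor, n);
}

The commit only changes the namespace on the accessor line; the rest of the launch pattern is untouched by hipify.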

5 files changed: 5 additions & 5 deletions


csrc/attention/ck/fmha/hip_decoder/attention_forward_decoder.hip

Lines changed: 1 addition & 1 deletion
@@ -100,7 +100,7 @@ at::Tensor& efficient_attention_forward_decoder_ck_out_impl(
   int32_t smem_output = K_MAX * sizeof(float) *
       threads.y; // 4 * threadsPerBlock * sizeof(float) == sizeof(O[b][0][h][:])
   const size_t lds_bytes = max(smem_softmax, smem_output);
-  auto stream = at::hip::getCurrentHIPStream().stream();
+  auto stream = at::cuda::getCurrentHIPStream().stream();
 
   AT_DISPATCH_SWITCH_3(
       at::ScalarType::Half,

csrc/attention/ck/fmha/hip_decoder/attention_forward_splitk.hip

Lines changed: 1 addition & 1 deletion
@@ -143,7 +143,7 @@ at::Tensor& efficient_attention_forward_decoder_splitk_ck_out_impl(
       WavefrontsPerBlock; // 4 * threadsPerBlock * sizeof(float) ==
                           // sizeof(O[b][0][h][:])
   const size_t attn_lds_bytes = max(smem_softmax, smem_output);
-  auto stream = at::hip::getCurrentHIPStream().stream();
+  auto stream = at::cuda::getCurrentHIPStream().stream();
 
   AT_DISPATCH_SWITCH_3(
       at::ScalarType::Half,

csrc/attention/ck/fmha/hip_fmha/attention_backward_generic_ck_tiled.hip

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ efficient_attention_backward_ck(
     TORCH_CHECK(max_seqlen_k_.has_value());
   }
 
-  hipStream_t stream = at::hip::getCurrentHIPStream().stream();
+  hipStream_t stream = at::cuda::getCurrentHIPStream().stream();
 
   int64_t B = query.size(0);
   int64_t M = query.size(1);

csrc/attention/ck/fmha/hip_fmha/attention_ck_rand_uniform.hip

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ at::Tensor rand_uniform_int(
   int M = out_pattern.size(2);
   int N = out_pattern.size(3);
 
-  hipStream_t stream = at::hip::getCurrentHIPStream().stream();
+  hipStream_t stream = at::cuda::getCurrentHIPStream().stream();
 
   at::CUDAGeneratorImpl* gen =
       at::get_generator_or_default<at::CUDAGeneratorImpl>(

csrc/attention/ck/fmha/hip_fmha/attention_forward_generic_ck_tiled.hip

Lines changed: 1 addition & 1 deletion
@@ -120,7 +120,7 @@ efficient_attention_forward_ck(
   CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
   CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
 
-  hipStream_t stream = at::hip::getCurrentHIPStream().stream();
+  hipStream_t stream = at::cuda::getCurrentHIPStream().stream();
 
   int64_t B = query.size(0);
   int64_t M = query.size(1);
