4 changes: 4 additions & 0 deletions infini_train/src/core/cuda/cuda_guard_impl.cc
@@ -100,6 +100,10 @@ void CudaGuardImpl::SynchronizeDevice(Device device) const {
SetDevice(original_device);
}

void CudaGuardImpl::SynchronizeStream(Stream *stream) const {
CUDA_CHECK(cudaStreamSynchronize(dynamic_cast<CudaStream *>(stream)->cuda_stream()));
}

// blas
BlasHandle *CudaGuardImpl::GetBlasHandle(Device device) const {
CheckCudaDevice(device);
2 changes: 2 additions & 0 deletions infini_train/src/core/cuda/cuda_guard_impl.h
@@ -35,6 +35,8 @@ class CudaGuardImpl final : public DeviceGuardImpl {
// sync
void SynchronizeDevice(Device device) const override;

void SynchronizeStream(Stream *) const override;

// blas
BlasHandle *GetBlasHandle(Device device) const override;

23 changes: 15 additions & 8 deletions infini_train/src/kernels/cuda/concat.cu
@@ -113,18 +113,22 @@ std::shared_ptr<Tensor> ConcatForward(const std::vector<std::shared_ptr<Tensor>>
int64_t *device_offsets = nullptr;

CUDA_CHECK(cudaMallocAsync(&device_input_ptrs, sizeof(T *) * num_inputs, stream));
CUDA_CHECK(cudaMemcpyAsync(device_input_ptrs, host_input_ptrs.data(), sizeof(T *) * num_inputs,
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpy(device_input_ptrs, host_input_ptrs.data(), sizeof(T *) * num_inputs,
cudaMemcpyHostToDevice));

CUDA_CHECK(cudaMallocAsync(&device_offsets, sizeof(int64_t) * (num_inputs + 1), stream));
CUDA_CHECK(cudaMemcpyAsync(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpy(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice));

ConcatForwardKernel<T><<<num_blocks, threads_per_block, 0, stream>>>(
device_input_ptrs, static_cast<T *>(output->DataPtr()), device_offsets, N, D, num_inputs, K_total);

CUDA_CHECK(cudaFreeAsync(device_input_ptrs, stream));
CUDA_CHECK(cudaFreeAsync(device_offsets, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));
},
"CUDA ConcatForward");

@@ -219,18 +223,21 @@ std::vector<std::shared_ptr<Tensor>> ConcatBackward(const std::shared_ptr<Tensor
int64_t *device_offsets = nullptr;

CUDA_CHECK(cudaMallocAsync(&device_ptrs, sizeof(T *) * num_inputs, stream));
CUDA_CHECK(cudaMemcpyAsync(device_ptrs, host_ptrs.data(), sizeof(T *) * num_inputs, cudaMemcpyHostToDevice,
stream));
CUDA_CHECK(cudaMemcpy(device_ptrs, host_ptrs.data(), sizeof(T *) * num_inputs, cudaMemcpyHostToDevice));

CUDA_CHECK(cudaMallocAsync(&device_offsets, sizeof(int64_t) * (num_inputs + 1), stream));
CUDA_CHECK(cudaMemcpyAsync(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpy(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice));

ConcatBackwardKernel<T><<<num_blocks, threads_per_block, 0, stream>>>(
static_cast<const T *>(grad_output->DataPtr()), device_ptrs, device_offsets, N, D, num_inputs, K_total);

CUDA_CHECK(cudaFreeAsync(device_ptrs, stream));
CUDA_CHECK(cudaFreeAsync(device_offsets, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));
},
"CUDA ConcatBackward");

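The hazard this change addresses is the same in every file it touches: cudaMemcpyAsync only enqueues the copy, so nothing guarantees the transfer has finished reading a short-lived host std::vector by the time it goes out of scope. The two remedies used in the PR are a synchronous cudaMemcpy for the small metadata buffers (concat.cu, gather.cu) or a trailing cudaStreamSynchronize before the host buffers leave scope (elementwise.cu, slice.cu, split.cu). Below is a minimal, self-contained sketch of the second pattern, assuming CUDA 11.2+ for the stream-ordered allocator; the helper names and sizes are illustrative, not taken from the repository.

// sketch.cu — illustrative only, not repository code.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Minimal stand-in for the project's CUDA_CHECK macro (its real definition is not shown in this diff).
#define CUDA_CHECK(expr)                                                               \
    do {                                                                               \
        cudaError_t err_ = (expr);                                                     \
        if (err_ != cudaSuccess) {                                                     \
            std::fprintf(stderr, "CUDA error: %s (%s:%d)\n", cudaGetErrorString(err_), \
                         __FILE__, __LINE__);                                          \
            std::abort();                                                              \
        }                                                                              \
    } while (0)

void UploadOffsets(cudaStream_t stream) {
    // Lives only in this scope — analogous to host_offsets/host_input_ptrs in concat.cu.
    std::vector<int64_t> host_offsets = {0, 4, 9, 15};

    int64_t *device_offsets = nullptr;
    CUDA_CHECK(cudaMallocAsync(&device_offsets, host_offsets.size() * sizeof(int64_t), stream));

    // Asynchronous: the runtime may still be reading host_offsets.data() after this call returns.
    CUDA_CHECK(cudaMemcpyAsync(device_offsets, host_offsets.data(),
                               host_offsets.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));

    // ... launch kernels on `stream` that consume device_offsets ...

    CUDA_CHECK(cudaFreeAsync(device_offsets, stream));

    // Without this, host_offsets could be destroyed while the H2D copy is still pending.
    CUDA_CHECK(cudaStreamSynchronize(stream));
}

int main() {
    cudaStream_t stream;
    CUDA_CHECK(cudaStreamCreate(&stream));
    UploadOffsets(stream);
    CUDA_CHECK(cudaStreamDestroy(stream));
    return 0;
}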
24 changes: 17 additions & 7 deletions infini_train/src/kernels/cuda/elementwise.cu
@@ -107,7 +107,7 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
auto out_stride_host = ComputeStrides(out_shape);

int64_t *device_buffer;
cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), cuda_stream);
CUDA_CHECK(cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), cuda_stream));

int64_t *device_a_strides, *device_b_strides, *device_out_strides, *device_a_shape, *device_b_shape;
device_a_strides = device_buffer + ndim * 0;
@@ -123,8 +123,8 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
host_buffer.insert(host_buffer.end(), a_shape.begin(), a_shape.end());
host_buffer.insert(host_buffer.end(), b_shape.begin(), b_shape.end());

cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice,
cuda_stream);
CUDA_CHECK(cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t),
cudaMemcpyHostToDevice, cuda_stream));

LaunchKernel<BLOCK_SIZE, T>(
[&](dim3 grid, dim3 block, size_t offset, const T *a_ptr, const T *b_ptr) {
@@ -134,7 +134,12 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
},
output, inputs...);

cudaFreeAsync(device_buffer, cuda_stream);
CUDA_CHECK(cudaFreeAsync(device_buffer, cuda_stream));

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(cuda_stream));
} else {
static_assert(sizeof...(inputs) == 1 || sizeof...(inputs) == 2,
"LaunchForward currently only supports unary and binary operations.");
@@ -538,7 +543,7 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
auto out_stride_host = ComputeStrides(out_shape);

int64_t *device_buffer;
cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), stream);
CUDA_CHECK(cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), stream));

int64_t *device_a_strides, *device_b_strides, *device_out_strides, *device_a_shape, *device_b_shape;
device_a_strides = device_buffer + ndim * 0;
@@ -554,7 +559,8 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
host_buffer.insert(host_buffer.end(), a_shape.begin(), a_shape.end());
host_buffer.insert(host_buffer.end(), b_shape.begin(), b_shape.end());

cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
CUDA_CHECK(
cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream));

const size_t num_elements = grad_output->NumElements();

@@ -616,7 +622,11 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
},
output_a, inputs...);
}
cudaFreeAsync(device_buffer, stream);
CUDA_CHECK(cudaFreeAsync(device_buffer, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));
}

template <typename Func> std::shared_ptr<Tensor> UnaryForward(const std::shared_ptr<Tensor> &input, Func unary_fn) {
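LaunchForward and LaunchBackward avoid several separate allocations by packing all five ndim-sized metadata arrays (two input stride arrays, the output strides, and two shapes) into one stream-ordered allocation, addressing each section as device_buffer + ndim * k and uploading a single concatenated host_buffer. Below is a self-contained sketch of that packing pattern, again assuming CUDA 11.2+; it packs three arrays instead of five for brevity, and the kernel and example strides are made up for illustration.

// packing_sketch.cu — illustrative only, not repository code.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

#define CUDA_CHECK(expr)                                                                          \
    do {                                                                                          \
        cudaError_t err_ = (expr);                                                                \
        if (err_ != cudaSuccess) {                                                                \
            std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err_));                   \
            std::abort();                                                                         \
        }                                                                                         \
    } while (0)

__global__ void SumPacked(const int64_t *a_strides, const int64_t *b_strides,
                          const int64_t *out_strides, int ndim, int64_t *result) {
    // Trivial consumer: shows the device sees each packed section at the expected offset.
    int64_t s = 0;
    for (int i = 0; i < ndim; ++i) { s += a_strides[i] + b_strides[i] + out_strides[i]; }
    *result = s;
}

int main() {
    const int ndim = 3;
    cudaStream_t stream;
    CUDA_CHECK(cudaStreamCreate(&stream));

    // Host side: concatenate the per-array metadata into one contiguous buffer.
    const std::vector<int64_t> a_strides = {12, 4, 1}, b_strides = {4, 4, 1}, out_strides = {12, 4, 1};
    std::vector<int64_t> host_buffer;
    host_buffer.insert(host_buffer.end(), a_strides.begin(), a_strides.end());
    host_buffer.insert(host_buffer.end(), b_strides.begin(), b_strides.end());
    host_buffer.insert(host_buffer.end(), out_strides.begin(), out_strides.end());

    // Device side: one allocation, carved up by offset — mirrors device_buffer + ndim * k above.
    int64_t *device_buffer = nullptr;
    CUDA_CHECK(cudaMallocAsync(&device_buffer, (3 * ndim + 1) * sizeof(int64_t), stream));
    int64_t *dev_a = device_buffer + ndim * 0;
    int64_t *dev_b = device_buffer + ndim * 1;
    int64_t *dev_out = device_buffer + ndim * 2;
    int64_t *dev_result = device_buffer + ndim * 3;

    CUDA_CHECK(cudaMemcpyAsync(device_buffer, host_buffer.data(), 3 * ndim * sizeof(int64_t),
                               cudaMemcpyHostToDevice, stream));
    SumPacked<<<1, 1, 0, stream>>>(dev_a, dev_b, dev_out, ndim, dev_result);

    int64_t result = 0;
    CUDA_CHECK(cudaMemcpyAsync(&result, dev_result, sizeof(int64_t), cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaFreeAsync(device_buffer, stream));
    // As in the PR, synchronize before host_buffer (and &result) leave scope or are read.
    CUDA_CHECK(cudaStreamSynchronize(stream));
    std::printf("sum of packed metadata = %lld\n", (long long)result);

    CUDA_CHECK(cudaStreamDestroy(stream));
    return 0;
}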
28 changes: 17 additions & 11 deletions infini_train/src/kernels/cuda/gather.cu
@@ -89,12 +89,9 @@ std::shared_ptr<Tensor> IndexGatherForward(const std::shared_ptr<Tensor> &input,
int64_t *in_strides_dev = dev_buf + 1 * num_dims;
int64_t *out_strides_dev = dev_buf + 2 * num_dims;

CUDA_CHECK(
cudaMemcpyAsync(out_dims_dev, idx_dims.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(in_strides_dev, in_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(out_strides_dev, out_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice,
stream));
CUDA_CHECK(cudaMemcpy(out_dims_dev, idx_dims.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(in_strides_dev, in_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(out_strides_dev, out_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice));

const int threads = 256;
const int blocks = (total_elements + threads - 1) / threads;
@@ -110,6 +107,12 @@ std::shared_ptr<Tensor> IndexGatherForward(const std::shared_ptr<Tensor> &input,
"CUDA IndexGatherForward");

CUDA_CHECK(cudaFreeAsync(dev_buf, stream));

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

return out;
}

@@ -198,11 +201,10 @@ std::shared_ptr<Tensor> IndexGatherBackward(const std::shared_ptr<Tensor> &grad_
int64_t *in_strides_dev = out_dims_dev + n_out;
int64_t *out_strides_dev = in_strides_dev + n_in_strides;

CUDA_CHECK(cudaMemcpyAsync(out_dims_dev, idx_dims.data(), n_out * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(in_strides_dev, in_strides.data(), n_in_strides * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(out_strides_dev, out_strides.data(), n_out_strides * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpy(out_dims_dev, idx_dims.data(), n_out * sizeof(int64_t), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(in_strides_dev, in_strides.data(), n_in_strides * sizeof(int64_t), cudaMemcpyHostToDevice));
CUDA_CHECK(
cudaMemcpy(out_strides_dev, out_strides.data(), n_out_strides * sizeof(int64_t), cudaMemcpyHostToDevice));

const int threads = 256;
const int blocks = (int)((total_elements + threads - 1) / threads);
@@ -218,6 +220,10 @@ std::shared_ptr<Tensor> IndexGatherBackward(const std::shared_ptr<Tensor> &grad_
"CUDA IndexGatherBackward");

CUDA_CHECK(cudaFreeAsync(dev_buf, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));
return grad_input;
}

58 changes: 37 additions & 21 deletions infini_train/src/kernels/cuda/slice.cu
@@ -73,21 +73,24 @@ std::shared_ptr<Tensor> SliceForward(const std::shared_ptr<Tensor> &input, const
infini_train::core::GetDeviceGuardImpl(device.type())->GetStream(device))
->cuda_stream();

cudaMallocAsync(&new_dims_dev,
(ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream);
CUDA_CHECK(cudaMallocAsync(
&new_dims_dev, (ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream));
starts_dev = new_dims_dev + ends.size();
steps_dev = starts_dev + starts.size();
input_strides_dev = steps_dev + steps.size();
output_strides_dev = input_strides_dev + dims.size();

cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
CUDA_CHECK(
cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));

int threads_per_block = 256;
int num_blocks = (total_elements + threads_per_block - 1) / threads_per_block;
@@ -103,6 +106,11 @@ std::shared_ptr<Tensor> SliceForward(const std::shared_ptr<Tensor> &input, const

cudaFreeAsync(new_dims_dev, stream);

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

return new_tensor;
}

@@ -167,21 +175,24 @@ std::shared_ptr<Tensor> SliceBackward(const std::shared_ptr<Tensor> &grad_output
const auto &stream = dynamic_cast<infini_train::core::cuda::CudaStream *>(
infini_train::core::GetDeviceGuardImpl(device.type())->GetStream(device))
->cuda_stream();
cudaMallocAsync(&new_dims_dev,
(ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream);
CUDA_CHECK(cudaMallocAsync(
&new_dims_dev, (ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream));
starts_dev = new_dims_dev + ends.size();
steps_dev = starts_dev + starts.size();
input_strides_dev = steps_dev + steps.size();
output_strides_dev = input_strides_dev + dims.size();

cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
CUDA_CHECK(
cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));

int threads_per_block = 256;
int num_blocks = (total_elements + threads_per_block - 1) / threads_per_block;
@@ -195,7 +206,12 @@ std::shared_ptr<Tensor> SliceBackward(const std::shared_ptr<Tensor> &grad_output
},
"CUDA SliceBackward");

cudaFreeAsync(new_dims_dev, stream);
CUDA_CHECK(cudaFreeAsync(new_dims_dev, stream));

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

return grad_input;
}
16 changes: 11 additions & 5 deletions infini_train/src/kernels/cuda/split.cu
@@ -133,18 +133,19 @@ std::shared_ptr<Tensor> LaunchSplitBackward(const std::vector<int64_t> &input_di
void *device_ptr;
const T **device_grad_output_ptrs;
int64_t *device_H_outs;
cudaMallocAsync(&device_ptr, (sizeof(T *) + sizeof(int64_t)) * num_splits, stream);
CUDA_CHECK(cudaMallocAsync(&device_ptr, (sizeof(T *) + sizeof(int64_t)) * num_splits, stream));
device_grad_output_ptrs = (const T **)(device_ptr);
device_H_outs = reinterpret_cast<int64_t *>(device_grad_output_ptrs + num_splits);

cudaMemcpyAsync(device_grad_output_ptrs, host_grad_output_ptrs.data(), sizeof(T *) * num_splits,
cudaMemcpyHostToDevice, stream);
CUDA_CHECK(cudaMemcpyAsync(device_grad_output_ptrs, host_grad_output_ptrs.data(), sizeof(T *) * num_splits,
cudaMemcpyHostToDevice, stream));

// init H_out for each split
std::vector<int64_t> H_outs(num_splits);
for (int i = 0; i < num_splits; ++i) { H_outs[i] = std::min(split_size, H_in - i * split_size); }

cudaMemcpyAsync(device_H_outs, H_outs.data(), sizeof(int64_t) * num_splits, cudaMemcpyHostToDevice, stream);
CUDA_CHECK(
cudaMemcpyAsync(device_H_outs, H_outs.data(), sizeof(int64_t) * num_splits, cudaMemcpyHostToDevice, stream));

int64_t total_elements = N * H_in * W;
int threads_per_block = 256;
@@ -154,7 +155,12 @@
static_cast<T *>(grad_input->DataPtr()), N, H_in,
W, split_size, num_splits, device_H_outs);

cudaFreeAsync(device_ptr, stream);
CUDA_CHECK(cudaFreeAsync(device_ptr, stream));

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

return grad_input;
}