Description: fix spelling-error-in-manpage and other typos
Author: Andreas Beckmann <anbe@debian.org>
Author: Graham Inggs <graham@nerve.org.za>
--- a/nvidia-cuda/cuda_cudart/include/cuda.h
+++ b/nvidia-cuda/cuda_cudart/include/cuda.h
@@ -5173,7 +5173,7 @@ CUresult CUDAAPI cuDeviceGetAttribute(in
*
* At least one of these flags must be set, failing which the API
* returns ::CUDA_ERROR_INVALID_VALUE. Both the flags are orthogonal
- * to one another: a developer may set both these flags that allows to
+ * to one another: a developer may set both these flags that allows one to
* set both wait and signal specific attributes in the same \p nvSciSyncAttrList.
*
* Note that this API updates the input \p nvSciSyncAttrList with values equivalent
@@ -5550,7 +5550,7 @@ CUresult CUDAAPI cuDevicePrimaryCtxRelea
/**
* \brief Set flags for the primary context
*
- * Sets the flags for the primary context on the device overwriting perviously
+ * Sets the flags for the primary context on the device overwriting previously
* set ones.
*
* The three LSBs of the \p flags parameter can be used to control how the OS
--- a/nvidia-cuda/cuda_cudart/include/cuda_runtime.h
+++ b/nvidia-cuda/cuda_cudart/include/cuda_runtime.h
@@ -176,8 +176,8 @@ struct __device_builtin__ __nv_lambda_p
* \p stream specifies a stream the invocation is associated to.
*
* \param func - Device function symbol
- * \param gridDim - Grid dimentions
- * \param blockDim - Block dimentions
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
* \param args - Arguments
* \param sharedMem - Shared memory (defaults to 0)
* \param stream - Stream identifier (defaults to NULL)
@@ -315,8 +315,8 @@ static __inline__ __host__ cudaError_t c
* \p stream specifies a stream the invocation is associated to.
*
* \param func - Device function symbol
- * \param gridDim - Grid dimentions
- * \param blockDim - Block dimentions
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
* \param args - Arguments
* \param sharedMem - Shared memory (defaults to 0)
* \param stream - Stream identifier (defaults to NULL)
@@ -548,7 +548,7 @@ static __inline__ __host__ cudaError_t c
* ::cudaStreamAttachMemAsync will be required to enable access on such devices.
*
* If the association is later changed via ::cudaStreamAttachMemAsync to
- * a single stream, the default association, as specifed during ::cudaMallocManaged,
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
* is restored when that stream is destroyed. For __managed__ variables, the
* default association is always ::cudaMemAttachGlobal. Note that destroying a
* stream is an asynchronous operation, and as a result, the change to default
@@ -1612,7 +1612,7 @@ __host__ cudaError_t cudaFuncSetSharedMe
* streaming multiprocessor for the device function.
*
* \param numBlocks - Returned occupancy
- * \param func - Kernel function for which occupancy is calulated
+ * \param func - Kernel function for which occupancy is calculated
* \param blockSize - Block size the kernel is intended to be launched with
* \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
*
@@ -1663,7 +1663,7 @@ static __inline__ __host__ cudaError_t c
* section of the Maxwell tuning guide.
*
* \param numBlocks - Returned occupancy
- * \param func - Kernel function for which occupancy is calulated
+ * \param func - Kernel function for which occupancy is calculated
* \param blockSize - Block size the kernel is intended to be launched with
* \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
* \param flags - Requested behavior for the occupancy calculator
@@ -2046,7 +2046,7 @@ static __inline__ __host__ cudaError_t c
}
/**
- * \brief Returns grid and block size that achived maximum potential occupancy for a device function with the specified flags
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function with the specified flags
*
* Returns in \p *minGridSize and \p *blocksize a suggested grid /
* block size pair that achieves the best potential occupancy
--- a/nvidia-cuda/cuda_cudart/include/cuda_runtime_api.h
+++ b/nvidia-cuda/cuda_cudart/include/cuda_runtime_api.h
@@ -440,7 +440,7 @@ extern __host__ cudaError_t CUDARTAPI cu
* - ::cudaLimitMallocHeapSize is the size in bytes of the heap used by the
* ::malloc() and ::free() device system calls.
* - ::cudaLimitDevRuntimeSyncDepth is the maximum grid depth at which a
- * thread can isssue the device runtime call ::cudaDeviceSynchronize()
+ * thread can issue the device runtime call ::cudaDeviceSynchronize()
* to wait on child grid launches to complete. This functionality is removed
* for devices of compute capability >= 9.0, and hence will return error
* ::cudaErrorUnsupportedLimit on such devices.
@@ -867,7 +867,7 @@ extern __host__ cudaError_t CUDARTAPI cu
/**
* \brief Attempts to close memory mapped with cudaIpcOpenMemHandle
*
- * Decrements the reference count of the memory returnd by ::cudaIpcOpenMemHandle by 1.
+ * Decrements the reference count of the memory returned by ::cudaIpcOpenMemHandle by 1.
* When the reference count reaches 0, this API unmaps the memory. The original allocation
* in the exporting process as well as imported mappings in other processes
* will be unaffected.
@@ -2089,7 +2089,7 @@ extern __host__ cudaError_t CUDARTAPI cu
*
* At least one of these flags must be set, failing which the API
* returns ::cudaErrorInvalidValue. Both the flags are orthogonal
- * to one another: a developer may set both these flags that allows to
+ * to one another: a developer may set both these flags that allows one to
* set both wait and signal specific attributes in the same \p nvSciSyncAttrList.
*
* Note that this API updates the input \p nvSciSyncAttrList with values equivalent
@@ -4371,8 +4371,8 @@ extern __host__ cudaError_t CUDARTAPI cu
* \p stream specifies a stream the invocation is associated to.
*
* \param func - Device function symbol
- * \param gridDim - Grid dimentions
- * \param blockDim - Block dimentions
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
* \param args - Arguments
* \param sharedMem - Shared memory
* \param stream - Stream identifier
@@ -4493,8 +4493,8 @@ extern __host__ cudaError_t CUDARTAPI cu
* \p stream specifies a stream the invocation is associated to.
*
* \param func - Device function symbol
- * \param gridDim - Grid dimentions
- * \param blockDim - Block dimentions
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
* \param args - Arguments
* \param sharedMem - Shared memory
* \param stream - Stream identifier
@@ -4536,7 +4536,7 @@ extern __host__ cudaError_t CUDARTAPI cu
*
* The same kernel must be launched on all devices. Note that any __device__ or __constant__
* variables are independently instantiated on every device. It is the application's
- * responsiblity to ensure these variables are initialized and used appropriately.
+ * responsibility to ensure these variables are initialized and used appropriately.
*
* The size of the grids as specified in blocks, the size of the blocks themselves and the
* amount of shared memory used by each thread block must also match across all launched kernels.
@@ -5213,7 +5213,7 @@ extern __host__ __cudart_builtin__ cudaE
* ::cudaStreamAttachMemAsync will be required to enable access on such devices.
*
* If the association is later changed via ::cudaStreamAttachMemAsync to
- * a single stream, the default association, as specifed during ::cudaMallocManaged,
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
* is restored when that stream is destroyed. For __managed__ variables, the
* default association is always ::cudaMemAttachGlobal. Note that destroying a
* stream is an asynchronous operation, and as a result, the change to default
@@ -6211,7 +6211,7 @@ struct cudaMemcpy3DParms {
};
\endcode
*
- * ::cudaMemcpy3D() copies data betwen two 3D objects. The source and
+ * ::cudaMemcpy3D() copies data between two 3D objects. The source and
* destination objects may be in either host memory, device memory, or a CUDA
* array. The source, destination, extent, and kind of copy performed is
* specified by the ::cudaMemcpy3DParms struct which should be initialized to
@@ -6348,7 +6348,7 @@ struct cudaMemcpy3DParms {
};
\endcode
*
- * ::cudaMemcpy3DAsync() copies data betwen two 3D objects. The source and
+ * ::cudaMemcpy3DAsync() copies data between two 3D objects. The source and
* destination objects may be in either host memory, device memory, or a CUDA
* array. The source, destination, extent, and kind of copy performed is
* specified by the ::cudaMemcpy3DParms struct which should be initialized to
--- a/nvidia-cuda/libcublas/include/cublasXt.h
+++ b/nvidia-cuda/libcublas/include/cublasXt.h
@@ -73,7 +73,7 @@ cublasStatus_t CUBLASWINAPI cublasXtMaxB
/* This routine selects the Gpus that the user want to use for CUBLAS-XT */
cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
-/* This routine allows to change the dimension of the tiles ( blockDim x blockDim ) */
+/* This routine allows one to change the dimension of the tiles ( blockDim x blockDim ) */
cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
--- a/nvidia-cuda/cuda_cudart/include/driver_types.h
+++ b/nvidia-cuda/cuda_cudart/include/driver_types.h
@@ -1819,7 +1819,7 @@ enum __device_builtin__ cudaLimit
*/
enum __device_builtin__ cudaMemoryAdvise
{
- cudaMemAdviseSetReadMostly = 1, /**< Data will mostly be read and only occassionally be written to */
+ cudaMemAdviseSetReadMostly = 1, /**< Data will mostly be read and only occasionally be written to */
cudaMemAdviseUnsetReadMostly = 2, /**< Undo the effect of ::cudaMemAdviseSetReadMostly */
cudaMemAdviseSetPreferredLocation = 3, /**< Set the preferred location for the data as the specified device */
cudaMemAdviseUnsetPreferredLocation = 4, /**< Clear the preferred location for the data */
@@ -1832,7 +1832,7 @@ enum __device_builtin__ cudaMemoryAdvise
*/
enum __device_builtin__ cudaMemRangeAttribute
{
- cudaMemRangeAttributeReadMostly = 1, /**< Whether the range will mostly be read and only occassionally be written to */
+ cudaMemRangeAttributeReadMostly = 1, /**< Whether the range will mostly be read and only occasionally be written to */
cudaMemRangeAttributePreferredLocation = 2, /**< The preferred location of the range */
cudaMemRangeAttributeAccessedBy = 3, /**< Memory range has ::cudaMemAdviseSetAccessedBy set for specified device */
cudaMemRangeAttributeLastPrefetchLocation = 4 /**< The last location to which the range was prefetched */
@@ -2945,8 +2945,8 @@ enum __device_builtin__ cudaCGScope {
struct __device_builtin__ cudaLaunchParams
{
void *func; /**< Device function symbol */
- dim3 gridDim; /**< Grid dimentions */
- dim3 blockDim; /**< Block dimentions */
+ dim3 gridDim; /**< Grid dimensions */
+ dim3 blockDim; /**< Block dimensions */
void **args; /**< Arguments */
size_t sharedMem; /**< Shared memory */
cudaStream_t stream; /**< Stream identifier */
--- a/nvidia-cuda/cuda_nvml_dev/include/nvml.h
+++ b/nvidia-cuda/cuda_nvml_dev/include/nvml.h
@@ -970,7 +970,7 @@ typedef enum nvmlPStates_enum
/**
* GPU Operation Mode
*
- * GOM allows to reduce power usage and optimize GPU throughput by disabling GPU features.
+ * GOM allows one to reduce power usage and optimize GPU throughput by disabling GPU features.
*
* Each GOM is designed to meet specific user needs.
*/
@@ -1026,7 +1026,7 @@ typedef enum nvmlReturn_enum
NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use
NVML_ERROR_MEMORY = 20, //!< Insufficient memory
NVML_ERROR_NO_DATA = 21, //!< No data
- NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, becasue ECC is enabled
+ NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, because ECC is enabled
NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran out of critical resources, other than memory
NVML_ERROR_FREQ_NOT_SUPPORTED = 24, //!< Ran out of critical resources, other than memory
NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25, //!< The provided version is invalid/unsupported
@@ -2961,7 +2961,7 @@ nvmlReturn_t DECLDIR nvmlSystemGetNVMLVe
*
* For all products.
*
- * The CUDA driver version returned will be retreived from the currently installed version of CUDA.
+ * The CUDA driver version returned will be retrieved from the currently installed version of CUDA.
* If the cuda library is not found, this function will return a known supported version number.
*
* @param cudaDriverVersion Reference in which to return the version identifier
--- a/nvidia-cuda/cuda_cupti/extras/CUPTI/samples/openacc_trace/openacc_trace.cpp
+++ b/nvidia-cuda/cuda_cupti/extras/CUPTI/samples/openacc_trace/openacc_trace.cpp
@@ -77,7 +77,7 @@ SetupCupti()
}
// acc_register_library is defined by the OpenACC tools interface
-// and allows to register this library with the OpenACC runtime.
+// and allows one to register this library with the OpenACC runtime.
extern "C" void
acc_register_library(
--- a/nvidia-cuda/libcusolver/include/cusolverMg.h
+++ b/nvidia-cuda/libcusolver/include/cusolverMg.h
@@ -87,7 +87,7 @@ extern "C" {
/**
* \brief Allocates resources related to the shared memory device grid.
- * \param[out] grid the opaque data strcuture that holds the grid
+ * \param[out] grid the opaque data structure that holds the grid
* \param[in] numRowDevices number of devices in the row
* \param[in] numColDevices number of devices in the column
* \param[in] deviceId This array of size height * width stores the
@@ -104,14 +104,14 @@ extern "C" {
/**
* \brief Releases the allocated resources related to the distributed grid.
- * \param[in] grid the opaque data strcuture that holds the distributed grid
+ * \param[in] grid the opaque data structure that holds the distributed grid
* \returns the status code
*/
cusolverStatus_t CUSOLVERAPI cusolverMgDestroyGrid(cudaLibMgGrid_t grid);
/**
* \brief Allocates resources related to the distributed matrix descriptor.
- * \param[out] desc the opaque data strcuture that holds the descriptor
+ * \param[out] desc the opaque data structure that holds the descriptor
* \param[in] numRows number of total rows
* \param[in] numCols number of total columns
* \param[in] rowBlockSize row block size
@@ -131,7 +131,7 @@ extern "C" {
/**
* \brief Releases the allocated resources related to the distributed matrix
- * descriptor. \param[in] desc the opaque data strcuture that holds the
+ * descriptor. \param[in] desc the opaque data structure that holds the
* descriptor \returns the status code
*/
cusolverStatus_t CUSOLVERAPI
--- a/nvidia-cuda/cuda_cudart/include/cuda_egl_interop.h
+++ b/nvidia-cuda/cuda_cudart/include/cuda_egl_interop.h
@@ -379,7 +379,7 @@ extern __host__ cudaError_t CUDARTAPI cu
*
* Disconnect CUDA as a consumer to EGLStreamKHR.
*
- * \param conn - Conection to disconnect.
+ * \param conn - Connection to disconnect.
*
* \return
* ::cudaSuccess,
@@ -477,7 +477,7 @@ extern __host__ cudaError_t CUDARTAPI cu
*
* Disconnect CUDA as a producer to EGLStreamKHR.
*
- * \param conn - Conection to disconnect.
+ * \param conn - Connection to disconnect.
*
* \return
* ::cudaSuccess,