Description: fix typos found by lintian
Author: Andreas Beckmann <anbe@debian.org>
--- a/kernel-open/nvidia-uvm/uvm_mmu.c
+++ b/kernel-open/nvidia-uvm/uvm_mmu.c
@@ -1360,7 +1360,7 @@ static NV_STATUS try_get_ptes(uvm_page_t
// entry
NvU32 addr_bit_shift = hal->num_va_bits();
- // track depth upon which the invalidate occured
+ // track depth upon which the invalidate occurred
NvS32 invalidate_depth = -1;
uvm_page_directory_t *dir = tree->root;
--- a/src/common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c
+++ b/src/common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c
@@ -303,7 +303,7 @@ nvlink_core_poll_link_state
if (!timeout)
{
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
- "%s: Timeout occured while polling on link.\n",
+ "%s: Timeout occurred while polling on link.\n",
__FUNCTION__));
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
@@ -427,7 +427,7 @@ nvlink_core_poll_tx_sublink_state
if (!timeout)
{
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
- "%s: Timeout occured while polling on link.\n",
+ "%s: Timeout occurred while polling on link.\n",
__FUNCTION__));
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
@@ -490,7 +490,7 @@ nvlink_core_poll_rx_sublink_state
if (!timeout)
{
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
- "%s: Timeout occured while polling on link.\n",
+ "%s: Timeout occurred while polling on link.\n",
__FUNCTION__));
NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
--- a/src/nvidia-modeset/include/nvkms-types.h
+++ b/src/nvidia-modeset/include/nvkms-types.h
@@ -1900,7 +1900,7 @@ typedef struct _NVDispApiHeadStateEvoRec
*
* The numVblankSyncObjectsCreated will ideally always be equal to
* NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD, but could be lower if errors
- * occured during syncpt allocation in nvRMSetupEvoCoreChannel().
+ * occurred during syncpt allocation in nvRMSetupEvoCoreChannel().
*/
NvU8 numVblankSyncObjectsCreated;
NVVblankSyncObjectRec vblankSyncObjects[NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD];
--- a/src/nvidia-modeset/src/nvkms-dma.c
+++ b/src/nvidia-modeset/src/nvkms-dma.c
@@ -428,7 +428,7 @@ NvU32 nvEvoReadCRC32Notifier(volatile Nv
case NVEvoCrc32NotifierFlagCrcOverflow:
if (flag) {
count = 0;
- nvEvoLog(EVO_LOG_ERROR, "CRC Overflow occured, "
+ nvEvoLog(EVO_LOG_ERROR, "CRC Overflow occurred, "
"CRC value unable to be processed fast enough.\n"
"Failing flag index in status_info array: %d",
k);
--- a/src/nvidia/src/kernel/diagnostics/gpu_acct.c
+++ b/src/nvidia/src/kernel/diagnostics/gpu_acct.c
@@ -523,7 +523,7 @@ gpuacctSampleGpuUtil_out:
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
- "Error sheduling callback for util 0x%x\n",
+ "Error scheduling callback for util 0x%x\n",
status);
return status;
}
--- a/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c
+++ b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c
@@ -3868,7 +3868,7 @@ kbusVerifyBar2_GM107
(NvU8 *)(pOffset - pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping),
testMemorySize);
NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO,
- "MMUTest The physical address being targetted is 0x%llx\n",
+ "MMUTest The physical address being targeted is 0x%llx\n",
testMemoryOffset);
for(index = 0; index < testMemorySize; index += 4)
{
--- a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c
+++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c
@@ -68,7 +68,7 @@
// buffer is allocated, we need to use memdescLock to lock the buffer in physical
// memory so that CE can access it and then use memdescUnlock to unlock it.
//
-// Mechanisms #5 and #6 are targetted for WDDM, a large VA section (Paged region)
+// Mechanisms #5 and #6 are targeted for WDDM, a large VA section (Paged region)
// and a small pinned region are committed on boot.
//
// Mechanism #5 (TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED):
--- a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr.c
+++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr.c
@@ -2144,7 +2144,7 @@ memmgrSetPartitionableMem_IMPL
//
// Find out the first and the last region for which internal heap or
// bRsvdRegion is true. In Ampere we should never have more than two
- // discontigous RM reserved region
+ // discontiguous RM reserved region
// To-Do - Bug 2301972 - Make sure that reserved memory is aligned to VMMU
// segments
//
@@ -2181,14 +2181,14 @@ memmgrSetPartitionableMem_IMPL
else
{
//
- // Make sure we don't have discontigous reserved regions as
+ // Make sure we don't have discontiguous reserved regions as
// they are not supported by HW also and we need to support
// these by using blacklisting mechanism.
//
if (topRegionIdx != 0xFFFF)
{
NV_PRINTF(LEVEL_ERROR,
- "More than two discontigous rsvd regions found. "
+ "More than two discontiguous rsvd regions found. "
"Rsvd region base - 0x%llx, Rsvd region Size - 0x%llx\n",
pMemoryManager->Ram.fbRegion[i].base, rsvdSize);
NV_ASSERT(0);
--- a/src/nvidia/src/kernel/platform/hwbc.c
+++ b/src/nvidia/src/kernel/platform/hwbc.c
@@ -1009,7 +1009,7 @@ clFindBR04_IMPL
//
// Nvidia_BR04_ShiftAliasingRegisters : Remaps the aliasing registers for
-// the targetted BR04 to the first available empty slot
+// the targeted BR04 to the first available empty slot
//
static NV_STATUS
Nvidia_BR04_ShiftAliasingRegisters
--- a/src/nvidia/src/libraries/mmu/mmu_walk_commit.c
+++ b/src/nvidia/src/libraries/mmu/mmu_walk_commit.c
@@ -93,7 +93,7 @@ _mmuWalkCommitPDEs
{
const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx;
- // If this level is not the targetted page level.
+ // If this level is not the targeted page level.
if (pLevel->pFmt != pLevelFmt)
{
NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT);
--- a/src/nvidia/src/libraries/mmu/mmu_walk_map.c
+++ b/src/nvidia/src/libraries/mmu/mmu_walk_map.c
@@ -119,7 +119,7 @@ _mmuWalkMap
NV_ASSERT_OR_RETURN(NULL != pLevelInst, NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(NULL != pLevel, NV_ERR_INVALID_ARGUMENT);
- // If this level is not the targetted page level.
+ // If this level is not the targeted page level.
if (pLevel->pFmt != pTarget->pLevelFmt)
{
NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT);
--- a/src/nvidia/src/libraries/mmu/mmu_walk_reserve.c
+++ b/src/nvidia/src/libraries/mmu/mmu_walk_reserve.c
@@ -164,7 +164,7 @@ _mmuWalkReserveEntries
{
const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx;
- // If this level is not the targetted page level.
+ // If this level is not the targeted page level.
if (pLevel->pFmt != pLevelFmt)
{
NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT);
@@ -215,7 +215,7 @@ _mmuWalkReleaseEntries
{
const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx;
- // If this level is not the targetted page level.
+ // If this level is not the targeted page level.
if (pLevel->pFmt != pLevelFmt)
{
NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT);