@@ -1201,17 +1201,21 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 	pn_t pn;
 
 	KASSERT((pa & L3_OFFSET) == 0,
-	   ("pmap_kenter_device: Invalid physical address"));
+	   ("%s: Invalid physical address", __func__));
 	KASSERT((sva & L3_OFFSET) == 0,
-	   ("pmap_kenter_device: Invalid virtual address"));
+	   ("%s: Invalid virtual address", __func__));
 	KASSERT((size & PAGE_MASK) == 0,
-	    ("pmap_kenter_device: Mapping is not page-sized"));
+	    ("%s: Mapping is not page-sized", __func__));
+
+	if (size == 0)
+		return;
 
 	memattr = pmap_memattr_bits(mode);
 	va = sva;
-	while (size != 0) {
-		l3 = pmap_l3(kernel_pmap, va);
-		KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
+	l3 = pmap_l3(kernel_pmap, va);
+	while (true) {
+		KASSERT(l3 != NULL, ("%s: Invalid page table, va: 0x%lx",
+		    __func__, va));
 
 		pn = (pa / PAGE_SIZE);
 		entry = PTE_KERN;
@@ -1222,6 +1226,11 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 		va += PAGE_SIZE;
 		pa += PAGE_SIZE;
 		size -= PAGE_SIZE;
+		if (size == 0)
+			break;
+		/* only need to re-read l3 when crossing the page boundary */
+		if (__is_aligned(++l3, PAGE_SIZE))
+			l3 = pmap_l3(kernel_pmap, va);
 	}
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
@@ -1255,18 +1264,27 @@ pmap_kremove_device(vm_offset_t sva, vm_size_t size)
 	vm_offset_t va;
 
 	KASSERT((sva & L3_OFFSET) == 0,
-	   ("pmap_kremove_device: Invalid virtual address"));
+	   ("%s: Invalid virtual address", __func__));
 	KASSERT((size & PAGE_MASK) == 0,
-	    ("pmap_kremove_device: Mapping is not page-sized"));
+	    ("%s: Mapping is not page-sized", __func__));
+
+	if (size == 0)
+		return;
 
 	va = sva;
-	while (size != 0) {
-		l3 = pmap_l3(kernel_pmap, va);
-		KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
+	l3 = pmap_l3(kernel_pmap, va);
+	while (true) {
+		KASSERT(l3 != NULL, ("%s: Invalid page table, va: 0x%lx",
+		    __func__, va));
 		pmap_clear(l3);
 
 		va += PAGE_SIZE;
 		size -= PAGE_SIZE;
+		if (size == 0)
+			break;
+		/* only need to re-read l3 when crossing the page boundary */
+		if (__is_aligned(++l3, PAGE_SIZE))
+			l3 = pmap_l3(kernel_pmap, va);
 	}
 
 	pmap_invalidate_range(kernel_pmap, sva, va);
@@ -1311,19 +1329,29 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 	pn_t pn;
 	int i;
 
+	if (count == 0)
+		return;
+
 	va = sva;
-	for (i = 0; i < count; i++) {
+	l3 = pmap_l3(kernel_pmap, va);
+	for (i = 0; ; ) {
+		KASSERT(l3 != NULL, ("%s: Invalid page table, va: 0x%lx",
+		    __func__, va));
 		m = ma[i];
 		pa = VM_PAGE_TO_PHYS(m);
 		pn = (pa / PAGE_SIZE);
-		l3 = pmap_l3(kernel_pmap, va);
 
 		entry = PTE_KERN;
 		entry |= pmap_memattr_bits(m->md.pv_memattr);
 		entry |= (pn << PTE_PPN0_S);
 		pmap_store(l3, entry);
 
-		va += L3_SIZE;
+		va += PAGE_SIZE;
+		if (++i >= count)
+			break;
+		/* only need to re-read l3 when crossing the page boundary */
+		if (__is_aligned(++l3, PAGE_SIZE))
+			l3 = pmap_l3(kernel_pmap, va);
 	}
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
@@ -1341,10 +1369,21 @@ pmap_qremove(vm_offset_t sva, int count)
 
 	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
 
-	for (va = sva; count-- > 0; va += PAGE_SIZE) {
-		l3 = pmap_l3(kernel_pmap, va);
-		KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));
+	if (count == 0)
+		return;
+
+	va = sva;
+	l3 = pmap_l3(kernel_pmap, va);
+	while (true) {
+		KASSERT(l3 != NULL, ("%s: Invalid page table, va: 0x%lx",
+		    __func__, va));
 		pmap_clear(l3);
+		va += PAGE_SIZE;
+		if (--count == 0)
+			break;
+		/* only need to re-read l3 when crossing the page boundary */
+		if (__is_aligned(++l3, PAGE_SIZE))
+			l3 = pmap_l3(kernel_pmap, va);
 	}
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
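
All four loops apply the same optimization in pmap_kenter(), pmap_kremove_device(), pmap_qenter(), and pmap_qremove(): L3 PTEs mapping consecutive virtual pages sit consecutively inside one page-table page, so the PTE pointer can simply be incremented each iteration, and a fresh pmap_l3() walk is needed only once the pointer steps off the end of its page-table page. The post-increment alignment test catches exactly that case: the only way ++l3 can yield a page-aligned address is by wrapping past the last slot of the current page-table page, and since the physically adjacent page need not hold the next level-3 table, the now-stale pointer is immediately refreshed.

Below is a minimal userspace sketch of that pointer test, not kernel code: the 4 KiB PAGE_SIZE, the 8-byte pt_entry_t, and the local __is_aligned() stand-in for the sys/param.h macro are assumptions made for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096	/* assumed base page size */
/* Local stand-in for the kernel's __is_aligned() from sys/param.h. */
#define __is_aligned(x, y)	(((uintptr_t)(x) & ((y) - 1)) == 0)

typedef uint64_t pt_entry_t;	/* a RISC-V PTE is 8 bytes wide */

int
main(void)
{
	/* Two fake page-table pages, page-aligned so the test behaves. */
	pt_entry_t *ptp = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	if (ptp == NULL)
		return (1);

	/* Start two PTE slots before the end of the first page. */
	pt_entry_t *l3 = ptp + PAGE_SIZE / sizeof(pt_entry_t) - 2;

	for (int i = 0; i < 3; i++) {
		printf("use PTE slot %p\n", (void *)l3);
		/*
		 * Advance to the PTE for the next virtual page.  The
		 * incremented pointer is page-aligned only when it has
		 * just stepped off the end of its page-table page; in
		 * the kernel this is where pmap_l3() is re-run.
		 */
		if (__is_aligned(++l3, PAGE_SIZE))
			printf("  -> crossed a page-table page; re-walk\n");
	}
	free(ptp);
	return (0);
}

Rotating each loop so the exit test sits at the bottom (while (true) { ... if (...) break; }) lets the first pmap_l3() lookup move ahead of the loop without ever dereferencing l3 on an empty request, which is why each function now also returns early when size or count is 0.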