@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
 	u64	end_address;		/* reserved end address */
 	struct dmar_dev_scope *devices;	/* target devices */
 	int	devices_cnt;		/* target device count */
+	struct iommu_resv_region *resv;	/* reserved region handle */
 };
 
 struct dmar_atsr_unit {
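Each dmar_rmrr_unit now carries a pre-allocated reserved-region handle, so the RMRR range can be handed to the IOMMU core without allocating on every query. For reference, a sketch of the descriptor the new resv pointer refers to; the field layout is assumed from the companion iommu_resv_region patches in this series, not restated from this diff:

struct iommu_resv_region {
	struct list_head	list;	/* linkage onto a per-device list */
	phys_addr_t		start;	/* base physical address */
	size_t			length;	/* size of the region in bytes */
	int			prot;	/* IOMMU protection flags */
	enum iommu_resv_type	type;	/* e.g. IOMMU_RESV_DIRECT, IOMMU_RESV_RESERVED */
};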
@@ -4246,27 +4247,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
+	int prot = DMA_PTE_READ|DMA_PTE_WRITE;
 	struct dmar_rmrr_unit *rmrru;
+	size_t length;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
-		return -ENOMEM;
+		goto out;
 
 	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	length = rmrr->end_address - rmrr->base_address + 1;
+	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+					      IOMMU_RESV_DIRECT);
+	if (!rmrru->resv)
+		goto free_rmrru;
+
 	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
-	if (rmrru->devices_cnt && rmrru->devices == NULL) {
-		kfree(rmrru);
-		return -ENOMEM;
-	}
+	if (rmrru->devices_cnt && rmrru->devices == NULL)
+		goto free_all;
 
 	list_add(&rmrru->list, &dmar_rmrr_units);
 
 	return 0;
+free_all:
+	kfree(rmrru->resv);
+free_rmrru:
+	kfree(rmrru);
+out:
+	return -ENOMEM;
 }
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
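The parse path above allocates the reserved region once, at RMRR parse time, and keeps it for the lifetime of the dmar_rmrr_unit. A minimal sketch of what a helper like iommu_alloc_resv_region() plausibly does, assuming only the fields used in this hunk (start, length, prot, type); the real helper is introduced by the IOMMU core patches of this series, so the body below is illustrative, not authoritative:

static struct iommu_resv_region *alloc_resv_region_sketch(phys_addr_t start,
							  size_t length, int prot,
							  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	/* RMRR parsing runs from __init context, so GFP_KERNEL is fine. */
	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}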
@@ -4480,6 +4494,7 @@ static void intel_iommu_free_dmars(void)
 	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
 		list_del(&rmrru->list);
 		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru->resv);
 		kfree(rmrru);
 	}
 
@@ -5203,6 +5218,45 @@ static void intel_iommu_remove_device(struct device *dev)
 	iommu_device_unlink(iommu->iommu_dev, dev);
 }
 
+static void intel_iommu_get_resv_regions(struct device *device,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *reg;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *i_dev;
+	int i;
+
+	rcu_read_lock();
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, i_dev) {
+			if (i_dev != device)
+				continue;
+
+			list_add_tail(&rmrr->resv->list, head);
+		}
+	}
+	rcu_read_unlock();
+
+	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+				      0, IOMMU_RESV_RESERVED);
+	if (!reg)
+		return;
+	list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list) {
+		if (entry->type == IOMMU_RESV_RESERVED)
+			kfree(entry);
+	}
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 #define MAX_NR_PASID_BITS (20)
 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
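Note the asymmetry in the two callbacks above: get_resv_regions() hands out the long-lived RMRR handles by reference and only allocates the IOAPIC window per call, so put_resv_regions() frees nothing but IOMMU_RESV_RESERVED entries, while the direct-mapped RMRR regions stay owned by their dmar_rmrr_unit and are released in intel_iommu_free_dmars(). A hedged usage sketch, assuming the core wrappers iommu_get_resv_regions()/iommu_put_resv_regions() added by the companion patches; dump_resv_regions() is a hypothetical caller, not part of this commit:

static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	/* Invokes ops->get_resv_regions(), i.e. the callback above. */
	iommu_get_resv_regions(dev, &resv_regions);

	list_for_each_entry(region, &resv_regions, list) {
		phys_addr_t end = region->start + region->length - 1;

		dev_info(dev, "reserved region [%pa - %pa], type %d\n",
			 &region->start, &end, region->type);
	}

	/* Invokes ops->put_resv_regions(), freeing only the IOAPIC entry. */
	iommu_put_resv_regions(dev, &resv_regions);
}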
@@ -5333,19 +5387,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
 static const struct iommu_ops intel_iommu_ops = {
-	.capable	= intel_iommu_capable,
-	.domain_alloc	= intel_iommu_domain_alloc,
-	.domain_free	= intel_iommu_domain_free,
-	.attach_dev	= intel_iommu_attach_device,
-	.detach_dev	= intel_iommu_detach_device,
-	.map		= intel_iommu_map,
-	.unmap		= intel_iommu_unmap,
-	.map_sg		= default_iommu_map_sg,
-	.iova_to_phys	= intel_iommu_iova_to_phys,
-	.add_device	= intel_iommu_add_device,
-	.remove_device	= intel_iommu_remove_device,
-	.device_group	= pci_device_group,
-	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
+	.capable		= intel_iommu_capable,
+	.domain_alloc		= intel_iommu_domain_alloc,
+	.domain_free		= intel_iommu_domain_free,
+	.attach_dev		= intel_iommu_attach_device,
+	.detach_dev		= intel_iommu_detach_device,
+	.map			= intel_iommu_map,
+	.unmap			= intel_iommu_unmap,
+	.map_sg			= default_iommu_map_sg,
+	.iova_to_phys		= intel_iommu_iova_to_phys,
+	.add_device		= intel_iommu_add_device,
+	.remove_device		= intel_iommu_remove_device,
+	.get_resv_regions	= intel_iommu_get_resv_regions,
+	.put_resv_regions	= intel_iommu_put_resv_regions,
+	.device_group		= pci_device_group,
+	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)