Skip to content

Commit 51515bf

Browse files
mrathor99 authored and liuw committed
mshv: make field names descriptive in a header struct
When struct fields use very common names like "pages" or "type", it makes it difficult to find uses of these fields with tools like grep, cscope, etc when the struct is in a header file included in many places. Add prefix mreg_ to some fields in struct mshv_mem_region to make it easier to find them.

There is no functional change.

Signed-off-by: Mukesh R <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
1 parent afefdb2 commit 51515bf

3 files changed

Lines changed: 40 additions & 40 deletions

File tree

drivers/hv/mshv_regions.c

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
8888
struct page *page;
8989
int stride, ret;
9090

91-
page = region->pages[page_offset];
91+
page = region->mreg_pages[page_offset];
9292
if (!page)
9393
return -EINVAL;
9494

@@ -98,7 +98,7 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
9898

9999
/* Start at stride since the first stride is validated */
100100
for (count = stride; count < page_count; count += stride) {
101-
page = region->pages[page_offset + count];
101+
page = region->mreg_pages[page_offset + count];
102102

103103
/* Break if current page is not present */
104104
if (!page)
@@ -152,7 +152,7 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
152152

153153
while (page_count) {
154154
/* Skip non-present pages */
155-
if (!region->pages[page_offset]) {
155+
if (!region->mreg_pages[page_offset]) {
156156
page_offset++;
157157
page_count--;
158158
continue;
@@ -190,7 +190,7 @@ struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
190190
if (flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
191191
region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
192192

193-
kref_init(&region->refcount);
193+
kref_init(&region->mreg_refcount);
194194

195195
return region;
196196
}
@@ -204,7 +204,7 @@ static int mshv_region_chunk_share(struct mshv_mem_region *region,
204204
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
205205

206206
return hv_call_modify_spa_host_access(region->partition->pt_id,
207-
region->pages + page_offset,
207+
region->mreg_pages + page_offset,
208208
page_count,
209209
HV_MAP_GPA_READABLE |
210210
HV_MAP_GPA_WRITABLE,
@@ -229,7 +229,7 @@ static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
229229
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
230230

231231
return hv_call_modify_spa_host_access(region->partition->pt_id,
232-
region->pages + page_offset,
232+
region->mreg_pages + page_offset,
233233
page_count, 0,
234234
flags, false);
235235
}
@@ -254,7 +254,7 @@ static int mshv_region_chunk_remap(struct mshv_mem_region *region,
254254
return hv_call_map_gpa_pages(region->partition->pt_id,
255255
region->start_gfn + page_offset,
256256
page_count, flags,
257-
region->pages + page_offset);
257+
region->mreg_pages + page_offset);
258258
}
259259

260260
static int mshv_region_remap_pages(struct mshv_mem_region *region,
@@ -277,10 +277,10 @@ int mshv_region_map(struct mshv_mem_region *region)
277277
static void mshv_region_invalidate_pages(struct mshv_mem_region *region,
278278
u64 page_offset, u64 page_count)
279279
{
280-
if (region->type == MSHV_REGION_TYPE_MEM_PINNED)
281-
unpin_user_pages(region->pages + page_offset, page_count);
280+
if (region->mreg_type == MSHV_REGION_TYPE_MEM_PINNED)
281+
unpin_user_pages(region->mreg_pages + page_offset, page_count);
282282

283-
memset(region->pages + page_offset, 0,
283+
memset(region->mreg_pages + page_offset, 0,
284284
page_count * sizeof(struct page *));
285285
}
286286

@@ -297,7 +297,7 @@ int mshv_region_pin(struct mshv_mem_region *region)
297297
int ret;
298298

299299
for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
300-
pages = region->pages + done_count;
300+
pages = region->mreg_pages + done_count;
301301
userspace_addr = region->start_uaddr +
302302
done_count * HV_HYP_PAGE_SIZE;
303303
nr_pages = min(region->nr_pages - done_count,
@@ -348,11 +348,11 @@ static int mshv_region_unmap(struct mshv_mem_region *region)
348348
static void mshv_region_destroy(struct kref *ref)
349349
{
350350
struct mshv_mem_region *region =
351-
container_of(ref, struct mshv_mem_region, refcount);
351+
container_of(ref, struct mshv_mem_region, mreg_refcount);
352352
struct mshv_partition *partition = region->partition;
353353
int ret;
354354

355-
if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
355+
if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
356356
mshv_region_movable_fini(region);
357357

358358
if (mshv_partition_encrypted(partition)) {
@@ -374,12 +374,12 @@ static void mshv_region_destroy(struct kref *ref)
374374

375375
void mshv_region_put(struct mshv_mem_region *region)
376376
{
377-
kref_put(&region->refcount, mshv_region_destroy);
377+
kref_put(&region->mreg_refcount, mshv_region_destroy);
378378
}
379379

380380
int mshv_region_get(struct mshv_mem_region *region)
381381
{
382-
return kref_get_unless_zero(&region->refcount);
382+
return kref_get_unless_zero(&region->mreg_refcount);
383383
}
384384

385385
/**
@@ -405,16 +405,16 @@ static int mshv_region_hmm_fault_and_lock(struct mshv_mem_region *region,
405405
int ret;
406406

407407
range->notifier_seq = mmu_interval_read_begin(range->notifier);
408-
mmap_read_lock(region->mni.mm);
408+
mmap_read_lock(region->mreg_mni.mm);
409409
ret = hmm_range_fault(range);
410-
mmap_read_unlock(region->mni.mm);
410+
mmap_read_unlock(region->mreg_mni.mm);
411411
if (ret)
412412
return ret;
413413

414-
mutex_lock(&region->mutex);
414+
mutex_lock(&region->mreg_mutex);
415415

416416
if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
417-
mutex_unlock(&region->mutex);
417+
mutex_unlock(&region->mreg_mutex);
418418
cond_resched();
419419
return -EBUSY;
420420
}
@@ -438,7 +438,7 @@ static int mshv_region_range_fault(struct mshv_mem_region *region,
438438
u64 page_offset, u64 page_count)
439439
{
440440
struct hmm_range range = {
441-
.notifier = &region->mni,
441+
.notifier = &region->mreg_mni,
442442
.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
443443
};
444444
unsigned long *pfns;
@@ -461,12 +461,12 @@ static int mshv_region_range_fault(struct mshv_mem_region *region,
461461
goto out;
462462

463463
for (i = 0; i < page_count; i++)
464-
region->pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
464+
region->mreg_pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
465465

466466
ret = mshv_region_remap_pages(region, region->hv_map_flags,
467467
page_offset, page_count);
468468

469-
mutex_unlock(&region->mutex);
469+
mutex_unlock(&region->mreg_mutex);
470470
out:
471471
kfree(pfns);
472472
return ret;
@@ -520,7 +520,7 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
520520
{
521521
struct mshv_mem_region *region = container_of(mni,
522522
struct mshv_mem_region,
523-
mni);
523+
mreg_mni);
524524
u64 page_offset, page_count;
525525
unsigned long mstart, mend;
526526
int ret = -EPERM;
@@ -533,8 +533,8 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
533533
page_count = HVPFN_DOWN(mend - mstart);
534534

535535
if (mmu_notifier_range_blockable(range))
536-
mutex_lock(&region->mutex);
537-
else if (!mutex_trylock(&region->mutex))
536+
mutex_lock(&region->mreg_mutex);
537+
else if (!mutex_trylock(&region->mreg_mutex))
538538
goto out_fail;
539539

540540
mmu_interval_set_seq(mni, cur_seq);
@@ -546,12 +546,12 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
546546

547547
mshv_region_invalidate_pages(region, page_offset, page_count);
548548

549-
mutex_unlock(&region->mutex);
549+
mutex_unlock(&region->mreg_mutex);
550550

551551
return true;
552552

553553
out_unlock:
554-
mutex_unlock(&region->mutex);
554+
mutex_unlock(&region->mreg_mutex);
555555
out_fail:
556556
WARN_ONCE(ret,
557557
"Failed to invalidate region %#llx-%#llx (range %#lx-%#lx, event: %u, pages %#llx-%#llx, mm: %#llx): %d\n",
@@ -568,21 +568,21 @@ static const struct mmu_interval_notifier_ops mshv_region_mni_ops = {
568568

569569
void mshv_region_movable_fini(struct mshv_mem_region *region)
570570
{
571-
mmu_interval_notifier_remove(&region->mni);
571+
mmu_interval_notifier_remove(&region->mreg_mni);
572572
}
573573

574574
bool mshv_region_movable_init(struct mshv_mem_region *region)
575575
{
576576
int ret;
577577

578-
ret = mmu_interval_notifier_insert(&region->mni, current->mm,
578+
ret = mmu_interval_notifier_insert(&region->mreg_mni, current->mm,
579579
region->start_uaddr,
580580
region->nr_pages << HV_HYP_PAGE_SHIFT,
581581
&mshv_region_mni_ops);
582582
if (ret)
583583
return false;
584584

585-
mutex_init(&region->mutex);
585+
mutex_init(&region->mreg_mutex);
586586

587587
return true;
588588
}

drivers/hv/mshv_root.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -82,16 +82,16 @@ enum mshv_region_type {
8282

8383
struct mshv_mem_region {
8484
struct hlist_node hnode;
85-
struct kref refcount;
85+
struct kref mreg_refcount;
8686
u64 nr_pages;
8787
u64 start_gfn;
8888
u64 start_uaddr;
8989
u32 hv_map_flags;
9090
struct mshv_partition *partition;
91-
enum mshv_region_type type;
92-
struct mmu_interval_notifier mni;
93-
struct mutex mutex; /* protects region pages remapping */
94-
struct page *pages[];
91+
enum mshv_region_type mreg_type;
92+
struct mmu_interval_notifier mreg_mni;
93+
struct mutex mreg_mutex; /* protects region pages remapping */
94+
struct page *mreg_pages[];
9595
};
9696

9797
struct mshv_irq_ack_notifier {

drivers/hv/mshv_root_main.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -650,7 +650,7 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
650650
return false;
651651

652652
/* Only movable memory ranges are supported for GPA intercepts */
653-
if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
653+
if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
654654
ret = mshv_region_handle_gfn_fault(region, gfn);
655655
else
656656
ret = false;
@@ -1193,12 +1193,12 @@ static int mshv_partition_create_region(struct mshv_partition *partition,
11931193
return PTR_ERR(rg);
11941194

11951195
if (is_mmio)
1196-
rg->type = MSHV_REGION_TYPE_MMIO;
1196+
rg->mreg_type = MSHV_REGION_TYPE_MMIO;
11971197
else if (mshv_partition_encrypted(partition) ||
11981198
!mshv_region_movable_init(rg))
1199-
rg->type = MSHV_REGION_TYPE_MEM_PINNED;
1199+
rg->mreg_type = MSHV_REGION_TYPE_MEM_PINNED;
12001200
else
1201-
rg->type = MSHV_REGION_TYPE_MEM_MOVABLE;
1201+
rg->mreg_type = MSHV_REGION_TYPE_MEM_MOVABLE;
12021202

12031203
rg->partition = partition;
12041204

@@ -1315,7 +1315,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
13151315
if (ret)
13161316
return ret;
13171317

1318-
switch (region->type) {
1318+
switch (region->mreg_type) {
13191319
case MSHV_REGION_TYPE_MEM_PINNED:
13201320
ret = mshv_prepare_pinned_region(region);
13211321
break;

0 commit comments

Comments
 (0)