From 361974032ae1b0eec36c51a8f1cd9b447864fcbd Mon Sep 17 00:00:00 2001 From: Sugizaki Yukimasa Date: Fri, 5 Jan 2018 00:44:00 +0900 Subject: [PATCH 150/454] vcsm: Unify cache manipulating functions Signed-off-by: Sugizaki Yukimasa --- drivers/char/broadcom/vc_sm/vmcs_sm.c | 309 +++++++++++--------------- 1 file changed, 132 insertions(+), 177 deletions(-) --- a/drivers/char/broadcom/vc_sm/vmcs_sm.c +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c @@ -1256,61 +1256,106 @@ static const struct vm_operations_struct .fault = vcsm_vma_fault, }; -/* Walks a VMA and clean each valid page from the cache */ -static void vcsm_vma_cache_clean_page_range(unsigned long addr, - unsigned long end) +static int clean_invalid_mem_2d(const void __user *addr, + const size_t block_count, const size_t block_size, const size_t stride, + const unsigned cache_op) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - unsigned long pgd_next, pud_next, pmd_next; - - if (addr >= end) - return; - - /* Walk PGD */ - pgd = pgd_offset(current->mm, addr); - do { - pgd_next = pgd_addr_end(addr, end); - - if (pgd_none(*pgd) || pgd_bad(*pgd)) - continue; - - /* Walk PUD */ - pud = pud_offset(pgd, addr); - do { - pud_next = pud_addr_end(addr, pgd_next); - if (pud_none(*pud) || pud_bad(*pud)) - continue; - - /* Walk PMD */ - pmd = pmd_offset(pud, addr); - do { - pmd_next = pmd_addr_end(addr, pud_next); - if (pmd_none(*pmd) || pmd_bad(*pmd)) - continue; - - /* Walk PTE */ - pte = pte_offset_map(pmd, addr); - do { - if (pte_none(*pte) - || !pte_present(*pte)) - continue; - - /* Clean + invalidate */ - dmac_flush_range((const void *) addr, - (const void *) - (addr + PAGE_SIZE)); - - } while (pte++, addr += - PAGE_SIZE, addr != pmd_next); - pte_unmap(pte); + size_t i; + void (*op_fn)(const void*, const void*); - } while (pmd++, addr = pmd_next, addr != pud_next); + if (block_size <= 0) { + pr_err("[%s]: size cannot be 0\n", __func__); + return -EINVAL; + } + + switch (cache_op) { + case VCSM_CACHE_OP_INV: + op_fn = dmac_inv_range; + break; + case VCSM_CACHE_OP_CLEAN: + op_fn = dmac_clean_range; + break; + case VCSM_CACHE_OP_FLUSH: + op_fn = dmac_flush_range; + break; + default: + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op); + return -EINVAL; + } + + for (i = 0; i < block_count; i ++, addr += stride) + op_fn(addr, addr + block_size); + + return 0; +} + +static int clean_invalid_mem(const void __user *addr, const size_t size, + const unsigned cache_op) +{ + return clean_invalid_mem_2d(addr, 1, size, 0, cache_op); +} + +static int clean_invalid_resource(const void __user *addr, const size_t size, + const unsigned cache_op, const int usr_hdl, + struct sm_resource_t *resource) +{ + int err; + enum sm_stats_t stat_attempt, stat_failure; + void __user *res_addr; + + if (resource == NULL) { + pr_err("[%s]: resource is NULL\n", __func__); + return -EINVAL; + } + if (resource->res_cached != VMCS_SM_CACHE_HOST && + resource->res_cached != VMCS_SM_CACHE_BOTH) + return 0; + + switch (cache_op) { + case VCSM_CACHE_OP_INV: + stat_attempt = INVALID; + stat_failure = INVALID_FAIL; + break; + case VCSM_CACHE_OP_CLEAN: + /* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. 
*/ + stat_attempt = FLUSH; + stat_failure = FLUSH_FAIL; + break; + case VCSM_CACHE_OP_FLUSH: + stat_attempt = FLUSH; + stat_failure = FLUSH_FAIL; + break; + default: + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op); + return -EINVAL; + } + resource->res_stats[stat_attempt]++; - } while (pud++, addr = pud_next, addr != pgd_next); - } while (pgd++, addr = pgd_next, addr != end); + if (size > resource->res_size) { + pr_err("[%s]: size (0x%08zu) is larger than res_size (0x%08zu)\n", + __func__, size, resource->res_size); + return -EFAULT; + } + res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle( + current->tgid, usr_hdl); + if (res_addr == NULL) { + pr_err("[%s]: Failed to get user address " + "from pid (%d) and user handle (%d)\n", __func__, current->tgid, + resource->res_handle); + return -EINVAL; + } + if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) { + pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n", + __func__, addr, addr + size, res_addr, + res_addr + resource->res_size); + return -EFAULT; + } + + err = clean_invalid_mem(addr, size, cache_op); + if (err) + resource->res_stats[stat_failure]++; + + return err; } /* Map an allocated data into something that the user space. */ @@ -1952,14 +1997,13 @@ static int vc_sm_ioctl_unlock(struct sm_ list_for_each_entry(map, &resource->map_list, resource_map_list) { if (map->vma) { - unsigned long start; - unsigned long end; - - start = map->vma->vm_start; - end = map->vma->vm_end; + const unsigned long start = map->vma->vm_start; + const unsigned long end = map->vma->vm_end; - vcsm_vma_cache_clean_page_range( - start, end); + ret = clean_invalid_mem((void __user*) start, end - start, + VCSM_CACHE_OP_FLUSH); + if (ret) + goto error; } } up_read(¤t->mm->mmap_sem); @@ -2833,41 +2877,17 @@ static long vc_sm_ioctl(struct file *fil /* Locate resource from GUID. */ resource = vmcs_sm_acquire_resource(file_data, ioparam.handle); - - if ((resource != NULL) && resource->res_cached) { - dma_addr_t phys_addr = 0; - - resource->res_stats[FLUSH]++; - - phys_addr = - (dma_addr_t)((uint32_t) - resource->res_base_mem & - 0x3FFFFFFF); - phys_addr += (dma_addr_t)mm_vc_mem_phys_addr; - - /* L1 cache flush */ - down_read(¤t->mm->mmap_sem); - vcsm_vma_cache_clean_page_range((unsigned long) - ioparam.addr, - (unsigned long) - ioparam.addr + - ioparam.size); - up_read(¤t->mm->mmap_sem); - - /* L2 cache flush */ - outer_clean_range(phys_addr, - phys_addr + - (size_t) ioparam.size); - } else if (resource == NULL) { + if (resource == NULL) { ret = -EINVAL; goto out; } - if (resource) - vmcs_sm_release_resource(resource, 0); - - /* Done. */ - goto out; + ret = clean_invalid_resource((void __user*) ioparam.addr, + ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle, + resource); + vmcs_sm_release_resource(resource, 0); + if (ret) + goto out; } break; @@ -2888,41 +2908,16 @@ static long vc_sm_ioctl(struct file *fil /* Locate resource from GUID. 
*/ resource = vmcs_sm_acquire_resource(file_data, ioparam.handle); - - if ((resource != NULL) && resource->res_cached) { - dma_addr_t phys_addr = 0; - - resource->res_stats[INVALID]++; - - phys_addr = - (dma_addr_t)((uint32_t) - resource->res_base_mem & - 0x3FFFFFFF); - phys_addr += (dma_addr_t)mm_vc_mem_phys_addr; - - /* L2 cache invalidate */ - outer_inv_range(phys_addr, - phys_addr + - (size_t) ioparam.size); - - /* L1 cache invalidate */ - down_read(¤t->mm->mmap_sem); - vcsm_vma_cache_clean_page_range((unsigned long) - ioparam.addr, - (unsigned long) - ioparam.addr + - ioparam.size); - up_read(¤t->mm->mmap_sem); - } else if (resource == NULL) { + if (resource == NULL) { ret = -EINVAL; goto out; } - if (resource) - vmcs_sm_release_resource(resource, 0); - - /* Done. */ - goto out; + ret = clean_invalid_resource((void __user*) ioparam.addr, + ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource); + vmcs_sm_release_resource(resource, 0); + if (ret) + goto out; } break; @@ -2941,43 +2936,27 @@ static long vc_sm_ioctl(struct file *fil goto out; } for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) { - switch (ioparam.s[i].cmd) { - case VCSM_CACHE_OP_INV: /* L1/L2 invalidate virtual range */ - case VCSM_CACHE_OP_FLUSH: /* L1/L2 clean physical range */ - case VCSM_CACHE_OP_CLEAN: /* L1/L2 clean+invalidate all */ - /* Locate resource from GUID. */ - resource = - vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle); - - if ((resource != NULL) && resource->res_cached) { - unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE - 1); - unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); - - resource->res_stats[ioparam.s[i].cmd == 1 ? INVALID : FLUSH]++; - - /* L1/L2 cache flush */ - down_read(¤t->mm->mmap_sem); - vcsm_vma_cache_clean_page_range(base, end); - up_read(¤t->mm->mmap_sem); - } else if (resource == NULL) { - ret = -EINVAL; - goto out; - } - - if (resource) - vmcs_sm_release_resource(resource, 0); - - break; - default: - break; /* NOOP */ + /* Locate resource from GUID. */ + resource = + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle); + if (resource == NULL) { + ret = -EINVAL; + goto out; } + + ret = clean_invalid_resource((void __user*) ioparam.s[i].addr, + ioparam.s[i].size, ioparam.s[i].cmd, + ioparam.s[i].handle, resource); + vmcs_sm_release_resource(resource, 0); + if (ret) + goto out; } } break; /* Flush/Invalidate the cache for a given mapping. 
*/ case VMCS_SM_CMD_CLEAN_INVALID2: { - int i, j; + int i; struct vmcs_sm_ioctl_clean_invalid2 ioparam; struct vmcs_sm_ioctl_clean_invalid_block *block = NULL; @@ -3006,36 +2985,12 @@ static long vc_sm_ioctl(struct file *fil for (i = 0; i < ioparam.op_count; i++) { const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i; - void (*op_fn)(const void *, const void *); - switch(op->invalidate_mode & 3) { - case VCSM_CACHE_OP_INV: - op_fn = dmac_inv_range; - break; - case VCSM_CACHE_OP_CLEAN: - op_fn = dmac_clean_range; - break; - case VCSM_CACHE_OP_FLUSH: - op_fn = dmac_flush_range; - break; - default: - op_fn = 0; - break; - } - - if ((op->invalidate_mode & ~3) != 0) { - ret = -EINVAL; - break; - } - - if (op_fn == 0) - continue; - - for (j = 0; j < op->block_count; ++j) { - const char * const base = (const char *)op->start_address + j * op->inter_block_stride; - const char * const end = base + op->block_size; - op_fn(base, end); - } + ret = clean_invalid_mem_2d((void __user*) op->start_address, + op->block_count, op->block_size, + op->inter_block_stride, op->invalidate_mode); + if (ret) + goto out; } kfree(block); }
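
Note (not part of the patch): the unified helpers reduce every cache path to one loop. clean_invalid_mem() is simply the block_count == 1 case of clean_invalid_mem_2d(), which is how the VMCS_SM_CMD_FLUSH, VMCS_SM_CMD_INVALID and VMCS_SM_CMD_CLEAN_INVALID handlers now all funnel through the same code. A minimal user-space sketch of the 2D iteration pattern the patch introduces is below; touch() is a hypothetical stand-in for the dmac_inv_range/dmac_clean_range/dmac_flush_range maintenance calls and the example only mirrors the address walk, not the actual cache operations.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the dmac_*_range cache maintenance calls used in the patch. */
static void touch(const void *start, const void *end)
{
	printf("op over [%p, %p)\n", start, end);
}

/*
 * Mirrors the loop in clean_invalid_mem_2d(): block_count blocks of
 * block_size bytes, each block starting `stride` bytes after the previous one.
 */
static void walk_2d(const char *addr, size_t block_count,
		    size_t block_size, size_t stride)
{
	size_t i;

	for (i = 0; i < block_count; i++, addr += stride)
		touch(addr, addr + block_size);
}

int main(void)
{
	static char buf[4096];

	/* e.g. 4 rows of 64 bytes out of a 256-byte-pitch 2D buffer */
	walk_2d(buf, 4, 64, 256);
	return 0;
}

This is also the shape of a VMCS_SM_CMD_CLEAN_INVALID2 request: each vmcs_sm_ioctl_clean_invalid_block supplies start_address, block_count, block_size and inter_block_stride, and the handler now just forwards them to clean_invalid_mem_2d() with the block's invalidate_mode as the cache_op.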