diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 621bce7e101c1..cf2c61cf4e890 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -22,6 +22,8 @@
 #include <linux/slab.h>
 #include <linux/firmware/qcom/qcom_scm.h>
 #include <uapi/misc/fastrpc.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
 
 #define ADSP_DOMAIN_ID (0)
 #define MDSP_DOMAIN_ID (1)
@@ -37,7 +39,8 @@
 #define FASTRPC_CTX_MAX (256)
 #define FASTRPC_INIT_HANDLE 1
 #define FASTRPC_DSP_UTILITIES_HANDLE 2
-#define FASTRPC_CTXID_MASK (0xFF0)
+#define FASTRPC_MAX_STATIC_HANDLE (20)
+#define FASTRPC_CTXID_MASK (0xFF00)
 #define INIT_FILELEN_MAX (2 * 1024 * 1024)
 #define INIT_FILE_NAMELEN_MAX (128)
 #define FASTRPC_DEVICE_NAME "fastrpc"
@@ -105,6 +108,20 @@
 
 #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
 
+/* Poll response number from remote processor for call completion */
+#define FASTRPC_POLL_RESPONSE (0xdecaf)
+
+/* Polling mode timeout limit (us) */
+#define FASTRPC_POLL_MAX_TIMEOUT_US (10000)
+
+/* Response types supported for RPC calls */
+enum fastrpc_response_flags {
+	/* normal job completion glink response */
+	NORMAL_RESPONSE = 0,
+	/* process updates poll memory instead of glink response */
+	POLL_MODE = 1,
+};
+
 struct fastrpc_phy_page {
 	u64 addr;		/* physical address */
 	u64 size;		/* size of contiguous region */
@@ -233,9 +250,16 @@ struct fastrpc_invoke_ctx {
 	int pid;
 	int client_id;
 	u32 sc;
+	u64 *fdlist;
 	u32 *crc;
+	/* Poll memory that the DSP updates */
+	u32 *poll;
 	u64 ctxid;
 	u64 msg_sz;
+	/* work done status flag */
+	bool is_work_done;
+	/* response flags from remote processor */
+	enum fastrpc_response_flags rsp_flags;
 	struct kref refcount;
 	struct list_head node; /* list of ctxs */
 	struct completion work;
@@ -486,7 +510,7 @@ static void fastrpc_context_free(struct kref *ref)
 		fastrpc_buf_free(ctx->buf);
 
 	spin_lock_irqsave(&cctx->lock, flags);
-	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
+	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 8);
 	spin_unlock_irqrestore(&cctx->lock, flags);
 
 	kfree(ctx->maps);
@@ -622,7 +646,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
 		spin_unlock_irqrestore(&cctx->lock, flags);
 		goto err_idr;
 	}
-	ctx->ctxid = ret << 4;
+	ctx->ctxid = ret << 8;
 	spin_unlock_irqrestore(&cctx->lock, flags);
 
 	kref_init(&ctx->refcount);
@@ -890,7 +914,8 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
 		sizeof(struct fastrpc_invoke_buf) +
 		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
 		sizeof(u64) * FASTRPC_MAX_FDLIST +
-		sizeof(u32) * FASTRPC_MAX_CRCLIST;
+		sizeof(u32) * FASTRPC_MAX_CRCLIST +
+		sizeof(u32); /* one u32 of poll memory */
 
 	return size;
 }
@@ -985,6 +1010,9 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 	rpra = ctx->buf->virt;
 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
+	ctx->fdlist = (u64 *)(pages + ctx->nscalars);
+	ctx->crc = (u32 *)(ctx->fdlist + FASTRPC_MAX_FDLIST);
+	ctx->poll = (u32 *)(ctx->crc + FASTRPC_MAX_CRCLIST);
 	args = (uintptr_t)ctx->buf->virt + metalen;
 	rlen = pkt_size - metalen;
 	ctx->rpra = rpra;
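
For orientation, a sketch (not part of the patch) of the metadata region at the head of the invoke buffer after this change. The driver derives these positions with pointer arithmetic rather than a struct; NSCALARS below is a stand-in for ctx->nscalars, and the trailing u32 of poll memory is what the extra sizeof(u32) in fastrpc_get_meta_size() accounts for:

	/* Illustrative layout only; NSCALARS is a made-up constant. */
	enum { NSCALARS = 4 };	/* stand-in for ctx->nscalars */

	struct fastrpc_meta_layout_sketch {
		union fastrpc_remote_arg rpra[NSCALARS];  /* remote args */
		struct fastrpc_invoke_buf list[NSCALARS]; /* arg descriptors */
		struct fastrpc_phy_page pages[NSCALARS];  /* physical pages */
		u64 fdlist[FASTRPC_MAX_FDLIST]; /* fds the DSP asks the CPU to release */
		u32 crc[FASTRPC_MAX_CRCLIST];   /* optional per-buffer CRCs */
		u32 poll;                       /* DSP writes FASTRPC_POLL_RESPONSE here */
	};
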
@@ -1087,18 +1115,10 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 	union fastrpc_remote_arg *rpra = ctx->rpra;
 	struct fastrpc_user *fl = ctx->fl;
 	struct fastrpc_map *mmap = NULL;
-	struct fastrpc_invoke_buf *list;
-	struct fastrpc_phy_page *pages;
-	u64 *fdlist;
-	int i, inbufs, outbufs, handles;
+	int i, inbufs;
 	int ret = 0;
 
 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
-	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
-	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
-	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
-	pages = fastrpc_phy_page_start(list, ctx->nscalars);
-	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
 
 	for (i = inbufs; i < ctx->nbufs; ++i) {
 		if (!ctx->maps[i]) {
@@ -1120,9 +1140,9 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 cleanup_fdlist:
 	/* Clean up fdlist which is updated by DSP */
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
-		if (!fdlist[i])
+		if (!ctx->fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+		if (!fastrpc_map_lookup(fl, (int)ctx->fdlist[i], &mmap))
 			fastrpc_map_put(mmap);
 	}
 
@@ -1161,6 +1181,83 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
 
 	return ret;
 }
 
+static inline u32 fastrpc_poll_op(void *p)
+{
+	struct fastrpc_invoke_ctx *ctx = p;
+
+	dma_rmb();
+	return READ_ONCE(*ctx->poll);
+}
+
+static int poll_for_remote_response(struct fastrpc_invoke_ctx *ctx)
+{
+	u32 val;
+	int ret;
+
+	/*
+	 * Poll until the DSP writes FASTRPC_POLL_RESPONSE into *ctx->poll
+	 * or until another path marks the work done.
+	 */
+	ret = read_poll_timeout_atomic(fastrpc_poll_op, val,
+				       (val == FASTRPC_POLL_RESPONSE) ||
+				       ctx->is_work_done, 1,
+				       FASTRPC_POLL_MAX_TIMEOUT_US, false, ctx);
+	if (!ret && val == FASTRPC_POLL_RESPONSE) {
+		ctx->is_work_done = true;
+		ctx->retval = 0;
+	}
+
+	if (ret == -ETIMEDOUT)
+		ret = -EIO;
+
+	return ret;
+}
+
+static inline int fastrpc_wait_for_response(struct fastrpc_invoke_ctx *ctx,
+					    u32 kernel)
+{
+	int err = 0;
+
+	if (kernel) {
+		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+			err = -ETIMEDOUT;
+	} else {
+		err = wait_for_completion_interruptible(&ctx->work);
+	}
+
+	return err;
+}
+
+static int fastrpc_wait_for_completion(struct fastrpc_invoke_ctx *ctx,
+				       u32 kernel)
+{
+	int err;
+
+	do {
+		switch (ctx->rsp_flags) {
+		case NORMAL_RESPONSE:
+			err = fastrpc_wait_for_response(ctx, kernel);
+			if (err || ctx->is_work_done)
+				return err;
+			break;
+		case POLL_MODE:
+			err = poll_for_remote_response(ctx);
+			/* If polling timed out, move to normal response mode */
+			if (err)
+				ctx->rsp_flags = NORMAL_RESPONSE;
+			break;
+		default:
+			dev_dbg(ctx->fl->sctx->dev,
+				"unsupported response type:0x%x\n", ctx->rsp_flags);
+			return -EBADR;
+		}
+	} while (!ctx->is_work_done);
+
+	return err;
+}
+
 static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
 				   u32 handle, u32 sc,
 				   struct fastrpc_invoke_args *args)
@@ -1196,16 +1293,19 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
 	if (err)
 		goto bail;
 
-	if (kernel) {
-		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
-			err = -ETIMEDOUT;
-	} else {
-		err = wait_for_completion_interruptible(&ctx->work);
-	}
+	if (handle > FASTRPC_MAX_STATIC_HANDLE && fl->pd == USER_PD)
+		ctx->rsp_flags = POLL_MODE;
 
+	err = fastrpc_wait_for_completion(ctx, kernel);
 	if (err)
 		goto bail;
 
+	if (!ctx->is_work_done) {
+		err = -ETIMEDOUT;
+		dev_dbg(fl->sctx->dev, "Invalid workdone state for handle 0x%x, sc 0x%x\n",
+			handle, sc);
+		goto bail;
+	}
+
 	/* make sure that all memory writes by DSP are seen by CPU */
 	dma_rmb();
 	/* populate all the output buffers with results */
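
For reviewers unfamiliar with read_poll_timeout_atomic() from <linux/iopoll.h>: it busy-waits, invoking the op with the trailing arguments and storing the result in val until the condition holds or the timeout expires, and returns 0 or -ETIMEDOUT. A hand-rolled approximation of the call in poll_for_remote_response() above (the real macro tracks elapsed time with ktime; poll_sketch() is a made-up name):

	static int poll_sketch(struct fastrpc_invoke_ctx *ctx)
	{
		u64 waited_us = 0;
		u32 val;

		for (;;) {
			/* fastrpc_poll_op() = dma_rmb() + READ_ONCE(*ctx->poll) */
			val = fastrpc_poll_op(ctx);
			if (val == FASTRPC_POLL_RESPONSE || ctx->is_work_done)
				return 0;	/* condition met */
			if (waited_us >= FASTRPC_POLL_MAX_TIMEOUT_US)
				return -ETIMEDOUT;
			udelay(1);		/* the delay_us argument */
			waited_us++;
		}
	}
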
@@ -1809,24 +1909,33 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
 	return 0;
 }
 
-static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+static int fastrpc_req_munmap_dsp(struct fastrpc_user *fl, u64 raddr, u64 size)
 {
 	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
 	struct fastrpc_munmap_req_msg req_msg;
-	struct device *dev = fl->sctx->dev;
 	int err;
 	u32 sc;
 
 	req_msg.client_id = fl->client_id;
-	req_msg.size = buf->size;
-	req_msg.vaddr = buf->raddr;
+	req_msg.size = size;
+	req_msg.vaddr = raddr;
 
 	args[0].ptr = (u64) (uintptr_t) &req_msg;
 	args[0].length = sizeof(req_msg);
 
 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
 				      &args[0]);
+
+	return err;
+}
+
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+{
+	struct device *dev = fl->sctx->dev;
+	int err;
+
+	err = fastrpc_req_munmap_dsp(fl, buf->raddr, buf->size);
 	if (!err) {
 		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
 		spin_lock(&fl->lock);
@@ -1843,8 +1952,10 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
 {
 	struct fastrpc_buf *buf = NULL, *iter, *b;
+	struct fastrpc_map *map = NULL, *iterm, *m;
 	struct fastrpc_req_munmap req;
 	struct device *dev = fl->sctx->dev;
+	int err;
 
 	if (copy_from_user(&req, argp, sizeof(req)))
 		return -EFAULT;
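
The helper split above and the lookup rework in the next hunk leave the userspace contract untouched: FASTRPC_IOCTL_MUNMAP stays keyed purely on the remote address, and the driver now simply tries its buffer list first and falls back to fd-backed maps. A minimal caller sketch against the uapi header (fastrpc_fd, remote_addr and map_size are placeholders):

	#include <sys/ioctl.h>
	#include <misc/fastrpc.h>	/* installed from uapi/misc/fastrpc.h */

	struct fastrpc_req_munmap unmap_req = {
		.vaddrout = remote_addr,	/* returned by FASTRPC_IOCTL_MMAP */
		.size	  = map_size,
	};

	if (ioctl(fastrpc_fd, FASTRPC_IOCTL_MUNMAP, &unmap_req) < 0)
		perror("FASTRPC_IOCTL_MUNMAP");
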
@@ -1858,35 +1969,91 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
 	}
 	spin_unlock(&fl->lock);
 
-	if (!buf) {
-		dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
+	if (buf) {
+		err = fastrpc_req_munmap_impl(fl, buf);
+		if (err) {
+			spin_lock(&fl->lock);
+			list_add_tail(&buf->node, &fl->mmaps);
+			spin_unlock(&fl->lock);
+		}
+		return err;
+	}
+
+	spin_lock(&fl->lock);
+	list_for_each_entry_safe(iterm, m, &fl->maps, node) {
+		if (iterm->raddr == req.vaddrout) {
+			map = iterm;
+			list_del(&iterm->node);
+			break;
+		}
+	}
+	spin_unlock(&fl->lock);
+	if (!map) {
+		dev_dbg(dev, "buffer/map not found addr 0x%09llx, len 0x%08llx\n",
 			req.vaddrout, req.size);
 		return -EINVAL;
 	}
 
-	return fastrpc_req_munmap_impl(fl, buf);
+	err = fastrpc_req_munmap_dsp(fl, map->raddr, map->size);
+	if (err) {
+		dev_dbg(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
+		spin_lock(&fl->lock);
+		list_add_tail(&map->node, &fl->maps);
+		spin_unlock(&fl->lock);
+	} else {
+		fastrpc_map_put(map);
+	}
+	return err;
 }
 
-static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+static int fastrpc_req_map_dsp(struct fastrpc_user *fl, u64 phys,
+			       u64 size, u32 flag, u64 vaddrin,
+			       u64 *raddr)
 {
 	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
-	struct fastrpc_buf *buf = NULL;
 	struct fastrpc_mmap_req_msg req_msg;
 	struct fastrpc_mmap_rsp_msg rsp_msg;
 	struct fastrpc_phy_page pages;
-	struct fastrpc_req_mmap req;
-	struct device *dev = fl->sctx->dev;
 	int err;
 	u32 sc;
 
-	if (copy_from_user(&req, argp, sizeof(req)))
-		return -EFAULT;
+	req_msg.client_id = fl->client_id;
+	req_msg.flags = flag;
+	req_msg.vaddr = vaddrin;
+	req_msg.num = sizeof(pages);
 
-	if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
-		dev_err(dev, "flag not supported 0x%x\n", req.flags);
+	args[0].ptr = (u64) (uintptr_t) &req_msg;
+	args[0].length = sizeof(req_msg);
 
-		return -EINVAL;
+	pages.addr = phys;
+	pages.size = size;
+
+	args[1].ptr = (u64) (uintptr_t) &pages;
+	args[1].length = sizeof(pages);
+
+	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+	args[2].length = sizeof(rsp_msg);
+
+	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+				      &args[0]);
+	if (err) {
+		dev_err(fl->sctx->dev, "mmap error (len 0x%08llx)\n", size);
+		return err;
 	}
+	*raddr = rsp_msg.vaddr;
+
+	return 0;
+}
+
+static int fastrpc_req_buf_alloc(struct fastrpc_user *fl,
+				 struct fastrpc_req_mmap req,
+				 char __user *argp)
+{
+	struct device *dev = fl->sctx->dev;
+	struct fastrpc_buf *buf = NULL;
+	u64 raddr = 0;
+	int err;
 
 	if (req.vaddrin) {
 		dev_err(dev, "adding user allocated pages is not supported\n");
@@ -1903,26 +2070,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 		return err;
 	}
 
-	req_msg.client_id = fl->client_id;
-	req_msg.flags = req.flags;
-	req_msg.vaddr = req.vaddrin;
-	req_msg.num = sizeof(pages);
-
-	args[0].ptr = (u64) (uintptr_t) &req_msg;
-	args[0].length = sizeof(req_msg);
-
-	pages.addr = buf->phys;
-	pages.size = buf->size;
-
-	args[1].ptr = (u64) (uintptr_t) &pages;
-	args[1].length = sizeof(pages);
-
-	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
-	args[2].length = sizeof(rsp_msg);
-
-	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
-	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
-				      &args[0]);
+	err = fastrpc_req_map_dsp(fl, buf->phys, buf->size, req.flags,
+				  req.vaddrin, &raddr);
 	if (err) {
 		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
 		fastrpc_buf_free(buf);
@@ -1930,10 +2079,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 	}
 
 	/* update the buffer to be able to deallocate the memory on the DSP */
-	buf->raddr = (uintptr_t) rsp_msg.vaddr;
+	buf->raddr = (uintptr_t)raddr;
 
 	/* let the client know the address to use */
-	req.vaddrout = rsp_msg.vaddr;
+	req.vaddrout = raddr;
 
 	/* Add memory to static PD pool, protection thru hypervisor */
 	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
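
The flag value is what routes a FASTRPC_IOCTL_MMAP request down one helper or the other once fastrpc_req_mmap() is split below; a sketch of the two call shapes (field values and fds are placeholders, example only):

	/* DSP-heap growth: driver allocates the pages (fastrpc_req_buf_alloc() path) */
	struct fastrpc_req_mmap grow = {
		.flags = ADSP_MMAP_ADD_PAGES,
		.size  = 0x100000,
	};
	ioctl(fastrpc_fd, FASTRPC_IOCTL_MMAP, &grow);	/* grow.vaddrout = DSP address */

	/* Caller-supplied dma-buf: new fastrpc_req_map_create() path */
	struct fastrpc_req_mmap share = {
		.fd    = dmabuf_fd,
		.flags = 0,		/* anything but the two flags above */
		.size  = dmabuf_size,
	};
	ioctl(fastrpc_fd, FASTRPC_IOCTL_MMAP, &share);	/* share.vaddrout = DSP address */
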
@@ -1968,6 +2117,69 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 	return err;
 }
 
+static int fastrpc_req_map_create(struct fastrpc_user *fl,
+				  struct fastrpc_req_mmap req,
+				  char __user *argp)
+{
+	struct fastrpc_map *map = NULL;
+	struct device *dev = fl->sctx->dev;
+	u64 raddr = 0;
+	int err;
+
+	err = fastrpc_map_create(fl, req.fd, req.size, 0, &map);
+	if (err) {
+		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
+		return err;
+	}
+
+	err = fastrpc_req_map_dsp(fl, map->phys, map->size, req.flags,
+				  req.vaddrin, &raddr);
+	if (err)
+		goto err_invoke;
+
+	/* update the buffer to be able to deallocate the memory on the DSP */
+	map->raddr = raddr;
+
+	/* let the client know the address to use */
+	req.vaddrout = raddr;
+	dev_dbg(dev, "mmap\t\tpt 0x%09llx OK [len 0x%08llx]\n",
+		map->raddr, map->size);
+
+	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+		err = -EFAULT;
+		goto err_copy;
+	}
+
+	return 0;
+err_copy:
+	fastrpc_req_munmap_dsp(fl, map->raddr, map->size);
+err_invoke:
+	fastrpc_map_put(map);
+
+	return err;
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+	struct fastrpc_req_mmap req;
+	int err;
+
+	if (copy_from_user(&req, argp, sizeof(req)))
+		return -EFAULT;
+
+	if (req.flags == ADSP_MMAP_ADD_PAGES ||
+	    req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+		err = fastrpc_req_buf_alloc(fl, req, argp);
+	else
+		err = fastrpc_req_map_create(fl, req, argp);
+
+	return err;
+}
+
 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
 {
 	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
@@ -2455,7 +2667,7 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	if (len < sizeof(*rsp))
 		return -EINVAL;
 
-	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
+	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 8);
 
 	spin_lock_irqsave(&cctx->lock, flags);
 	ctx = idr_find(&cctx->ctx_idr, ctxid);
@@ -2468,6 +2680,7 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	}
 
 	ctx->retval = rsp->retval;
+	ctx->is_work_done = true;
 	complete(&ctx->work);
 
 	/*
	 * The DMA buffer associated with the context cannot be freed in
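
A note on the ordering in the last hunk: ctx->is_work_done must be published before complete() is called, because both the completion waiter and the poll loop re-check the flag the moment they wake; the opposite order opens this window:

	/*
	 * rpmsg callback                    invoking thread
	 * --------------                    ---------------
	 * complete(&ctx->work);
	 *                                   fastrpc_wait_for_completion() -> 0
	 *                                   if (!ctx->is_work_done)  <- still false
	 *                                           err = -ETIMEDOUT; <- spurious failure
	 * ctx->is_work_done = true;
	 */
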