From 2324b6fff518bebf6ec6363afe932c227f9d0b09 Mon Sep 17 00:00:00 2001
From: Dave Stevenson
Date: Wed, 14 Feb 2018 17:04:26 +0000
Subject: [PATCH 392/454] staging: bcm2835-camera: Do not bulk receive from
 service thread

vchi_bulk_queue_receive will queue up to a default of 4 bulk receives
on a connection before blocking. If called from the VCHI
service_callback thread, then that thread is unable to service the
VCHI_CALLBACK_BULK_RECEIVED events that would enable the queue call to
succeed.

Add a workqueue to schedule the call to vchi_bulk_queue_receive in an
alternate context to avoid the lock up.

Signed-off-by: Dave Stevenson
---
 .../vc04_services/bcm2835-camera/mmal-vchiq.c | 101 ++++++++++--------
 1 file changed, 59 insertions(+), 42 deletions(-)

diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -118,8 +118,10 @@ struct mmal_msg_context {
 	union {
 		struct {
-			/* work struct for defered callback - must come first */
+			/* work struct for buffer_cb callback */
 			struct work_struct work;
+			/* work struct for deferred callback */
+			struct work_struct buffer_to_host_work;
 			/* mmal instance */
 			struct vchiq_mmal_instance *instance;
 			/* mmal port */
@@ -174,6 +176,9 @@ struct vchiq_mmal_instance {
 	/* component to use next */
 	int component_idx;
 	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+	/* ordered workqueue to process all bulk operations */
+	struct workqueue_struct *bulk_wq;
 };
 
 static int __must_check
@@ -320,7 +325,44 @@ static void buffer_work_cb(struct work_s
 			msg_context->u.bulk.mmal_flags,
 			msg_context->u.bulk.dts,
 			msg_context->u.bulk.pts);
+}
+
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
+ * vchi_bulk_queue_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+	struct mmal_msg_context *msg_context =
+		container_of(work, struct mmal_msg_context,
+			     u.bulk.buffer_to_host_work);
+	struct vchiq_mmal_instance *instance = msg_context->instance;
+	unsigned long len = msg_context->u.bulk.buffer_used;
+	int ret;
+
+	if (!len)
+		/* Dummy receive to ensure the buffers remain in order */
+		len = 8;
+	/* queue the bulk submission */
+	vchi_service_use(instance->handle);
+	ret = vchi_bulk_queue_receive(instance->handle,
+				      msg_context->u.bulk.buffer->buffer,
+				      /* Actual receive needs to be a multiple
+				       * of 4 bytes
+				       */
+				      (len + 3) & ~3,
+				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+				      msg_context);
+
+	vchi_service_release(instance->handle);
+
+	if (ret != 0)
+		pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
+		       __func__, msg_context, ret);
 }
 
 /* enqueue a bulk receive for a given message context */
@@ -329,7 +371,6 @@ static int bulk_receive(struct vchiq_mma
 	struct mmal_msg_context *msg_context)
 {
 	unsigned long rd_len;
-	int ret;
 
 	rd_len = msg->u.buffer_from_host.buffer_header.length;
 
@@ -365,45 +406,10 @@ static int bulk_receive(struct vchiq_mma
 	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
 	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
 
-	/* queue the bulk submission */
-	vchi_service_use(instance->handle);
-	ret = vchi_bulk_queue_receive(instance->handle,
-				      msg_context->u.bulk.buffer->buffer,
-				      /* Actual receive needs to be a multiple
-				       * of 4 bytes
-				       */
-				      (rd_len + 3) & ~3,
-				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
-				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
-				      msg_context);
-
-	vchi_service_release(instance->handle);
-
-	return ret;
-}
-
-/* enque a dummy bulk receive for a given message context */
-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
-			      struct mmal_msg_context *msg_context)
-{
-	int ret;
-
-	/* zero length indicates this was a dummy transfer */
-	msg_context->u.bulk.buffer_used = 0;
-
-	/* queue the bulk submission */
-	vchi_service_use(instance->handle);
-
-	ret = vchi_bulk_queue_receive(instance->handle,
-				      instance->bulk_scratch,
-				      8,
-				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
-				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
-				      msg_context);
+	queue_work(msg_context->instance->bulk_wq,
+		   &msg_context->u.bulk.buffer_to_host_work);
 
-	vchi_service_release(instance->handle);
-
-	return ret;
+	return 0;
 }
 
 /* data in message, memcpy from packet into output buffer */
@@ -451,6 +457,8 @@ buffer_from_host(struct vchiq_mmal_insta
 
 	/* initialise work structure ready to schedule callback */
 	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
+		  buffer_to_host_work_cb);
 
 	/* prep the buffer from host message */
 	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */
@@ -531,7 +539,7 @@ static void buffer_to_host_cb(struct vch
 	if (msg->u.buffer_from_host.buffer_header.flags &
 	    MMAL_BUFFER_HEADER_FLAG_EOS) {
 		msg_context->u.bulk.status =
-			dummy_bulk_receive(instance, msg_context);
+			bulk_receive(instance, msg, msg_context);
 		if (msg_context->u.bulk.status == 0)
 			return;	/* successful bulk submission, bulk
 				 * completion will trigger callback
@@ -1862,6 +1870,9 @@ int vchiq_mmal_finalise(struct vchiq_mma
 
 	mutex_unlock(&instance->vchiq_mutex);
 
+	flush_workqueue(instance->bulk_wq);
+	destroy_workqueue(instance->bulk_wq);
+
 	vfree(instance->bulk_scratch);
 
 	mmal_context_map_destroy(&instance->context_map);
@@ -1935,6 +1946,11 @@ int vchiq_mmal_init(struct vchiq_mmal_in
 	params.callback_param = instance;
 
+	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
+						    WQ_MEM_RECLAIM);
+	if (!instance->bulk_wq)
+		goto err_free;
+
 	status = vchi_service_open(vchi_instance, &params, &instance->handle);
 	if (status) {
 		pr_err("Failed to open VCHI service connection (status=%d)\n",
 		       status);
@@ -1949,8 +1965,9 @@ int vchiq_mmal_init(struct vchiq_mmal_in
 	return 0;
 
 err_close_services:
-
 	vchi_service_close(instance->handle);
+	destroy_workqueue(instance->bulk_wq);
+err_free:
 	vfree(instance->bulk_scratch);
 	kfree(instance);
 	return -ENODEV;
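
A note for readers unfamiliar with the pattern: the deadlock described in the
commit message is the classic "blocking submission from a completion
callback" problem, and the fix is the standard ordered-workqueue deferral
idiom. The module below is a minimal, self-contained sketch of that idiom
only; every identifier in it (defer_ctx, defer_rx_work_cb, defer_wq, and so
on) is hypothetical, and it is not part of this patch or of the VCHI API.

/*
 * Minimal sketch (not part of this patch): defer a potentially
 * blocking submission from an event-callback context to an ordered
 * workqueue. All identifiers are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct defer_ctx {
	struct work_struct rx_work;	/* deferred bulk submission */
	int id;				/* stand-in per-transfer state */
};

static struct workqueue_struct *defer_wq;
static struct defer_ctx demo_ctx;

/*
 * Runs in workqueue context: free to block here, because the
 * event-callback thread keeps running and can deliver the
 * completions that would unblock a queued submission.
 */
static void defer_rx_work_cb(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx,
					     rx_work);

	pr_info("submitting transfer %d from the workqueue\n", ctx->id);
	/* a blocking *_queue_receive() style call would go here */
}

/* Called from the callback thread: must not block, so only queue. */
static void defer_event_cb(struct defer_ctx *ctx)
{
	queue_work(defer_wq, &ctx->rx_work);
}

static int __init defer_init(void)
{
	/*
	 * Ordered: at most one item executes at a time, in queueing
	 * order, which is what preserves buffer ordering.
	 * WQ_MEM_RECLAIM adds a rescuer thread so the queue still
	 * makes progress under memory pressure.
	 */
	defer_wq = alloc_ordered_workqueue("defer-wq", WQ_MEM_RECLAIM);
	if (!defer_wq)
		return -ENOMEM;

	demo_ctx.id = 1;
	INIT_WORK(&demo_ctx.rx_work, defer_rx_work_cb);
	defer_event_cb(&demo_ctx);	/* as the service callback would */
	return 0;
}

static void __exit defer_exit(void)
{
	flush_workqueue(defer_wq);	/* let queued work finish */
	destroy_workqueue(defer_wq);
}

module_init(defer_init);
module_exit(defer_exit);
MODULE_LICENSE("GPL");

The same structure is visible in the patch itself: one work_struct per
message context initialised in buffer_from_host(), queue_work() from the
service callback path in bulk_receive(), the blocking
vchi_bulk_queue_receive() call moved into buffer_to_host_work_cb(), and the
ordered workqueue created in vchiq_mmal_init() and torn down in
vchiq_mmal_finalise().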