#define _PCIDEV_IMAGE_C
#define _DEFAULT_SOURCE
#define _BSD_SOURCE
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>

#include "private.h"
#include "model.h"
#include "events.h"

#define FIND_REG(var, bank, name) \
    ctx->var = pcilib_find_register(pcilib, bank, name); \
    if (ctx->var == PCILIB_REGISTER_INVALID) { \
        err = PCILIB_ERROR_NOTFOUND; \
        pcilib_error("Unable to find a %s register", name); \
    }

#define GET_REG(reg, var) \
    if (!err) { \
        err = pcilib_read_register_by_id(pcilib, ctx->reg, &var); \
        if (err) { \
            pcilib_error("Error reading %s register", model_info->registers[ctx->reg].name); \
        } \
    }

#define SET_REG(reg, val) \
    if (!err) { \
        err = pcilib_write_register_by_id(pcilib, ctx->reg, val); \
        if (err) { \
            pcilib_error("Error writing %s register", model_info->registers[ctx->reg].name); \
        } \
    }

#define CHECK_REG(reg, check) \
    if (!err) { \
        err = pcilib_read_register_by_id(pcilib, ctx->reg, &value); \
        if (err) { \
            pcilib_error("Error reading %s register", model_info->registers[ctx->reg].name); \
        } \
        if (value != check) { \
            pcilib_error("Unexpected value (0x%lx) of register %s", value, model_info->registers[ctx->reg].name); \
            err = PCILIB_ERROR_INVALID_DATA; \
        } \
    }

#define CHECK_STATUS() //CHECK_REG(status_reg, PCIDEV_GET_EXPECTED_STATUS(ctx))

#define CHECK_VALUE(value, val) \
    if ((!err)&&(value != val)) { \
        pcilib_error("Unexpected value (0x%x) in data stream (0x%x is expected)", value, val); \
        err = PCILIB_ERROR_INVALID_DATA; \
    }

#define CHECK_FLAG(flag, check, ...) \
    if ((!err)&&(!(check))) { \
        pcilib_error("Unexpected value (0x%x) of " flag, __VA_ARGS__); \
        err = PCILIB_ERROR_INVALID_DATA; \
    }

#define LOCK(lock_name) \
    err = pcilib_try_lock(ctx->lock_name##_lock); \
    if (err) { \
        pcilib_error("pcidev is busy"); \
        return PCILIB_ERROR_BUSY; \
    } \
    ctx->lock_name##_locked = 1;

#define UNLOCK(lock_name) \
    if (ctx->lock_name##_locked) { \
        pcilib_unlock(ctx->lock_name##_lock); \
        ctx->lock_name##_locked = 0; \
    }

pcilib_context_t *pcidev_init(pcilib_t *pcilib) {
    int err = 0;

    pcidev_t *ctx = malloc(sizeof(pcidev_t));

    if (ctx) {
        memset(ctx, 0, sizeof(pcidev_t));

        ctx->run_lock = pcilib_get_lock(pcilib, PCILIB_LOCK_FLAGS_DEFAULT, "pcidev");
        ctx->stream_lock = pcilib_get_lock(pcilib, PCILIB_LOCK_FLAGS_DEFAULT, "pcidev/stream");
        ctx->trigger_lock = pcilib_get_lock(pcilib, PCILIB_LOCK_FLAGS_DEFAULT, "pcidev/trigger");
        if (!ctx->run_lock||!ctx->stream_lock||!ctx->trigger_lock) {
            free(ctx);
            pcilib_error("Failed to initialize locks to protect pcidev operation");
            return NULL;
        }

        ctx->buffer_size = PCIDEV_DEFAULT_BUFFER_SIZE;
        ctx->rdma = PCILIB_DMA_ENGINE_INVALID;

        if (err) {
            free(ctx);
            return NULL;
        }
    }

    return (pcilib_context_t*)ctx;
}

void pcidev_free(pcilib_context_t *vctx) {
    if (vctx) {
        pcidev_t *ctx = (pcidev_t*)vctx;
        pcidev_stop(vctx, PCILIB_EVENT_FLAGS_DEFAULT);
        if (ctx->trigger_lock) pcilib_return_lock(vctx->pcilib, PCILIB_LOCK_FLAGS_DEFAULT, ctx->trigger_lock);
        if (ctx->stream_lock) pcilib_return_lock(vctx->pcilib, PCILIB_LOCK_FLAGS_DEFAULT, ctx->stream_lock);
        if (ctx->run_lock) pcilib_return_lock(vctx->pcilib, PCILIB_LOCK_FLAGS_DEFAULT, ctx->run_lock);
        free(ctx);
    }
}

pcilib_dma_context_t *pcidev_init_dma(pcilib_context_t *vctx) {
    const pcilib_model_description_t *model_info = pcilib_get_model_description(vctx->pcilib);

    if ((!model_info->dma)||(!model_info->dma->api)||(!model_info->dma->api->init)) {
        pcilib_error("The DMA engine is not configured in model");
        return NULL;
    }

    return model_info->dma->api->init(vctx->pcilib, "pcidev", NULL);
}
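/*
 * Illustration only: the register-access macros above are not used by this sample,
 * but they are meant to be chained, with `err` carrying the first failure.  The
 * disabled sketch below shows the intended call pattern; the register ids
 * (control_reg, status_reg), the bank and register names, and the expected status
 * value are hypothetical and would have to exist in the model and in pcidev_t for
 * this to compile.
 */
#if 0
static int pcidev_configure(pcilib_t *pcilib, pcidev_t *ctx) {
    int err = 0;
    pcilib_register_value_t value;
    const pcilib_model_description_t *model_info = pcilib_get_model_description(pcilib);

    FIND_REG(control_reg, "fpga", "control");           // resolve register ids once (hypothetical names)
    FIND_REG(status_reg, "fpga", "status");

    SET_REG(control_reg, 1);                            // request data taking
    CHECK_REG(status_reg, 0x1);                         // verify the device acknowledged it

    return err;                                         // first error, or 0 on success
}
#endif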
int pcidev_set_buffer_size(pcidev_t *ctx, int size) {
    if (ctx->started) {
        pcilib_error("Can't change buffer size while grabbing");
        return PCILIB_ERROR_INVALID_REQUEST;
    }

    if (size < 2) {
        pcilib_error("The buffer size is too small");
        return PCILIB_ERROR_INVALID_REQUEST;
    }

    if ((size^(size-1)) < size) {
        pcilib_error("The buffer size is not a power of 2");
        return PCILIB_ERROR_INVALID_REQUEST;
    }

    ctx->buffer_size = size;

    return 0;
}

int pcidev_start(pcilib_context_t *vctx, pcilib_event_t event_mask, pcilib_event_flags_t flags) {
    int i;
    int err = 0;
    pcidev_t *ctx = (pcidev_t*)vctx;
//    pcilib_register_value_t value;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

    if (ctx->started) {
        pcilib_error("pcidev event grabbing is already started");
        return PCILIB_ERROR_INVALID_REQUEST;
    }

    LOCK(run);

    pcidev_debug(API, "pcidev: starting");

    ctx->event_id = 0;
    ctx->buffer_pos = 0;
    ctx->process_data = (flags&PCILIB_EVENT_FLAG_RAW_DATA_ONLY)?0:1;
    ctx->event_size = PCIDEV_EVENT_SIZE * sizeof(size_t);
    memset(&ctx->eio_timestamp, 0, sizeof(struct timeval));

    ctx->buffer = malloc(ctx->event_size * ctx->buffer_size);
    if (!ctx->buffer) {
        pcidev_stop(vctx, PCILIB_EVENT_FLAGS_DEFAULT);
        pcilib_error("Unable to allocate ring buffer (%lu bytes)", ctx->event_size * ctx->buffer_size);
        return PCILIB_ERROR_MEMORY;
    }

    ctx->event = (pcidev_event_t*)malloc(ctx->buffer_size * sizeof(pcidev_event_t));
    if (!ctx->event) {
        pcidev_stop(vctx, PCILIB_EVENT_FLAGS_DEFAULT);
        pcilib_error("Unable to allocate event-info buffer");
        return PCILIB_ERROR_MEMORY;
    }
    memset(ctx->event, 0, ctx->buffer_size * sizeof(pcidev_event_t));

    for (i = 0; i < ctx->buffer_size; i++) {
        err = pthread_rwlock_init(&ctx->event[i].mutex, NULL);
        if (err) break;
    }
    ctx->event_mutex_destroy = i;

    if (!err) {
        ctx->rdma = pcilib_find_dma_by_addr(vctx->pcilib, PCILIB_DMA_FROM_DEVICE, PCIDEV_DMA_ADDRESS);
        if (ctx->rdma == PCILIB_DMA_ENGINE_INVALID) {
            err = PCILIB_ERROR_NOTFOUND;
            pcilib_error("The C2S channel of DMA Engine (%u) is not found", PCIDEV_DMA_ADDRESS);
        } else {
            err = pcilib_start_dma(vctx->pcilib, ctx->rdma, PCILIB_DMA_FLAGS_DEFAULT);
            if (err) {
                ctx->rdma = PCILIB_DMA_ENGINE_INVALID;
                pcilib_error("Failed to initialize C2S channel of DMA Engine (%u)", PCIDEV_DMA_ADDRESS);
            }
        }
    }

    if (err) {
        pcidev_stop(vctx, PCILIB_EVENT_FLAGS_DEFAULT);
        return err;
    }

    if (vctx->params.autostop.duration) {
        gettimeofday(&ctx->autostop.timestamp, NULL);
        ctx->autostop.timestamp.tv_usec += vctx->params.autostop.duration % 1000000;
        if (ctx->autostop.timestamp.tv_usec > 999999) {
            ctx->autostop.timestamp.tv_sec += 1 + vctx->params.autostop.duration / 1000000;
            ctx->autostop.timestamp.tv_usec -= 1000000;
        } else {
            ctx->autostop.timestamp.tv_sec += vctx->params.autostop.duration / 1000000;
        }
    }

    if (vctx->params.autostop.max_events) {
        ctx->autostop.evid = vctx->params.autostop.max_events;
    }

    ctx->started = 1;

        // Technically we would need to start the DAQ and processing threads here. The DAQ thread would pull raw data from DMA and distribute it between the processing threads...
        // The current example doesn't spawn threads; it expects that enough buffering is performed by the hardware, so the user can just pull the data directly from it (a disabled sketch of a DAQ thread follows this function).
        // So, pcidev_stream here combines the functions of 3 threads: the DAQ thread pulling and buffering raw data, the processing threads generating event data, and the stream function feeding processed event data to the user callback.

    pcidev_debug(API, "pcidev: started");

    return 0;
}
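/*
 * Illustration only: a disabled sketch of how a dedicated DAQ thread could be
 * spawned at the point marked in pcidev_start above, instead of relying on
 * pcidev_stream to do everything.  The thread entry point and the ctx->daq_thread
 * field are hypothetical and do not exist in this sample; the DMA call mirrors the
 * loop in pcidev_stream, and pcidev_data_callback_user_t / pcidev_data_callback
 * are defined further down in this file.
 */
#if 0
static void *pcidev_daq_thread(void *arg) {
    pcidev_t *ctx = (pcidev_t*)arg;
    pcidev_data_callback_user_t datacb_ctx = {ctx, NULL, NULL};     // no per-event callback, raw DAQ only

    while (ctx->run_streamer) {
            // pull raw data from the C2S DMA channel; pcidev_data_callback buffers and processes it
        pcilib_stream_dma(ctx->pcictx.pcilib, ctx->rdma, 0, 0, PCILIB_DMA_FLAG_MULTIPACKET,
                PCIDEV_DMA_TIMEOUT, &pcidev_data_callback, &datacb_ctx);
    }
    return NULL;
}

        // in pcidev_start, after `ctx->started = 1` (daq_thread is a hypothetical pthread_t field):
        // pthread_create(&ctx->daq_thread, NULL, pcidev_daq_thread, ctx);
#endif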
int pcidev_stop(pcilib_context_t *vctx, pcilib_event_flags_t flags) {
    int i;
    pcidev_t *ctx = (pcidev_t*)vctx;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

    ctx->run_streamer = 0;

    if (flags&PCILIB_EVENT_FLAG_STOP_ONLY)
        return 0;

    pcidev_debug(API, "pcidev: stopping");

    if (ctx->started) {
            // Here we would also normally wait until all spawned threads are terminated
        while (ctx->streaming) {
            usleep(PCIDEV_NOEVENT_SLEEP);
        }
    }

    if (ctx->event_mutex_destroy) {
        for (i = 0; i < ctx->event_mutex_destroy; i++) {
            pthread_rwlock_destroy(&ctx->event[i].mutex);
        }
        ctx->event_mutex_destroy = 0;
    }

    if (ctx->rdma != PCILIB_DMA_ENGINE_INVALID) {
        pcilib_stop_dma(vctx->pcilib, ctx->rdma, PCILIB_DMA_FLAGS_DEFAULT);
        ctx->rdma = PCILIB_DMA_ENGINE_INVALID;
    }

    if (ctx->event) {
        free(ctx->event);
        ctx->event = NULL;
    }

    if (ctx->buffer) {
        free(ctx->buffer);
        ctx->buffer = NULL;
    }

    memset(&ctx->autostop, 0, sizeof(pcidev_autostop_t));
    memset(&ctx->eio_timestamp, 0, sizeof(struct timeval));

    ctx->event_id = 0;
    ctx->buffer_pos = 0;
    ctx->started = 0;

    pcidev_debug(API, "pcidev: stopped");

    UNLOCK(run);

    return 0;
}

int pcidev_reset(pcilib_context_t *vctx) {
    pcidev_t *ctx = (pcidev_t*)vctx;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

        // Technically we would need to stop event generation, reset the device, drop all pending data on the DMA channels, and restart the device

    pcidev_debug(API, "pcidev: reset done");

    return 0;
}

int pcidev_trigger(pcilib_context_t *vctx, pcilib_event_t event, size_t trigger_size, void *trigger_data) {
    return PCILIB_ERROR_NOTSUPPORTED;
}

typedef struct {
    pcidev_t *pcidev;
    pcilib_event_callback_t event_cb;
    void *event_cb_ctx;
} pcidev_data_callback_user_t;
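/*
 * Illustration only: a disabled sketch of the packet-consistency check that the
 * comments in pcidev_data_callback below describe.  The packet header layout
 * (magic, sequence number, CRC) and the marker value are entirely hypothetical;
 * a real device would define its own format, and the caller decides how to map
 * the result onto the streaming return codes.
 */
#if 0
typedef struct {
    uint32_t magic;                     // fixed marker identifying a packet start
    uint32_t seq;                       // per-event packet sequence number
    uint32_t crc;                       // checksum over the payload
} pcidev_packet_header_t;

static int pcidev_check_packet(size_t bufsize, const void *buf) {
    const pcidev_packet_header_t *hdr = (const pcidev_packet_header_t*)buf;

    if (bufsize < sizeof(pcidev_packet_header_t))
        return PCILIB_ERROR_INVALID_DATA;       // corrupt packet, let the caller abort

    if (hdr->magic != 0x50434944)               // hypothetical marker value
        return PCILIB_STREAMING_CONTINUE;       // mid-event data: skip until a new event starts

    return 0;                                   // looks consistent, process it
}
#endif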
static int pcidev_data_callback(void *user, pcilib_dma_flags_t flags, size_t bufsize, void *buf) {
    int i;
    int res;
    int eoi = 0;                                /**< end-of-integration flag */
    struct timeval packet_timestamp;            /**< hardware timestamp of the packet (set by FPGA) */

    static unsigned long packet_id = 0;

    pcidev_data_callback_user_t *datacb_ctx = (pcidev_data_callback_user_t*)user;
    pcidev_t *ctx = datacb_ctx->pcidev;
    pcilib_event_callback_t event_callback = datacb_ctx->event_cb;
    void *event_callback_ctx = datacb_ctx->event_cb_ctx;

    packet_id++;
    pcidev_debug_buffer(RAW_PACKETS, bufsize, buf, PCILIB_DEBUG_BUFFER_MKDIR, "event%4lu/packet%9lu", ctx->event_id, packet_id);

        // Packet analysis may come here
        // This sample illustrates time-based events (e.g. histograms). Alternatively, we could have size-based events where data is received until the expected size is reached (e.g. from a camera)
        // We can check packet consistency here (e.g. verify the magic number and packet sequence number, and check the CRC/MD5; see the disabled sketch above) and return a negative error code on failure
        // If we deal with multi-packet events and started to receive data in the middle of an event, here we would skip data until a new event begins by returning PCILIB_STREAMING_CONTINUE
        // If we deal with multi-packet events and the event data is not yet complete, we would buffer it and return PCILIB_STREAMING_REQ_FRAGMENT

        // We need to get the actual packet time-stamp here. The demo uses the host timestamp instead
    gettimeofday(&packet_timestamp, NULL);

        // Check if the integration time is over
    if (pcilib_timecmp(&packet_timestamp, &ctx->eio_timestamp) > 0) {
            // initialize timing on the first packet
        if (ctx->eio_timestamp.tv_sec == 0) {
            gettimeofday(&ctx->eio_timestamp, NULL);
            pcilib_add_timeout(&ctx->eio_timestamp, PCIDEV_INTEGRATION_PERIOD);
        } else
            eoi = 1;
    }

    if ((!ctx->run_streamer)
        ||((ctx->event_id == ctx->autostop.evid)&&(ctx->event_id))
        ||(pcilib_check_deadline(&ctx->autostop.timestamp, 0))
    ) {
        ctx->run_streamer = 0;
        return PCILIB_STREAMING_STOP;
    }

    if (eoi) {
        pcidev_event_info_t info;

        ctx->event[ctx->buffer_pos].event.info.type = PCILIB_EVENT0;
        ctx->event[ctx->buffer_pos].event.info.flags = 0;
        memcpy(&info, ctx->event + ctx->buffer_pos, sizeof(pcidev_event_info_t));

        if (event_callback) {
            res = event_callback(ctx->event_id + 1, (pcilib_event_info_t*)&info, event_callback_ctx);
            if (res <= 0) {
                ctx->run_streamer = 0;
                if (res < 0) return -res;
                return PCILIB_STREAMING_STOP;
            }
        }

        ctx->buffer_pos = (++ctx->event_id) % ctx->buffer_size;

            // Compute the next period. We don't handle the case when the processing is not fast enough and we fall behind; a real driver should, as it happens often.
        pcilib_add_timeout(&ctx->eio_timestamp, PCIDEV_INTEGRATION_PERIOD);
    }

    if (ctx->process_data) {
        if (eoi) {
            memset(ctx->buffer + ctx->buffer_pos * ctx->event_size, 0, ctx->event_size);
        }

            // Accumulate the byte histogram of the current event
        for (i = 0; i < bufsize; i++) {
            ((size_t*)(ctx->buffer + ctx->buffer_pos * ctx->event_size))[((uint8_t*)buf)[i]]++;
        }
    }

    if (ctx->pcictx.params.rawdata.callback) {
        res = ctx->pcictx.params.rawdata.callback(ctx->event_id, (pcilib_event_info_t*)(ctx->event + ctx->buffer_pos), (eoi?PCILIB_EVENT_FLAG_EOF:PCILIB_EVENT_FLAGS_DEFAULT), bufsize, buf, ctx->pcictx.params.rawdata.user);
        if (res <= 0) {
            if (res < 0) return res;
            return PCILIB_STREAMING_STOP;
        }
    }

    return PCILIB_STREAMING_REQ_FRAGMENT;
}

int pcidev_stream(pcilib_context_t *vctx, pcilib_event_callback_t callback, void *user) {
    int err = 0;
    int do_stop = 0;

    pcidev_t *ctx = (pcidev_t*)vctx;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

    pcidev_debug(API, "pcidev: start streaming");

    pcidev_data_callback_user_t datacb_ctx;
    datacb_ctx.pcidev = ctx;
    datacb_ctx.event_cb = callback;
    datacb_ctx.event_cb_ctx = user;

    LOCK(stream);

    ctx->streaming = 1;
    ctx->run_streamer = 1;

    if (!ctx->started) {
        err = pcidev_start(vctx, PCILIB_EVENTS_ALL, PCILIB_EVENT_FLAGS_DEFAULT);
        if (err) {
            ctx->streaming = 0;
            UNLOCK(stream);
            return err;
        }
        do_stop = 1;
    }

    while (ctx->run_streamer) {
        err = pcilib_stream_dma(ctx->pcictx.pcilib, ctx->rdma, 0, 0, PCILIB_DMA_FLAG_MULTIPACKET, PCIDEV_DMA_TIMEOUT, &pcidev_data_callback, &datacb_ctx);
        if (err) {
            if (err == PCILIB_ERROR_TIMEOUT) {
                if (pcilib_check_deadline(&ctx->autostop.timestamp, 0)) {
                    ctx->run_streamer = 0;
                    break;
                }
                usleep(PCIDEV_NOEVENT_SLEEP);
            } else
                pcilib_error("DMA error while reading pcidev events, error: %i", err);
        }
    }

    ctx->run_streamer = 0;
    ctx->streaming = 0;
    UNLOCK(stream);

    pcidev_debug(API, "pcidev: streaming finished");

    if (do_stop) {
        pcidev_stop(vctx, PCILIB_EVENT_FLAGS_DEFAULT);
    }

    return err;
}

static int pcidev_resolve_event_id(pcidev_t *ctx, pcilib_event_id_t evid) {
    pcilib_event_id_t diff;

    if (evid > ctx->event_id) {
            // handle a wrap-around of the event counter
        diff = (((pcilib_event_id_t)-1) - ctx->event_id) + evid;
        if (diff >= ctx->buffer_size) return -1;
    } else {
        diff = ctx->event_id - evid;
        if (diff >= ctx->buffer_size) return -1;
    }

        // position of the event in the ring buffer
    return (evid - 1) % ctx->buffer_size;
}
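/*
 * pcidev_get_data (below) serves buffered event data in two modes: if the caller
 * supplies a buffer, the event is copied and the read lock is released before
 * returning; otherwise a zero-copy pointer into the ring buffer is returned and
 * the read lock is held until the caller hands the pointer back via
 * pcidev_return_data.
 */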
int pcidev_get_data(pcilib_context_t *vctx, pcilib_event_id_t event_id, pcilib_event_data_type_t data_type, size_t arg_size, void *arg, size_t *size, void **ret) {
    int buf_ptr;
    pcidev_t *ctx = (pcidev_t*)vctx;
    void *data = *ret;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

    pcidev_debug(API, "pcidev: get_data");

    buf_ptr = pcidev_resolve_event_id(ctx, event_id);
    if (buf_ptr < 0) {
        pcidev_debug(HARDWARE, "The data of the requested event %zu has been overwritten in the meantime", event_id);
        return PCILIB_ERROR_OVERWRITTEN;
    }

    switch ((pcidev_data_type_t)data_type) {
      case PCIDEV_RAW_DATA:
        pcilib_error("The raw data is not buffered for performance reasons and can only be accessed via the rawdata-callback mechanism");
        return PCILIB_ERROR_NOTSUPPORTED;
      case PCIDEV_STANDARD_DATA:
            // Lock the data for non-raw requests to prevent occasional overwriting
        pthread_rwlock_rdlock(&ctx->event[buf_ptr].mutex);

            // Check that the data has still not been overwritten
        if (pcidev_resolve_event_id(ctx, event_id) < 0) {
            pthread_rwlock_unlock(&ctx->event[buf_ptr].mutex);
            pcidev_debug(HARDWARE, "The data of the requested event %zu has been overwritten in the meantime", event_id);
            return PCILIB_ERROR_OVERWRITTEN;
        }

        if (data) {
            if ((!size)||(*size < ctx->event_size)) {
                pthread_rwlock_unlock(&ctx->event[buf_ptr].mutex);
                pcilib_warning("The event data (%zu bytes) does not fit in the user-supplied buffer (%zu bytes)", ctx->event_size, (size?*size:0));
                return PCILIB_ERROR_TOOBIG;
            }
            memcpy(data, ctx->buffer + buf_ptr * ctx->event_size, ctx->event_size);
            pthread_rwlock_unlock(&ctx->event[buf_ptr].mutex);

            *size = ctx->event_size;
            return 0;
        }

        if (size) *size = ctx->event_size;
        *ret = ctx->buffer + buf_ptr * ctx->event_size;
        return 0;
      default:
        pcilib_error("Unknown data type (%li) is requested", data_type);
        return PCILIB_ERROR_INVALID_REQUEST;
    }
}

int pcidev_return_data(pcilib_context_t *vctx, pcilib_event_id_t event_id, pcilib_event_data_type_t data_type, void *data) {
    pcidev_t *ctx = (pcidev_t*)vctx;

    if (!ctx) {
        pcilib_error("pcidev event engine is not initialized");
        return PCILIB_ERROR_NOTINITIALIZED;
    }

    if ((pcidev_data_type_t)data_type == PCIDEV_RAW_DATA) {
        return PCILIB_ERROR_NOTSUPPORTED;
    } else {
            // Release the read lock taken by pcidev_get_data for this event
        int buf_ptr = (event_id - 1) % ctx->buffer_size;
        pthread_rwlock_unlock(&ctx->event[buf_ptr].mutex);
    }

    pcidev_debug(API, "pcidev: return_data");

    return 0;
}
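/*
 * Illustration only: a disabled sketch of the per-event callback that
 * pcidev_stream feeds.  The signature follows pcilib_event_callback_t as used in
 * pcidev_data_callback above; the body is a hypothetical consumer that merely
 * counts events.  A positive return value continues streaming, 0 stops it
 * cleanly, and a negative value stops it with an error.
 */
#if 0
static int example_event_callback(pcilib_event_id_t event_id, pcilib_event_info_t *info, void *user) {
    size_t *num_events = (size_t*)user;

    (*num_events)++;                            // the histogram itself would be fetched with the
                                                // engine's get_data/return_data pair (see above)

    if (*num_events >= 16)
        return 0;                               // enough data collected, stop streaming

    return 1;                                   // continue streaming
}
#endif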