author    Suren A. Chilingaryan <csa@suren.me>    2023-05-25 22:41:04 +0200
committer Suren A. Chilingaryan <csa@suren.me>    2023-05-25 22:41:04 +0200
commit    6f4af841f6fdd099b97d071ae64c8be60f809456 (patch)
tree      d4f9a18b38e1ce3cfc0a5336215d5ce3afe830d2 /dma.c
A sample event engine for pcitool (not requiring any PCIe hardware). Initial release, barely tested and intended only as an example.
Diffstat (limited to 'dma.c')
-rw-r--r--    dma.c    450
1 file changed, 450 insertions, 0 deletions
diff --git a/dma.c b/dma.c
new file mode 100644
index 0000000..94ff203
--- /dev/null
+++ b/dma.c
@@ -0,0 +1,450 @@
+#define _PCIDEV_DMA_C
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+#define _POSIX_C_SOURCE 199309L
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sched.h>
+#include <time.h>
+#include <sys/time.h>
+#include <arpa/inet.h>
+
+#include <pcilib.h>
+#include <pcilib/kmem.h>
+#include <pcilib/error.h>
+#include <pcilib/debug.h>
+
+#include "dma.h"
+#include "dma_private.h"
+
+ // For real bus-mapped buffers we would use pcilib_kmem_get_block_ba; this hardware-less sample uses physical addresses instead
+#define pcilib_kmem_get_block_addr pcilib_kmem_get_block_pa
+
+
+pcilib_dma_context_t *pcidev_dma_init(pcilib_t *pcilib, const char *model, const void *arg) {
+ pcilib_register_value_t version_value;
+
+ pcidev_dma_t *ctx = malloc(sizeof(pcidev_dma_t));
+
+ if (ctx) {
+ memset(ctx, 0, sizeof(pcidev_dma_t));
+ ctx->dmactx.pcilib = pcilib;
+
+ // Report the engine version (assuming a 'version' register in the 'dmaconf' bank; fall back to the pcilib version otherwise)
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "version", &version_value))
+ ctx->version = version_value;
+ else
+ ctx->version = PCILIB_VERSION;
+
+
+ pcilib_info("Sample DMA engine, version %lu", ctx->version);
+
+ }
+
+ return (pcilib_dma_context_t*)ctx;
+}
+
+void pcidev_dma_free(pcilib_dma_context_t *vctx) {
+ pcidev_dma_t *ctx = (pcidev_dma_t*)vctx;
+
+ if (ctx) {
+ pcidev_dma_stop(vctx, PCILIB_DMA_ENGINE_ALL, PCILIB_DMA_FLAGS_DEFAULT);
+ free(ctx);
+ }
+}
+
+static void pcidev_dma_disable(pcidev_dma_t *ctx) {
+ // This is called before cleaning the memory to ensure that no DMA writes into this memory can occur after it returns
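+ // A minimal sketch of what a real engine might do here (hypothetical 'dma_enable' register,
+ // following the commented-out pcilib_kmem_sync_block pattern used further below):
+ //pcilib_write_register(ctx->dmactx.pcilib, "dmaconf", "dma_enable", 0);
+ //usleep(ctx->dma_timeout); // give in-flight transfers time to drain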
+ return;
+}
+
+static void pcidev_dma_enable(pcidev_dma_t *ctx) {
+ // Sequence to enable the DMA engine and start streaming data. The memory should already be allocated and configured
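+ // A minimal sketch under the same assumption of a hypothetical 'dma_enable' register; a real
+ // engine would also program the ring base addresses obtained via pcilib_kmem_get_block_addr():
+ //pcilib_write_register(ctx->dmactx.pcilib, "dmaconf", "dma_enable", 1);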
+ return;
+}
+
+static size_t pcidev_dma_find_buffer_by_bus_addr(pcidev_dma_t *ctx, uintptr_t bus_addr) {
+ size_t i;
+
+ for (i = 0; i < ctx->ring_size; i++) {
+ uintptr_t buf_addr = pcilib_kmem_get_block_addr(ctx->dmactx.pcilib, ctx->pages, i);
+
+ if (bus_addr == buf_addr)
+ return i;
+ }
+
+ return (size_t)-1;
+}
+
+
+int pcidev_dma_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
+ pcidev_dma_t *ctx = (pcidev_dma_t*)vctx;
+
+ pcidev_dma_desc_t *hw;
+
+ int preserve = 0;
+ pcilib_kmem_flags_t kflags;
+ pcilib_kmem_reuse_state_t reuse_desc, reuse_pages;
+ pcilib_kmem_handle_t *desc = NULL;
+ pcilib_kmem_handle_t *pages = NULL;
+
+ pcilib_register_value_t value;
+
+ uintptr_t dma_region = 0;
+
+ // Only a single bank is supported for now. Multiple banks could be added with small modifications, e.g. the bank number could be mapped to a network port...
+ if (dma == PCILIB_DMA_ENGINE_INVALID) return 0;
+ else if (dma > 1) return PCILIB_ERROR_INVALID_BANK;
+
+ if (!ctx->started) ctx->started = 1;
+
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->preserve = 1;
+
+ if (ctx->pages) return 0;
+
+ // Get DMA configuration
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_timeout", &value))
+ ctx->dma_timeout = value;
+ else
+ ctx->dma_timeout = PCIDEV_DMA_TIMEOUT;
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_page_size", &value)) {
+ if (value % PCIDEV_PAGE_SIZE) {
+ pcilib_error("Invalid DMA page size (%lu) is configured", value);
+ return PCILIB_ERROR_INVALID_ARGUMENT;
+ }
+
+ ctx->page_size = value;
+ } else
+ ctx->page_size = PCIDEV_PAGE_SIZE;
+
+ if ((!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_pages", &value))&&(value > 0))
+ ctx->ring_size = value;
+ else
+ ctx->ring_size = PCIDEV_DMA_PAGES;
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_region_low", &value)) {
+ dma_region = value;
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_region_low", &value))
+ dma_region |= ((uintptr_t)value)<<32;
+ }
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_flags", &value))
+ ctx->dma_flags = value;
+ else
+ ctx->dma_flags = 0;
+
+
+ // Allocate/map shared memory. There are two structures: 'desc', a descriptor which would normally be maintained by the DMA engine to report its progress, and 'pages', holding the actual data.
+ kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
+
+ desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCIDEV_DMA_DESCRIPTOR_SIZE, PCIDEV_DMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
+ if (dma_region)
+ pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_REGION_C2S, ctx->ring_size, ctx->page_size, dma_region, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
+ else
+ pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, ctx->ring_size, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
+
+ if (!desc||!pages) {
+ if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, PCILIB_KMEM_FLAG_REUSE);
+ if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, PCILIB_KMEM_FLAG_REUSE);
+ pcilib_error("Can't allocate required kernel memory for PCIDEV DMA engine (%lu pages of %lu bytes + %lu byte descriptor)", ctx->ring_size, ctx->page_size, (unsigned long)PCIDEV_DMA_DESCRIPTOR_SIZE);
+ return PCILIB_ERROR_MEMORY;
+ }
+ reuse_desc = pcilib_kmem_is_reused(ctx->dmactx.pcilib, desc);
+ reuse_pages = pcilib_kmem_is_reused(ctx->dmactx.pcilib, pages);
+
+ hw = (pcidev_dma_desc_t*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, desc);
+
+ // If the shared memory is already initialized, try to get the DMA engine status and check it for consistency. Re-initialize, or return an error if the memory is inconsistent.
+ if ((reuse_pages & PCILIB_KMEM_REUSE_PARTIAL)||(reuse_desc & PCILIB_KMEM_REUSE_PARTIAL)) {
+ pcidev_dma_disable(ctx);
+
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, PCILIB_KMEM_FLAG_REUSE);
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, PCILIB_KMEM_FLAG_REUSE);
+
+ if (((flags&PCILIB_DMA_FLAG_STOP) == 0)||(dma_region)) {
+ pcilib_error("Inconsistent DMA buffers are found (buffers are only partially re-used). This is very wrong, please stop DMA engine and correct configuration...");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ pcilib_warning("Inconsistent DMA buffers are found (buffers are only partially re-used), reinitializing...");
+ desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCIDEV_DMA_DESCRIPTOR_SIZE, PCIDEV_DMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags|PCILIB_KMEM_FLAG_MASS);
+ pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, ctx->ring_size, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags|PCILIB_KMEM_FLAG_MASS);
+
+ if (!desc||!pages) {
+ if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, PCILIB_KMEM_FLAG_REUSE);
+ if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, PCILIB_KMEM_FLAG_REUSE);
+ return PCILIB_ERROR_MEMORY;
+ }
+
+ hw = (pcidev_dma_desc_t*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, desc);
+ } else if (reuse_desc != reuse_pages) {
+ pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers does not match), reinitializing....");
+ } else if (reuse_desc & PCILIB_KMEM_REUSE_REUSED) {
+ if ((reuse_desc & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
+ else if ((reuse_desc & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
+ else {
+ if (hw->page_count != ctx->ring_size)
+ pcilib_warning("Inconsistent DMA buffers found (number of allocated buffers (%lu) does not match the current request (%lu)), reinitializing...", (unsigned long)hw->page_count, (unsigned long)ctx->ring_size);
+ else
+ preserve = 1;
+ }
+ }
+
+ // get page size if default size was used
+ if (!ctx->page_size)
+ ctx->page_size = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, 0);
+
+ if (preserve) {
+ ctx->reused = 1;
+ ctx->preserve = 1;
+ } else {
+ ctx->reused = 0;
+
+ pcidev_dma_disable(ctx);
+ pcidev_dma_enable(ctx);
+
+ ctx->last_read = ctx->ring_size - 1;
+
+ // Normally this should be maintained by hardware
+ hw->page_count = ctx->ring_size;
+ hw->last_read_addr = pcilib_kmem_get_block_addr(ctx->dmactx.pcilib, pages, ctx->ring_size - 1);
+ hw->last_write_addr = pcilib_kmem_get_block_addr(ctx->dmactx.pcilib, pages, ctx->ring_size - 2);
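+ // Placing the write pointer one slot behind the read pointer makes the emulated
+ // ring look permanently full, so the wait loop in pcidev_dma_stream_read() always
+ // finds data without any hardware advancing last_write_addr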
+ }
+
+ ctx->desc = desc;
+ ctx->pages = pages;
+
+ ctx->last_read = pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_read_addr);
+
+ return 0;
+}
+
+int pcidev_dma_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
+ pcilib_kmem_flags_t kflags;
+
+ pcidev_dma_t *ctx = (pcidev_dma_t*)vctx;
+
+ if (!ctx->started) return 0;
+
+ if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return PCILIB_ERROR_INVALID_BANK;
+
+ // ignoring previous setting if flag specified
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) {
+ ctx->preserve = 0;
+ }
+
+ if (ctx->preserve) {
+ kflags = PCILIB_KMEM_FLAG_REUSE;
+ } else {
+ kflags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;
+
+ ctx->started = 0;
+
+ pcidev_dma_disable(ctx);
+ }
+
+ // Clean buffers
+ if (ctx->desc) {
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, ctx->desc, kflags);
+ ctx->desc = NULL;
+ }
+
+ if (ctx->pages) {
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, ctx->pages, kflags);
+ ctx->pages = NULL;
+ }
+
+ return 0;
+}
+
+
+int pcidev_dma_get_status(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_engine_status_t *status, size_t n_buffers, pcilib_dma_buffer_status_t *buffers) {
+ size_t i;
+ pcidev_dma_t *ctx = (pcidev_dma_t*)vctx;
+
+ pcidev_dma_desc_t *hw = (pcidev_dma_desc_t*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ctx->desc);
+
+ if (!status) return -1;
+
+ pcilib_debug(DMA, "Current DMA status - last read: %4u, last_read_addr: %4u (0x%x), last_write_addr: %4lu (0x%lx)", ctx->last_read,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_read_addr), hw->last_read_addr,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_write_addr), hw->last_write_addr
+ );
+
+
+ status->started = ctx->started;
+ status->ring_size = ctx->ring_size;
+ status->buffer_size = ctx->page_size;
+ status->written_buffers = 0;
+ status->written_bytes = 0;
+
+ // For simplicity we keep last_read here and fix it up at the end
+ status->ring_tail = ctx->last_read;
+
+ status->ring_head = pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_write_addr);
+
+ if (status->ring_head == (size_t)-1) {
+ if (hw->last_write_addr) {
+ pcilib_warning("DMA is in unknown state, last_written_addr does not correspond any of available buffers");
+ return PCILIB_ERROR_FAILED;
+ }
+ status->ring_head = 0;
+ status->ring_tail = 0;
+ }
+
+ if (n_buffers > ctx->ring_size) n_buffers = ctx->ring_size;
+
+ if (buffers)
+ memset(buffers, 0, n_buffers * sizeof(pcilib_dma_buffer_status_t));
+
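+ // Example: with ring_size 8, ring_tail 2 and ring_head 5, buffers 3..5 are counted as
+ // written; with ring_head 1 < ring_tail 5 the range wraps, counting buffers 0..1 and 6..7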
+ if (status->ring_head >= status->ring_tail) {
+ for (i = status->ring_tail + 1; i <= status->ring_head; i++) {
+ status->written_buffers++;
+ status->written_bytes += ctx->page_size;
+
+ if ((buffers)&&(i < n_buffers)) {
+ buffers[i].used = 1;
+ buffers[i].size = ctx->page_size;
+ buffers[i].first = 1;
+ buffers[i].last = 1;
+ }
+ }
+ } else {
+ for (i = 0; i <= status->ring_head; i++) {
+ status->written_buffers++;
+ status->written_bytes += ctx->page_size;
+
+ if ((buffers)&&(i < n_buffers)) {
+ buffers[i].used = 1;
+ buffers[i].size = ctx->page_size;
+ buffers[i].first = 1;
+ buffers[i].last = 1;
+ }
+ }
+
+ for (i = status->ring_tail + 1; i < status->ring_size; i++) {
+ status->written_buffers++;
+ status->written_bytes += ctx->page_size;
+
+ if ((buffers)&&(i < n_buffers)) {
+ buffers[i].used = 1;
+ buffers[i].size = ctx->page_size;
+ buffers[i].first = 1;
+ buffers[i].last = 1;
+ }
+ }
+ }
+
+ // We actually keep last_read in ring_tail, so it needs to be advanced by one
+ if (status->ring_tail != status->ring_head) {
+ status->ring_tail++;
+ if (status->ring_tail == status->ring_size) status->ring_tail = 0;
+ }
+
+ return 0;
+}
+
+
+int pcidev_dma_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
+ int err, ret = PCILIB_STREAMING_REQ_PACKET;
+
+ pcilib_timeout_t wait = 0;
+ struct timeval start, cur;
+
+ pcilib_dma_flags_t packet_flags = PCILIB_DMA_FLAG_EOP;
+
+ size_t nodata_sleep;
+ struct timespec sleep_ts = {0};
+
+ size_t cur_read;
+
+ pcidev_dma_t *ctx = (pcidev_dma_t*)vctx;
+
+ // Auto-start the DMA engine if it is not started yet (in this case we also stop it when the application finishes)
+ err = pcidev_dma_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
+ if (err) return err;
+
+ pcidev_dma_desc_t *hw = (pcidev_dma_desc_t*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ctx->desc);
+
+
+ switch (sched_getscheduler(0)) {
+ case SCHED_FIFO:
+ case SCHED_RR:
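+ // Under real-time scheduling a busy poll could monopolize the CPU, so sleep
+ // between polls unless pure polling is explicitly requested via NOSLEEP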
+ if (ctx->dma_flags&PCIDEV_DMA_FLAG_NOSLEEP)
+ nodata_sleep = 0;
+ else
+ nodata_sleep = PCIDEV_DMA_NODATA_SLEEP;
+ break;
+ default:
+ pcilib_info_once("Streaming DMA data using non real-time thread (may cause extra CPU load)", errno);
+ nodata_sleep = 0;
+ }
+
+ do {
+ switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
+ case PCILIB_STREAMING_CONTINUE:
+ wait = ctx->dma_timeout;
+ break;
+ case PCILIB_STREAMING_WAIT:
+ wait = (timeout > ctx->dma_timeout)?timeout:ctx->dma_timeout;
+ break;
+ }
+
+ pcilib_debug(DMA, "Waiting for data in %4u - last_read: %4u, last_read_addr: %4u (0x%08x), last_written: %4u (0x%08x)", ctx->last_read + 1, ctx->last_read,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_read_addr), hw->last_read_addr,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_write_addr), hw->last_write_addr
+ );
+
+ // Wait for data. In this example data is always available, but normally the DMA engine would maintain last_write_addr...
+ gettimeofday(&start, NULL);
+ memcpy(&cur, &start, sizeof(struct timeval));
+ while (((hw->last_write_addr == 0)||(hw->last_write_addr == hw->last_read_addr))&&
+ ((wait == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < wait))) {
+ if (nodata_sleep) {
+ sleep_ts.tv_nsec = nodata_sleep;
+ nanosleep(&sleep_ts, NULL);
+ }
+
+ gettimeofday(&cur, NULL);
+ }
+
+ // Fail out if we exited the loop on timeout
+ if ((hw->last_write_addr == 0)||(hw->last_write_addr == hw->last_read_addr)) {
+ return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
+ }
+
+ // Get the next page to read
+ cur_read = ctx->last_read + 1;
+ if (cur_read == ctx->ring_size) cur_read = 0;
+
+ pcilib_debug(DMA, "Got buffer %4u - last read: %4u, last_read_addr: %4u (0x%x), last_written: %4u (0x%x)", cur_read, ctx->last_read,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_read_addr), hw->last_read_addr,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_write_addr), hw->last_write_addr
+ );
+
+ // In a real case we would need to sync here to ensure that the DMA transfer has finished and caches are coherent
+ //if ((ctx->dma_flags&PCIDEV_DMA_FLAG_NOSYNC) == 0)
+ // pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, cur_read);
+
+ void *buf = (void*)pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ctx->pages, cur_read);
+ ret = cb(cbattr, packet_flags, ctx->page_size, buf);
+ if (ret < 0) return -ret;
+
+ // In a real DMA engine we would need to put the buffer back into the DMA queue here
+ hw->last_read_addr = pcilib_kmem_get_block_addr(ctx->dmactx.pcilib, ctx->pages, cur_read);
+ hw->last_write_addr = pcilib_kmem_get_block_addr(ctx->dmactx.pcilib, ctx->pages, ctx->last_read);
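+ // Reporting the slot just before the one we consumed as freshly written keeps the
+ // emulated ring full, so the next iteration immediately finds data again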
+
+ ctx->last_read = cur_read;
+
+ pcilib_debug(DMA, "Buffer returned %4u - last read: %4u, last_read_addr: %4u (0x%x), last_written: %4u (0x%x)", cur_read, ctx->last_read,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_read_addr), hw->last_read_addr,
+ pcidev_dma_find_buffer_by_bus_addr(ctx, hw->last_write_addr), hw->last_write_addr
+ );
+
+
+ } while (ret);
+
+ return 0;
+}
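
For context, a consumer would normally drive this engine through the public pcilib API rather than calling the pcidev_dma_* functions directly. A minimal sketch, assuming the usual pcilib entry points (pcilib_open, pcilib_start_dma, pcilib_stream_dma, pcilib_stop_dma, pcilib_close; check pcilib.h for the exact signatures) and placeholder device/model names:

#include <stdio.h>
#include <pcilib.h>

// Called by the streaming engine for every received buffer; returning 0 stops the stream
static int on_data(void *cbattr, pcilib_dma_flags_t flags, size_t size, void *buf) {
    size_t *total = (size_t*)cbattr;
    *total += size;
    return (*total < 16 * 4096)?PCILIB_STREAMING_CONTINUE:0;    // stop after ~16 pages
}

int main() {
    size_t total = 0;
    pcilib_t *pcilib = pcilib_open("/dev/fpga0", "pcidev");    // placeholder device & model
    if (!pcilib) return 1;

    pcilib_start_dma(pcilib, 0, PCILIB_DMA_FLAGS_DEFAULT);     // engine 0, the single supported bank
    pcilib_stream_dma(pcilib, 0, 0, 0, PCILIB_DMA_FLAGS_DEFAULT, PCILIB_TIMEOUT_INFINITE, on_data, &total);
    pcilib_stop_dma(pcilib, 0, PCILIB_DMA_FLAGS_DEFAULT);
    pcilib_close(pcilib);

    printf("Received %zu bytes\n", total);
    return 0;
}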