path: root/dma
author     Suren A. Chilingaryan <csa@suren.me>    2015-11-17 06:45:55 +0100
committer  Suren A. Chilingaryan <csa@suren.me>    2015-11-17 06:45:55 +0100
commit     2455a677448e0e0c17d7193bf405b734b758811b (patch)
tree       52ac392aec2e7869ef1e003dde4302407f9e0a66 /dma
parent     9efee359b26d7a54464ff20f38f319216fb248af (diff)
download   pcitool-2455a677448e0e0c17d7193bf405b734b758811b.tar.gz
           pcitool-2455a677448e0e0c17d7193bf405b734b758811b.tar.bz2
           pcitool-2455a677448e0e0c17d7193bf405b734b758811b.tar.xz
           pcitool-2455a677448e0e0c17d7193bf405b734b758811b.zip
IPEDMA Update

Make the DMA timeout, page size, number of ring-buffer pages and operation
flags run-time configurable through the "dmaconf" software register bank,
add nosync/nosleep flags, expose the DMA firmware version, switch the
no-data wait from usleep() to nanosleep(), and let the benchmark use
page-aligned buffers, a warm-up iteration and per-iteration latency logging.
Diffstat (limited to 'dma')
-rw-r--r--   dma/ipe.c             70
-rw-r--r--   dma/ipe.h             17
-rw-r--r--   dma/ipe_benchmark.c   36
-rw-r--r--   dma/ipe_private.h     20
4 files changed, 107 insertions, 36 deletions
diff --git a/dma/ipe.c b/dma/ipe.c
index 63f2fa3..ef3196d 100644
--- a/dma/ipe.c
+++ b/dma/ipe.c
@@ -1,12 +1,14 @@
#define _PCILIB_DMA_IPE_C
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
+#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
+#include <time.h>
#include <sys/time.h>
#include <arpa/inet.h>
@@ -45,6 +47,10 @@ pcilib_dma_context_t *dma_ipe_init(pcilib_t *pcilib, const char *model, const vo
ctx->dma_bank = model_info->banks + dma_bank;
ctx->base_addr = pcilib_resolve_register_address(pcilib, ctx->dma_bank->bar, ctx->dma_bank->read_addr);
+
+ RD(IPEDMA_REG_VERSION, value);
+ ctx->version = value;
+
RD(IPEDMA_REG_PCIE_GEN, value);
#ifdef IPEDMA_ENFORCE_64BIT_MODE
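The firmware version is now latched once at init time. For reference, the RD() macro used above (defined in dma/ipe_private.h) simply dereferences base_addr + offset as a 32-bit word, so the new read is roughly equivalent to the following stand-alone sketch (illustration only, not code from the patch):

    #include <stdint.h>

    /* Read the DMA firmware version register (offset 0x20, IPEDMA_REG_VERSION). */
    static uint32_t read_ipedma_version(volatile char *base_addr) {
        return *(volatile uint32_t *)(base_addr + 0x20);
    }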
@@ -101,7 +107,27 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->preserve = 1;
if (ctx->pages) return 0;
-
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_timeout", &value))
+ ctx->dma_timeout = value;
+ else
+ ctx->dma_timeout = IPEDMA_DMA_TIMEOUT;
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_page_size", &value))
+ ctx->dma_page_size = value;
+ else
+ ctx->dma_page_size = IPEDMA_PAGE_SIZE;
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_pages", &value))
+ ctx->dma_pages = value;
+ else
+ ctx->dma_pages = IPEDMA_DMA_PAGES;
+
+ if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "ipedma_flags", &value))
+ ctx->dma_flags = value;
+ else
+ ctx->dma_flags = 0;
+
kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
pcilib_kmem_handle_t *desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
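The four read-or-default blocks above repeat the same pattern. A hypothetical helper (not part of the patch) that captures it, using pcilib_read_register() exactly as it is invoked above, could look like this:

    /* Read an optional setting from the "dmaconf" software register bank and
     * fall back to a compile-time default when the read fails. Sketch only. */
    static pcilib_register_value_t ipe_dma_conf(pcilib_t *pcilib,
            const char *name, pcilib_register_value_t def) {
        pcilib_register_value_t value;
        if (!pcilib_read_register(pcilib, "dmaconf", name, &value))
            return value;
        return def;
    }

    /* e.g.  ctx->dma_timeout = ipe_dma_conf(ctx->dmactx.pcilib, "dma_timeout", IPEDMA_DMA_TIMEOUT); */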
@@ -409,17 +435,9 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
volatile uint32_t *empty_detected_ptr;
pcilib_dma_flags_t packet_flags = PCILIB_DMA_FLAG_EOP;
- size_t nodata_sleep;
- switch (sched_getscheduler(0)) {
- case SCHED_FIFO:
- case SCHED_RR:
- nodata_sleep = IPEDMA_NODATA_SLEEP;
- break;
- default:
- pcilib_info_once("Streaming DMA data using non real-time thread (may cause extra CPU load)", errno);
- nodata_sleep = 0;
- }
+ size_t nodata_sleep;
+ struct timespec sleep_ts = {0};
size_t cur_read;
@@ -435,6 +453,19 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
empty_detected_ptr = last_written_addr_ptr - 2;
+ switch (sched_getscheduler(0)) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ if (ctx->dma_flags&IPEDMA_FLAG_NOSLEEP)
+ nodata_sleep = 0;
+ else
+ nodata_sleep = IPEDMA_NODATA_SLEEP;
+ break;
+ default:
+ pcilib_info_once("Streaming DMA data using non real-time thread (may cause extra CPU load)", errno);
+ nodata_sleep = 0;
+ }
+
do {
switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
case PCILIB_STREAMING_CONTINUE:
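Note that the scheduler check above only enables the short no-data sleep when the calling thread runs under a real-time policy (and the new IPEDMA_FLAG_NOSLEEP is not set); otherwise the loop busy-polls and a warning about extra CPU load is printed. A caller that wants the sleeping path would switch its thread to SCHED_FIFO before streaming, roughly as below (hedged sketch, the priority value is arbitrary):

    #include <sched.h>

    /* Put the calling thread under SCHED_FIFO so the no-data sleep path is
     * taken; requires appropriate privileges (e.g. CAP_SYS_NICE or root). */
    static int make_stream_thread_realtime(void) {
        struct sched_param sp = { .sched_priority = 10 };
        return sched_setscheduler(0, SCHED_FIFO, &sp);
    }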
@@ -444,10 +475,10 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
wait = 0;
else
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */
- wait = IPEDMA_DMA_TIMEOUT;
+ wait = ctx->dma_timeout;
break;
case PCILIB_STREAMING_WAIT:
- wait = (timeout > IPEDMA_DMA_TIMEOUT)?timeout:IPEDMA_DMA_TIMEOUT;
+ wait = (timeout > ctx->dma_timeout)?timeout:ctx->dma_timeout;
break;
// case PCILIB_STREAMING_CHECK: wait = 0; break;
}
@@ -460,8 +491,10 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
gettimeofday(&start, NULL);
memcpy(&cur, &start, sizeof(struct timeval));
while (((*last_written_addr_ptr == 0)||(ctx->last_read_addr == (*last_written_addr_ptr)))&&((wait == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < wait))) {
- if (nodata_sleep)
- usleep(nodata_sleep);
+ if (nodata_sleep) {
+ sleep_ts.tv_nsec = nodata_sleep;
+ nanosleep(&sleep_ts, NULL);
+ }
#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
if ((ret != PCILIB_STREAMING_REQ_PACKET)&&(*empty_detected_ptr)) break;
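The switch from usleep() to nanosleep() above matches the new nanosecond-scale IPEDMA_NODATA_SLEEP (100 ns, see dma/ipe_private.h below), which usleep() cannot express. A minimal stand-alone version of the pause, assuming _POSIX_C_SOURCE >= 199309L as now defined at the top of ipe.c:

    #include <time.h>

    /* Sleep a handful of nanoseconds between polls of the progress pointer;
     * usleep() could not go below one microsecond. Sketch only. */
    static void ipe_nodata_pause(long ns) {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = ns };
        nanosleep(&ts, NULL);   /* EINTR is ignored here, as in the driver loop */
    }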
@@ -494,13 +527,14 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
else packet_flags = 0;
#endif /* IPEDMA_DETECT_PACKETS */
- pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, cur_read);
+ if ((ctx->dma_flags&IPEDMA_FLAG_NOSYNC) == 0)
+ pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, cur_read);
void *buf = pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ctx->pages, cur_read);
ret = cb(cbattr, packet_flags, ctx->page_size, buf);
if (ret < 0) return -ret;
// We don't need this because hardware does not intend to read anything from the memory
-// pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_TODEVICE, cur_read);
+ //pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_TODEVICE, cur_read);
// Return buffer into the DMA pool when processed
if (ctx->streaming) {
@@ -510,7 +544,7 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
else last_free = IPEDMA_DMA_PAGES - 1;
uintptr_t buf_ba = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, last_free);
- WR(IPEDMA_REG_PAGE_ADDR, buf_ba);
+ WR(IPEDMA_REG_PAGE_ADDR, buf_ba);
# ifdef IPEDMA_STREAMING_CHECKS
pcilib_register_value_t streaming_status;
RD(IPEDMA_REG_STREAMING_STATUS, streaming_status);
diff --git a/dma/ipe.h b/dma/ipe.h
index ce3ea4f..174e36e 100644
--- a/dma/ipe.h
+++ b/dma/ipe.h
@@ -5,6 +5,11 @@
#include "pcilib.h"
#include "version.h"
+#define IPEDMA_PAGE_SIZE 4096 /**< page size */
+#define IPEDMA_DMA_PAGES 1024 /**< number of DMA pages in the ring buffer to allocate */
+
+#define IPEDMA_DMA_TIMEOUT 100000 /**< us, overrides PCILIB_DMA_TIMEOUT (actual hardware timeout is 50ms according to Lorenzo) */
+
pcilib_dma_context_t *dma_ipe_init(pcilib_t *ctx, const char *model, const void *arg);
void dma_ipe_free(pcilib_dma_context_t *vctx);
@@ -68,6 +73,7 @@ static const pcilib_register_description_t ipe_dma_registers[] = {
{0x0018, 0, 32, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "dma_mode_flags", "DMA operation mode"},
{0x0018, 0, 4, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_BITS, PCILIB_REGISTER_BANK_DMA, "pcie_gen", "PCIe version 2/3 depending on the used XILINX core"},
{0x0018, 4, 1, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_BITS, PCILIB_REGISTER_BANK_DMA, "streaming_dma", "Streaming mode (enabled/disabled)"},
+ {0x0020, 0, 32, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "dma_firmware", "Version of DMA firmware"},
{0x0028, 0, 32, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "mwr_perf", "MWR Performance"},
{0x003C, 0, 32, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "cfg_lnk_width", "Negotiated and max width of PCIe Link"},
{0x003C, 0, 6, 0, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_BITS, PCILIB_REGISTER_BANK_DMA, "cfg_cap_max_lnk_width", "Max link width"},
@@ -81,9 +87,18 @@ static const pcilib_register_description_t ipe_dma_registers[] = {
{0x0058, 0, 32, 0, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "last_descriptor_read", "Last descriptor read by the host"},
{0x005C, 0, 32, 0, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "desc_mem_addr", "Number of descriptors configured"},
{0x0060, 0, 32, 0, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMA, "update_thresh", "Update threshold of progress register"},
- {0x0000, 0, 32, PCILIB_VERSION, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "dma_version", "Version of DMA engine"},
+ {0x0000, 0, 32, PCILIB_VERSION, 0x00000000, PCILIB_REGISTER_R , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "dma_version", "Version of DMA engine"},
+ {0x0004, 0, 32, IPEDMA_DMA_TIMEOUT, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "dma_timeout", "Default DMA timeout"},
+ {0x0008, 0, 32, IPEDMA_DMA_PAGES, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "dma_pages", "Number of buffers in DMA page ring"},
+ {0x000C, 0, 32, IPEDMA_PAGE_SIZE, 0x00000000, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "dma_page_size", "Size of a page in DMA page ring (multiple of 4K)"},
+ {0x0010, 0, 32, 0, 0xFFFFFFFF, PCILIB_REGISTER_RW , PCILIB_REGISTER_STANDARD, PCILIB_REGISTER_BANK_DMACONF, "ipedma_flags", "DMA Control Register"},
+ {0x0010, 0, 1, 0, 0xFFFFFFFF, PCILIB_REGISTER_RW , PCILIB_REGISTER_BITS, PCILIB_REGISTER_BANK_DMACONF, "ipedma_nosync", "Do not synchronize DMA pages"},
+ {0x0010, 1, 1, 0, 0xFFFFFFFF, PCILIB_REGISTER_RW , PCILIB_REGISTER_BITS, PCILIB_REGISTER_BANK_DMACONF, "ipedma_nosleep", "Do not sleep while there is no data"},
{0, 0, 0, 0, 0x00000000, 0, 0, 0, NULL, NULL}
};
+
+
+
#endif /* _PCILIB_EXPORT_C */
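The new dmaconf entries are software registers, so they can presumably be written from an application before the engine is started. A hedged usage sketch follows; the device path, model name and values are placeholders, and pcilib_write_register() is assumed to mirror the pcilib_read_register() call used in dma/ipe.c:

    #include <pcilib.h>

    int configure_ipedma(void) {
        pcilib_t *pci = pcilib_open("/dev/fpga0", "ipedma");   /* placeholder device/model */
        if (!pci) return -1;

        pcilib_write_register(pci, "dmaconf", "dma_timeout", 200000);   /* us */
        pcilib_write_register(pci, "dmaconf", "ipedma_nosleep", 1);     /* busy-poll, no sleep */

        pcilib_close(pci);
        return 0;
    }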
diff --git a/dma/ipe_benchmark.c b/dma/ipe_benchmark.c
index fa89f4b..57e5646 100644
--- a/dma/ipe_benchmark.c
+++ b/dma/ipe_benchmark.c
@@ -1,6 +1,7 @@
#define _PCILIB_DMA_IPE_C
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
+#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
@@ -84,6 +85,7 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
read_dma = pcilib_read_dma_custom;
// There is no significant difference and we can remove this when testing phase is over.
+ // DS: With large number of buffers this is quite slow due to skimming of initially written buffers
if (getenv("PCILIB_BENCHMARK_STREAMING")) {
size_t dma_buffer_space;
pcilib_dma_engine_status_t dma_status;
@@ -97,7 +99,7 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
WR(IPEDMA_REG_CONTROL, 0x1);
gettimeofday(&start, NULL);
- pcilib_calc_deadline(&start, IPEDMA_DMA_TIMEOUT * IPEDMA_DMA_PAGES);
+ pcilib_calc_deadline(&start, ctx->dma_timeout * IPEDMA_DMA_PAGES);
#ifdef IPEDMA_BUG_LAST_READ
dma_buffer_space = (IPEDMA_DMA_PAGES - 2) * IPEDMA_PAGE_SIZE;
@@ -106,8 +108,8 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
#endif /* IPEDMA_BUG_LAST_READ */
// Allocate memory and prepare data
- buf = malloc(size + dma_buffer_space);
- if (!buf) return -1;
+ err = posix_memalign(&buf, 4096, size + dma_buffer_space);
+ if ((err)||(!buf)) return -1;
// Wait all DMA buffers are filled
memset(&dma_status, 0, sizeof(dma_status));
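posix_memalign() (hence the added _POSIX_C_SOURCE 200112L) returns an error code instead of setting errno, which is why the patch checks both err and buf above. The allocation pattern in isolation, as a sketch:

    #include <stdlib.h>

    /* Allocate a 4 KiB-aligned benchmark buffer; returns NULL on failure. */
    static void *alloc_aligned_buffer(size_t bytes) {
        void *buf = NULL;
        if (posix_memalign(&buf, 4096, bytes)) return NULL;
        return buf;
    }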
@@ -127,7 +129,7 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
gettimeofday(&start, NULL);
for (iter = 0; iter < iterations; iter++) {
for (bytes = 0; bytes < (size + dma_buffer_space); bytes += rbytes) {
- err = read_dma(ctx->dmactx.pcilib, 0, addr, size + dma_buffer_space - bytes, PCILIB_DMA_FLAG_MULTIPACKET, PCILIB_DMA_TIMEOUT, buf + bytes, &rbytes);
+ err = read_dma(ctx->dmactx.pcilib, 0, addr, size + dma_buffer_space - bytes, PCILIB_DMA_FLAG_MULTIPACKET, ctx->dma_timeout, buf + bytes, &rbytes);
if (err) {
pcilib_error("Can't read data from DMA, error %i", err);
return -1;
@@ -141,12 +143,15 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
// Stopping DMA
WR(IPEDMA_REG_CONTROL, 0x0);
+ usleep(IPEDMA_RESET_DELAY);
+
pcilib_skip_dma(ctx->dmactx.pcilib, 0);
} else {
if (read_dma == dma_ipe_skim_dma_custom)
pcilib_info_once("Benchmarking the DMA hardware (without memcpy)");
WR(IPEDMA_REG_CONTROL, 0x0);
+ usleep(IPEDMA_RESET_DELAY);
err = pcilib_skip_dma(ctx->dmactx.pcilib, 0);
if (err) {
@@ -155,35 +160,44 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
}
// Allocate memory and prepare data
- buf = malloc(size);
- if (!buf) return -1;
+ err = posix_memalign(&buf, 4096, size);
+ if ((err)||(!buf)) return -1;
- for (iter = 0; iter < iterations; iter++) {
+ for (iter = 0; iter <= iterations; iter++) {
gettimeofday(&start, NULL);
// Starting DMA
WR(IPEDMA_REG_CONTROL, 0x1);
for (bytes = 0; bytes < size; bytes += rbytes) {
- err = read_dma(ctx->dmactx.pcilib, 0, addr, size - bytes, PCILIB_DMA_FLAG_MULTIPACKET, PCILIB_DMA_TIMEOUT, buf + bytes, &rbytes);
+ err = read_dma(ctx->dmactx.pcilib, 0, addr, size - bytes, PCILIB_DMA_FLAG_MULTIPACKET, ctx->dma_timeout, buf + bytes, &rbytes);
if (err) {
pcilib_error("Can't read data from DMA (iteration: %zu, offset: %zu), error %i", iter, bytes, err);
return -1;
}
}
+ gettimeofday(&cur, NULL);
+
// Stopping DMA
WR(IPEDMA_REG_CONTROL, 0x0);
+ usleep(IPEDMA_RESET_DELAY);
if (err) break;
- gettimeofday(&cur, NULL);
- us += ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec));
-
+ // Heating up during the first iteration
+ if (iter)
+ us += ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec));
+
+ pcilib_info("Iteration %-4i latency: %lu", iter, ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)));
+
+
err = pcilib_skip_dma(ctx->dmactx.pcilib, 0);
if (err) {
pcilib_error("Can't start iteration, devices continuously writes unexpected data using DMA engine");
break;
}
+
+ usleep(ctx->dma_timeout);
}
}
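With the warm-up change above, the loop runs iterations + 1 passes while only the timed passes contribute to us, so an average over the accumulated time still divides by iterations. Restated in isolation (illustrative sketch, not code from the benchmark):

    /* The first (warm-up) pass is measured and logged but excluded from
     * us_total, so the divisor remains the number of timed iterations. */
    static double average_latency_us(double us_total, size_t iterations) {
        return us_total / (double)iterations;
    }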
diff --git a/dma/ipe_private.h b/dma/ipe_private.h
index 5054a58..fdc2d83 100644
--- a/dma/ipe_private.h
+++ b/dma/ipe_private.h
@@ -10,8 +10,6 @@
#define IPEDMA_STREAMING_MODE /**< Enables streaming DMA operation mode instead of ring-buffer, the page is written once and forgotten and need to be pushed in queue again */
//#define IPEDMA_STREAMING_CHECKS /**< Enables status checks in streaming mode (it will cause _significant_ performance penalty, max ~ 2 GB/s) */
-#define IPEDMA_PAGE_SIZE 4096
-#define IPEDMA_DMA_PAGES 1024 /**< number of DMA pages in the ring buffer to allocate */
#define IPEDMA_DMA_PROGRESS_THRESHOLD 1 /**< how many pages the DMA engine should fill before reporting progress */
#define IPEDMA_DESCRIPTOR_SIZE 128
#define IPEDMA_DESCRIPTOR_ALIGNMENT 64
@@ -21,16 +19,13 @@
//#define IPEDMA_DETECT_PACKETS /**< Using empty_deceted flag */
#define IPEDMA_SUPPORT_EMPTY_DETECTED /**< Avoid waiting for data when empty_detected flag is set in hardware */
-#define IPEDMA_DMA_TIMEOUT 100000 /**< us, overrides PCILIB_DMA_TIMEOUT (actual hardware timeout is 50ms according to Lorenzo) */
-#define IPEDMA_RESET_DELAY 100000 /**< Sleep between accessing DMA control and reset registers */
-#define IPEDMA_ADD_PAGE_DELAY 1000 /**< Delay between submitting successive DMA pages into IPEDMA_REG_PAGE_ADDR register */
-#define IPEDMA_NODATA_SLEEP 10 /**< To keep CPU free */
#define IPEDMA_REG_RESET 0x00
#define IPEDMA_REG_CONTROL 0x04
#define IPEDMA_REG_TLP_SIZE 0x0C
#define IPEDMA_REG_TLP_COUNT 0x10
#define IPEDMA_REG_PCIE_GEN 0x18
+#define IPEDMA_REG_VERSION 0x20
#define IPEDMA_REG_PAGE_ADDR 0x50
#define IPEDMA_REG_UPDATE_ADDR 0x54
#define IPEDMA_REG_LAST_READ 0x58 /**< In streaming mode, we can use it freely to track current status */
@@ -38,9 +33,16 @@
#define IPEDMA_REG_UPDATE_THRESHOLD 0x60
#define IPEDMA_REG_STREAMING_STATUS 0x68
+#define IPEDMA_FLAG_NOSYNC 0x01 /**< Do not call kernel space for page synchronization */
+#define IPEDMA_FLAG_NOSLEEP 0x02 /**< Do not sleep in the loop while waiting for the data */
+
#define IPEDMA_MASK_PCIE_GEN 0xF
#define IPEDMA_MASK_STREAMING_MODE 0x10
+#define IPEDMA_RESET_DELAY 10000 /**< Sleep between accessing DMA control and reset registers */
+#define IPEDMA_ADD_PAGE_DELAY 1000 /**< Delay between submitting successive DMA pages into IPEDMA_REG_PAGE_ADDR register */
+#define IPEDMA_NODATA_SLEEP 100 /**< To keep CPU free, in nanoseconds */
+
#define WR(addr, value) { *(uint32_t*)(ctx->base_addr + addr) = value; }
#define RD(addr, value) { value = *(uint32_t*)(ctx->base_addr + addr); }
@@ -59,6 +61,7 @@ struct ipe_dma_s {
pcilib_irq_type_t irq_preserve; /**< indicates that IRQs should not be disabled during clean-up */
int irq_started; /**< indicates that IRQ subsystem is initialized (detecting which types should be preserverd) */
+ uint32_t version; /**< hardware version */
int started; /**< indicates that DMA buffers are initialized and reading is allowed */
int writting; /**< indicates that we are in middle of writting packet */
int reused; /**< indicates that DMA was found intialized, buffers were reused, and no additional initialization is needed */
@@ -66,6 +69,11 @@ struct ipe_dma_s {
int mode64; /**< indicates 64-bit operation mode */
int streaming; /**< indicates if DMA is operating in streaming or ring-buffer mode */
+ uint32_t dma_flags; /**< Various operation flags, see IPEDMA_FLAG_* */
+ size_t dma_timeout; /**< DMA timeout,IPEDMA_DMA_TIMEOUT is used by default */
+ size_t dma_pages; /**< Number of DMA pages in ring buffer to allocate */
+ size_t dma_page_size; /**< Size of a single DMA page */
+
pcilib_kmem_handle_t *desc; /**< in-memory status descriptor written by DMA engine upon operation progess */
pcilib_kmem_handle_t *pages; /**< collection of memory-locked pages for DMA operation */