11
#include <arpa/inet.h>
20
// Frees all kernel-memory buffers of the given use-type in one driver call:
// KMEM_FLAG_MASS asks the driver to release every buffer matching the handle.
// Returns the KMEM_FREE ioctl status (0 on success, -1 with errno on failure).
// NOTE(review): this excerpt is missing lines (the numeric artifact lines mark
// gaps); presumably the elided code sets kh.use from `use` — confirm upstream.
int pcilib_clean_kernel_memory(pcilib_t *ctx, pcilib_kmem_use_t use, pcilib_kmem_flags_t flags) {
21
kmem_handle_t kh = {0};
23
// Mass-free: one ioctl covers the whole class of buffers, not a single handle.
kh.flags = flags|KMEM_FLAG_MASS;
25
return ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_FREE, &kh);
29
// Releases one block of a kernel-memory list: unmaps its user-space mapping
// (if one exists) and asks the driver to free/detach the kernel buffer
// identified by handle_id/pa. Returns the KMEM_FREE ioctl status.
// NOTE(review): lines are elided here; presumably kh.flags is set from `flags`
// before the ioctl so persistent/hardware buffers are handled per policy.
static int pcilib_free_kernel_buffer(pcilib_t *ctx, pcilib_kmem_list_t *kbuf, size_t i, pcilib_kmem_flags_t flags) {
30
kmem_handle_t kh = {0};
32
// The mapping covered size + alignment padding, so unmap the same length.
if (kbuf->buf.blocks[i].ua) munmap(kbuf->buf.blocks[i].ua, kbuf->buf.blocks[i].size + kbuf->buf.blocks[i].alignment_offset);
33
kh.handle_id = kbuf->buf.blocks[i].handle_id;
34
kh.pa = kbuf->buf.blocks[i].pa;
37
return ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_FREE, &kh);
40
// Rolls back a partially completed allocation: if the last block failed a
// consistency check (last_flags carries its driver-reported reuse flags),
// that block is freed with a flag set matching its actual state; then the
// remaining blocks are released via pcilib_free_kernel_memory.
// NOTE(review): here `flags` is mutated while `failed_flags` keeps the
// original value, and the last block is freed with `failed_flags` — in the
// upstream variant of this code the *reduced* flag set is applied to the
// failed last block instead. With lines elided from this excerpt I cannot
// tell which variable was intended; verify against the driver contract.
static void pcilib_cancel_kernel_memory(pcilib_t *ctx, pcilib_kmem_list_t *kbuf, pcilib_kmem_flags_t flags, int last_flags) {
43
if (!kbuf->buf.n_blocks) return;
45
// consistency error during processing of last block, special treatment could be needed
47
pcilib_kmem_flags_t failed_flags = flags;
49
if (last_flags&KMEM_FLAG_REUSED_PERSISTENT) flags&=~PCILIB_KMEM_FLAG_PERSISTENT;
50
if (last_flags&KMEM_FLAG_REUSED_HW) flags&=~PCILIB_KMEM_FLAG_HARDWARE;
52
// Only if the flag sets diverged does the last block need separate handling.
if (failed_flags != flags) {
53
// --n_blocks drops the failed block from the list before the bulk free below.
ret = pcilib_free_kernel_buffer(ctx, kbuf, --kbuf->buf.n_blocks, failed_flags);
54
if (ret) pcilib_error("PCIDRIVER_IOC_KMEM_FREE ioctl have failed");
58
pcilib_free_kernel_memory(ctx, kbuf, flags);
61
// Allocates (or re-attaches to previously allocated) `nmemb` kernel-memory
// blocks of `size` bytes via PCIDRIVER_IOC_KMEM_ALLOC, mmaps each block into
// user space, and links the resulting descriptor into ctx->kmem_list.
// Returns an opaque handle (really a pcilib_kmem_list_t*); the visible error
// paths set `error` and fall through to cleanup, presumably returning NULL.
// The function tracks three reuse properties across blocks — reused (tristate),
// persistent, hardware-referenced — and treats any mismatch between blocks,
// or between the driver-reported state and the caller's `flags`, as
// PCILIB_ERROR_INVALID_STATE, rolling back via pcilib_cancel_kernel_memory.
// NOTE(review): many lines are elided from this excerpt (declarations of
// err/ret/i/persistent/hardware/addr, closing braces, several error-path
// conditionals); comments on elided behavior are inferences to confirm.
pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type_t type, size_t nmemb, size_t size, size_t alignment, pcilib_kmem_use_t use, pcilib_kmem_flags_t flags) {
63
const char *error = NULL;
69
pcilib_tristate_t reused = PCILIB_TRISTATE_NO;
73
kmem_handle_t kh = {0};
75
// List header plus a trailing array of nmemb per-block address records.
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)malloc(sizeof(pcilib_kmem_list_t) + nmemb * sizeof(pcilib_kmem_addr_t));
77
// (Elided guard: reached only when malloc returned NULL.)
pcilib_error("Memory allocation has failed");
81
memset(kbuf, 0, sizeof(pcilib_kmem_list_t) + nmemb * sizeof(pcilib_kmem_addr_t));
83
// Switch the device fd's mmap mode so subsequent mmap() maps kernel buffers.
ret = ioctl( ctx->handle, PCIDRIVER_IOC_MMAP_MODE, PCIDRIVER_MMAP_KMEM );
85
pcilib_error("PCIDRIVER_IOC_MMAP_MODE ioctl have failed");
94
// (Elided: per-type setup of the kmem_handle before the allocation loop.)
if ((type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
96
} else if ((type&PCILIB_KMEM_TYPE_MASK) != PCILIB_KMEM_TYPE_PAGE) {
100
for ( i = 0; i < nmemb; i++) {
104
if ((type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
105
// For REGION allocations `alignment` appears to carry the region base
// physical address; blocks are laid out contiguously — TODO confirm.
kh.pa = alignment + i * size;
108
ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_ALLOC, &kh);
110
// On ioctl failure only the i blocks allocated so far need cleanup.
kbuf->buf.n_blocks = i;
111
error = "PCIDRIVER_IOC_KMEM_ALLOC ioctl have failed";
115
kbuf->buf.blocks[i].handle_id = kh.handle_id;
116
kbuf->buf.blocks[i].pa = kh.pa;
117
kbuf->buf.blocks[i].size = kh.size;
119
// First block fixes the initial reuse state for the whole set.
if (!i) reused = (kh.flags&KMEM_FLAG_REUSED)?PCILIB_TRISTATE_YES:PCILIB_TRISTATE_NO;
121
if (kh.flags&KMEM_FLAG_REUSED) {
122
if (!i) reused = PCILIB_TRISTATE_YES;
123
// A reused block after a fresh one => mixed (partial) reuse.
else if (!reused) reused = PCILIB_TRISTATE_PARTIAL;
126
// persistent starts at -1 (unknown); first reused block decides it,
// later blocks must agree or the set is inconsistent.
if (persistent < 0) {
127
/*if (((flags&PCILIB_KMEM_FLAG_PERSISTENT) == 0)&&(kh.flags&KMEM_FLAG_REUSED_PERSISTENT)) err = PCILIB_ERROR_INVALID_STATE;
128
else*/ persistent = (kh.flags&KMEM_FLAG_REUSED_PERSISTENT)?1:0;
129
} else if ((kh.flags&KMEM_FLAG_REUSED_PERSISTENT) == 0) err = PCILIB_ERROR_INVALID_STATE;
130
} else if (kh.flags&KMEM_FLAG_REUSED_PERSISTENT) err = PCILIB_ERROR_INVALID_STATE;
134
/*if (((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0)&&(kh.flags&KMEM_FLAG_REUSED_HW)) err = PCILIB_ERROR_INVALID_STATE;
135
else*/ hardware = (kh.flags&KMEM_FLAG_REUSED_HW)?1:0;
136
} else if ((kh.flags&KMEM_FLAG_REUSED_HW) == 0) err = PCILIB_ERROR_INVALID_STATE;
137
} else if (kh.flags&KMEM_FLAG_REUSED_HW) err = PCILIB_ERROR_INVALID_STATE;
140
// (Elided branch: block was NOT reused.)
if (!i) reused = PCILIB_TRISTATE_NO;
141
// A fresh block after reused ones => mixed (partial) reuse.
else if (reused) reused = PCILIB_TRISTATE_PARTIAL;
143
// Reusing persistent/hardware buffers is only legal if the caller asked for it.
if ((persistent > 0)&&((flags&PCILIB_KMEM_FLAG_PERSISTENT) == 0)) err = PCILIB_ERROR_INVALID_STATE;
144
if ((hardware > 0)&&((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0)) err = PCILIB_ERROR_INVALID_STATE;
148
// Block i is now owned by the list (cleanup must include it from here on).
kbuf->buf.n_blocks = i + 1;
152
// Compute the offset that aligns pa up to the requested boundary
// (pages are naturally aligned, so PAGE type is exempt).
if ((kh.align)&&((kh.type&PCILIB_KMEM_TYPE_MASK) != PCILIB_KMEM_TYPE_PAGE)) {
153
if (kh.pa % kh.align) kbuf->buf.blocks[i].alignment_offset = kh.align - kh.pa % kh.align;
154
// NOTE(review): subtracts the full alignment, not just the offset used;
// looks like a conservative usable-size reduction — verify intent.
kbuf->buf.blocks[i].size -= kh.align;
157
// Map size + padding so the aligned region is fully covered.
addr = mmap( 0, kbuf->buf.blocks[i].size + kbuf->buf.blocks[i].alignment_offset, PROT_WRITE | PROT_READ, MAP_SHARED, ctx->handle, 0 );
158
if ((!addr)||(addr == MAP_FAILED)) {
159
kbuf->buf.n_blocks = i + 1;
160
error = "Failed to mmap allocated kernel memory";
164
kbuf->buf.blocks[i].ua = addr;
165
// if (use == PCILIB_KMEM_USE_DMA_PAGES) {
166
// memset(addr, 10, kbuf->buf.blocks[i].size + kbuf->buf.blocks[i].alignment_offset);
169
// Sub-page offset of the buffer within its first mapped page.
kbuf->buf.blocks[i].mmap_offset = kh.pa & ctx->page_mask;
172
//This is possible in the case of error (nothing is allocated yet) or if buffers are not reused
173
if (persistent < 0) persistent = 0;
174
if (hardware < 0) hardware = 0;
177
// (Elided: error/cleanup path begins here.)
pcilib_kmem_flags_t free_flags = 0;
179
// for the sake of simplicity always clean partialy reused buffers
180
if ((persistent == PCILIB_TRISTATE_PARTIAL)||((persistent <= 0)&&(flags&PCILIB_KMEM_FLAG_PERSISTENT))) {
181
free_flags |= PCILIB_KMEM_FLAG_PERSISTENT;
184
if ((hardware <= 0)&&(flags&PCILIB_KMEM_FLAG_HARDWARE)) {
185
free_flags |= PCILIB_KMEM_FLAG_HARDWARE;
188
// do not clean if we have reused peresistent buffers
189
// we don't care about -1, because it will be the value only if no buffers actually allocated
190
if ((!persistent)||(reused != PCILIB_TRISTATE_YES)) {
191
// Pass the last block's driver flags only on consistency errors, so the
// cancel path can special-case that block.
pcilib_cancel_kernel_memory(ctx, kbuf, free_flags, err?kh.flags:0);
194
if (!error) error = "Reused buffers are inconsistent";
201
// Success path: cache block 0's addresses as the handle's primary address.
memcpy(&kbuf->buf.addr, &kbuf->buf.blocks[0], sizeof(pcilib_kmem_addr_t));
204
// Encode reuse state plus persistent/hardware bits for pcilib_kmem_is_reused().
kbuf->buf.reused = reused|(persistent?PCILIB_KMEM_REUSE_PERSISTENT:0)|(hardware?PCILIB_KMEM_REUSE_HARDWARE:0);
205
kbuf->buf.n_blocks = nmemb;
208
// Push the descriptor onto the head of the context's kmem list.
kbuf->next = ctx->kmem_list;
209
if (ctx->kmem_list) ctx->kmem_list->prev = kbuf;
210
ctx->kmem_list = kbuf;
212
return (pcilib_kmem_handle_t*)kbuf;
215
// Releases every block of a kernel-memory handle and unlinks the descriptor
// from ctx->kmem_list. Individual block failures are remembered (first error
// wins) and reported via pcilib_error at the end; the loop still attempts to
// free all remaining blocks.
// NOTE(review): declarations of i/ret/err, the error check around the final
// pcilib_error call, and the free(kbuf) are elided from this excerpt.
void pcilib_free_kernel_memory(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_kmem_flags_t flags) {
218
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
220
// if linked in to the list
221
if (kbuf->next) kbuf->next->prev = kbuf->prev;
222
if (kbuf->prev) kbuf->prev->next = kbuf->next;
223
// No prev: kbuf may be the list head — advance the head pointer.
else if (ctx->kmem_list == kbuf) ctx->kmem_list = kbuf->next;
225
for (i = 0; i < kbuf->buf.n_blocks; i++) {
226
ret = pcilib_free_kernel_buffer(ctx, kbuf, i, flags);
227
// Keep freeing remaining blocks; remember only the first failure.
if ((ret)&&(!err)) err = ret;
233
pcilib_error("PCIDRIVER_IOC_KMEM_FREE ioctl have failed");
238
// Synchronizes (cache-flush/invalidate for DMA) every block of the handle in
// the given direction by delegating to pcilib_kmem_sync_block per block.
// Returns 0 on success or PCILIB_ERROR_FAILED on the first block that fails.
// NOTE(review): declarations of i/ret and the error-check conditional around
// the pcilib_error call are elided from this excerpt.
int pcilib_kmem_sync(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_kmem_sync_direction_t dir) {
241
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
243
for (i = 0; i < kbuf->buf.n_blocks; i++) {
244
ret = pcilib_kmem_sync_block(ctx, k, dir, i);
246
pcilib_error("PCIDRIVER_IOC_KMEM_SYNC ioctl have failed");
247
// Abort on the first failing block.
return PCILIB_ERROR_FAILED;
255
// Synchronizes a single block of the handle via PCIDRIVER_IOC_KMEM_SYNC.
// Returns 0 on success or PCILIB_ERROR_FAILED if the ioctl fails.
// NOTE(review): the declaration of `ks` (a kmem_sync_t, presumably with
// ks.dir set from `dir`) and the success return are elided from this excerpt.
int pcilib_kmem_sync_block(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_kmem_sync_direction_t dir, size_t block) {
258
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
261
ks.handle.handle_id = kbuf->buf.blocks[block].handle_id;
262
ks.handle.pa = kbuf->buf.blocks[block].pa;
263
ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_SYNC, &ks);
265
pcilib_error("PCIDRIVER_IOC_KMEM_SYNC ioctl have failed");
266
return PCILIB_ERROR_FAILED;
272
// Returns the user-space virtual address of the handle's primary (block 0)
// buffer, adjusted by the alignment padding and the sub-page mmap offset.
void *pcilib_kmem_get_ua(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
273
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
274
return kbuf->buf.addr.ua + kbuf->buf.addr.alignment_offset + kbuf->buf.addr.mmap_offset;
277
// Returns the aligned physical address of the handle's primary (block 0) buffer.
uintptr_t pcilib_kmem_get_pa(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
278
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
279
return kbuf->buf.addr.pa + kbuf->buf.addr.alignment_offset;
282
// Returns the bus address of the handle's primary buffer. Computed identically
// to pcilib_kmem_get_pa — bus address is presumably taken equal to the
// physical address on this platform (no IOMMU translation) — TODO confirm.
uintptr_t pcilib_kmem_get_ba(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
283
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
284
return kbuf->buf.addr.pa + kbuf->buf.addr.alignment_offset;
287
// Returns the user-space virtual address of block `block`, adjusted by its
// alignment padding and sub-page mmap offset. No bounds check on `block`.
void *pcilib_kmem_get_block_ua(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block) {
288
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
289
return kbuf->buf.blocks[block].ua + kbuf->buf.blocks[block].alignment_offset + kbuf->buf.blocks[block].mmap_offset;
292
// Returns the aligned physical address of block `block`. No bounds check.
uintptr_t pcilib_kmem_get_block_pa(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block) {
293
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
294
return kbuf->buf.blocks[block].pa + kbuf->buf.blocks[block].alignment_offset;
297
// Returns the bus address of block `block`. Same computation as
// pcilib_kmem_get_block_pa — bus address presumably equals physical address
// here (no IOMMU translation) — TODO confirm.
uintptr_t pcilib_kmem_get_block_ba(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block) {
298
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
299
return kbuf->buf.blocks[block].pa + kbuf->buf.blocks[block].alignment_offset;
302
// Returns the usable size in bytes of block `block` (already reduced by the
// alignment adjustment made at allocation time). No bounds check.
size_t pcilib_kmem_get_block_size(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block) {
303
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
304
return kbuf->buf.blocks[block].size;
307
// Returns the reuse state recorded at allocation time: the tristate reuse
// value OR'ed with PCILIB_KMEM_REUSE_PERSISTENT / PCILIB_KMEM_REUSE_HARDWARE
// bits (see pcilib_alloc_kernel_memory, where buf.reused is composed).
pcilib_kmem_reuse_state_t pcilib_kmem_is_reused(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
308
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
309
return kbuf->buf.reused;