/tomo/pyhst

To get this branch, use:
bzr branch http://darksoft.org/webbzr/tomo/pyhst
78 by Suren A. Chilingaryan
Add COPYING and fix license statements
1
/*
2
 * The PyHST program is Copyright (C) 2002-2011 of the
3
 * European Synchrotron Radiation Facility (ESRF) and
4
 * Karlsruhe Institute of Technology (KIT).
5
 *
6
 * PyHST is free software: you can redistribute it and/or modify it
7
 * under the terms of the GNU General Public License as published by the
8
 * Free Software Foundation, either version 3 of the License, or
9
 * (at your option) any later version.
10
 * 
11
 * PyHST is distributed in the hope that it will be useful, but
12
 * WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14
 * See the GNU General Public License for more details.
15
 * 
16
 * You should have received a copy of the GNU General Public License along
17
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
18
 */
19
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
20
#ifndef _HW_SCHED_H
21
#define _HW_SCHED_H
22
32 by csa
Fix crash in FFTW3 initialization and cleanup in multi-threaded case
23
#include <glib.h>
24
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
25
typedef struct HWSchedT *HWSched;
26
#ifdef HW_USE_THREADS
27
typedef GMutex *HWMutex;
28
#else /* HW_USE_THREADS */
29
typedef void *HWMutex;
30
#endif /* HW_USE_THREADS */
31
32
33
#include "hw_thread.h"
34
35
enum HWSchedModeT {
36
    HW_SCHED_MODE_PREALLOCATED = 0,
37
    HW_SCHED_MODE_SEQUENTIAL
38
};
39
typedef enum HWSchedModeT HWSchedMode;
40
151 by Suren A. Chilingaryan
Multislice mode: preload into the GPU memory complete slices
41
enum HWSchedChunkT {
42
    HW_SCHED_CHUNK_INVALID = -1,
43
    HW_SCHED_CHUNK_INIT = -2,
44
    HW_SCHED_CHUNK_FREE = -3,
45
    HW_SCHED_CHUNK_TERMINATOR = -4
46
};
47
typedef enum HWSchedChunkT HWSchedChunk;
48
49
enum HWSchedFlagsT {
50
    HW_SCHED_FLAG_INIT_CALL = 1,        //! Executed in each thread before real chunks
51
    HW_SCHED_FLAG_FREE_CALL = 2,        //! Executed in each thread after real chunks
52
    HW_SCHED_FLAG_TERMINATOR_CALL = 4   //! Executed in one of the threads after all threads are done
53
};
54
typedef enum HWSchedFlagsT HWSchedFlags;
55
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
56
57
#define HW_SINGLE_MODE
58
//#define HW_DETECT_CPU_CORES
59
#define HW_MAX_THREADS 128
60
61
#ifdef HW_SINGLE_MODE
62
    typedef HWRunFunction HWEntry;
63
# define hw_run_entry(runs, entry) entry
64
#else /* HW_SINGLE_MODE */
65
    typedef int HWEntry;
66
# define hw_run_entry(runs, entry) runs[entry]
67
#endif /* HW_SINGLE_MODE */
68
69
#ifndef HW_HIDE_DETAILS
70
struct HWSchedT {
71
    int status;
72
    int started;
73
    
74
    int n_threads;
75
    HWThread thread[HW_MAX_THREADS];
76
    
77
#ifdef HW_USE_THREADS
78
    GCond *job_cond, *compl_cond;
79
    GMutex *job_cond_mutex, *compl_cond_mutex, *data_mutex;
32 by csa
Fix crash in FFTW3 initialization and cleanup in multi-threaded case
80
    GMutex *sync_mutex;
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
81
#endif /* HW_USE_THREADS */
82
    
83
    HWSchedMode mode;
84
    HWSchedMode saved_mode;
151 by Suren A. Chilingaryan
Multislice mode: preload into the GPU memory complete slices
85
    HWSchedFlags flags;
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
86
    int *n_blocks;
87
    int *cur_block;
88
89
    HWEntry entry;
90
    void *ctx;
91
};
92
typedef struct HWSchedT HWSchedS;
93
#endif /* HW_HIDE_DETAILS */
94
95
# ifdef __cplusplus
96
extern "C" {
97
# endif
98
99
HWSched hw_sched_create(int ppu_count);
49 by root
Merge /home/matthias/dev/pyHST
100
int hw_sched_init(void);
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
101
void hw_sched_destroy(HWSched ctx);
49 by root
Merge /home/matthias/dev/pyHST
102
int hw_sched_get_cpu_count(void);
103
151 by Suren A. Chilingaryan
Multislice mode: preload into the GPU memory complete slices
104
int hw_sched_set_sequential_mode(HWSched ctx, int *n_blocks, int *cur_block, HWSchedFlags flags);
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
105
int hw_sched_get_chunk(HWSched ctx, int thread_id);
106
int hw_sched_schedule_task(HWSched ctx, void *appctx, HWEntry entry);
107
int hw_sched_wait_task(HWSched ctx);
108
int hw_sched_execute_task(HWSched ctx, void *appctx, HWEntry entry);
109
110
int hw_sched_schedule_thread_task(HWSched ctx, void *appctx, HWEntry entry);
111
int hw_sched_wait_thread_task(HWSched ctx);
112
int hw_sched_execute_thread_task(HWSched ctx, void *appctx, HWEntry entry);
113
49 by root
Merge /home/matthias/dev/pyHST
114
HWMutex hw_sched_create_mutex(void);
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
115
void hw_sched_destroy_mutex(HWMutex ctx);
116
117
#ifdef HW_USE_THREADS
118
# define hw_sched_lock(ctx, type) g_mutex_lock(ctx->type##_mutex)
119
# define hw_sched_unlock(ctx, type) g_mutex_unlock(ctx->type##_mutex)
120
# define hw_sched_broadcast(ctx, type) g_cond_broadcast(ctx->type##_cond)
121
# define hw_sched_signal(ctx, type) g_cond_signal(ctx->type##_cond)
122
# define hw_sched_wait(ctx, type) g_cond_wait(ctx->type##_cond, ctx->type##_cond_mutex)
123
49 by root
Merge /home/matthias/dev/pyHST
124
#define hw_sched_create_mutex(void) g_mutex_new()
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
125
#define hw_sched_destroy_mutex(ctx) g_mutex_free(ctx)
126
#define hw_sched_lock_mutex(ctx) g_mutex_lock(ctx)
127
#define hw_sched_unlock_mutex(ctx) g_mutex_unlock(ctx)
128
#else /* HW_USE_THREADS */
129
# define hw_sched_lock(ctx, type)
130
# define hw_sched_unlock(ctx, type)
131
# define hw_sched_broadcast(ctx, type)
132
# define hw_sched_signal(ctx, type)
133
# define hw_sched_wait(ctx, type)
134
49 by root
Merge /home/matthias/dev/pyHST
135
#define hw_sched_create_mutex(void) NULL
30 by csa
Multi-GPU, Multi-CPU, and Hybrid modes support
136
#define hw_sched_destroy_mutex(ctx)
137
#define hw_sched_lock_mutex(ctx)
138
#define hw_sched_unlock_mutex(ctx)
139
#endif /* HW_USE_THREADS */
140
141
# ifdef __cplusplus
142
}
143
# endif
144
145
#endif /* _HW_SCHED_H */
146