/*
 * mbuff.h - user/kernel interface to the mbuff shared memory driver
 * (shared memory areas accessed through the /dev/mbuff device).
 */
#ifndef __MBUFF_H
#define __MBUFF_H
#define RTL_SHM_MISC_MINOR 254
/* max length of the name of the shared memory area */
#define MBUFF_NAME_LEN 32
/* max number of attached mmaps per one area */
#define MBUFF_MAX_MMAPS 16
#ifdef SHM_DEMO
#define MBUFF_DEV_NAME "./mbuff"
#else
#define MBUFF_DEV_NAME "/dev/mbuff"
#endif
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/fs.h>
#endif
/*
All ioctl()s are called with name filled in with the appropriate
name for the mbuff to be referenced. A call to any ioctl() makes
that mbuff "active", i.e., read(), write(), and mmap() use that
mbuff. I didn't do this yet.
ioctl()s:
ALLOCATE:
Call with size=0 to just find out if the area exists; no
mbuff will be allocated. Otherwise, allocate an mbuff with
that size.
DEALLOCATE:
Decrease reference count for an mbuff.
issues:
- using this method, it is *really* easy to get dangling
mbuffs, i.e., mbuffs that nobody owns. When you close
/dev/mbuff, it would be a good idea to decrease the ref
count of the active mbuff.
*/
#define IOCTL_MBUFF_INFO 0
#define IOCTL_MBUFF_ALLOCATE 1
#define IOCTL_MBUFF_DEALLOCATE 2
#define IOCTL_MBUFF_SELECT 3
#define IOCTL_MBUFF_LAST IOCTL_MBUFF_SELECT
/* Request passed from user space to the driver through the mbuff
 * ioctl()s (INFO/ALLOCATE/DEALLOCATE/SELECT).  This layout is part of
 * the user<->kernel ABI - do not reorder or resize the fields. */
struct mbuff_request_struct{
unsigned int flags;              /* currently unused; set to 0 */
char name[MBUFF_NAME_LEN+1];     /* NUL-terminated area name */
size_t size;                     /* requested size; 0 = just query existence */
unsigned int reserved[4];        /* padding for future extensions; zero it */
};
#ifndef __KERNEL__
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
/* you can use mbuff_alloc several times, the buffer
will be deallocated when mbuff_free was called the same number of times
AND area is not mmaped anywhere anymore
AND it is not used in the kernel as well */
/* Allocate (or attach to an existing) shared memory area called `name'
 * of at least `size' bytes and map it into this process.
 *
 * Increases the area's user-space reference count; release with
 * mbuff_free().  mbuff_alloc() may be called several times for the same
 * name - the area is freed only after a matching number of mbuff_free()
 * calls, all mappings are gone, and the kernel no longer uses it.
 *
 * Returns a pointer to the mapped area, or NULL on failure. */
inline void * mbuff_alloc(const char *name, int size) {
	int fd;
	struct mbuff_request_struct req={0,"default",0,{0}};
	void * mbuf;

	if(name) strncpy(req.name,name,sizeof(req.name));
	/* strncpy does not guarantee termination for long names */
	req.name[sizeof(req.name)-1]='\0';
	req.size = size;
	if(( fd = open(MBUFF_DEV_NAME,O_RDWR) ) < 0 ){
		perror("open failed");
		return NULL;
	}
	/* Fail early if the kernel could not allocate the area; the original
	 * code mmap'ed unconditionally and relied on mmap failing later. */
	if( ioctl(fd,IOCTL_MBUFF_ALLOCATE,&req) < 0 ){
		perror("mbuff allocate ioctl failed");
		close(fd);
		return NULL;
	}
	mbuf=mmap(NULL,size,PROT_WRITE|PROT_READ,MAP_SHARED|MAP_FILE,fd, 0);
	if( mbuf == MAP_FAILED )	/* MAP_FAILED, not (void *)-1 */
		mbuf=NULL;
	/* the mapping itself keeps the area referenced; fd is no longer needed */
	close(fd);
	return mbuf;
}
/* Drop one user-space reference on the area called `name' and unmap the
 * mapping at `mbuf'.  The DEALLOCATE ioctl returns the area size, which
 * is what munmap() needs; the area itself is destroyed only once every
 * reference (user, kernel, and mmap) is gone. */
inline void mbuff_free(const char *name, void * mbuf) {
	struct mbuff_request_struct req = {0, "default", 0, {0}};
	int fd, size;

	if (name)
		strncpy(req.name, name, sizeof(req.name));
	req.name[sizeof(req.name) - 1] = '\0';	/* force termination */

	fd = open(MBUFF_DEV_NAME, O_RDWR);
	if (fd < 0) {
		perror("open failed");
		return;
	}
	size = ioctl(fd, IOCTL_MBUFF_DEALLOCATE, &req);
	if (size > 0)
		munmap(mbuf, size);
	close(fd);
	/* in general, it could return size, but typical "free" is void */
}
/* mbuff_attach and mbuff_detach do not change usage counters -
area allocated using mbuff_attach will be deallocated on program exit/kill
if nobody else uses it - mbuff_detach is not needed -
the only lock keeping area allocated is mmap */
/* Map the shared memory area `name' of at least `size' bytes WITHOUT
 * holding a long-term reference: the area is allocated, mmap'ed, and the
 * ioctl reference is dropped immediately, so the mmap itself is the only
 * lock keeping the area alive.  It is therefore released automatically
 * on process exit/kill; mbuff_detach() is optional.
 *
 * Returns a pointer to the mapped area, or NULL on failure. */
inline char * mbuff_attach(const char *name, int size) {
	int fd;
	struct mbuff_request_struct req={0,"default",0,{0}};
	void * mbuf;

	if(name) strncpy(req.name,name,sizeof(req.name));
	/* strncpy does not guarantee termination for long names */
	req.name[sizeof(req.name)-1]='\0';
	req.size = size;
	if(( fd = open(MBUFF_DEV_NAME,O_RDWR) ) < 0 ){
		perror("open failed");
		return NULL;
	}
	/* Fail early if the kernel could not allocate the area; the original
	 * code mmap'ed unconditionally and relied on mmap failing later. */
	if( ioctl(fd,IOCTL_MBUFF_ALLOCATE,&req) < 0 ){
		perror("mbuff allocate ioctl failed");
		close(fd);
		return NULL;
	}
	mbuf=mmap(NULL,size,PROT_WRITE|PROT_READ,MAP_SHARED|MAP_FILE,fd, 0);
	/* drop the ioctl reference at once - the area will be deallocated
	 * on the last munmap, not now */
	ioctl(fd,IOCTL_MBUFF_DEALLOCATE,&req);
	if( mbuf == MAP_FAILED )	/* MAP_FAILED, not (void *)-1 */
		mbuf=NULL;
	close(fd);
	return (char *)mbuf;
}
/* Undo mbuff_attach(): unmap the mapping at `mbuf'.  Since attach holds
 * no ioctl reference, only a SELECT ioctl is issued here - it reports
 * the area size needed by munmap() without touching the refcount. */
inline void mbuff_detach(const char *name, void * mbuf) {
	struct mbuff_request_struct req = {0, "default", 0, {0}};
	int fd, size;

	if (name)
		strncpy(req.name, name, sizeof(req.name));
	req.name[sizeof(req.name) - 1] = '\0';	/* force termination */

	fd = open(MBUFF_DEV_NAME, O_RDWR);
	if (fd < 0) {
		perror("open failed");
		return;
	}
	size = ioctl(fd, IOCTL_MBUFF_SELECT, &req);
	if (size > 0)
		munmap(mbuf, size);
	close(fd);
	/* in general, it could return size, but typical "free" is void */
}
#else
/* Kernel-side bookkeeping for one shared memory area.  Areas are kept in
 * a doubly-linked list and looked up by name. */
struct mbuff{
struct mbuff *next;		/* list linkage */
struct mbuff *prev;
char name[MBUFF_NAME_LEN+1];	/* NUL-terminated area name */
struct vm_area_struct *(vm_area[MBUFF_MAX_MMAPS]);	/* active user mappings */
struct file *file;		/* device file the area was created through */
unsigned char *buf;		/* the buffer memory itself */
unsigned long size;		/* buffer size in bytes */
int count; /* number of allocations from user space */
int kcount; /* number of allocations from kernel space */
int open_cnt; /* #times opened */
int open_mode;			/* NOTE(review): presumably f_mode of opener - confirm in driver */
};
extern int shm_allocate(const char *name,unsigned int size, void **shm);
extern int shm_deallocate(void * shm);
/* Kernel-space counterpart of the user mbuff_alloc(): delegate to
 * shm_allocate(), which fills `area' on success (positive return).
 * Returns the buffer address, or NULL on failure. */
inline void * mbuff_alloc(const char *name, int size) {
	void *area = NULL;
	return (shm_allocate(name, size, &area) > 0) ? area : NULL;
}
/* Kernel-space counterpart of the user mbuff_free(): release one kernel
 * reference.  Deallocation is done by address; the name is accepted only
 * for signature symmetry with the user-space API (it would be no problem
 * to deallocate using only the name). */
inline void mbuff_free(const char *name, void * mbuf) {
	(void)name;	/* unused: address identifies the area */
	shm_deallocate(mbuf);
}
/* in kernel space implementing "nonlocking" attach and detach
would be very unsafe (deallocation from user space possible at any time) */
#define mbuff_attach(name,size) mbuff_alloc(name,size)
#define mbuff_detach(name,mbuf) mbuff_free(name,mbuf)
extern char mbuff_default_name[];
extern int mbuff_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg);
#ifdef LINUX_V22
extern int mbuff_mmap(struct file *file, struct vm_area_struct *vma);
#else
extern int mbuff_mmap(struct inode *inode, struct file *file,
struct vm_area_struct *vma);
#endif
extern int mbuff_open_with_name( struct inode *inode, struct file *file,
const char * name);
#endif
#endif