/*
 * Copyright 1996 The Australian National University.
 * Copyright 1996 Fujitsu Laboratories Limited
 *
 * This software may be distributed under the terms of the Gnu
 * Public License version 2 or later
 */
/*
 * linux/drivers/ap1000/ringbuf.c
 *
 * This provides the /proc/XX/ringbuf interface to the Tnet ring buffer
 */
#define _APLIB_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/ap1000/pgtapmmu.h>
#include <asm/ap1000/apreg.h>
#include <asm/ap1000/apservice.h>
#include <asm/ap1000/aplib.h>

/* we have a small number of reserved ring buffers to ensure that at
   least one parallel program can always run */
#define RBUF_RESERVED 4
#define RBUF_RESERVED_ORDER 5

static struct {
        char *rb_ptr;
        char *shared_ptr;
        int used;
} reserved_ringbuf[RBUF_RESERVED];


void ap_ringbuf_init(void)
{
        int i,j;
        char *rb_ptr, *shared_ptr;
        int rb_size = PAGE_SIZE * (1<<RBUF_RESERVED_ORDER);

        /* preallocate the reserved ring buffers and their shared pages,
           marking the pages reserved so they can be mapped into user space */
        for (i=0;i<RBUF_RESERVED;i++) {
                rb_ptr = (char *)__get_free_pages(GFP_ATOMIC,RBUF_RESERVED_ORDER);
                if (!rb_ptr) {
                        printk("failed to preallocate ringbuf %d\n",i);
                        return;
                }
                for (j = MAP_NR(rb_ptr); j <= MAP_NR(rb_ptr+rb_size-1); j++)
                        set_bit(PG_reserved,&mem_map[j].flags);

                shared_ptr = (char *)__get_free_page(GFP_ATOMIC);
                if (!shared_ptr) {
                        printk("failed to preallocate shared page %d\n",i);
                        return;
                }
                set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);

                reserved_ringbuf[i].used = 0;
                reserved_ringbuf[i].rb_ptr = rb_ptr;
                reserved_ringbuf[i].shared_ptr = shared_ptr;
        }
}


void exit_ringbuf(struct task_struct *tsk)
{
        int i;

        if (!tsk->ringbuf) return;

        if (tsk->ringbuf->ringbuf) {
                char *rb_ptr = tsk->ringbuf->ringbuf;
                char *shared_ptr = tsk->ringbuf->shared;
                int order = tsk->ringbuf->order;
                int rb_size = PAGE_SIZE * (1<<order);

                /* a reserved buffer just goes back to the pool; anything
                   else is unreserved and freed */
                for (i=0;i<RBUF_RESERVED;i++)
                        if (rb_ptr == reserved_ringbuf[i].rb_ptr) break;

                if (i < RBUF_RESERVED) {
                        reserved_ringbuf[i].used = 0;
                } else {
                        for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++)
                                clear_bit(PG_reserved,&mem_map[i].flags);
                        free_pages((unsigned)rb_ptr,order);

                        clear_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
                        free_page((unsigned)shared_ptr);
                }
        }

        kfree_s(tsk->ringbuf,sizeof(*(tsk->ringbuf)));
        tsk->ringbuf = NULL;
}


/*
 * map the ring buffer into users memory
 */
static int cap_map(int rb_size)
{
        struct task_struct *tsk=current;
        int i;
        char *rb_ptr=NULL;
        char *shared_ptr=NULL;
        int order = 0;
        int error,old_uid;

        error = verify_area(VERIFY_WRITE,(char *)RBUF_VBASE,rb_size);
        if (error) return error;

        if (!MPP_IS_PAR_TASK(tsk->taskid)) {
                printk("ringbuf_mmap called from non-parallel task\n");
                return -EINVAL;
        }

        if (tsk->ringbuf) return -EINVAL;

        /* two copies of the ring buffer follow the control/queue area,
           so each copy is half of what remains */
        rb_size -= RBUF_RING_BUFFER_OFFSET;
        rb_size >>= 1;

        switch (rb_size/1024) {
        case 128:
                order = 5;
                break;
        case 512:
                order = 7;
                break;
        case 2048:
                order = 9;
                break;
        case 8192:
                order = 11;
                break;
        default:
                printk("ringbuf_mmap with invalid size %d\n",rb_size);
                return -EINVAL;
        }

        /* try to grab one of the preallocated reserved buffers first */
        if (order == RBUF_RESERVED_ORDER) {
                for (i=0;i<RBUF_RESERVED;i++)
                        if (!reserved_ringbuf[i].used) {
                                rb_ptr = reserved_ringbuf[i].rb_ptr;
                                shared_ptr = reserved_ringbuf[i].shared_ptr;
                                reserved_ringbuf[i].used = 1;
                                break;
                        }
        }

        if (!rb_ptr) {
                rb_ptr = (char *)__get_free_pages(GFP_KERNEL,order);
                if (!rb_ptr) return -ENOMEM;
                for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++)
                        set_bit(PG_reserved,&mem_map[i].flags);

                shared_ptr = (char *)__get_free_page(GFP_KERNEL);
                if (!shared_ptr) return -ENOMEM;
                set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
        }

#if 1
        /* lock the ring buffer region in memory */
        old_uid = current->euid;
        current->euid = 0;
        error = sys_mlock(RBUF_VBASE,2*rb_size+RBUF_RING_BUFFER_OFFSET);
        current->euid = old_uid;
        if (error) {
                printk("ringbuffer mlock failed\n");
                return error;
        }
#endif

        /* the queue pages */
#define MAP_QUEUE(offset,phys) \
        io_remap_page_range(RBUF_VBASE + offset, \
                            phys<<PAGE_SHIFT, \
                            PAGE_SIZE, APMMU_PAGE_SHARED, MSC_IOBUS)

        /* map the MSC queue pages, the shared page and both copies of
           the ring buffer at RBUF_VBASE in the user's address space */

        if (!tsk->ringbuf) {
                tsk->ringbuf = (void *)kmalloc(sizeof(*(tsk->ringbuf)),GFP_ATOMIC);
                if (!tsk->ringbuf) return -ENOMEM;
        }

        memset(tsk->ringbuf,0,sizeof(*tsk->ringbuf));
        tsk->ringbuf->ringbuf = rb_ptr;
        tsk->ringbuf->shared = shared_ptr;
        tsk->ringbuf->order = order;
        tsk->ringbuf->write_ptr = mmu_v2p((unsigned)rb_ptr)<<1;
        tsk->ringbuf->vaddr = RBUF_VBASE;

        memset(tsk->ringbuf->vaddr+RBUF_SHARED_PAGE_OFF,0,PAGE_SIZE);

        {
                struct _kernel_cap_shared *_kernel =
                        (struct _kernel_cap_shared *)tsk->ringbuf->vaddr;
                _kernel->rbuf_read_ptr = (rb_size>>5) - 1;
        }

        return 0;
}


static int ringbuf_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        int numcells, *phys_cells;
        extern struct cap_init cap_init;

        switch (cmd) {
        case CAP_GETINIT:
                if (copy_to_user((char *)arg,(char *)&cap_init,sizeof(cap_init)))
                        return -EFAULT;
                break;

        case CAP_SYNC:
                /* arg points at (numcells, pointer to the cell list) */
                if (verify_area(VERIFY_READ, (void *) arg, sizeof(int)*2))
                        return -EFAULT;
                if (get_user(numcells,(int *)arg))
                        return -EFAULT;
                if (get_user((unsigned)phys_cells, ((int *)arg)+1))
                        return -EFAULT;
                if (verify_area(VERIFY_READ,phys_cells,sizeof(int)*numcells))
                        return -EFAULT;
                return ap_sync(numcells,phys_cells);
                break;

        case CAP_SETGANG:
        {
                int v;
                if (get_user(v,(int *)arg))
                        return -EFAULT;
                mpp_set_gang_factor(v);
                break;
        }

        case CAP_MAP:
                return cap_map(arg);

        default:
                printk("unknown ringbuf ioctl %d\n",cmd);
                return -EINVAL;
        }

        return 0;
}


static struct file_operations proc_ringbuf_operations = {
        NULL,                   /* lseek */
        NULL,                   /* read */
        NULL,                   /* write */
        NULL,                   /* readdir */
        NULL,                   /* poll */
        ringbuf_ioctl,          /* ioctl */
        NULL,                   /* mmap */
        NULL,                   /* no special open code */
        NULL,                   /* flush */
        NULL,                   /* no special release code */
        NULL                    /* can't fsync */
};

struct inode_operations proc_ringbuf_inode_operations = {
        &proc_ringbuf_operations,       /* default base directory file-ops */
};