Permalink
Browse files

usrp_e : Add driver for USRP-E1XX FPGA interface.

  • Loading branch information...
1 parent d8e727b commit c3396d428dee357d6204e8ff55bbeaf735b06d29 Philip Balister committed May 11, 2011
Showing with 1,446 additions and 14 deletions.
  1. +6 −14 drivers/misc/Kconfig
  2. +1 −0 drivers/misc/Makefile
  3. +1,349 −0 drivers/misc/usrp_e.c
  4. +90 −0 include/linux/usrp_e.h
View
20 drivers/misc/Kconfig
@@ -404,21 +404,12 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
-config VMWARE_BALLOON
- tristate "VMware Balloon Driver"
- depends on X86
+config USRP_E
+ tristate "USRP-E FPGA interface driver"
+ default n
help
- This is VMware physical memory management driver which acts
- like a "balloon" that can be inflated to reclaim physical pages
- by reserving them in the guest and invalidating them in the
- monitor, freeing up the underlying machine pages so they can
- be allocated to other guests. The balloon can also be deflated
- to allow the guest to use more physical memory.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called vmw_balloon.
+ This driver is for the Ettus Research USRP Embedded Software
+ Defined Radio platform.
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
@@ -451,6 +442,7 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+ If you do not plan to run this kernel on that hardware choose N.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
View
1 drivers/misc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
+obj-$(CONFIG_USRP_E) += usrp_e.o
obj-y += eeprom/
obj-y += cb710/
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
View
1,349 drivers/misc/usrp_e.c
@@ -0,0 +1,1349 @@
+/*
+ * -*- linux-c -*-
+ * Interface for USRP Embedded from Ettus Research, LLC.
+ * This driver uses the GPMC interface on the OMAP3 to pass data
+ * to/from a Spartan 3 FPGA.
+ *
+ * Copyright (C) Ettus Research, LLC
+ *
+ * Written by Philip Balister <philip@opensdr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/semaphore.h>
#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <plat/gpmc.h>
#include <plat/gpio.h>
#include <plat/dma.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>

#include <linux/usrp_e.h>
+
+#define TX_SPACE_AVAILABLE_GPIO 144
+#define RX_DATA_READY_GPIO 146
+
+static atomic_t use_count = ATOMIC_INIT(0);
+static atomic_t mapped = ATOMIC_INIT(0);
+static int shutting_down;
+
+struct spi_regs_wb;
+struct i2c_regs_wb;
+
/* Per-device state; a single static instance (usrp_e_devp) exists. */
struct usrp_e_dev {
	struct cdev cdev;		/* char device registration */
	unsigned long mem_base;		/* GPMC CS4 physical base (data path) */
	unsigned long control_mem_base;	/* GPMC CS6 physical base (control regs) */
	u32 *ioaddr;			/* ioremap'd data window */
	u8 *ctl_addr;			/* ioremap'd (nocache) control window */
	struct spi_regs_wb *ctl_spi;	/* wishbone SPI core inside control window */
	struct i2c_regs_wb *ctl_i2c;	/* wishbone I2C core inside control window */
	spinlock_t fpga_lock;

	atomic_t n_overruns;		/* rx frames dropped (counter only) */
	atomic_t n_underruns;		/* tx starvation events (counter only) */

} *usrp_e_devp;

/* Per-direction OMAP DMA channel state plus cached buffer addresses. */
struct dma_data {
	int ch;				/* OMAP DMA channel number */
	struct omap_dma_channel_params params;

	unsigned long virt_from;
	unsigned long virt_to;
	unsigned long phys_from;
	unsigned long phys_to;
};
+
+#define UE_REG_MISC_RX_LEN (10)
+#define UE_REG_MISC_TX_LEN (12)
+#define UE_SR_CLEAR_GLOBAL ((8 << 7) + 4*48 + 4*2)
+
+#if 0 // Clean out this code
+#define MISC_REGS_BASE 0x0
+
+#define UE_REG_MISC_LED (MISC_REGS_BASE + 0)
+
+#define UE_REG_MISC_RX_LEN (MISC_REGS_BASE + 10)
+#define UE_REG_MISC_TX_LEN (MISC_REGS_BASE + 12)
+
+#define UE_REG_SLAVE(n) ((n)<<7)
+#define UE_REG_SR_ADDR(n) ((UE_REG_SLAVE(5)) + (4*(n)))
+
+#define UE_SR_CLEAR_FIFO UE_REG_SR_ADDR(48)
+#endif
+
+#define CTL_SPI_BASE 0x100
+
+struct spi_regs_wb {
+ u32 txrx0;
+ u32 txrx1;
+ u32 txrx2;
+ u32 txrx3;
+ u32 ctrl;
+ u32 div;
+ u32 ss;
+};
+
+/* Defines for spi ctrl register */
+#define UE_SPI_CTRL_ASS (BIT(13))
+#define UE_SPI_CTRL_IE (BIT(12))
+#define UE_SPI_CTRL_LSB (BIT(11))
+/* defines for TXNEG and RXNEG in usrp_e.h so user can pass them to driver. */
+#define UE_SPI_CTRL_GO_BSY (BIT(8))
+#define UE_SPI_CTRL_CHAR_LEN_MASK 0x7f
+
+
+#define CTL_I2C_BASE 0x180
+#if 1
+struct i2c_regs_wb {
+ u8 prescalar_lo;
+ u8 dummy;
+ u8 dummy1;
+ u8 dummy2;
+ u8 prescalar_hi;
+ u8 dummy3;
+ u8 dummy4;
+ u8 dummy5;
+ u8 ctrl;
+ u8 dummy6;
+ u8 dummy7;
+ u8 dummy8;
+ u8 data;
+ u8 dummy9;
+ u8 dummy10;
+ u8 dummy11;
+ u8 cmd_status;
+};
+#else
+struct i2c_regs_wb {
+ u16 prescalar_lo;
+ u16 dummy2;
+ u16 prescalar_hi;
+ u16 dummy3;
+ u16 ctrl;
+ u16 dummy6;
+ u16 data;
+ u16 dummy9;
+ u16 cmd_status;
+};
+#endif
+
+#define I2C_CTRL_EN (BIT(7)) /* core enable */
+#define I2C_CTRL_IE (BIT(6)) /* interrupt enable */
+
+/* STA, STO, RD, WR, and IACK bits are cleared automatically */
+
+#define I2C_CMD_START (BIT(7))
+#define I2C_CMD_STOP (BIT(6))
+#define I2C_CMD_RD (BIT(5))
+#define I2C_CMD_WR (BIT(4))
+#define I2C_CMD_NACK (BIT(3))
+#define I2C_CMD_RSVD_2 (BIT(2))
+#define I2C_CMD_RSVD_1 (BIT(1))
+#define I2C_CMD_IACK (BIT(0))
+
+#define I2C_ST_RXACK (BIT(7))
+#define I2C_ST_BUSY (BIT(6))
+#define I2C_ST_AL (BIT(5))
+#define I2C_RSVD_4 (BIT(4))
+#define I2C_RSVD_3 (BIT(3))
+#define I2C_RSVD_2 (BIT(2))
+#define I2C_ST_TIP (BIT(1))
+#define I2C_ST_IP (BIT(0))
+
+#define MAX_WB_DIV 4
+#define MASTER_CLK_RATE 64000000
+#define PRESCALAR(wb_div) (((MASTER_CLK_RATE/(wb_div)) / (5 * 100000)) - 1)
+
+static __u16 prescalar_values[MAX_WB_DIV+1] = {
+ 0xffff,
+ PRESCALAR(1),
+ PRESCALAR(2),
+ PRESCALAR(3),
+ PRESCALAR(4),
+};
+
+static struct dma_data *rx_dma;
+static struct dma_data *tx_dma;
+
/* One DMA frame: bus address + kernel virtual address of an SZ_2K slot. */
struct ring_buffer_entry {
	unsigned long dma_addr;
	__u8 *frame_addr;
};

/* An rx or tx ring: shared flags page, frame table, and backing pages. */
struct ring_buffer {
	struct ring_buffer_info (*rbi)[];	/* ownership flags, mmap'd to user space */
	struct ring_buffer_entry (*rbe)[];	/* one entry per frame */
	int num_pages;				/* backing pages (two frames per page) */
	unsigned long (*pages)[];		/* backing page virtual addresses */
};

static struct ring_buffer tx_rb;
static struct ring_buffer rx_rb;

/* Ring geometry advertised to user space via USRP_E_GET_RB_INFO. */
static struct usrp_e_ring_buffer_size_t rb_size;
+
+#define NUM_PAGES_RX_FLAGS 1
+#define NUM_RX_FRAMES 100
+#define NUM_PAGES_TX_FLAGS 1
+#define NUM_TX_FRAMES 100
+
+static int tx_rb_read;
+static int rx_rb_write;
+
+static DEFINE_SPINLOCK(tx_rb_read_lock);
+static DEFINE_SPINLOCK(rx_rb_write_lock);
+
+static int alloc_ring_buffer(struct ring_buffer *rb,
+ unsigned int num_bufs, enum dma_data_direction direction);
+static void delete_ring_buffer(struct ring_buffer *rb,
+ unsigned int num_bufs, enum dma_data_direction direction);
+static int alloc_ring_buffers(void);
+static void init_ring_buffer(struct ring_buffer *rb, int num_bufs,
+ int init_flags, enum dma_data_direction direction);
+
+static dev_t usrp_e_dev_number;
+static struct class *usrp_e_class;
+
+#define DEVICE_NAME "usrp_e"
+
+static const struct file_operations usrp_e_fops;
+
+static irqreturn_t space_available_irqhandler(int irq, void *dev_id);
+static irqreturn_t data_ready_irqhandler(int irq, void *dev_id);
+static void usrp_rx_dma_irq(int ch, u16 stat, void *data);
+static void usrp_tx_dma_irq(int ch, u16 stat, void *data);
+
+static DECLARE_WAIT_QUEUE_HEAD(data_received_queue);
+static DECLARE_WAIT_QUEUE_HEAD(space_available_queue);
+static DECLARE_WAIT_QUEUE_HEAD(received_data_from_user);
+static DECLARE_WAIT_QUEUE_HEAD(tx_rb_space_available);
+
+static void usrp_e_spi_init(void);
+static void usrp_e_i2c_init(void);
+
+static int init_dma_controller(void);
+static void release_dma_controller(void);
+static int get_frame_from_fpga_start(void);
+static int get_frame_from_fpga_finish(void);
+static int send_frame_to_fpga_start(void);
+static int send_frame_to_fpga_finish(void);
+
+static int rx_dma_active;
+static int tx_dma_active;
+
/*
 * Module init: allocate the device struct, register the char device,
 * claim GPMC chip selects 4 (data path) and 6 (control window), the
 * FPGA handshake and debug GPIOs, and allocate both ring buffers.
 *
 * NOTE(review): the error paths below leak earlier acquisitions
 * (chrdev region, class, cdev, chip selects, GPIOs) instead of
 * unwinding, and several return -1 rather than a -Exxx code.
 */
static int __init
usrp_e_init(void)
{
	int ret;
	struct usrp_e_dev *p;

	printk(KERN_DEBUG "usrp_e entering driver initialization\n");

	/* Reserve one minor number for /dev/usrp_e0. */
	if (alloc_chrdev_region(&usrp_e_dev_number, 0, 1, DEVICE_NAME) < 0) {
		printk(KERN_DEBUG "Can't register device\n");
		return -1;
	}

	usrp_e_class = class_create(THIS_MODULE, DEVICE_NAME);

	usrp_e_devp = kzalloc(sizeof(struct usrp_e_dev), GFP_KERNEL);
	if (!usrp_e_devp) {
		printk(KERN_ERR "Bad kmalloc\n");
		return -ENOMEM;
	}

	p = usrp_e_devp; /* Shorten var name so I stay sane. */

	printk(KERN_DEBUG "usrp_e data struct malloc'd.\n");

	atomic_set(&p->n_underruns, 0);
	atomic_set(&p->n_overruns, 0);

	printk(KERN_DEBUG "usrp_e Data initialized..\n");

	cdev_init(&p->cdev, &usrp_e_fops);
	p->cdev.owner = THIS_MODULE;

	ret = cdev_add(&p->cdev, MKDEV(MAJOR(usrp_e_dev_number), 0), 1);
	if (ret) {
		printk(KERN_ERR "Bad cdev\n");
		return ret;
	}

	printk(KERN_DEBUG "usrp_e major number : %d\n",
		MAJOR(usrp_e_dev_number));
	/* Creates /dev/usrp_e0. */
	device_create(usrp_e_class, NULL, MKDEV(MAJOR(usrp_e_dev_number), 0),
			NULL, "usrp_e%d", 0);

	printk(KERN_DEBUG "Getting Chip Select\n");

	/* GPMC CS4: 2 KiB window used for the rx/tx data path. */
	if (gpmc_cs_request(4, SZ_2K, &p->mem_base) < 0) {
		printk(KERN_ERR "Failed request for GPMC mem for usrp_e\n");
		return -1;
	}
	printk(KERN_DEBUG "Got CS4, address = %lx\n", p->mem_base);

	if (!request_mem_region(p->mem_base, SZ_2K, "usrp_e")) {
		printk(KERN_ERR "Request_mem_region failed.\n");
		gpmc_cs_free(4);
		return -1;
	}

	p->ioaddr = ioremap(p->mem_base, SZ_2K);
	spin_lock_init(&p->fpga_lock);

	/* GPMC CS6: 2 KiB window for FPGA control registers (SPI/I2C/misc). */
	if (gpmc_cs_request(6, SZ_2K, &p->control_mem_base) < 0) {
		printk(KERN_ERR "Failed request for GPMC control mem for usrp_e\n");
		return -1;
	}
	printk(KERN_DEBUG "Got CS6, address = %lx\n", p->control_mem_base);

	if (!request_mem_region(p->control_mem_base, SZ_2K, "usrp_e_c")) {
		printk(KERN_ERR "Request_mem_region failed.\n");
		gpmc_cs_free(6);
		return -1;
	}

	p->ctl_addr = ioremap_nocache(p->control_mem_base, SZ_2K);


	/* Configure GPIO's */

	/* FPGA -> CPU handshake inputs; polled as levels and also used
	 * as rising-edge interrupt sources (see usrp_e_open). */
	if (!(((gpio_request(TX_SPACE_AVAILABLE_GPIO,
				"TX_SPACE_AVAILABLE_GPIO") == 0) &&
		(gpio_direction_input(TX_SPACE_AVAILABLE_GPIO) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for TX_SPACE_AVAILABLE_GPIO\n");
		return -1;
	}

	if (!(((gpio_request(RX_DATA_READY_GPIO, "RX_DATA_READY_GPIO") == 0) &&
		(gpio_direction_input(RX_DATA_READY_GPIO) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for RX_DATA_READY_GPIO\n");
		return -1;
	}

	/* Debug gpios */
	if (!(((gpio_request(14, "Debug0") == 0) &&
		(gpio_direction_output(14, 0) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for Debug0\n");
		return -1;
	}

	if (!(((gpio_request(21, "Debug1") == 0) &&
		(gpio_direction_output(21, 0) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for Debug1\n");
		return -1;
	}

	if (!(((gpio_request(22, "Debug2") == 0) &&
		(gpio_direction_output(22, 0) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for Debug2\n");
		return -1;
	}

	if (!(((gpio_request(23, "Debug3") == 0) &&
		(gpio_direction_output(23, 0) == 0)))) {
		printk(KERN_ERR "Could not claim GPIO for Debug3\n");
		return -1;
	}

	/* Ring geometry reported to user space by USRP_E_GET_RB_INFO. */
	rb_size.num_pages_rx_flags = NUM_PAGES_RX_FLAGS;
	rb_size.num_rx_frames = NUM_RX_FRAMES;
	rb_size.num_pages_tx_flags = NUM_PAGES_TX_FLAGS;
	rb_size.num_tx_frames = NUM_TX_FRAMES;

	ret = alloc_ring_buffers();
	if (ret < 0)
		return ret;

	/* Initialize various DMA related flags */
	rx_dma_active = 0;
	tx_dma_active = 0;
	shutting_down = 0;

	printk(KERN_DEBUG "usrp_e Driver Initialized.\n");

	return 0;
}
+
/*
 * Module exit: tear down everything usrp_e_init() set up — char
 * device, GPMC windows and chip selects, GPIOs, and the ring buffers.
 */
static void __exit
usrp_e_cleanup(void)
{
	struct usrp_e_dev *p = usrp_e_devp;

	unregister_chrdev_region(usrp_e_dev_number, 1);

	release_mem_region(p->mem_base, SZ_2K);
	release_mem_region(p->control_mem_base, SZ_2K);

	device_destroy(usrp_e_class, MKDEV(MAJOR(usrp_e_dev_number), 0));
	cdev_del(&p->cdev);

	class_destroy(usrp_e_class);

	iounmap(p->ioaddr);
	iounmap(p->ctl_addr);

	/* Give back GPMC chip selects 4 (data) and 6 (control). */
	gpmc_cs_free(4);
	gpmc_cs_free(6);

	printk(KERN_DEBUG "Freeing gpios\n");

	gpio_free(TX_SPACE_AVAILABLE_GPIO);
	gpio_free(RX_DATA_READY_GPIO);

	/* debug */
	gpio_free(14);
	gpio_free(21);
	gpio_free(22);
	gpio_free(23);

	delete_ring_buffer(&tx_rb, rb_size.num_tx_frames, DMA_TO_DEVICE);
	delete_ring_buffer(&rx_rb, rb_size.num_rx_frames, DMA_FROM_DEVICE);

	kfree(p);

	printk(KERN_DEBUG "Leaving cleanup\n");
}
+
+static int
+usrp_e_open(struct inode *inode, struct file *file)
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+ int ret;
+
+ printk(KERN_DEBUG "usrp_e open called, use_count = %d\n",
+ atomic_read(&use_count));
+ if (atomic_add_return(1, &use_count) != 1) {
+ printk(KERN_ERR "use_count = %d\n", atomic_read(&use_count));
+ atomic_dec(&use_count);
+ return -EBUSY;
+ }
+
+ /* reset the FPGA */
+ writew(0, p->ctl_addr + UE_SR_CLEAR_GLOBAL);
+ mdelay(1);
+
+ /* Initialize wishbone SPI and I2C interfaces */
+
+ usrp_e_spi_init();
+ usrp_e_i2c_init();
+
+ ret = init_dma_controller();
+ if (ret < 0)
+ return ret;
+
+ tx_rb_read = 0;
+ rx_rb_write = 0;
+
+ tx_dma_active = 0;
+ rx_dma_active = 0;
+ shutting_down = 0;
+
+ init_ring_buffer(&rx_rb, rb_size.num_rx_frames, RB_KERNEL, DMA_FROM_DEVICE);
+ init_ring_buffer(&tx_rb, rb_size.num_tx_frames, RB_KERNEL, DMA_TO_DEVICE);
+
+ /* Configure interrupts for GPIO pins */
+
+ ret = request_irq(gpio_to_irq(TX_SPACE_AVAILABLE_GPIO),
+ space_available_irqhandler,
+ IRQF_TRIGGER_RISING, "usrp_e_space_available", NULL);
+
+ ret = request_irq(gpio_to_irq(RX_DATA_READY_GPIO),
+ data_ready_irqhandler,
+ IRQF_TRIGGER_RISING, "usrp_e_data_ready", NULL);
+
+ printk(KERN_DEBUG "usrp: leaving open\n");
+ return 0;
+}
+
+static int
+usrp_e_release(struct inode *inode, struct file *file)
+{
+ struct usrp_e_dev *usrp_e_devp = file->private_data;
+
+ printk(KERN_DEBUG "usrp_e release called\n");
+
+ if (atomic_read(&use_count) != 1) {
+ printk(KERN_ERR "Attempt to close usrp_e driver that is not open");
+ return -ENOENT;
+ }
+
+ printk(KERN_DEBUG "Waiting for DMA to become inactive\n");
+ shutting_down = 0;
+ while (tx_dma_active || rx_dma_active)
+ cpu_relax();
+
+ /* Freeing gpio irq's */
+ printk(KERN_DEBUG "Freeing gpio irq's\n");
+
+ free_irq(gpio_to_irq(TX_SPACE_AVAILABLE_GPIO), NULL);
+ free_irq(gpio_to_irq(RX_DATA_READY_GPIO), NULL);
+
+ printk(KERN_DEBUG "Freeing DMA channels\n");
+
+ release_dma_controller();
+
+ usrp_e_devp = 0;
+
+ atomic_dec(&use_count);
+
+ return 0;
+}
+
/*
 * read() stub: data reaches user space through the mmap()ed rx ring,
 * not through this call.  Returning 'count' claims success so naive
 * callers don't spin on short reads.
 */
static ssize_t
usrp_e_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{

	return count;
}
+
/*
 * write() copies no data: frames are placed in the mmap()ed tx ring by
 * user space.  The call only re-kicks the tx DMA engine in case it
 * went idle, then claims 'count' bytes were accepted.
 */
static ssize_t
usrp_e_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{

	send_frame_to_fpga_start();

	return count;
}
+
+static loff_t
+usrp_e_llseek(struct file *file, loff_t offest, int orig)
+{
+ printk(KERN_DEBUG "usrp_e llseek called\n");
+
+ return 0;
+}
+
+static int usrp_e_ctl16(unsigned long arg, int direction)
+{
+ struct usrp_e_ctl16 __user *argp = (struct usrp_e_ctl16 __user *) arg;
+ int i;
+ struct usrp_e_ctl16 ctl;
+
+ if (copy_from_user(&ctl, argp, sizeof(struct usrp_e_ctl16)))
+ return -EFAULT;
+
+ if (ctl.count > 10)
+ return -EINVAL;
+
+ if (direction == 0) {
+ for (i = 0; i < ctl.count; i++)
+ writew(ctl.buf[i], &usrp_e_devp->ctl_addr \
+ [i + ctl.offset]);
+ } else if (direction == 1) {
+ for (i = 0; i < ctl.count; i++)
+ ctl.buf[i] = readw(&usrp_e_devp->ctl_addr \
+ [i + ctl.offset]);
+
+ if (copy_to_user(argp, &ctl, sizeof(struct usrp_e_ctl16)))
+ return -EFAULT;
+ } else
+ return -EFAULT;
+
+ return 0;
+}
+
+static int usrp_e_ctl32(unsigned long arg, int direction)
+{
+ struct usrp_e_ctl32 __user *argp = (struct usrp_e_ctl32 __user *) arg;
+ int i;
+ struct usrp_e_ctl32 ctl;
+
+ if (copy_from_user(&ctl, argp, sizeof(struct usrp_e_ctl32)))
+ return -EFAULT;
+
+ if (ctl.count > 20)
+ return -EINVAL;
+
+ if (direction == 0) {
+ for (i = 0; i < ctl.count; i++)
+ writel(ctl.buf[i], &usrp_e_devp->ctl_addr \
+ [i + ctl.offset]);
+ } else if (direction == 1) {
+ for (i = 0; i < ctl.count; i++)
+ ctl.buf[i] = readl(&usrp_e_devp->ctl_addr \
+ [i + ctl.offset]);
+
+ if (copy_to_user(argp, &ctl, sizeof(struct usrp_e_ctl16)))
+ return -EFAULT;
+
+ } else
+ return -EFAULT;
+
+ return 0;
+}
+
+static int usrp_e_get_rb_info(unsigned long arg)
+{
+ struct usrp_e_ring_buffer_size_t __user *argp = (struct usrp_e_ring_buffer_size_t __user *) arg;
+
+ if (copy_to_user(argp, &rb_size, sizeof(rb_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void usrp_e_spi_init()
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+
+ p->ctl_spi = (struct spi_regs_wb *)(p->ctl_addr + CTL_SPI_BASE);
+ writel(64, &p->ctl_spi->div); /* 1 = Div by 4 (12.5 MHz) */
+}
+
+static int usrp_e_spi_wait(void)
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+
+ while (readl(&p->ctl_spi->ctrl) & UE_SPI_CTRL_GO_BSY) {
+ if (signal_pending(current)) {
+ printk(KERN_DEBUG "Signal received.\n");
+ set_current_state(TASK_RUNNING);
+ return -EINTR;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static int usrp_e_spi(unsigned long __user arg)
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+ struct usrp_e_spi __user *argp = (struct usrp_e_spi __user *) arg;
+ struct usrp_e_spi spi_cmd;
+ int ctrl, ret;
+
+ if (copy_from_user(&spi_cmd, argp, sizeof(struct usrp_e_spi)))
+ return -EFAULT;
+
+ spi_cmd.flags &= (UE_SPI_CTRL_TXNEG | UE_SPI_CTRL_RXNEG);
+ ctrl = UE_SPI_CTRL_ASS | (UE_SPI_CTRL_CHAR_LEN_MASK & spi_cmd.length) \
+ | spi_cmd.flags;
+
+ ret = usrp_e_spi_wait();
+ if (ret < 0)
+ return ret;
+
+ writel((spi_cmd.slave & 0xff), &p->ctl_spi->ss);
+
+ writel(spi_cmd.data, &p->ctl_spi->txrx0);
+
+ writel(ctrl, &p->ctl_spi->ctrl);
+ writel((ctrl | UE_SPI_CTRL_GO_BSY), &p->ctl_spi->ctrl);
+
+ if (spi_cmd.readback) {
+ usrp_e_spi_wait();
+ if (copy_to_user(&argp->data, &p->ctl_spi->txrx0,
+ sizeof(__u32)))
+ return -EFAULT;
+ else
+ return 0;
+ } else
+ return 0;
+
+}
+
+static void usrp_e_i2c_init()
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+ int wb_div;
+
+ p->ctl_i2c = (struct i2c_regs_wb *)(p->ctl_addr + CTL_I2C_BASE);
+
+ writeb(0, &p->ctl_i2c->ctrl); /* disable core */
+
+ /* Assume wb_div is 4, deal with this later */
+ wb_div = 4;
+ if (wb_div > MAX_WB_DIV)
+ wb_div = MAX_WB_DIV;
+
+ writeb((prescalar_values[wb_div] & 0xff), &p->ctl_i2c->prescalar_lo);
+ writeb(((prescalar_values[wb_div] >> 8) & 0xff),
+ &p->ctl_i2c->prescalar_hi);
+ writeb(I2C_CTRL_EN, &p->ctl_i2c->ctrl); /* enable core */
+}
+
+static int usrp_e_i2c_wait(__u32 mask, int chk_ack)
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+
+ while (readb(&p->ctl_i2c->cmd_status) & mask) {
+ if (signal_pending(current)) {
+ printk(KERN_DEBUG "Signal received.\n");
+ set_current_state(TASK_RUNNING);
+ return -EINTR;
+ }
+ schedule();
+ }
+
+ if (chk_ack) {
+ if ((readb(&p->ctl_i2c->cmd_status) & I2C_ST_RXACK) == 0)
+ return 1;
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+static int usrp_e_i2c(unsigned long arg, int direction)
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+ struct usrp_e_i2c __user *argp = (struct usrp_e_i2c __user *) arg;
+ struct usrp_e_i2c tmp;
+ struct usrp_e_i2c *i2c_msg;
+ int ret, len, i;
+
+ if (copy_from_user(&tmp, argp, sizeof(struct usrp_e_i2c)))
+ return -EFAULT;
+
+ i2c_msg = kmalloc(sizeof(struct usrp_e_i2c) + tmp.len, GFP_KERNEL);
+ if (!i2c_msg)
+ return -ENOMEM;
+
+ if (copy_from_user(i2c_msg, argp,
+ (sizeof(struct usrp_e_i2c) + tmp.len)))
+ return -EFAULT;
+
+ if (direction) {
+ /* read */
+ if (i2c_msg->len == 0)
+ return 1;
+
+ usrp_e_i2c_wait(I2C_ST_BUSY, 0);
+
+ writeb(((i2c_msg->addr << 1) | 1), &p->ctl_i2c->data);
+ writeb((I2C_CMD_WR | I2C_CMD_START), &p->ctl_i2c->cmd_status);
+ ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
+ return 2;
+ }
+
+ for (len = i2c_msg->len, i = 0; len > 0; i++, len--) {
+ writeb((I2C_CMD_RD | ((len == 1) ?
+ (I2C_CMD_NACK | I2C_CMD_STOP) : 0)),
+ &p->ctl_i2c->cmd_status);
+ usrp_e_i2c_wait(I2C_ST_TIP, 0);
+ i2c_msg->data[i] = readb(&p->ctl_i2c->data);
+ }
+ if (copy_to_user(argp, i2c_msg, (sizeof(struct usrp_e_i2c) +
+ tmp.len)))
+ return -EFAULT;
+ } else {
+ /* write */
+ usrp_e_i2c_wait(I2C_ST_BUSY, 0);
+ writeb(((i2c_msg->addr << 1) | 0), &p->ctl_i2c->data);
+ writeb((I2C_CMD_WR | I2C_CMD_START |
+ (i2c_msg->len == 0 ? I2C_CMD_STOP : 0)),
+ &p->ctl_i2c->cmd_status);
+ ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
+ return 2;
+ }
+ for (len = i2c_msg->len, i = 0; len > 0; i++, len--) {
+ writeb(i2c_msg->data[i], &p->ctl_i2c->data);
+ writeb((I2C_CMD_WR | (len == 1 ? I2C_CMD_STOP : 0)),
+ &p->ctl_i2c->cmd_status);
+ ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
+ return 2;
+ }
+ }
+
+ }
+
+
+ return 1;
+}
+
+static long usrp_e_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+
+ switch (cmd) {
+ case USRP_E_WRITE_CTL16:
+ return usrp_e_ctl16(arg, 0);
+
+ case USRP_E_READ_CTL16:
+ return usrp_e_ctl16(arg, 1);
+
+ case USRP_E_WRITE_CTL32:
+ return usrp_e_ctl32(arg, 0);
+
+ case USRP_E_READ_CTL32:
+ return usrp_e_ctl32(arg, 1);
+
+ case USRP_E_SPI:
+ return usrp_e_spi(arg);
+
+ case USRP_E_I2C_WRITE:
+ return usrp_e_i2c(arg, 0);
+
+ case USRP_E_I2C_READ:
+ return usrp_e_i2c(arg, 1);
+
+ case USRP_E_GET_RB_INFO:
+ return usrp_e_get_rb_info(arg);
+
+ case USRP_E_GET_COMPAT_NUMBER:
+ return USRP_E_COMPAT_NUMBER;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static unsigned int usrp_e_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(filp, &data_received_queue, wait);
+ poll_wait(filp, &tx_rb_space_available, wait);
+
+ /* Make sure write is active (if needed) before sleeping */
+ send_frame_to_fpga_start();
+
+ /* Make sure to read in case the rx ring buffer is empty */
+ get_frame_from_fpga_start();
+
+ spin_lock_irqsave(&rx_rb_write_lock, flags);
+ if (rx_rb_write == 0) {
+ if ((*rx_rb.rbi)[rb_size.num_rx_frames - 1].flags & RB_USER)
+ mask |= POLLIN | POLLRDNORM;
+ } else {
+ if ((*rx_rb.rbi)[rx_rb_write - 1].flags & RB_USER)
+ mask |= POLLIN | POLLRDNORM;
+ }
+ spin_unlock_irqrestore(&rx_rb_write_lock, flags);
+
+ spin_lock_irqsave(&tx_rb_read_lock, flags);
+ if (tx_rb_read == 0) {
+ if ((*tx_rb.rbi)[rb_size.num_tx_frames - 1].flags & RB_KERNEL)
+ mask |= POLLOUT | POLLWRNORM;
+ } else {
+ if ((*tx_rb.rbi)[tx_rb_read - 1].flags & RB_KERNEL)
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ spin_unlock_irqrestore(&tx_rb_read_lock, flags);
+
+ return mask;
+
+}
+
+/* The mmap code is based on code in af_packet.c */
+
+static void usrp_e_mm_open(struct vm_area_struct *vma)
+{
+
+ atomic_inc(&mapped);
+}
+
+static void usrp_e_mm_close(struct vm_area_struct *vma)
+{
+
+ atomic_dec(&mapped);
+}
+
/* VMA callbacks: only mapping reference counting, no fault handler. */
static const struct vm_operations_struct usrp_e_mmap_ops = {
	.open = usrp_e_mm_open,
	.close = usrp_e_mm_close,
};
+
+static int usrp_e_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long size, expected_size;
+ unsigned int i;
+ unsigned long start;
+ int err;
+ struct page *page;
+
+ if (vma->vm_pgoff)
+ return -EINVAL;
+
+ /* Verify the user will map the entire tx and rx ring buffer space */
+ expected_size = (rb_size.num_rx_frames + rb_size.num_tx_frames) * (PAGE_SIZE >> 1)
+ + (rb_size.num_pages_rx_flags + rb_size.num_pages_tx_flags) * PAGE_SIZE;
+
+ size = vma->vm_end - vma->vm_start;
+ printk(KERN_DEBUG "Size = %ld, expected sixe = %ld\n", size, expected_size);
+
+ if (size != expected_size)
+ return -EINVAL;
+
+ start = vma->vm_start;
+
+ page = virt_to_page(rx_rb.rbi);
+ err = vm_insert_page(vma, start, page);
+ if (err)
+ return -EINVAL;
+
+ start += PAGE_SIZE;
+
+ for (i = 0; i < rx_rb.num_pages; ++i) {
+ struct page *page = virt_to_page((*rx_rb.pages)[i]);
+ err = vm_insert_page(vma, start, page);
+ if (err)
+ return -EINVAL;
+
+ start += PAGE_SIZE;
+ }
+
+ page = virt_to_page(tx_rb.rbi);
+ err = vm_insert_page(vma, start, page);
+ if (err)
+ return -EINVAL;
+
+ start += PAGE_SIZE;
+
+ for (i = 0; i < tx_rb.num_pages; ++i) {
+ struct page *page = virt_to_page((*tx_rb.pages)[i]);
+
+ err = vm_insert_page(vma, start, page);
+ if (err)
+ return err;
+
+ start += PAGE_SIZE;
+ }
+
+// vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ vma->vm_ops = &usrp_e_mmap_ops;
+
+ return 0;
+}
+
/* Character-device entry points for /dev/usrp_e0. */
static const struct file_operations usrp_e_fops = {
	.owner = THIS_MODULE,
	.open = usrp_e_open,
	.release = usrp_e_release,
	.read = usrp_e_read,
	.write = usrp_e_write,
	.llseek = usrp_e_llseek,
	.unlocked_ioctl = usrp_e_ioctl,
	.poll = usrp_e_poll,
	.mmap = usrp_e_mmap,
};
+
/* Module metadata and init/exit registration. */
MODULE_VERSION("0.2");
MODULE_ALIAS(DEVICE_NAME);
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_AUTHOR("Philip Balister <philip@opensdr.com>");
MODULE_LICENSE("GPL v2");

module_init(usrp_e_init);
module_exit(usrp_e_cleanup);
+
+static irqreturn_t space_available_irqhandler(int irq, void *dev_id)
+{
+ int serviced = IRQ_NONE;
+
+ send_frame_to_fpga_start();
+
+ serviced = IRQ_HANDLED;
+
+ return serviced;
+}
+
+static void usrp_rx_dma_irq(int ch, u16 stat, void *data)
+{
+
+ rx_dma_active = 0;
+
+ get_frame_from_fpga_finish();
+
+}
+
+static void usrp_tx_dma_irq(int ch, u16 stat, void *data)
+{
+
+ tx_dma_active = 0;
+
+ send_frame_to_fpga_finish();
+
+}
+
+static irqreturn_t data_ready_irqhandler(int irq, void *dev_id)
+{
+ int serviced = IRQ_NONE;
+
+ get_frame_from_fpga_start();
+
+ serviced = IRQ_HANDLED;
+
+ return serviced;
+}
+
+static int init_dma_controller()
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+
+ rx_dma = kzalloc(sizeof(struct dma_data), GFP_KERNEL);
+ if (!rx_dma) {
+ printk(KERN_ERR "Failed to allocate memory for rx_dma struct.");
+ return -ENOMEM;
+ }
+
+ if (omap_request_dma(OMAP_DMA_NO_DEVICE, "usrp-e-rx",
+ usrp_rx_dma_irq, (void *) rx_dma, &rx_dma->ch)) {
+ printk(KERN_ERR "Could not get rx DMA channel for usrp_e\n");
+ return -ENOMEM;
+ }
+ printk(KERN_DEBUG "rx_dma->ch %d\n", rx_dma->ch);
+
+ rx_dma->phys_from = p->mem_base;
+
+ memset(&rx_dma->params, 0, sizeof(rx_dma->params));
+ rx_dma->params.data_type = OMAP_DMA_DATA_TYPE_S16;
+
+ rx_dma->params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ rx_dma->params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+
+ rx_dma->params.src_start = p->mem_base;
+ rx_dma->params.dst_start = rx_dma->phys_to;
+
+ rx_dma->params.src_ei = 1;
+ rx_dma->params.src_fi = 1;
+ rx_dma->params.dst_ei = 1;
+ rx_dma->params.dst_fi = 1;
+
+ rx_dma->params.elem_count = 1024;
+ rx_dma->params.frame_count = 1;
+
+ rx_dma->params.read_prio = DMA_CH_PRIO_HIGH;
+ rx_dma->params.write_prio = DMA_CH_PRIO_LOW;
+
+ omap_set_dma_params(rx_dma->ch, &rx_dma->params);
+
+// Play with these with a real application
+ omap_set_dma_src_burst_mode(rx_dma->ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(rx_dma->ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_data_pack(rx_dma->ch, 1);
+ omap_set_dma_dest_data_pack(rx_dma->ch, 1);
+
+#if 0 // Need to find implentations of the endian calls
+ omap_set_dma_src_endian_type(rx_dma->ch, OMAP_DMA_BIG_ENDIAN);
+ omap_set_dma_dst_endian_type(rx_dma->ch, OMAP_DMA_LITTLE_ENDIAN);
+#endif
+
+ tx_dma = kzalloc(sizeof(struct dma_data), GFP_KERNEL);
+ if (!tx_dma) {
+ printk(KERN_ERR "Failed to allocate memory for tx_dma struct.");
+ return -ENOMEM;
+ }
+
+ if (omap_request_dma(OMAP_DMA_NO_DEVICE, "usrp-e-tx",
+ usrp_tx_dma_irq, (void *) tx_dma, &tx_dma->ch)) {
+ printk(KERN_ERR "Could not get tx DMA channel for usrp_e\n");
+ return -ENOMEM;
+ }
+
+ printk(KERN_DEBUG "tx_dma->ch %d\n", tx_dma->ch);
+
+ tx_dma->phys_from = p->mem_base;
+
+ memset(&tx_dma->params, 0, sizeof(tx_dma->params));
+ tx_dma->params.data_type = OMAP_DMA_DATA_TYPE_S16;
+
+ tx_dma->params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ tx_dma->params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+
+ tx_dma->params.src_start = tx_dma->phys_from;
+ tx_dma->params.dst_start = p->mem_base;
+
+ tx_dma->params.src_ei = 1;
+ tx_dma->params.src_fi = 1;
+ tx_dma->params.dst_ei = 1;
+ tx_dma->params.dst_fi = 1;
+
+ tx_dma->params.elem_count = 1024;
+ tx_dma->params.frame_count = 1;
+
+ tx_dma->params.read_prio = DMA_CH_PRIO_LOW;
+ tx_dma->params.write_prio = DMA_CH_PRIO_HIGH;
+
+ omap_set_dma_params(tx_dma->ch, &tx_dma->params);
+
+// Play with these with a real application
+ omap_set_dma_src_burst_mode(tx_dma->ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(tx_dma->ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_data_pack(tx_dma->ch, 1);
+ omap_set_dma_dest_data_pack(tx_dma->ch, 1);
+
+ return 0;
+}
+
+static void release_dma_controller()
+{
+
+ omap_free_dma(rx_dma->ch);
+ omap_free_dma(tx_dma->ch);
+
+ kfree(rx_dma);
+ kfree(tx_dma);
+}
+
+static int get_frame_from_fpga_start()
+{
+ struct usrp_e_dev *p = usrp_e_devp;
+ struct ring_buffer_info *rbi;
+ struct ring_buffer_entry *rbe;
+ u16 elements_to_read;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx_rb_write_lock, flags);
+ rbi = &(*rx_rb.rbi)[rx_rb_write];
+ rbe = &(*rx_rb.rbe)[rx_rb_write];
+ spin_unlock_irqrestore(&rx_rb_write_lock, flags);
+
+ /* Check for space available in the ring buffer */
+ /* If no space, drop data. A read call will restart dma transfers. */
+ if ((rbi->flags & RB_KERNEL) && (gpio_get_value(RX_DATA_READY_GPIO)) && !rx_dma_active && !shutting_down) {
+
+ rx_dma_active = 1;
+
+ elements_to_read = readw(p->ctl_addr + UE_REG_MISC_RX_LEN);
+ if (elements_to_read > 1024) {
+ printk(KERN_ERR "usrp_e: FPGA has bad transfer size of %d\n", elements_to_read);
+ goto out;
+ }
+
+ rbi->flags = RB_DMA_ACTIVE;
+
+// writew(1, p->ctl_addr + 54);
+ rbi->len = elements_to_read << 1;
+
+// writew(2, p->ctl_addr + 54);
+ omap_set_dma_dest_addr_size(rx_dma->ch, rbe->dma_addr,
+ elements_to_read);
+
+// writew(3, p->ctl_addr + 54);
+ omap_start_dma(rx_dma->ch);
+
+// writew(4, p->ctl_addr + 54);
+ dma_sync_single_for_device(NULL, rbe->dma_addr, SZ_2K, DMA_FROM_DEVICE);
+ }
+
+out:
+ return 0;
+
+}
+
+
/*
 * rx DMA completion half: hand the filled slot to user space
 * (RB_USER), advance the write index with wraparound, wake poll()ers,
 * then immediately try to start the next transfer.  Called from
 * usrp_rx_dma_irq() in interrupt context.
 */
static int get_frame_from_fpga_finish()
{
	unsigned long flags;

	/* Make the DMA'd data visible to the CPU before flagging it. */
	dma_sync_single_for_cpu(NULL, (*rx_rb.rbe)[rx_rb_write].dma_addr, SZ_2K, DMA_FROM_DEVICE);

	spin_lock_irqsave(&rx_rb_write_lock, flags);
	(*rx_rb.rbi)[rx_rb_write].flags = RB_USER;
	rx_rb_write++;
	if (rx_rb_write == rb_size.num_rx_frames)
		rx_rb_write = 0;
	spin_unlock_irqrestore(&rx_rb_write_lock, flags);

	wake_up_interruptible(&data_received_queue);

	rx_dma_active = 0;

	get_frame_from_fpga_start();

	return 0;
}
+
/*
 * Start a tx DMA transfer if the next ring slot holds user data
 * (RB_USER), the FPGA signals space available, no tx DMA is in
 * flight, and we are not shutting down.  Runs from both irq and
 * process context; completion is usrp_tx_dma_irq() ->
 * send_frame_to_fpga_finish().  Restarted by write()/poll() when the
 * pump goes idle.
 */
static int send_frame_to_fpga_start()
{
	struct usrp_e_dev *p = usrp_e_devp;
	struct ring_buffer_info *rbi;
	struct ring_buffer_entry *rbe;
	u16 elements_to_write;
	unsigned long flags;

	/* Check if there is data to write to the FPGA, if so send it */
	/* Otherwise, do nothing. Process is restarted by calls to write */

	spin_lock_irqsave(&tx_rb_read_lock, flags);
	rbi = &(*tx_rb.rbi)[tx_rb_read];
	rbe = &(*tx_rb.rbe)[tx_rb_read];
	spin_unlock_irqrestore(&tx_rb_read_lock, flags);

	if ((rbi->flags & RB_USER) && !tx_dma_active && (gpio_get_value(TX_SPACE_AVAILABLE_GPIO)) && !shutting_down) {
		tx_dma_active = 1;

		/* len is in bytes; the FPGA takes a 16-bit element count. */
		elements_to_write = ((rbi->len) >> 1);

		writew(elements_to_write, p->ctl_addr + UE_REG_MISC_TX_LEN);

		rbi->flags = RB_DMA_ACTIVE;

		omap_set_dma_src_addr_size(tx_dma->ch, rbe->dma_addr,
				elements_to_write);

		/* Barrier: make sure setup writes land before the DMA kicks. */
		dsb();

		omap_start_dma(tx_dma->ch);
	}
	return 0;
}
+
/*
 * tx DMA completion half: return the sent slot to the kernel pool
 * (RB_KERNEL), advance the read index with wraparound, wake writers
 * blocked in poll(), then immediately try to queue the next frame.
 * Called from usrp_tx_dma_irq() in interrupt context.
 */
static int send_frame_to_fpga_finish()
{
	unsigned long flags;

	spin_lock_irqsave(&tx_rb_read_lock, flags);
	(*tx_rb.rbi)[tx_rb_read].flags = RB_KERNEL;

	tx_rb_read++;
	if (tx_rb_read == rb_size.num_tx_frames)
		tx_rb_read = 0;
	spin_unlock_irqrestore(&tx_rb_read_lock, flags);

	wake_up_interruptible(&tx_rb_space_available);

	tx_dma_active = 0;

	send_frame_to_fpga_start();

	return 0;
}
+
+static int alloc_ring_buffer(struct ring_buffer *rb,
+ unsigned int num_bufs, enum dma_data_direction direction)
+{
+ int i;
+
+ rb->rbi = (void *) __get_free_page(GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN);
+
+ rb->rbe = kzalloc(sizeof(struct ring_buffer_entry) * num_bufs, GFP_KERNEL);
+ if (!rb) {
+ printk(KERN_ERR "Failed to allocate memory for rb entries\n");
+ return -ENOMEM;
+ }
+
+ rb->num_pages = (num_bufs & 1) ? ((num_bufs + 1) / 2) : (num_bufs / 2);
+
+ rb->pages = kzalloc(sizeof(unsigned long) * rb->num_pages, GFP_KERNEL);
+ if (!(rb->pages)) {
+ printk(KERN_ERR "Failed to allocate memory for rb page entries\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rb->num_pages; i++) {
+ (*rb->pages)[i] = __get_free_page(GFP_KERNEL | __GFP_DMA | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN);
+
+ (*(rb->rbe))[i*2].frame_addr =
+ (void *) (*(rb->pages))[i];
+ (*(rb->rbe))[i*2 + 1].frame_addr =
+ (void *) ((*(rb->pages))[i] + SZ_2K);
+ if (!(*(rb->rbe))[i*2].frame_addr || !(*(rb->rbe))[i*2 + 1].frame_addr) {
+ printk(KERN_ERR "Failed to allocate memory dma buf\n");
+ return -ENOMEM;
+ }
+
+ (*(rb->rbe))[i*2].dma_addr = dma_map_single(NULL, (*(rb->rbe))[i*2].frame_addr, SZ_2K, direction);
+ (*(rb->rbe))[i*2 + 1].dma_addr = dma_map_single(NULL, (*(rb->rbe))[i*2 + 1].frame_addr, SZ_2K, direction);
+ if (!(*(rb->rbe))[i*2].dma_addr || !(*(rb->rbe))[i*2 + 1].dma_addr) {
+ printk(KERN_ERR "Failed to get physical address for dma buf\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void delete_ring_buffer(struct ring_buffer *rb,
+ unsigned int num_bufs, enum dma_data_direction direction)
+{
+ unsigned int i;
+ unsigned int num_pages;
+
+ printk(KERN_DEBUG "Entering delete_ring_buffer\n");
+
+ num_pages = (num_bufs & 1) ? ((num_bufs + 1) / 2) : (num_bufs / 2);
+
+ for (i = 0; i < num_pages; i++) {
+ dma_unmap_single(NULL, (*rb->rbe)[i*2].dma_addr, SZ_2K, direction);
+ dma_unmap_single(NULL, (*rb->rbe)[i*2 + 1].dma_addr, SZ_2K, direction);
+ free_page((*rb->pages)[i]);
+ }
+
+ free_page((unsigned long) rb->rbi);
+
+ kfree(rb->pages);
+ kfree(rb->rbe);
+
+ printk(KERN_DEBUG "Leaving delete_ring_buffer\n");
+}
+
+static int alloc_ring_buffers()
+{
+
+ if (alloc_ring_buffer(&tx_rb, rb_size.num_rx_frames, DMA_TO_DEVICE) < 0)
+ return -ENOMEM;
+ if (alloc_ring_buffer(&rx_rb, rb_size.num_tx_frames, DMA_FROM_DEVICE) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void init_ring_buffer(struct ring_buffer *rb, int num_bufs,
+ int initial_flags, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < num_bufs; i++) {
+ dma_sync_single_for_device(NULL, (*rb->rbe)[i].dma_addr,
+ SZ_2K, direction);
+ dma_sync_single_for_cpu(NULL, (*rb->rbe)[i].dma_addr,
+ SZ_2K, direction);
+ (*rb->rbi)[i].flags = initial_flags;
+ }
+
+}
+
View
90 include/linux/usrp_e.h
@@ -0,0 +1,90 @@
+
+/*
+ * Copyright (C) 2010 Ettus Research, LLC
+ *
+ * Written by Philip Balister <philip@opensdr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __USRP_E_H
+#define __USRP_E_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * This header is exported to userspace (it lives in include/linux/ and
+ * uses only __uNN types), so it must not rely on kernel-internal macros
+ * such as BIT() — all bit masks below are spelled as plain shifts.
+ */
+
+/* 16-bit FPGA register access: 'count' values starting at 'offset'. */
+struct usrp_e_ctl16 {
+ __u32 offset;
+ __u32 count;
+ __u16 buf[20];
+};
+
+/* 32-bit FPGA register access: 'count' values starting at 'offset'. */
+struct usrp_e_ctl32 {
+ __u32 offset;
+ __u32 count;
+ __u32 buf[10];
+};
+
+/* SPI interface */
+
+#define UE_SPI_TXONLY 0
+#define UE_SPI_TXRX 1
+
+/* Defines for spi ctrl register (plain shifts: BIT() is kernel-only) */
+#define UE_SPI_CTRL_TXNEG (1 << 10)
+#define UE_SPI_CTRL_RXNEG (1 << 9)
+
+#define UE_SPI_PUSH_RISE 0
+#define UE_SPI_PUSH_FALL UE_SPI_CTRL_TXNEG
+#define UE_SPI_LATCH_RISE 0
+#define UE_SPI_LATCH_FALL UE_SPI_CTRL_RXNEG
+
+/* Argument for USRP_E_SPI: one SPI transaction on the given slave. */
+struct usrp_e_spi {
+ __u8 readback;
+ __u32 slave;
+ __u32 data;
+ __u32 length;
+ __u32 flags;
+};
+
+/* Argument for the I2C ioctls; 'data' is a flexible array of 'len' bytes. */
+struct usrp_e_i2c {
+ __u8 addr;
+ __u32 len;
+ __u8 data[];
+};
+
+/* Frame-ownership flag defines (plain shifts: BIT() is kernel-only) */
+#define RB_USER (1 << 0)
+#define RB_KERNEL (1 << 1)
+#define RB_OVERRUN (1 << 2)
+#define RB_DMA_ACTIVE (1 << 3)
+
+/* Per-frame bookkeeping shared between driver and userspace. */
+struct ring_buffer_info {
+ int flags;
+ int len;
+};
+
+/* Ring geometry reported by USRP_E_GET_RB_INFO (defined before its ioctl). */
+struct usrp_e_ring_buffer_size_t {
+ int num_pages_rx_flags;
+ int num_rx_frames;
+ int num_pages_tx_flags;
+ int num_tx_frames;
+};
+
+#define USRP_E_IOC_MAGIC 'u'
+#define USRP_E_WRITE_CTL16 _IOW(USRP_E_IOC_MAGIC, 0x20, struct usrp_e_ctl16)
+#define USRP_E_READ_CTL16 _IOWR(USRP_E_IOC_MAGIC, 0x21, struct usrp_e_ctl16)
+#define USRP_E_WRITE_CTL32 _IOW(USRP_E_IOC_MAGIC, 0x22, struct usrp_e_ctl32)
+#define USRP_E_READ_CTL32 _IOWR(USRP_E_IOC_MAGIC, 0x23, struct usrp_e_ctl32)
+#define USRP_E_SPI _IOWR(USRP_E_IOC_MAGIC, 0x24, struct usrp_e_spi)
+#define USRP_E_I2C_READ _IOWR(USRP_E_IOC_MAGIC, 0x25, struct usrp_e_i2c)
+#define USRP_E_I2C_WRITE _IOW(USRP_E_IOC_MAGIC, 0x26, struct usrp_e_i2c)
+#define USRP_E_GET_RB_INFO _IOR(USRP_E_IOC_MAGIC, 0x27, struct usrp_e_ring_buffer_size_t)
+#define USRP_E_GET_COMPAT_NUMBER _IO(USRP_E_IOC_MAGIC, 0x28)
+
+/* Bump when the driver/FPGA interface changes incompatibly. */
+#define USRP_E_COMPAT_NUMBER 1
+
+#endif

0 comments on commit c3396d4

Please sign in to comment.