Wednesday, December 14, 2016

The public iommu and iommu_domain API interface in Linux: what is the concept of "dm: direct mapping"?

  1. iommu_domain
    /* iommu fault flags */
    #define IOMMU_FAULT_READ    0x0
    #define IOMMU_FAULT_WRITE   0x1

    /*
     * Per-domain fault callback type. Arguments, in order: the faulting
     * domain, the faulting device, the fault address, the IOMMU_FAULT_*
     * flags above, and the opaque token the caller registered along with
     * the handler (stored as iommu_domain.handler_token).
     */
    typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                struct device *, unsigned long, int, void *);

    /*
     * Describes the range of addresses a domain is able to map.
     * When force_aperture is true, DMA outside the
     * [aperture_start, aperture_end] range is disallowed.
     */
    struct iommu_domain_geometry {
        dma_addr_t aperture_start; /* First address that can be mapped    */
        dma_addr_t aperture_end;   /* Last address that can be mapped     */
        bool force_aperture;       /* DMA only allowed in mappable range? */
    };

    /* Domain feature flags */
    #define __IOMMU_DOMAIN_PAGING   (1U << 0)  /* Support for iommu_map/unmap */
    #define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* Domain for use in DMA-API
                              implementation              */
    #define __IOMMU_DOMAIN_PT   (1U << 2)  /* Domain is identity mapped   */

    /*
     * These are the possible domain-types
     *
     *  IOMMU_DOMAIN_BLOCKED    - All DMA is blocked, can be used to isolate
     *                devices
     *  IOMMU_DOMAIN_IDENTITY   - DMA addresses are system physical addresses
     *  IOMMU_DOMAIN_UNMANAGED  - DMA mappings managed by IOMMU-API user, used
     *                for VMs
     *  IOMMU_DOMAIN_DMA    - Internally used for DMA-API implementations.
     *                This flag allows IOMMU drivers to implement
     *                certain optimizations for these domains
     */
    #define IOMMU_DOMAIN_BLOCKED    (0U)
    #define IOMMU_DOMAIN_IDENTITY   (__IOMMU_DOMAIN_PT)
    #define IOMMU_DOMAIN_UNMANAGED  (__IOMMU_DOMAIN_PAGING)
    #define IOMMU_DOMAIN_DMA    (__IOMMU_DOMAIN_PAGING |    \
                     __IOMMU_DOMAIN_DMA_API)

    struct iommu_domain {
        unsigned type;               /* one of the IOMMU_DOMAIN_* values above */
        const struct iommu_ops *ops; /* driver callbacks serving this domain */
        iommu_fault_handler_t handler;  /* optional per-domain fault handler */
        void *handler_token;            /* opaque token passed back to handler */
        struct iommu_domain_geometry geometry; /* mappable address range */
        void *iova_cookie;  /* presumably private state for the DMA-API
                               implementation (__IOMMU_DOMAIN_DMA_API) —
                               not shown here */
    };

                                                                                                    
     where IOMMU_DOMAIN_DMA has been integrated as follows:

    IOMMU_DOMAIN_DMA

    is defined as a preprocessor macro (in include/linux/iommu.h) and is referenced in 8 files in total.
  2. iommu operations:

/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

/* Attributes queried/changed via domain_get_attr()/domain_set_attr() */
enum iommu_attr {
    DOMAIN_ATTR_GEOMETRY,
    DOMAIN_ATTR_PAGING,
    DOMAIN_ATTR_WINDOWS,
    DOMAIN_ATTR_FSL_PAMU_STASH,
    DOMAIN_ATTR_FSL_PAMU_ENABLE,
    DOMAIN_ATTR_FSL_PAMUV1,
    DOMAIN_ATTR_NESTING,    /* two stages of translation */
    DOMAIN_ATTR_MAX,
};


/**
 * struct iommu_dm_region - descriptor for a direct mapped memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 *
 * Regions are handed out and reclaimed through the iommu_ops
 * get_dm_regions()/put_dm_regions() callbacks, linked together via @list.
 */
struct iommu_dm_region {
    struct list_head    list;
    phys_addr_t        start;
    size_t            length;
    int            prot;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
 *          to an iommu domain
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_dm_regions: Request list of direct mapping requirements for a device
 * @put_dm_regions: Free list of direct mapping requirements for a device
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
 * @domain_get_windows: Return the number of windows for a domain
 * @of_xlate: add OF master IDs to iommu grouping (only with CONFIG_OF_IOMMU)
 * @pgsize_bitmap: bitmap of supported page sizes
 * @priv: per-instance data private to the iommu driver
 */

struct iommu_ops {
    bool (*capable)(enum iommu_cap);

    /* Domain allocation and freeing by the iommu driver */
    struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
    void (*domain_free)(struct iommu_domain *);

    int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
    void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
    int (*map)(struct iommu_domain *domain, unsigned long iova,
           phys_addr_t paddr, size_t size, int prot);
    size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
             size_t size);
    size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
             struct scatterlist *sg, unsigned int nents, int prot);
    phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
    int (*add_device)(struct device *dev);
    void (*remove_device)(struct device *dev);
    struct iommu_group *(*device_group)(struct device *dev);
    int (*domain_get_attr)(struct iommu_domain *domain,
                   enum iommu_attr attr, void *data);
    int (*domain_set_attr)(struct iommu_domain *domain,
                   enum iommu_attr attr, void *data);

    /* Request/Free a list of direct mapping requirements for a device */
    void (*get_dm_regions)(struct device *dev, struct list_head *list);
    void (*put_dm_regions)(struct device *dev, struct list_head *list);

    /* Window handling functions */
    int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                    phys_addr_t paddr, u64 size, int prot);
    void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
    /* Set the number of windows per domain */
    int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
    /* Get the number of windows per domain */
    u32 (*domain_get_windows)(struct iommu_domain *domain);

#ifdef CONFIG_OF_IOMMU
    int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
#endif

    unsigned long pgsize_bitmap;
    void *priv;
};

Gen TEE Driver SHM suballocator APIs

from linux/tee_drv.h (in optee branch until Gen Tee Driver is in upstream):

  1. Jens has done a good job of encapsulating the "tee_shm" class.
  2. The public header is the linux/tee_drv.h, but the private interface is in drivers/tee/tee_private.h:
    /**
     * struct tee_shm - shared memory object
     * @teedev: device used to allocate the object
     * @ctx:    context using the object, if NULL the context is gone
     * @link:   link element to traverse list of shm objects in a ctx
     * @paddr:  physical address of the shared memory
     * @kaddr:  virtual address of the shared memory
     * @size:   size of shared memory
     * @dmabuf: dmabuf used for exporting to user space
     * @flags:  defined by TEE_SHM_* in tee_drv.h
     * @id:     unique id of a shared memory object on this device
     */

    struct tee_shm {
        struct tee_device *teedev;
        struct tee_context *ctx;
        struct list_head link;
        phys_addr_t paddr;
        void *kaddr;
        size_t size;
        struct dma_buf *dmabuf;
        u32 flags;
        int id;
    };
  3. Public API is defined as follows:
/**
 * struct tee_shm_pool_mem_info - holds information needed to create a shared
 * memory pool
 * @vaddr:      Virtual address of start of pool
 * @paddr:      Physical address of start of pool
 * @size:       Size in bytes of the pool
 *
 * @vaddr and @paddr describe the same start-of-pool location in the
 * kernel virtual and physical address spaces respectively.
 */

struct tee_shm_pool_mem_info {
        unsigned long vaddr;
        phys_addr_t paddr;
        size_t size;
};

/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @dev:         Device allocating the pool
 * @priv_info:   Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of pools must be page aligned.
 *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */

struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct device *dev,
                           struct tee_shm_pool_mem_info *priv_info,
                           struct tee_shm_pool_mem_info *dmabuf_info);


/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:       The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */

void tee_shm_pool_free(struct tee_shm_pool *pool);
/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
 * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
 * with a dma-buf handle, else driver private memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */

struct tee_shm *
      tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);

/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 *
 * Counterpart of tee_shm_alloc() above.
 */

void tee_shm_free(struct tee_shm *shm);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 *
 * Balances tee_shm_get_from_id(), which increases the reference count.
 */

void tee_shm_put(struct tee_shm *shm);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */

int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */

int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 *
 * See tee_shm_get_pa() for the physical-address counterpart.
 */

void *tee_shm_get_va(struct tee_shm *shm, size_t offs);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 *
 * See tee_shm_get_va() for the virtual-address counterpart.
 */

int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);

/**
 * tee_shm_get_id() - Get id of a shared memory object
 * @shm:        Shared memory handle
 * @returns id
 *
 * The returned id can later be used with tee_shm_get_from_id() to look
 * the object up again.
 */

int tee_shm_get_id(struct tee_shm *shm);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 *
 * On success, balance with a matching tee_shm_put().
 */

struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);

Bit Fields are Evil in Linux Kernel ... except linux/tcp.h has them!

but here is an example:

struct tcp_options_received {
/*  PAWS/RTTM data  */
    long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
    u32 ts_recent;  /* Time stamp to echo next      */
    u32 rcv_tsval;  /* Time stamp value                 */
    u32 rcv_tsecr;  /* Time stamp echo reply            */
    /* The bit-fields below pack exactly into the u16: 1+1+1+1+4+4+4 = 16. */
    u16     saw_tstamp : 1, /* Saw TIMESTAMP on last packet     */
        tstamp_ok : 1,  /* TIMESTAMP seen on SYN packet     */
        dsack : 1,  /* D-SACK is scheduled          */
        wscale_ok : 1,  /* Wscale seen on SYN packet        */
        sack_ok : 4,    /* SACK seen on SYN packet      */
        snd_wscale : 4, /* Window scaling received from sender  */
        rcv_wscale : 4; /* Window scaling to send to receiver   */
    u8  num_sacks;  /* Number of SACK blocks        */
    u16 user_mss;   /* mss requested by user in ioctl   */
    u16 mss_clamp;  /* Maximal mss, negotiated at connection setup */
};


Monday, July 14, 2014

virtualbox and ubuntu linaro toolchain to build xen-unstable smmu-v9

I finally got virtualbox configured the way I liked.

I had to log in to the VM to do this:
apt-get install virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11

After install, I rebooted the Ubuntu 14.04 guest and the screen resolution works fine.

To build xen-unstable in Julien's xenbits tree, I did the following:

0. pull the smmu-v9 branch like so:
git clone git://xenbits.xen.org/people/julieng/xen-unstable.git
cd xen-unstable
git checkout smmu-v9



1. install tool chain
thomas@thomas-VirtualBox:~/projects/xen-unstable$ sudo apt-get install gcc-aarch64-linux-gnu
Then I can compile:
make xen XEN_TARGET_ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu-

Wednesday, May 11, 2011

LTE overview link found while attending a class on LTE Call Processing

http://www.motorola.com/web/Business/Solutions/Industry%20Solutions/Service%20Providers/Wireless%20Operators/LTE/_Document/Static%20Files/6834_MotDoc_New.pdf

MME: Mobility Management Entity the control plane
NAS: Non-Access Stratum

What is GFx

GFX stands for Graphic Effect(s)

Thursday, May 5, 2011