track authenticity, integrity, provenance and digital edits of images, audio and video from
capture to sharing to ongoing use.
See: https://lab.witness.org/ticks-or-it-didnt-happen/
- Right to privacy
- Freedom from surveillance
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
/**
 * ion_client_create() - allocate a client and return it
 * @dev: the global ion device
 * @name: used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name);

/**
 * ion_client_destroy() - frees a client and all its handles
 * @client: the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client: the client
 * @len: size of the allocation
 * @align: requested allocation alignment, lots of hardware blocks
 *         have alignment requirements of some kind
 * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
 *                heaps will be tried in order from highest to lowest id
 * @flags: heap flags, the low 16 bits are consumed by ion, the
 *         high 16 bits are passed on to the respective heap and
 *         can be heap custom
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags);

/**
 * ion_free - free a handle
 * @client: the client
 * @handle: the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_map_kernel - create mapping for the given handle
 * @client: the client
 * @handle: handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_share_dma_buf() - share buffer as dma-buf
 * @client: the client
 * @handle: the handle
 */
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle);

/**
 * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
 * @client: the client
 * @handle: the handle
 */
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_import_dma_buf() - get ion_handle from dma-buf
 * @client: the client
 * @dmabuf: the dma-buf
 *
 * Get the ion_buffer associated with the dma-buf and return the ion_handle.
 * If no ion_handle exists for this buffer, return newly created ion_handle.
 * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf);

/**
 * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
 * @client: the client
 * @fd: the dma-buf fd
 *
 * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
 * import that fd and return a handle representing it. If a dma-buf from
 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);

#endif /* _LINUX_ION_H */
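To show how these kernel-internal interfaces fit together, the following is a minimal sketch of an in-kernel ION user: it creates a client, allocates a buffer, maps it for CPU access, exports it as a dma-buf fd and cleans up. The function name, the ion_device pointer, the ION_HEAP_SYSTEM_MASK heap mask and the header path are illustrative assumptions, not taken from the listing above.

/*
 * Minimal sketch of an in-kernel ION user; error paths are abbreviated
 * and the heap mask / ion_device are assumed to be available.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include "ion.h"	/* path depends on the kernel tree */

static int ion_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int fd, ret = 0;

	client = ion_client_create(idev, "ion-example");	/* name is for debugging */
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* 1 MiB, page aligned, from the system heap, no special flags */
	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, ION_HEAP_SYSTEM_MASK, 0);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);		/* CPU access from the kernel */
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, SZ_1M);
		ion_unmap_kernel(client, handle);
	}

	fd = ion_share_dma_buf_fd(client, handle);	/* export as dma-buf fd */
	if (fd < 0)
		ret = fd;

	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}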
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
#define IOMMU_FAULT_WRITE 0x1
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
dma_addr_t aperture_end; /* Last address that can be mapped */
bool force_aperture; /* DMA only allowed in mappable range? */
};
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
/*
* These are the possible domain-types
*
* IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
* devices
* IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
* IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
* for VMs
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
void *iova_cookie;
};
IOMMU_DOMAIN_DMA has been integrated into the kernel as follows: it is defined as a preprocessor macro in include/linux/iommu.h (line 76) and referenced in 8 files in total, among them:
- drivers/iommu/mtk_iommu.c (line 256)
- drivers/iommu/arm-smmu.c
- drivers/iommu/arm-smmu-v3.c
- drivers/iommu/iommu.c
- drivers/iommu/exynos-iommu.c
- drivers/iommu/rockchip-iommu.c
- drivers/iommu/amd_iommu.c
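As a rough illustration of how the IOMMU API shown above is used from a driver, the sketch below allocates an unmanaged domain, installs a fault handler with the iommu_fault_handler_t signature from the listing, attaches a device and maps a single page. The device, the IOVA and the physical address are placeholders, and the function names are hypothetical.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mm.h>

/* Fault handler matching the iommu_fault_handler_t typedef shown above */
static int example_iommu_fault(struct iommu_domain *dom, struct device *dev,
			       unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu %s fault at iova 0x%lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
	return -ENOSYS;	/* not handled here, let the core report it */
}

static int example_iommu_setup(struct device *dev, phys_addr_t pa)
{
	struct iommu_domain *dom;
	int ret;

	/* iommu_domain_alloc() returns an IOMMU_DOMAIN_UNMANAGED domain:
	 * the caller manages the mappings itself */
	dom = iommu_domain_alloc(dev->bus);
	if (!dom)
		return -ENOMEM;

	iommu_set_fault_handler(dom, example_iommu_fault, NULL);

	ret = iommu_attach_device(dom, dev);
	if (ret) {
		iommu_domain_free(dom);
		return ret;
	}

	/* Map one page read/write at a placeholder IOVA inside the aperture */
	ret = iommu_map(dom, 0x10000000, pa, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		iommu_detach_device(dom, dev);
		iommu_domain_free(dom);
	}
	return ret;
}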
/**
* struct tee_shm - shared memory object
* @teedev: device used to allocate the object
* @ctx: context using the object, if NULL the context is gone
* @link: link element to traverse list of shm objects in a ctx
* @paddr: physical address of the shared memory
* @kaddr: virtual address of the shared memory
* @size: size of shared memory
* @dmabuf: dmabuf used for exporting to user space
* @flags: defined by TEE_SHM_* in tee_drv.h
* @id: unique id of a shared memory object on this device
*/
struct tee_shm {
struct tee_device *teedev;
struct tee_context *ctx;
struct list_head link;
phys_addr_t paddr;
void *kaddr;
size_t size;
struct dma_buf *dmabuf;
u32 flags;
int id;
};
/**
* struct tee_shm_pool_mem_info - holds information needed to create a shared
* memory pool
* @vaddr: Virtual address of start of pool
* @paddr: Physical address of start of pool
* @size: Size in bytes of the pool
*/
struct tee_shm_pool_mem_info {
unsigned long vaddr;
phys_addr_t paddr;
size_t size;
};
/**
* tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
* memory range
* @dev: Device allocating the pool
* @priv_info: Information for driver private shared memory pool
* @dmabuf_info: Information for dma-buf shared memory pool
*
* Start and end of the pools must be page aligned.
*
* Allocations with the flag TEE_SHM_DMA_BUF set will use the range supplied
* in @dmabuf_info, others will use the range provided by @priv_info.
*
* @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
*/
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct device *dev,
struct tee_shm_pool_mem_info *priv_info,
struct tee_shm_pool_mem_info *dmabuf_info);
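A minimal sketch of how a driver might call tee_shm_pool_alloc_res_mem(), assuming the reserved physical range has already been mapped into the kernel (for example with memremap()) and splitting it evenly between the driver-private and dma-buf parts. The split, the addresses and the helper name are placeholders.

#include <linux/tee_drv.h>

static struct tee_shm_pool *example_pool(struct device *dev,
					 void *va, phys_addr_t pa, size_t sz)
{
	struct tee_shm_pool_mem_info priv_info = {
		.vaddr = (unsigned long)va,
		.paddr = pa,
		.size  = sz / 2,		/* first half: driver private */
	};
	struct tee_shm_pool_mem_info dmabuf_info = {
		.vaddr = (unsigned long)va + sz / 2,
		.paddr = pa + sz / 2,
		.size  = sz / 2,		/* second half: TEE_SHM_DMA_BUF */
	};

	/* Both sub-ranges must start and end page aligned */
	return tee_shm_pool_alloc_res_mem(dev, &priv_info, &dmabuf_info);
}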
/**
* tee_shm_pool_free() - Free a shared memory pool
* @pool: The shared memory pool to free
*
* There must be no remaining shared memory allocated from this pool when
* this function is called.
*/
void tee_shm_pool_free(struct tee_shm_pool *pool);
/**
* tee_shm_alloc() - Allocate shared memory
* @ctx: Context that allocates the shared memory
* @size: Requested size of shared memory
* @flags: Flags setting properties for the requested shared memory.
*
* Memory allocated as global shared memory is automatically freed when the
* TEE file pointer is closed. The @flags field uses the bits defined by
* TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
* TEE_SHM_DMA_BUF is set, global shared memory will be allocated and
* associated with a dma-buf handle, else driver private memory is used.
*
* @returns a pointer to 'struct tee_shm'
*/
struct tee_shm *
tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
void tee_shm_free(struct tee_shm *shm);
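For illustration, a short sketch of allocating and freeing shared memory with the flags described above; the tee_context is assumed to come from the TEE subsystem's client interface and the function name is hypothetical.

#include <linux/err.h>
#include <linux/tee_drv.h>

static int shm_alloc_example(struct tee_context *ctx)
{
	struct tee_shm *shm;

	/* TEE_SHM_MAPPED must always be set; TEE_SHM_DMA_BUF additionally
	 * makes the buffer exportable to user space as a dma-buf */
	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass the buffer to the TEE or to user space ... */

	tee_shm_free(shm);
	return 0;
}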
/**
* tee_shm_put() - Decrease reference count on a shared memory handle
* @shm: Shared memory handle
*/
void tee_shm_put(struct tee_shm *shm);
/**
* tee_shm_va2pa() - Get physical address of a virtual address
* @shm: Shared memory handle
* @va: Virtual address to translate
* @pa: Returned physical address
* @returns 0 on success and < 0 on failure
*/
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
/**
* tee_shm_pa2va() - Get virtual address of a physical address
* @shm: Shared memory handle
* @pa: Physical address to translate
* @va: Returned virtual address
* @returns 0 on success and < 0 on failure
*/
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
* @returns virtual address of the shared memory + offs if offs is within
* the bounds of this shared memory, else an ERR_PTR
*/
void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
/**
* tee_shm_get_pa() - Get physical address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
* @pa: Physical address to return
* @returns 0 if offs is within the bounds of this shared memory, else an
* error code.
*/
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
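The address helpers can be combined as in the sketch below, which resolves an offset inside a shared buffer to both a kernel virtual and a physical address; shm is assumed to be a valid handle returned by tee_shm_alloc() and the function name is hypothetical.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/tee_drv.h>

static int shm_addr_example(struct tee_shm *shm, size_t offs)
{
	phys_addr_t pa;
	void *va;
	int ret;

	va = tee_shm_get_va(shm, offs);		/* ERR_PTR if offs is out of bounds */
	if (IS_ERR(va))
		return PTR_ERR(va);

	ret = tee_shm_get_pa(shm, offs, &pa);	/* 0 on success */
	if (ret)
		return ret;

	pr_info("shm offset %zu: va=%p pa=%pa\n", offs, va, &pa);
	return 0;
}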
/**
* tee_shm_get_id() - Get id of a shared memory object
* @shm: Shared memory handle
* @returns id
*/
int tee_shm_get_id(struct tee_shm *shm);
/**
* tee_shm_get_from_id() - Find shared memory object and increase reference count
* @ctx: Context owning the shared memory
* @id: Id of shared memory object
* @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
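Finally, a sketch of the id-based lookup: tee_shm_get_from_id() takes a reference that must be dropped again with tee_shm_put(). The id is assumed to arrive from user space or from the TEE, and the function name is hypothetical.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/tee_drv.h>

static void shm_by_id_example(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;

	shm = tee_shm_get_from_id(ctx, id);	/* takes a reference */
	if (IS_ERR(shm))
		return;

	pr_info("found shm id %d, size %zu\n", tee_shm_get_id(shm), shm->size);

	tee_shm_put(shm);			/* drop the reference */
}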
struct tcp_options_received {
/* PAWS/RTTM data */
long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
u32 ts_recent; /* Time stamp to echo next */
u32 rcv_tsval; /* Time stamp value */
u32 rcv_tsecr; /* Time stamp echo reply */
u16 saw_tstamp : 1, /* Saw TIMESTAMP on last packet */
tstamp_ok : 1, /* TIMESTAMP seen on SYN packet */
dsack : 1, /* D-SACK is scheduled */
wscale_ok : 1, /* Wscale seen on SYN packet */
sack_ok : 4, /* SACK seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
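To show how the parsed options are consumed, the sketch below scales a raw advertised window with snd_wscale and picks the timestamp value to echo. It is a simplified illustration of how these fields would be read, not the actual kernel receive path, and the function names are hypothetical.

#include <linux/tcp.h>

static u32 example_rcv_window(const struct tcp_options_received *rx_opt,
			      u16 raw_window)
{
	/* Window scaling only applies if the peer offered it on the SYN */
	if (rx_opt->wscale_ok)
		return (u32)raw_window << rx_opt->snd_wscale;
	return raw_window;
}

static u32 example_tsecr(const struct tcp_options_received *rx_opt)
{
	/* Echo the peer's last timestamp value if one was seen */
	return rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
}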
apt-get install virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11
thomas@thomas-VirtualBox:~/projects/xen-unstable$ sudo apt-get install gcc-aarch64-linux-gnu

Then I can compile:

make xen XEN_TARGET_ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu-