/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}

#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)

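/*
 * Illustrative sketch, not part of the original header: a typical
 * producer-side use of the helpers above. The function name and the bare
 * u16 index pointers are hypothetical (a real driver goes through the
 * virtqueue API); the point is the ordering. Descriptor and buffer writes
 * must become visible before the index that publishes them, and
 * virtio_store_mb() provides the store-then-full-barrier pattern used for
 * event-index style updates that must be ordered against a later read.
 */
static inline void __virtio_example_publish(u16 *publish_idx, u16 next_idx,
					    u16 *event_idx, u16 event,
					    bool weak_barriers)
{
	/* Order prior descriptor/buffer writes before exposing the new index. */
	virtio_wmb(weak_barriers);
	WRITE_ONCE(*publish_idx, next_idx);

	/* Store the event index, then a full barrier before any re-check. */
	virtio_store_mb(weak_barriers, event_idx, event);
}
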
struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, this may allocate a smaller ring than requested.
 * The caller should query virtqueue_get_vring_size() to learn the actual
 * size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);

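/*
 * Illustrative sketch, not part of the original header: how a transport
 * might create a ring that is allowed to shrink. All names here are
 * hypothetical, the alignment and queue size are arbitrary example values,
 * and virtqueue_get_vring_size() is declared in <linux/virtio.h>, which a
 * real transport driver already includes.
 */
static inline bool example_notify(struct virtqueue *vq)
{
	/* Hypothetical: kick the device, e.g. by writing a doorbell. */
	return true;
}

static inline void example_callback(struct virtqueue *vq)
{
	/* Hypothetical: invoked via vring_interrupt() when buffers are used. */
}

static inline unsigned int example_create_vq(struct virtio_device *vdev,
					     struct virtqueue **vq)
{
	/*
	 * Ask for 256 entries but let the core shrink the ring if the
	 * allocation fails (may_reduce_num = true).
	 */
	*vq = vring_create_virtqueue(0, 256, 64, vdev,
				     true,	/* weak_barriers */
				     true,	/* may_reduce_num */
				     false,	/* ctx */
				     example_notify, example_callback,
				     "example-vq");
	if (!*vq)
		return 0;

	/* The ring may be smaller than requested; report the actual size. */
	return virtqueue_get_vring_size(*vq);
}
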
/*
 * Creates a virtqueue and allocates the descriptor ring, using the given
 * per-virtqueue DMA device for the ring's allocations and mappings.
 */
struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
					     unsigned int num,
					     unsigned int vring_align,
					     struct virtio_device *vdev,
					     bool weak_barriers,
					     bool may_reduce_num,
					     bool ctx,
					     bool (*notify)(struct virtqueue *vq),
					     void (*callback)(struct virtqueue *vq),
					     const char *name,
					     struct device *dma_dev);

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);

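/*
 * Illustrative sketch, not part of the original header: wrapping a
 * caller-allocated ring. 'ring_mem' must be zeroed, suitably aligned and
 * at least vring_size(num, align) bytes (vring_size() comes from
 * <uapi/linux/virtio_ring.h>, included above). The notify/callback stubs
 * are the hypothetical ones from the sketch further up; the ring memory
 * stays owned by the caller and is not freed by vring_del_virtqueue().
 */
static inline struct virtqueue *example_wrap_ring(struct virtio_device *vdev,
						  void *ring_mem,
						  unsigned int num)
{
	return vring_new_virtqueue(0, num, 64, vdev,
				   true,	/* weak_barriers */
				   false,	/* ctx */
				   ring_mem,
				   example_notify, example_callback,
				   "example-legacy-vq");
}
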
/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* _LINUX_VIRTIO_RING_H */