/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}
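
/*
 * Illustrative sketch (assumed names, not part of this header): a
 * split-ring producer fills in its descriptors and the available ring,
 * then issues virtio_wmb() before publishing the new available index,
 * so the device cannot observe the index update before the data it
 * guards. The matching consumer uses virtio_rmb() after reading
 * used->idx and before reading the used ring entries.
 *
 *	vring.desc[head] = ...;
 *	vring.avail->ring[avail_idx % num] = cpu_to_virtio16(vdev, head);
 *	virtio_wmb(weak_barriers);
 *	vring.avail->idx = cpu_to_virtio16(vdev, avail_idx + 1);
 */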

#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)
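
/*
 * Illustrative sketch (assumed surrounding code): polling-style code
 * stores the event index it wants to be notified at and then re-checks
 * the used index, relying on the full barrier implied by
 * virtio_store_mb() to order the store against the following load:
 *
 *	virtio_store_mb(weak_barriers, &vring_used_event(&vring),
 *			cpu_to_virtio16(vdev, last_used_idx));
 *	more = vring.used->idx != cpu_to_virtio16(vdev, last_used_idx);
 */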

struct virtio_device;
struct virtqueue;
struct device;

/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
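
/*
 * Illustrative sketch (assumed names): a transport's queue-setup path
 * might create the ring and then report the possibly reduced size back
 * to the device; my_notify() and my_callback() stand in for the
 * transport's own helpers:
 *
 *	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev,
 *				    true, true, ctx,
 *				    my_notify, my_callback, name);
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 *	num = virtqueue_get_vring_size(vq);
 */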

/*
 * Creates a virtqueue and allocates the descriptor ring, using the
 * given per-virtqueue DMA device for the ring's allocation and mappings.
 */
struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
					     unsigned int num,
					     unsigned int vring_align,
					     struct virtio_device *vdev,
					     bool weak_barriers,
					     bool may_reduce_num,
					     bool ctx,
					     bool (*notify)(struct virtqueue *vq),
					     void (*callback)(struct virtqueue *vq),
					     const char *name,
					     struct device *dma_dev);
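
/*
 * Illustrative sketch: same call shape as vring_create_virtqueue(), but
 * the ring is allocated and mapped through an explicitly chosen DMA
 * device rather than the virtio device's parent; dma_dev below is an
 * assumed caller-provided struct device:
 *
 *	vq = vring_create_virtqueue_dma(index, num, PAGE_SIZE, vdev,
 *					true, true, ctx,
 *					my_notify, my_callback, name,
 *					dma_dev);
 */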

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
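
/*
 * Illustrative sketch (assumed names): a transport that manages its own
 * ring memory can size it with vring_size() from the UAPI header,
 * allocate it, and hand it in as "pages":
 *
 *	queue = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *				  GFP_KERNEL | __GFP_ZERO);
 *	if (!queue)
 *		return ERR_PTR(-ENOMEM);
 *	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev, true, ctx,
 *				 queue, my_notify, my_callback, name);
 */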

/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

/*
 * Interrupt handler for a virtqueue; transports call or register this
 * when the device signals the queue.
 */
irqreturn_t vring_interrupt(int irq, void *_vq);

/*
 * Returns the notification data for a virtqueue, for transports that
 * have negotiated VIRTIO_F_NOTIFICATION_DATA.
 */
u32 vring_notification_data(struct virtqueue *_vq);
#endif /* _LINUX_VIRTIO_RING_H */