LCOV - code coverage report
Current view: top level - arch/um/drivers - virtio_uml.c
Test: coverage.info
Date: 2023-08-24 13:40:31
                          Hit      Total    Coverage
              Lines:        9        538       1.7 %
              Functions:    3         55       5.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Virtio vhost-user driver
       4             :  *
       5             :  * Copyright(c) 2019 Intel Corporation
       6             :  *
       7             :  * This driver allows virtio devices to be used over a vhost-user socket.
       8             :  *
       9             :  * Guest devices can be instantiated by kernel module or command line
      10             :  * parameters. One device will be created for each parameter. Syntax:
      11             :  *
      12             :  *              virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
      13             :  * where:
      14             :  *              <socket>  := vhost-user socket path to connect
      15             :  *              <virtio_id>       := virtio device id (as in virtio_ids.h)
      16             :  *              <platform_id>     := (optional) platform device id
      17             :  *
      18             :  * example:
      19             :  *              virtio_uml.device=/var/uml.socket:1
      20             :  *
      21             :  * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
      22             :  */
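For illustration, the optional <platform_id> suffix described in the comment above would be
used like this (the socket path and both ids are made-up values; device id 1 is the virtio
net id in virtio_ids.h):

               virtio_uml.device=/var/uml.socket:1:2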
      23             : #include <linux/module.h>
      24             : #include <linux/of.h>
      25             : #include <linux/platform_device.h>
      26             : #include <linux/slab.h>
      27             : #include <linux/virtio.h>
      28             : #include <linux/virtio_config.h>
      29             : #include <linux/virtio_ring.h>
      30             : #include <linux/time-internal.h>
      31             : #include <linux/virtio-uml.h>
      32             : #include <shared/as-layout.h>
      33             : #include <irq_kern.h>
      34             : #include <init.h>
      35             : #include <os.h>
      36             : #include "vhost_user.h"
      37             : 
      38             : #define MAX_SUPPORTED_QUEUE_SIZE        256
      39             : 
      40             : #define to_virtio_uml_device(_vdev) \
      41             :         container_of(_vdev, struct virtio_uml_device, vdev)
      42             : 
      43             : struct virtio_uml_platform_data {
      44             :         u32 virtio_device_id;
      45             :         const char *socket_path;
      46             :         struct work_struct conn_broken_wk;
      47             :         struct platform_device *pdev;
      48             : };
      49             : 
      50             : struct virtio_uml_device {
      51             :         struct virtio_device vdev;
      52             :         struct platform_device *pdev;
      53             :         struct virtio_uml_platform_data *pdata;
      54             : 
      55             :         spinlock_t sock_lock;
      56             :         int sock, req_fd, irq;
      57             :         u64 features;
      58             :         u64 protocol_features;
      59             :         u8 status;
      60             :         u8 registered:1;
      61             :         u8 suspended:1;
      62             :         u8 no_vq_suspend:1;
      63             : 
      64             :         u8 config_changed_irq:1;
      65             :         uint64_t vq_irq_vq_map;
      66             :         int recv_rc;
      67             : };
      68             : 
      69             : struct virtio_uml_vq_info {
      70             :         int kick_fd, call_fd;
      71             :         char name[32];
      72             :         bool suspended;
      73             : };
      74             : 
      75             : extern unsigned long long physmem_size, highmem;
      76             : 
      77             : #define vu_err(vu_dev, ...)     dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
      78             : 
      79             : /* Vhost-user protocol */
      80             : 
      81           0 : static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
      82             :                             const int *fds, unsigned int fds_num)
      83             : {
      84             :         int rc;
      85             : 
      86             :         do {
      87           0 :                 rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
      88           0 :                 if (rc > 0) {
      89           0 :                         buf += rc;
      90           0 :                         len -= rc;
      91           0 :                         fds = NULL;
      92           0 :                         fds_num = 0;
      93             :                 }
      94           0 :         } while (len && (rc >= 0 || rc == -EINTR));
      95             : 
      96           0 :         if (rc < 0)
      97             :                 return rc;
      98           0 :         return 0;
      99             : }
     100             : 
     101           0 : static int full_read(int fd, void *buf, int len, bool abortable)
     102             : {
     103             :         int rc;
     104             : 
     105           0 :         if (!len)
     106             :                 return 0;
     107             : 
     108             :         do {
     109           0 :                 rc = os_read_file(fd, buf, len);
     110           0 :                 if (rc > 0) {
     111           0 :                         buf += rc;
     112           0 :                         len -= rc;
     113             :                 }
     114           0 :         } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
     115             : 
     116           0 :         if (rc < 0)
     117             :                 return rc;
     118           0 :         if (rc == 0)
     119             :                 return -ECONNRESET;
     120           0 :         return 0;
     121             : }
     122             : 
     123             : static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
     124             : {
     125           0 :         return full_read(fd, msg, sizeof(msg->header), true);
     126             : }
     127             : 
     128           0 : static int vhost_user_recv(struct virtio_uml_device *vu_dev,
     129             :                            int fd, struct vhost_user_msg *msg,
     130             :                            size_t max_payload_size, bool wait)
     131             : {
     132             :         size_t size;
     133             :         int rc;
     134             : 
     135             :         /*
     136             :          * In virtio time-travel mode, we're handling all the vhost-user
     137             :          * FDs by polling them whenever appropriate. However, we may get
     138             :          * into a situation where we're sending out an interrupt message
     139             :          * to a device (e.g. a net device) and need to handle a simulation
     140             :          * time message while doing so, e.g. one that tells us to update
     141             :          * our idea of how long we can run without scheduling.
     142             :          *
     143             :          * Thus, we need to not just read() from the given fd, but need
     144             :          * to also handle messages for the simulation time - this function
     145             :          * does that for us while waiting for the given fd to be readable.
     146             :          */
     147             :         if (wait)
     148             :                 time_travel_wait_readable(fd);
     149             : 
     150           0 :         rc = vhost_user_recv_header(fd, msg);
     151             : 
     152           0 :         if (rc)
     153             :                 return rc;
     154           0 :         size = msg->header.size;
     155           0 :         if (size > max_payload_size)
     156             :                 return -EPROTO;
     157           0 :         return full_read(fd, &msg->payload, size, false);
     158             : }
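The receive path above is a two-stage read that mirrors the vhost-user wire format: a small
fixed-size header announcing how many payload bytes follow, then the payload itself, bounded
by the caller's max_payload_size. A minimal user-space sketch of the same framing, assuming
the standard three-u32 vhost-user header (request, flags, size) and ignoring the short-read
and -EINTR handling that full_read() provides; the names here are illustrative, the driver's
real definitions live in vhost_user.h:

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Illustrative header layout: request code, flags, payload size. */
    struct vu_header {
            uint32_t request;
            uint32_t flags;
            uint32_t size;          /* number of payload bytes that follow */
    } __attribute__((packed));

    /* Read one message: fixed-size header first, then exactly hdr->size payload bytes. */
    static int read_vu_msg(int fd, struct vu_header *hdr,
                           void *payload, size_t max_payload)
    {
            ssize_t rc = read(fd, hdr, sizeof(*hdr));

            if (rc != (ssize_t)sizeof(*hdr))
                    return rc < 0 ? -errno : -ECONNRESET;
            if (hdr->size > max_payload)
                    return -EPROTO;         /* same bound vhost_user_recv() enforces */
            rc = read(fd, payload, hdr->size);
            return rc == (ssize_t)hdr->size ? 0 : -EPROTO;
    }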
     159             : 
     160           0 : static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
     161             :                                    int rc)
     162             : {
     163           0 :         struct virtio_uml_platform_data *pdata = vu_dev->pdata;
     164             : 
     165           0 :         if (rc != -ECONNRESET)
     166             :                 return;
     167             : 
     168           0 :         if (!vu_dev->registered)
     169             :                 return;
     170             : 
     171           0 :         vu_dev->registered = 0;
     172             : 
     173           0 :         schedule_work(&pdata->conn_broken_wk);
     174             : }
     175             : 
     176           0 : static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
     177             :                                 struct vhost_user_msg *msg,
     178             :                                 size_t max_payload_size)
     179             : {
     180           0 :         int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
     181             :                                  max_payload_size, true);
     182             : 
     183           0 :         if (rc) {
     184           0 :                 vhost_user_check_reset(vu_dev, rc);
     185           0 :                 return rc;
     186             :         }
     187             : 
     188           0 :         if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
     189             :                 return -EPROTO;
     190             : 
     191           0 :         return 0;
     192             : }
     193             : 
     194           0 : static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
     195             :                                u64 *value)
     196             : {
     197             :         struct vhost_user_msg msg;
     198           0 :         int rc = vhost_user_recv_resp(vu_dev, &msg,
     199             :                                       sizeof(msg.payload.integer));
     200             : 
     201           0 :         if (rc)
     202             :                 return rc;
     203           0 :         if (msg.header.size != sizeof(msg.payload.integer))
     204             :                 return -EPROTO;
     205           0 :         *value = msg.payload.integer;
     206           0 :         return 0;
     207             : }
     208             : 
     209           0 : static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
     210             :                                struct vhost_user_msg *msg,
     211             :                                size_t max_payload_size)
     212             : {
     213           0 :         int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
     214             :                                  max_payload_size, false);
     215             : 
     216           0 :         if (rc)
     217             :                 return rc;
     218             : 
     219           0 :         if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
     220             :                         VHOST_USER_VERSION)
     221             :                 return -EPROTO;
     222             : 
     223           0 :         return 0;
     224             : }
     225             : 
     226           0 : static int vhost_user_send(struct virtio_uml_device *vu_dev,
     227             :                            bool need_response, struct vhost_user_msg *msg,
     228             :                            int *fds, size_t num_fds)
     229             : {
     230           0 :         size_t size = sizeof(msg->header) + msg->header.size;
     231             :         unsigned long flags;
     232             :         bool request_ack;
     233             :         int rc;
     234             : 
     235           0 :         msg->header.flags |= VHOST_USER_VERSION;
     236             : 
     237             :         /*
     238             :          * The need_response flag indicates that we already need a response,
     239             :          * e.g. to read the features. In these cases, don't request an ACK as
     240             :          * it is meaningless. Also request an ACK only if supported.
     241             :          */
     242           0 :         request_ack = !need_response;
     243           0 :         if (!(vu_dev->protocol_features &
     244             :                         BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
     245           0 :                 request_ack = false;
     246             : 
     247           0 :         if (request_ack)
     248           0 :                 msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
     249             : 
     250           0 :         spin_lock_irqsave(&vu_dev->sock_lock, flags);
     251           0 :         rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
     252           0 :         if (rc < 0)
     253             :                 goto out;
     254             : 
     255           0 :         if (request_ack) {
     256             :                 uint64_t status;
     257             : 
     258           0 :                 rc = vhost_user_recv_u64(vu_dev, &status);
     259           0 :                 if (rc)
     260             :                         goto out;
     261             : 
     262           0 :                 if (status) {
     263           0 :                         vu_err(vu_dev, "slave reports error: %llu\n", status);
     264           0 :                         rc = -EIO;
     265           0 :                         goto out;
     266             :                 }
     267             :         }
     268             : 
     269             : out:
     270           0 :         spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
     271           0 :         return rc;
     272             : }
     273             : 
     274             : static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
     275             :                                       bool need_response, u32 request)
     276             : {
     277           0 :         struct vhost_user_msg msg = {
     278             :                 .header.request = request,
     279             :         };
     280             : 
     281           0 :         return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
     282             : }
     283             : 
     284             : static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
     285             :                                          u32 request, int fd)
     286             : {
     287           0 :         struct vhost_user_msg msg = {
     288             :                 .header.request = request,
     289             :         };
     290             : 
     291           0 :         return vhost_user_send(vu_dev, false, &msg, &fd, 1);
     292             : }
     293             : 
     294           0 : static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
     295             :                                u32 request, u64 value)
     296             : {
     297           0 :         struct vhost_user_msg msg = {
     298             :                 .header.request = request,
     299             :                 .header.size = sizeof(msg.payload.integer),
     300             :                 .payload.integer = value,
     301             :         };
     302             : 
     303           0 :         return vhost_user_send(vu_dev, false, &msg, NULL, 0);
     304             : }
     305             : 
     306           0 : static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
     307             : {
     308           0 :         return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
     309             : }
     310             : 
     311           0 : static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
     312             :                                    u64 *features)
     313             : {
     314           0 :         int rc = vhost_user_send_no_payload(vu_dev, true,
     315             :                                             VHOST_USER_GET_FEATURES);
     316             : 
     317           0 :         if (rc)
     318             :                 return rc;
     319           0 :         return vhost_user_recv_u64(vu_dev, features);
     320             : }
     321             : 
     322             : static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
     323             :                                    u64 features)
     324             : {
     325           0 :         return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
     326             : }
     327             : 
     328           0 : static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
     329             :                                             u64 *protocol_features)
     330             : {
     331           0 :         int rc = vhost_user_send_no_payload(vu_dev, true,
     332             :                         VHOST_USER_GET_PROTOCOL_FEATURES);
     333             : 
     334           0 :         if (rc)
     335             :                 return rc;
     336           0 :         return vhost_user_recv_u64(vu_dev, protocol_features);
     337             : }
     338             : 
     339             : static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
     340             :                                             u64 protocol_features)
     341             : {
     342           0 :         return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
     343             :                                    protocol_features);
     344             : }
     345             : 
     346           0 : static void vhost_user_reply(struct virtio_uml_device *vu_dev,
     347             :                              struct vhost_user_msg *msg, int response)
     348             : {
     349           0 :         struct vhost_user_msg reply = {
     350             :                 .payload.integer = response,
     351             :         };
     352           0 :         size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
     353             :         int rc;
     354             : 
     355           0 :         reply.header = msg->header;
     356           0 :         reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
     357           0 :         reply.header.flags |= VHOST_USER_FLAG_REPLY;
     358           0 :         reply.header.size = sizeof(reply.payload.integer);
     359             : 
     360           0 :         rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
     361             : 
     362           0 :         if (rc)
     363           0 :                 vu_err(vu_dev,
     364             :                        "sending reply to slave request failed: %d (size %zu)\n",
     365             :                        rc, size);
     366           0 : }
     367             : 
     368           0 : static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
     369             :                                        struct time_travel_event *ev)
     370             : {
     371             :         struct virtqueue *vq;
     372           0 :         int response = 1;
     373             :         struct {
     374             :                 struct vhost_user_msg msg;
     375             :                 u8 extra_payload[512];
     376             :         } msg;
     377             :         int rc;
     378           0 :         irqreturn_t irq_rc = IRQ_NONE;
     379             : 
     380             :         while (1) {
     381           0 :                 rc = vhost_user_recv_req(vu_dev, &msg.msg,
     382             :                                          sizeof(msg.msg.payload) +
     383             :                                          sizeof(msg.extra_payload));
     384           0 :                 if (rc)
     385             :                         break;
     386             : 
     387           0 :                 switch (msg.msg.header.request) {
     388             :                 case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
     389           0 :                         vu_dev->config_changed_irq = true;
     390           0 :                         response = 0;
     391           0 :                         break;
     392             :                 case VHOST_USER_SLAVE_VRING_CALL:
     393           0 :                         virtio_device_for_each_vq((&vu_dev->vdev), vq) {
     394           0 :                                 if (vq->index == msg.msg.payload.vring_state.index) {
     395           0 :                                         response = 0;
     396           0 :                                         vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
     397           0 :                                         break;
     398             :                                 }
     399             :                         }
     400             :                         break;
     401             :                 case VHOST_USER_SLAVE_IOTLB_MSG:
     402             :                         /* not supported - VIRTIO_F_ACCESS_PLATFORM */
     403             :                 case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
     404             :                         /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
     405             :                 default:
     406           0 :                         vu_err(vu_dev, "unexpected slave request %d\n",
     407             :                                msg.msg.header.request);
     408             :                 }
     409             : 
     410           0 :                 if (ev && !vu_dev->suspended)
     411           0 :                         time_travel_add_irq_event(ev);
     412             : 
     413           0 :                 if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
     414           0 :                         vhost_user_reply(vu_dev, &msg.msg, response);
     415             :                 irq_rc = IRQ_HANDLED;
     416             :         }
     417             :         /* mask EAGAIN as we try non-blocking read until socket is empty */
     418           0 :         vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
     419           0 :         return irq_rc;
     420             : }
     421             : 
     422           0 : static irqreturn_t vu_req_interrupt(int irq, void *data)
     423             : {
     424           0 :         struct virtio_uml_device *vu_dev = data;
     425           0 :         irqreturn_t ret = IRQ_HANDLED;
     426             : 
     427             :         if (!um_irq_timetravel_handler_used())
     428           0 :                 ret = vu_req_read_message(vu_dev, NULL);
     429             : 
     430           0 :         if (vu_dev->recv_rc) {
     431           0 :                 vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
     432           0 :         } else if (vu_dev->vq_irq_vq_map) {
     433             :                 struct virtqueue *vq;
     434             : 
     435           0 :                 virtio_device_for_each_vq((&vu_dev->vdev), vq) {
     436           0 :                         if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
     437           0 :                                 vring_interrupt(0 /* ignored */, vq);
     438             :                 }
     439           0 :                 vu_dev->vq_irq_vq_map = 0;
     440           0 :         } else if (vu_dev->config_changed_irq) {
     441           0 :                 virtio_config_changed(&vu_dev->vdev);
     442           0 :                 vu_dev->config_changed_irq = false;
     443             :         }
     444             : 
     445           0 :         return ret;
     446             : }
     447             : 
     448             : static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
     449             :                                           struct time_travel_event *ev)
     450             : {
     451             :         vu_req_read_message(data, ev);
     452             : }
     453             : 
     454           0 : static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
     455             : {
     456             :         int rc, req_fds[2];
     457             : 
     458             :         /* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
     459           0 :         rc = os_pipe(req_fds, true, true);
     460           0 :         if (rc < 0)
     461             :                 return rc;
     462           0 :         vu_dev->req_fd = req_fds[0];
     463             : 
     464           0 :         rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
     465             :                                vu_req_interrupt, IRQF_SHARED,
     466           0 :                                vu_dev->pdev->name, vu_dev,
     467             :                                vu_req_interrupt_comm_handler);
     468           0 :         if (rc < 0)
     469             :                 goto err_close;
     470             : 
     471           0 :         vu_dev->irq = rc;
     472             : 
     473           0 :         rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
     474             :                                            req_fds[1]);
     475           0 :         if (rc)
     476             :                 goto err_free_irq;
     477             : 
     478             :         goto out;
     479             : 
     480             : err_free_irq:
     481           0 :         um_free_irq(vu_dev->irq, vu_dev);
     482             : err_close:
     483           0 :         os_close_file(req_fds[0]);
     484             : out:
     485             :         /* Close unused write end of request fds */
     486           0 :         os_close_file(req_fds[1]);
     487           0 :         return rc;
     488             : }
     489             : 
     490           0 : static int vhost_user_init(struct virtio_uml_device *vu_dev)
     491             : {
     492           0 :         int rc = vhost_user_set_owner(vu_dev);
     493             : 
     494           0 :         if (rc)
     495             :                 return rc;
     496           0 :         rc = vhost_user_get_features(vu_dev, &vu_dev->features);
     497           0 :         if (rc)
     498             :                 return rc;
     499             : 
     500           0 :         if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
     501           0 :                 rc = vhost_user_get_protocol_features(vu_dev,
     502             :                                 &vu_dev->protocol_features);
     503           0 :                 if (rc)
     504             :                         return rc;
     505           0 :                 vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
     506           0 :                 rc = vhost_user_set_protocol_features(vu_dev,
     507             :                                 vu_dev->protocol_features);
     508           0 :                 if (rc)
     509             :                         return rc;
     510             :         }
     511             : 
     512           0 :         if (vu_dev->protocol_features &
     513             :                         BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
     514           0 :                 rc = vhost_user_init_slave_req(vu_dev);
     515           0 :                 if (rc)
     516             :                         return rc;
     517             :         }
     518             : 
     519             :         return 0;
     520             : }
     521             : 
     522           0 : static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
     523             :                                   u32 offset, void *buf, u32 len)
     524             : {
     525           0 :         u32 cfg_size = offset + len;
     526             :         struct vhost_user_msg *msg;
     527           0 :         size_t payload_size = sizeof(msg->payload.config) + cfg_size;
     528           0 :         size_t msg_size = sizeof(msg->header) + payload_size;
     529             :         int rc;
     530             : 
     531           0 :         if (!(vu_dev->protocol_features &
     532             :               BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
     533             :                 return;
     534             : 
     535           0 :         msg = kzalloc(msg_size, GFP_KERNEL);
     536           0 :         if (!msg)
     537             :                 return;
     538           0 :         msg->header.request = VHOST_USER_GET_CONFIG;
     539           0 :         msg->header.size = payload_size;
     540           0 :         msg->payload.config.offset = 0;
     541           0 :         msg->payload.config.size = cfg_size;
     542             : 
     543           0 :         rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
     544           0 :         if (rc) {
     545           0 :                 vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
     546             :                        rc);
     547           0 :                 goto free;
     548             :         }
     549             : 
     550           0 :         rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
     551           0 :         if (rc) {
     552           0 :                 vu_err(vu_dev,
     553             :                        "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
     554             :                        rc);
     555           0 :                 goto free;
     556             :         }
     557             : 
     558           0 :         if (msg->header.size != payload_size ||
     559           0 :             msg->payload.config.size != cfg_size) {
     560           0 :                 rc = -EPROTO;
     561           0 :                 vu_err(vu_dev,
     562             :                        "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
     563             :                        msg->header.size, payload_size,
     564             :                        msg->payload.config.size, cfg_size);
     565           0 :                 goto free;
     566             :         }
     567           0 :         memcpy(buf, msg->payload.config.payload + offset, len);
     568             : 
     569             : free:
     570           0 :         kfree(msg);
     571             : }
     572             : 
     573           0 : static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
     574             :                                   u32 offset, const void *buf, u32 len)
     575             : {
     576             :         struct vhost_user_msg *msg;
     577           0 :         size_t payload_size = sizeof(msg->payload.config) + len;
     578           0 :         size_t msg_size = sizeof(msg->header) + payload_size;
     579             :         int rc;
     580             : 
     581           0 :         if (!(vu_dev->protocol_features &
     582             :               BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
     583             :                 return;
     584             : 
     585           0 :         msg = kzalloc(msg_size, GFP_KERNEL);
     586           0 :         if (!msg)
     587             :                 return;
     588           0 :         msg->header.request = VHOST_USER_SET_CONFIG;
     589           0 :         msg->header.size = payload_size;
     590           0 :         msg->payload.config.offset = offset;
     591           0 :         msg->payload.config.size = len;
     592           0 :         memcpy(msg->payload.config.payload, buf, len);
     593             : 
     594           0 :         rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
     595           0 :         if (rc)
     596           0 :                 vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
     597             :                        rc);
     598             : 
     599           0 :         kfree(msg);
     600             : }
     601             : 
     602           0 : static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
     603             :                                       struct vhost_user_mem_region *region_out)
     604             : {
     605             :         unsigned long long mem_offset;
     606           0 :         int rc = phys_mapping(addr, &mem_offset);
     607             : 
     608           0 :         if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
     609             :                 return -EFAULT;
     610           0 :         *fd_out = rc;
     611           0 :         region_out->guest_addr = addr;
     612           0 :         region_out->user_addr = addr;
     613           0 :         region_out->size = size;
     614           0 :         region_out->mmap_offset = mem_offset;
     615             : 
     616             :         /* Ensure mapping is valid for the entire region */
     617           0 :         rc = phys_mapping(addr + size - 1, &mem_offset);
     618           0 :         if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
     619             :                  addr + size - 1, rc, *fd_out))
     620             :                 return -EFAULT;
     621           0 :         return 0;
     622             : }
     623             : 
     624           0 : static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
     625             : {
     626           0 :         struct vhost_user_msg msg = {
     627             :                 .header.request = VHOST_USER_SET_MEM_TABLE,
     628             :                 .header.size = sizeof(msg.payload.mem_regions),
     629             :                 .payload.mem_regions.num = 1,
     630             :         };
     631           0 :         unsigned long reserved = uml_reserved - uml_physmem;
     632             :         int fds[2];
     633             :         int rc;
     634             : 
     635             :         /*
     636             :          * This is a bit tricky, see also the comment with setup_physmem().
     637             :          *
     638             :          * Essentially, setup_physmem() uses a file to mmap() our physmem,
     639             :          * but the code and data we *already* have is omitted. To us, this
     640             :          * is no difference, since they both become part of our address
     641             :          * space and memory consumption. To somebody looking in from the
     642             :          * outside, however, it is different because the part of our memory
     643             :          * consumption that's already part of the binary (code/data) is not
     644             :          * mapped from the file, so it's not visible to another mmap from
     645             :          * the file descriptor.
     646             :          *
     647             :          * Thus, don't advertise this space to the vhost-user slave. This
     648             :          * means that the slave will likely abort or similar when we give
     649             :          * it an address from the hidden range, since it's not marked as
     650             :          * a valid address, but at least that way we detect the issue and
     651             :          * don't just have the slave read an all-zeroes buffer from the
     652             :          * shared memory file, or write something there that we can never
     653             :          * see (depending on the direction of the virtqueue traffic.)
     654             :          *
     655             :          * Since we usually don't want to use .text for virtio buffers,
     656             :          * this effectively means that you cannot use
     657             :          *  1) global variables, which are in the .bss and not in the shm
     658             :          *     file-backed memory
     659             :          *  2) the stack in some processes, depending on where they have
     660             :          *     their stack (or maybe only no interrupt stack?)
     661             :          *
     662             :          * The stack is already not typically valid for DMA, so this isn't
     663             :          * much of a restriction, but global variables might be encountered.
     664             :          *
     665             :          * It might be possible to fix it by copying around the data that's
     666             :          * between bss_start and where we map the file now, but it's not
     667             :          * something that you typically encounter with virtio drivers, so
     668             :          * it didn't seem worthwhile.
     669             :          */
     670           0 :         rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
     671             :                                         &fds[0],
     672             :                                         &msg.payload.mem_regions.regions[0]);
     673             : 
     674           0 :         if (rc < 0)
     675             :                 return rc;
     676           0 :         if (highmem) {
     677           0 :                 msg.payload.mem_regions.num++;
     678           0 :                 rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
     679             :                                 &fds[1], &msg.payload.mem_regions.regions[1]);
     680           0 :                 if (rc < 0)
     681             :                         return rc;
     682             :         }
     683             : 
     684           0 :         return vhost_user_send(vu_dev, false, &msg, fds,
     685           0 :                                msg.payload.mem_regions.num);
     686             : }
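As the long comment above explains, only the file-backed part of physmem is advertised,
because a vhost-user back-end resolves guest addresses purely through the regions it is
given plus the fd it can mmap(). A sketch of how a typical back-end (not this driver)
interprets one such region; the struct and function names are illustrative:

    #include <stdint.h>
    #include <sys/mman.h>

    /* Mirrors the fields filled in by vhost_user_init_mem_region() above. */
    struct mem_region {
            uint64_t guest_addr;    /* start of the region in guest physical space */
            uint64_t user_addr;     /* front-end's own address (== guest_addr for UML) */
            uint64_t size;
            uint64_t mmap_offset;   /* offset into the shared-memory fd */
    };

    /* The back-end mmap()s the fd it received alongside the region... */
    static void *map_region(const struct mem_region *r, int fd)
    {
            return mmap(NULL, r->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, r->mmap_offset);
    }

    /* ...and then translates guest addresses by offset. Anything outside the
     * advertised range (e.g. UML's .bss or a process stack, which are not part
     * of the file-backed physmem) cannot be resolved, which is exactly the
     * failure mode the comment above describes. */
    static void *resolve(const struct mem_region *r, void *mapping, uint64_t addr)
    {
            if (addr < r->guest_addr || addr >= r->guest_addr + r->size)
                    return NULL;
            return (char *)mapping + (addr - r->guest_addr);
    }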
     687             : 
     688           0 : static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
     689             :                                       u32 request, u32 index, u32 num)
     690             : {
     691           0 :         struct vhost_user_msg msg = {
     692             :                 .header.request = request,
     693             :                 .header.size = sizeof(msg.payload.vring_state),
     694             :                 .payload.vring_state.index = index,
     695             :                 .payload.vring_state.num = num,
     696             :         };
     697             : 
     698           0 :         return vhost_user_send(vu_dev, false, &msg, NULL, 0);
     699             : }
     700             : 
     701             : static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
     702             :                                     u32 index, u32 num)
     703             : {
     704           0 :         return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
     705             :                                           index, num);
     706             : }
     707             : 
     708             : static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
     709             :                                      u32 index, u32 offset)
     710             : {
     711           0 :         return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
     712             :                                           index, offset);
     713             : }
     714             : 
     715           0 : static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
     716             :                                      u32 index, u64 desc, u64 used, u64 avail,
     717             :                                      u64 log)
     718             : {
     719           0 :         struct vhost_user_msg msg = {
     720             :                 .header.request = VHOST_USER_SET_VRING_ADDR,
     721             :                 .header.size = sizeof(msg.payload.vring_addr),
     722             :                 .payload.vring_addr.index = index,
     723             :                 .payload.vring_addr.desc = desc,
     724             :                 .payload.vring_addr.used = used,
     725             :                 .payload.vring_addr.avail = avail,
     726             :                 .payload.vring_addr.log = log,
     727             :         };
     728             : 
     729           0 :         return vhost_user_send(vu_dev, false, &msg, NULL, 0);
     730             : }
     731             : 
     732           0 : static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
     733             :                                    u32 request, int index, int fd)
     734             : {
     735           0 :         struct vhost_user_msg msg = {
     736             :                 .header.request = request,
     737             :                 .header.size = sizeof(msg.payload.integer),
     738             :                 .payload.integer = index,
     739             :         };
     740             : 
     741           0 :         if (index & ~VHOST_USER_VRING_INDEX_MASK)
     742             :                 return -EINVAL;
     743           0 :         if (fd < 0) {
     744           0 :                 msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
     745           0 :                 return vhost_user_send(vu_dev, false, &msg, NULL, 0);
     746             :         }
     747           0 :         return vhost_user_send(vu_dev, false, &msg, &fd, 1);
     748             : }
     749             : 
     750             : static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
     751             :                                      int index, int fd)
     752             : {
     753           0 :         return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
     754             :                                        index, fd);
     755             : }
     756             : 
     757             : static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
     758             :                                      int index, int fd)
     759             : {
     760           0 :         return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
     761             :                                        index, fd);
     762             : }
     763             : 
     764             : static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
     765             :                                        u32 index, bool enable)
     766             : {
     767           0 :         if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
     768             :                 return 0;
     769             : 
     770           0 :         return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
     771             :                                           index, enable);
     772             : }
     773             : 
     774             : 
     775             : /* Virtio interface */
     776             : 
     777           0 : static bool vu_notify(struct virtqueue *vq)
     778             : {
     779           0 :         struct virtio_uml_vq_info *info = vq->priv;
     780           0 :         const uint64_t n = 1;
     781             :         int rc;
     782             : 
     783           0 :         if (info->suspended)
     784             :                 return true;
     785             : 
     786             :         time_travel_propagate_time();
     787             : 
     788           0 :         if (info->kick_fd < 0) {
     789             :                 struct virtio_uml_device *vu_dev;
     790             : 
     791           0 :                 vu_dev = to_virtio_uml_device(vq->vdev);
     792             : 
     793           0 :                 return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
     794           0 :                                                   vq->index, 0) == 0;
     795             :         }
     796             : 
     797             :         do {
     798           0 :                 rc = os_write_file(info->kick_fd, &n, sizeof(n));
     799           0 :         } while (rc == -EINTR);
     800           0 :         return !WARN(rc != sizeof(n), "write returned %d\n", rc);
     801             : }
     802             : 
     803           0 : static irqreturn_t vu_interrupt(int irq, void *opaque)
     804             : {
     805           0 :         struct virtqueue *vq = opaque;
     806           0 :         struct virtio_uml_vq_info *info = vq->priv;
     807             :         uint64_t n;
     808             :         int rc;
     809           0 :         irqreturn_t ret = IRQ_NONE;
     810             : 
     811             :         do {
     812           0 :                 rc = os_read_file(info->call_fd, &n, sizeof(n));
     813           0 :                 if (rc == sizeof(n))
     814           0 :                         ret |= vring_interrupt(irq, vq);
     815           0 :         } while (rc == sizeof(n) || rc == -EINTR);
     816           0 :         WARN(rc != -EAGAIN, "read returned %d\n", rc);
     817           0 :         return ret;
     818             : }
     819             : 
     820             : 
     821           0 : static void vu_get(struct virtio_device *vdev, unsigned offset,
     822             :                    void *buf, unsigned len)
     823             : {
     824           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     825             : 
     826           0 :         vhost_user_get_config(vu_dev, offset, buf, len);
     827           0 : }
     828             : 
     829           0 : static void vu_set(struct virtio_device *vdev, unsigned offset,
     830             :                    const void *buf, unsigned len)
     831             : {
     832           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     833             : 
     834           0 :         vhost_user_set_config(vu_dev, offset, buf, len);
     835           0 : }
     836             : 
     837           0 : static u8 vu_get_status(struct virtio_device *vdev)
     838             : {
     839           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     840             : 
     841           0 :         return vu_dev->status;
     842             : }
     843             : 
     844           0 : static void vu_set_status(struct virtio_device *vdev, u8 status)
     845             : {
     846           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     847             : 
     848           0 :         vu_dev->status = status;
     849           0 : }
     850             : 
     851           0 : static void vu_reset(struct virtio_device *vdev)
     852             : {
     853           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     854             : 
     855           0 :         vu_dev->status = 0;
     856           0 : }
     857             : 
     858           0 : static void vu_del_vq(struct virtqueue *vq)
     859             : {
     860           0 :         struct virtio_uml_vq_info *info = vq->priv;
     861             : 
     862           0 :         if (info->call_fd >= 0) {
     863             :                 struct virtio_uml_device *vu_dev;
     864             : 
     865           0 :                 vu_dev = to_virtio_uml_device(vq->vdev);
     866             : 
     867           0 :                 um_free_irq(vu_dev->irq, vq);
     868           0 :                 os_close_file(info->call_fd);
     869             :         }
     870             : 
     871           0 :         if (info->kick_fd >= 0)
     872           0 :                 os_close_file(info->kick_fd);
     873             : 
     874           0 :         vring_del_virtqueue(vq);
     875           0 :         kfree(info);
     876           0 : }
     877             : 
     878           0 : static void vu_del_vqs(struct virtio_device *vdev)
     879             : {
     880           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     881             :         struct virtqueue *vq, *n;
     882             :         u64 features;
     883             : 
     884             :         /* Note: reverse order as a workaround to a decoding bug in snabb */
     885           0 :         list_for_each_entry_reverse(vq, &vdev->vqs, list)
     886           0 :                 WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));
     887             : 
     888             :         /* Ensure previous messages have been processed */
     889           0 :         WARN_ON(vhost_user_get_features(vu_dev, &features));
     890             : 
     891           0 :         list_for_each_entry_safe(vq, n, &vdev->vqs, list)
     892           0 :                 vu_del_vq(vq);
     893           0 : }
     894             : 
     895           0 : static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
     896             :                                struct virtqueue *vq)
     897             : {
     898           0 :         struct virtio_uml_vq_info *info = vq->priv;
     899             :         int call_fds[2];
     900             :         int rc;
     901             : 
     902             :         /* no call FD needed/desired in this case */
     903           0 :         if (vu_dev->protocol_features &
     904           0 :                         BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
     905             :             vu_dev->protocol_features &
     906             :                         BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
     907           0 :                 info->call_fd = -1;
     908           0 :                 return 0;
     909             :         }
     910             : 
     911             :         /* Use a pipe for call fd, since SIGIO is not supported for eventfd */
     912           0 :         rc = os_pipe(call_fds, true, true);
     913           0 :         if (rc < 0)
     914             :                 return rc;
     915             : 
     916           0 :         info->call_fd = call_fds[0];
     917           0 :         rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
     918           0 :                             vu_interrupt, IRQF_SHARED, info->name, vq);
     919           0 :         if (rc < 0)
     920             :                 goto close_both;
     921             : 
     922           0 :         rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
     923           0 :         if (rc)
     924             :                 goto release_irq;
     925             : 
     926             :         goto out;
     927             : 
     928             : release_irq:
     929           0 :         um_free_irq(vu_dev->irq, vq);
     930             : close_both:
     931           0 :         os_close_file(call_fds[0]);
     932             : out:
     933             :         /* Close (unused) write end of call fds */
     934           0 :         os_close_file(call_fds[1]);
     935             : 
     936           0 :         return rc;
     937             : }
     938             : 
     939           0 : static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
     940             :                                      unsigned index, vq_callback_t *callback,
     941             :                                      const char *name, bool ctx)
     942             : {
     943           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
     944           0 :         struct platform_device *pdev = vu_dev->pdev;
     945             :         struct virtio_uml_vq_info *info;
     946             :         struct virtqueue *vq;
     947           0 :         int num = MAX_SUPPORTED_QUEUE_SIZE;
     948             :         int rc;
     949             : 
     950           0 :         info = kzalloc(sizeof(*info), GFP_KERNEL);
     951           0 :         if (!info) {
     952             :                 rc = -ENOMEM;
     953             :                 goto error_kzalloc;
     954             :         }
     955           0 :         snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
     956             :                  pdev->id, name);
     957             : 
     958           0 :         vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
     959             :                                     ctx, vu_notify, callback, info->name);
     960           0 :         if (!vq) {
     961             :                 rc = -ENOMEM;
     962             :                 goto error_create;
     963             :         }
     964           0 :         vq->priv = info;
     965           0 :         vq->num_max = num;
     966           0 :         num = virtqueue_get_vring_size(vq);
     967             : 
     968           0 :         if (vu_dev->protocol_features &
     969             :                         BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
     970           0 :                 info->kick_fd = -1;
     971             :         } else {
     972           0 :                 rc = os_eventfd(0, 0);
     973           0 :                 if (rc < 0)
     974             :                         goto error_kick;
     975           0 :                 info->kick_fd = rc;
     976             :         }
     977             : 
     978           0 :         rc = vu_setup_vq_call_fd(vu_dev, vq);
     979           0 :         if (rc)
     980             :                 goto error_call;
     981             : 
     982           0 :         rc = vhost_user_set_vring_num(vu_dev, index, num);
     983           0 :         if (rc)
     984             :                 goto error_setup;
     985             : 
     986           0 :         rc = vhost_user_set_vring_base(vu_dev, index, 0);
     987           0 :         if (rc)
     988             :                 goto error_setup;
     989             : 
     990           0 :         rc = vhost_user_set_vring_addr(vu_dev, index,
     991             :                                        virtqueue_get_desc_addr(vq),
     992             :                                        virtqueue_get_used_addr(vq),
     993             :                                        virtqueue_get_avail_addr(vq),
     994             :                                        (u64) -1);
     995           0 :         if (rc)
     996             :                 goto error_setup;
     997             : 
     998             :         return vq;
     999             : 
    1000             : error_setup:
    1001           0 :         if (info->call_fd >= 0) {
    1002           0 :                 um_free_irq(vu_dev->irq, vq);
    1003           0 :                 os_close_file(info->call_fd);
    1004             :         }
    1005             : error_call:
    1006           0 :         if (info->kick_fd >= 0)
    1007           0 :                 os_close_file(info->kick_fd);
    1008             : error_kick:
    1009           0 :         vring_del_virtqueue(vq);
    1010             : error_create:
    1011           0 :         kfree(info);
    1012             : error_kzalloc:
    1013           0 :         return ERR_PTR(rc);
    1014             : }
    1015             : 
    1016           0 : static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
    1017             :                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
    1018             :                        const char * const names[], const bool *ctx,
    1019             :                        struct irq_affinity *desc)
    1020             : {
    1021           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1022           0 :         int i, queue_idx = 0, rc;
    1023             :         struct virtqueue *vq;
    1024             : 
     1025             :         /* more than 64 virtqueues are not supported for now */
    1026           0 :         if (WARN_ON(nvqs > 64))
    1027             :                 return -EINVAL;
    1028             : 
    1029           0 :         rc = vhost_user_set_mem_table(vu_dev);
    1030           0 :         if (rc)
    1031             :                 return rc;
    1032             : 
    1033           0 :         for (i = 0; i < nvqs; ++i) {
    1034           0 :                 if (!names[i]) {
    1035           0 :                         vqs[i] = NULL;
    1036           0 :                         continue;
    1037             :                 }
    1038             : 
    1039           0 :                 vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
    1040           0 :                                      ctx ? ctx[i] : false);
    1041           0 :                 if (IS_ERR(vqs[i])) {
    1042           0 :                         rc = PTR_ERR(vqs[i]);
    1043           0 :                         goto error_setup;
    1044             :                 }
    1045             :         }
    1046             : 
    1047           0 :         list_for_each_entry(vq, &vdev->vqs, list) {
    1048           0 :                 struct virtio_uml_vq_info *info = vq->priv;
    1049             : 
    1050           0 :                 if (info->kick_fd >= 0) {
    1051           0 :                         rc = vhost_user_set_vring_kick(vu_dev, vq->index,
    1052             :                                                        info->kick_fd);
    1053           0 :                         if (rc)
    1054             :                                 goto error_setup;
    1055             :                 }
    1056             : 
    1057           0 :                 rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
    1058           0 :                 if (rc)
    1059             :                         goto error_setup;
    1060             :         }
    1061             : 
    1062             :         return 0;
    1063             : 
    1064             : error_setup:
    1065           0 :         vu_del_vqs(vdev);
    1066           0 :         return rc;
    1067             : }
    1068             : 
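/*
 * Note on vu_find_vqs() above: all requested queues are created first, and
 * only then does the second loop hand each kick eventfd to the backend
 * (vhost_user_set_vring_kick()) and enable each ring
 * (vhost_user_set_vring_enable()).  A NULL name skips a slot without
 * consuming a queue index, and any failure tears everything down through
 * vu_del_vqs().
 */
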
    1069           0 : static u64 vu_get_features(struct virtio_device *vdev)
    1070             : {
    1071           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1072             : 
    1073           0 :         return vu_dev->features;
    1074             : }
    1075             : 
    1076           0 : static int vu_finalize_features(struct virtio_device *vdev)
    1077             : {
    1078           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1079           0 :         u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;
    1080             : 
    1081           0 :         vring_transport_features(vdev);
    1082           0 :         vu_dev->features = vdev->features | supported;
    1083             : 
    1084           0 :         return vhost_user_set_features(vu_dev, vu_dev->features);
    1085             : }
    1086             : 
    1087           0 : static const char *vu_bus_name(struct virtio_device *vdev)
    1088             : {
    1089           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1090             : 
    1091           0 :         return vu_dev->pdev->name;
    1092             : }
    1093             : 
    1094             : static const struct virtio_config_ops virtio_uml_config_ops = {
    1095             :         .get = vu_get,
    1096             :         .set = vu_set,
    1097             :         .get_status = vu_get_status,
    1098             :         .set_status = vu_set_status,
    1099             :         .reset = vu_reset,
    1100             :         .find_vqs = vu_find_vqs,
    1101             :         .del_vqs = vu_del_vqs,
    1102             :         .get_features = vu_get_features,
    1103             :         .finalize_features = vu_finalize_features,
    1104             :         .bus_name = vu_bus_name,
    1105             : };
    1106             : 
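/*
 * Illustrative sketch (not part of this driver; names are hypothetical,
 * assuming the usual <linux/virtio_config.h> helpers): a regular virtio
 * driver bound to a device registered by this transport reaches the ops
 * table above only through the generic virtio core entry points, e.g.:
 */
#if 0	/* example only */
static void example_done(struct virtqueue *vq)
{
	/* a real driver would reap used buffers here */
}

static int example_probe(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	/* ends up in vu_find_vqs() via virtio_uml_config_ops.find_vqs */
	vq = virtio_find_single_vq(vdev, example_done, "example");
	if (IS_ERR(vq))
		return PTR_ERR(vq);

	/* sets DRIVER_OK, which ends up in vu_set_status() */
	virtio_device_ready(vdev);
	return 0;
}
#endif
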
    1107           0 : static void virtio_uml_release_dev(struct device *d)
    1108             : {
    1109           0 :         struct virtio_device *vdev =
    1110           0 :                         container_of(d, struct virtio_device, dev);
    1111           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1112             : 
    1113             :         time_travel_propagate_time();
    1114             : 
     1115             :         /* the request fd might not have been opened if the feature wasn't negotiated */
    1116           0 :         if (vu_dev->req_fd >= 0) {
    1117           0 :                 um_free_irq(vu_dev->irq, vu_dev);
    1118           0 :                 os_close_file(vu_dev->req_fd);
    1119             :         }
    1120             : 
    1121           0 :         os_close_file(vu_dev->sock);
    1122           0 :         kfree(vu_dev);
    1123           0 : }
    1124             : 
    1125           0 : void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
    1126             :                                   bool no_vq_suspend)
    1127             : {
    1128           0 :         struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    1129             : 
    1130           0 :         if (WARN_ON(vdev->config != &virtio_uml_config_ops))
    1131             :                 return;
    1132             : 
    1133           0 :         vu_dev->no_vq_suspend = no_vq_suspend;
    1134           0 :         dev_info(&vdev->dev, "%sabled VQ suspend\n",
    1135             :                  no_vq_suspend ? "dis" : "en");
    1136             : }
    1137             : 
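/*
 * Illustrative use of virtio_uml_set_no_vq_suspend() above (hypothetical
 * caller, not part of this file): a UML-specific driver that wants its
 * rings left enabled across suspend could call, from its probe:
 *
 *	virtio_uml_set_no_vq_suspend(vdev, true);
 *
 * The WARN_ON guards against being called for a virtio device that is not
 * backed by this transport.
 */
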
    1138             : static void vu_of_conn_broken(struct work_struct *wk)
    1139             : {
    1140             :         struct virtio_uml_platform_data *pdata;
    1141             :         struct virtio_uml_device *vu_dev;
    1142             : 
    1143             :         pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
    1144             : 
    1145             :         vu_dev = platform_get_drvdata(pdata->pdev);
    1146             : 
    1147             :         virtio_break_device(&vu_dev->vdev);
    1148             : 
    1149             :         /*
     1150             :          * We can't remove the device from the devicetree, so the only thing we
    1151             :          * can do is warn.
    1152             :          */
    1153             :         WARN_ON(1);
    1154             : }
    1155             : 
    1156             : /* Platform device */
    1157             : 
    1158             : static struct virtio_uml_platform_data *
    1159           0 : virtio_uml_create_pdata(struct platform_device *pdev)
    1160             : {
    1161           0 :         struct device_node *np = pdev->dev.of_node;
    1162             :         struct virtio_uml_platform_data *pdata;
    1163             :         int ret;
    1164             : 
    1165           0 :         if (!np)
    1166             :                 return ERR_PTR(-EINVAL);
    1167             : 
    1168           0 :         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
    1169           0 :         if (!pdata)
    1170             :                 return ERR_PTR(-ENOMEM);
    1171             : 
    1172           0 :         INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
    1173             :         pdata->pdev = pdev;
    1174             : 
    1175           0 :         ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
    1176             :         if (ret)
    1177           0 :                 return ERR_PTR(ret);
    1178             : 
    1179             :         ret = of_property_read_u32(np, "virtio-device-id",
    1180             :                                    &pdata->virtio_device_id);
    1181             :         if (ret)
    1182             :                 return ERR_PTR(ret);
    1183             : 
    1184             :         return pdata;
    1185             : }
    1186             : 
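/*
 * Illustrative devicetree fragment (node name and socket path are invented
 * for this example) matching what virtio_uml_create_pdata() reads and the
 * "virtio,uml" compatible declared in virtio_uml_match[] further down:
 *
 *	virtio-dev {
 *		compatible = "virtio,uml";
 *		socket-path = "/run/hypothetical-vhost-user.sock";
 *		virtio-device-id = <1>;	// device id as in virtio_ids.h
 *	};
 */
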
    1187           0 : static int virtio_uml_probe(struct platform_device *pdev)
    1188             : {
    1189           0 :         struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
    1190             :         struct virtio_uml_device *vu_dev;
    1191             :         int rc;
    1192             : 
    1193           0 :         if (!pdata) {
    1194           0 :                 pdata = virtio_uml_create_pdata(pdev);
    1195           0 :                 if (IS_ERR(pdata))
    1196           0 :                         return PTR_ERR(pdata);
    1197             :         }
    1198             : 
    1199           0 :         vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
    1200           0 :         if (!vu_dev)
    1201             :                 return -ENOMEM;
    1202             : 
    1203           0 :         vu_dev->pdata = pdata;
    1204           0 :         vu_dev->vdev.dev.parent = &pdev->dev;
    1205           0 :         vu_dev->vdev.dev.release = virtio_uml_release_dev;
    1206           0 :         vu_dev->vdev.config = &virtio_uml_config_ops;
    1207           0 :         vu_dev->vdev.id.device = pdata->virtio_device_id;
    1208           0 :         vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
    1209           0 :         vu_dev->pdev = pdev;
    1210           0 :         vu_dev->req_fd = -1;
    1211             : 
    1212             :         time_travel_propagate_time();
    1213             : 
    1214             :         do {
    1215           0 :                 rc = os_connect_socket(pdata->socket_path);
    1216           0 :         } while (rc == -EINTR);
    1217           0 :         if (rc < 0)
    1218             :                 goto error_free;
    1219           0 :         vu_dev->sock = rc;
    1220             : 
    1221           0 :         spin_lock_init(&vu_dev->sock_lock);
    1222             : 
    1223           0 :         rc = vhost_user_init(vu_dev);
    1224           0 :         if (rc)
    1225             :                 goto error_init;
    1226             : 
    1227           0 :         platform_set_drvdata(pdev, vu_dev);
    1228             : 
    1229           0 :         device_set_wakeup_capable(&vu_dev->vdev.dev, true);
    1230             : 
    1231           0 :         rc = register_virtio_device(&vu_dev->vdev);
    1232           0 :         if (rc)
    1233           0 :                 put_device(&vu_dev->vdev.dev);
    1234           0 :         vu_dev->registered = 1;
    1235           0 :         return rc;
    1236             : 
    1237             : error_init:
    1238           0 :         os_close_file(vu_dev->sock);
    1239             : error_free:
    1240           0 :         kfree(vu_dev);
    1241           0 :         return rc;
    1242             : }
    1243             : 
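/*
 * Ownership note for virtio_uml_probe() above: once register_virtio_device()
 * has been called, vu_dev is released through virtio_uml_release_dev() (via
 * put_device() on registration failure, or when the device is later
 * unregistered), which also closes the request fd and the vhost-user socket.
 * The explicit os_close_file()/kfree() error paths therefore only cover
 * failures that happen before registration.
 */
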
    1244           0 : static int virtio_uml_remove(struct platform_device *pdev)
    1245             : {
    1246           0 :         struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
    1247             : 
    1248           0 :         unregister_virtio_device(&vu_dev->vdev);
    1249           0 :         return 0;
    1250             : }
    1251             : 
    1252             : /* Command line device list */
    1253             : 
    1254           0 : static void vu_cmdline_release_dev(struct device *d)
    1255             : {
    1256           0 : }
    1257             : 
    1258             : static struct device vu_cmdline_parent = {
    1259             :         .init_name = "virtio-uml-cmdline",
    1260             :         .release = vu_cmdline_release_dev,
    1261             : };
    1262             : 
    1263             : static bool vu_cmdline_parent_registered;
    1264             : static int vu_cmdline_id;
    1265             : 
    1266           0 : static int vu_unregister_cmdline_device(struct device *dev, void *data)
    1267             : {
    1268           0 :         struct platform_device *pdev = to_platform_device(dev);
    1269           0 :         struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
    1270             : 
    1271           0 :         kfree(pdata->socket_path);
    1272           0 :         platform_device_unregister(pdev);
    1273           0 :         return 0;
    1274             : }
    1275             : 
    1276           0 : static void vu_conn_broken(struct work_struct *wk)
    1277             : {
    1278             :         struct virtio_uml_platform_data *pdata;
    1279             :         struct virtio_uml_device *vu_dev;
    1280             : 
    1281           0 :         pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
    1282             : 
    1283           0 :         vu_dev = platform_get_drvdata(pdata->pdev);
    1284             : 
    1285           0 :         virtio_break_device(&vu_dev->vdev);
    1286             : 
    1287           0 :         vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
    1288           0 : }
    1289             : 
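/*
 * Note: vu_conn_broken() above is the command-line counterpart of
 * vu_of_conn_broken().  Both mark the virtio device as broken, but only the
 * command-line variant can also unregister the platform device it created;
 * the devicetree variant can merely warn, since its node cannot be removed.
 */
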
    1290           0 : static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
    1291             : {
    1292           0 :         const char *ids = strchr(device, ':');
    1293             :         unsigned int virtio_device_id;
    1294             :         int processed, consumed, err;
    1295             :         char *socket_path;
    1296             :         struct virtio_uml_platform_data pdata, *ppdata;
    1297             :         struct platform_device *pdev;
    1298             : 
    1299           0 :         if (!ids || ids == device)
    1300             :                 return -EINVAL;
    1301             : 
    1302           0 :         processed = sscanf(ids, ":%u%n:%d%n",
    1303             :                            &virtio_device_id, &consumed,
    1304             :                            &vu_cmdline_id, &consumed);
    1305             : 
    1306           0 :         if (processed < 1 || ids[consumed])
    1307             :                 return -EINVAL;
    1308             : 
    1309           0 :         if (!vu_cmdline_parent_registered) {
    1310           0 :                 err = device_register(&vu_cmdline_parent);
    1311           0 :                 if (err) {
    1312           0 :                         pr_err("Failed to register parent device!\n");
    1313           0 :                         put_device(&vu_cmdline_parent);
    1314           0 :                         return err;
    1315             :                 }
    1316           0 :                 vu_cmdline_parent_registered = true;
    1317             :         }
    1318             : 
    1319           0 :         socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
    1320           0 :         if (!socket_path)
    1321             :                 return -ENOMEM;
    1322             : 
    1323           0 :         pdata.virtio_device_id = (u32) virtio_device_id;
    1324           0 :         pdata.socket_path = socket_path;
    1325             : 
    1326           0 :         pr_info("Registering device virtio-uml.%d id=%d at %s\n",
    1327             :                 vu_cmdline_id, virtio_device_id, socket_path);
    1328             : 
    1329           0 :         pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
    1330             :                                              vu_cmdline_id++, &pdata,
    1331             :                                              sizeof(pdata));
    1332           0 :         err = PTR_ERR_OR_ZERO(pdev);
    1333           0 :         if (err)
    1334             :                 goto free;
    1335             : 
    1336           0 :         ppdata = pdev->dev.platform_data;
    1337           0 :         ppdata->pdev = pdev;
    1338           0 :         INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
    1339             : 
    1340           0 :         return 0;
    1341             : 
    1342             : free:
    1343           0 :         kfree(socket_path);
    1344           0 :         return err;
    1345             : }
    1346             : 
    1347           0 : static int vu_cmdline_get_device(struct device *dev, void *data)
    1348             : {
    1349           0 :         struct platform_device *pdev = to_platform_device(dev);
    1350           0 :         struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
    1351           0 :         char *buffer = data;
    1352           0 :         unsigned int len = strlen(buffer);
    1353             : 
    1354           0 :         snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
    1355             :                  pdata->socket_path, pdata->virtio_device_id, pdev->id);
    1356           0 :         return 0;
    1357             : }
    1358             : 
    1359           0 : static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
    1360             : {
    1361           0 :         buffer[0] = '\0';
    1362           0 :         if (vu_cmdline_parent_registered)
    1363           0 :                 device_for_each_child(&vu_cmdline_parent, buffer,
    1364             :                                       vu_cmdline_get_device);
    1365           0 :         return strlen(buffer) + 1;
    1366             : }
    1367             : 
    1368             : static const struct kernel_param_ops vu_cmdline_param_ops = {
    1369             :         .set = vu_cmdline_set,
    1370             :         .get = vu_cmdline_get,
    1371             : };
    1372             : 
    1373             : device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
    1374             : __uml_help(vu_cmdline_param_ops,
    1375             : "virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
    1376             : "    Configure a virtio device over a vhost-user socket.\n"
    1377             : "    See virtio_ids.h for a list of possible virtio device id values.\n"
    1378             : "    Optionally use a specific platform_device id.\n\n"
    1379             : );
    1380             : 
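/*
 * Hypothetical example (socket path chosen for illustration only):
 *
 *	virtio_uml.device=/run/vhost-user-example.sock:3:2
 *
 * is parsed by vu_cmdline_set() into socket_path
 * "/run/vhost-user-example.sock", virtio device id 3 (as listed in
 * virtio_ids.h) and platform device id 2, and registers a "virtio-uml"
 * platform device carrying that data.
 */
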
    1381             : 
    1382           1 : static void vu_unregister_cmdline_devices(void)
    1383             : {
    1384           1 :         if (vu_cmdline_parent_registered) {
    1385           0 :                 device_for_each_child(&vu_cmdline_parent, NULL,
    1386             :                                       vu_unregister_cmdline_device);
    1387           0 :                 device_unregister(&vu_cmdline_parent);
    1388           0 :                 vu_cmdline_parent_registered = false;
    1389             :         }
    1390           1 : }
    1391             : 
    1392             : /* Platform driver */
    1393             : 
    1394             : static const struct of_device_id virtio_uml_match[] = {
    1395             :         { .compatible = "virtio,uml", },
    1396             :         { }
    1397             : };
    1398             : MODULE_DEVICE_TABLE(of, virtio_uml_match);
    1399             : 
    1400           0 : static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
    1401             : {
    1402           0 :         struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
    1403             : 
    1404           0 :         if (!vu_dev->no_vq_suspend) {
    1405             :                 struct virtqueue *vq;
    1406             : 
    1407           0 :                 virtio_device_for_each_vq((&vu_dev->vdev), vq) {
    1408           0 :                         struct virtio_uml_vq_info *info = vq->priv;
    1409             : 
    1410           0 :                         info->suspended = true;
    1411           0 :                         vhost_user_set_vring_enable(vu_dev, vq->index, false);
    1412             :                 }
    1413             :         }
    1414             : 
    1415           0 :         if (!device_may_wakeup(&vu_dev->vdev.dev)) {
    1416           0 :                 vu_dev->suspended = true;
    1417           0 :                 return 0;
    1418             :         }
    1419             : 
    1420           0 :         return irq_set_irq_wake(vu_dev->irq, 1);
    1421             : }
    1422             : 
    1423           0 : static int virtio_uml_resume(struct platform_device *pdev)
    1424             : {
    1425           0 :         struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
    1426             : 
    1427           0 :         if (!vu_dev->no_vq_suspend) {
    1428             :                 struct virtqueue *vq;
    1429             : 
    1430           0 :                 virtio_device_for_each_vq((&vu_dev->vdev), vq) {
    1431           0 :                         struct virtio_uml_vq_info *info = vq->priv;
    1432             : 
    1433           0 :                         info->suspended = false;
    1434           0 :                         vhost_user_set_vring_enable(vu_dev, vq->index, true);
    1435             :                 }
    1436             :         }
    1437             : 
    1438           0 :         vu_dev->suspended = false;
    1439             : 
    1440           0 :         if (!device_may_wakeup(&vu_dev->vdev.dev))
    1441             :                 return 0;
    1442             : 
    1443           0 :         return irq_set_irq_wake(vu_dev->irq, 0);
    1444             : }
    1445             : 
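/*
 * Suspend/resume note for the two callbacks above: unless no_vq_suspend was
 * requested, every virtqueue is disabled on suspend and re-enabled on resume
 * via vhost_user_set_vring_enable().  If the device may wake the system, the
 * interrupt is armed as a wakeup source with irq_set_irq_wake() instead of
 * marking the whole device as suspended.
 */
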
    1446             : static struct platform_driver virtio_uml_driver = {
    1447             :         .probe = virtio_uml_probe,
    1448             :         .remove = virtio_uml_remove,
    1449             :         .driver = {
    1450             :                 .name = "virtio-uml",
    1451             :                 .of_match_table = virtio_uml_match,
    1452             :         },
    1453             :         .suspend = virtio_uml_suspend,
    1454             :         .resume = virtio_uml_resume,
    1455             : };
    1456             : 
    1457           1 : static int __init virtio_uml_init(void)
    1458             : {
    1459           1 :         return platform_driver_register(&virtio_uml_driver);
    1460             : }
    1461             : 
    1462           1 : static void __exit virtio_uml_exit(void)
    1463             : {
    1464           1 :         platform_driver_unregister(&virtio_uml_driver);
    1465           1 :         vu_unregister_cmdline_devices();
    1466           1 : }
    1467             : 
    1468             : module_init(virtio_uml_init);
    1469             : module_exit(virtio_uml_exit);
    1470             : __uml_exitcall(virtio_uml_exit);
    1471             : 
    1472             : MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
    1473             : MODULE_LICENSE("GPL");

Generated by: LCOV version 1.14