46 changes: 30 additions & 16 deletions drivers/block/xen-blkfront.c
@@ -98,6 +98,10 @@ static unsigned int xen_blkif_max_segments = 32;
 module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
 MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
@@ -131,6 +135,7 @@ struct blkfront_info
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	unsigned int feature_persistent:1;
+	unsigned int bounce:1;
 	unsigned int max_indirect_segments;
 	int is_ready;
 };
@@ -200,8 +205,8 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		if (info->feature_persistent) {
-			granted_page = alloc_page(GFP_NOIO);
+		if (info->bounce) {
+			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
 			if (!granted_page) {
 				kfree(gnt_list_entry);
 				goto out_of_memory;
@@ -220,7 +225,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 	list_for_each_entry_safe(gnt_list_entry, n,
 	                         &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
@@ -249,7 +254,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (!info->feature_persistent) {
+	if (!info->bounce) {
 		BUG_ON(!pfn);
 		gnt_list_entry->pfn = pfn;
 	}
@@ -506,7 +511,7 @@ static int blkif_queue_request(struct request *req)
 				kunmap_atomic(segments);
 
 				n = i / SEGS_PER_INDIRECT_FRAME;
-				if (!info->feature_persistent) {
+				if (!info->bounce) {
 					struct page *indirect_page;
 
 					/* Fetch a pre-allocated page to use for indirect grefs */
@@ -527,7 +532,7 @@ static int blkif_queue_request(struct request *req)
 
 			info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req) && info->feature_persistent) {
+			if (rq_data_dir(req) && info->bounce) {
 				char *bvec_data;
 				void *shared_data;
 
@@ -711,11 +716,12 @@ static const char *flush_info(unsigned int feature_flush)
 static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_flush(info->rq, info->feature_flush);
-	pr_info("blkfront: %s: %s %s %s %s %s\n",
+	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info->feature_flush),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
-		info->max_indirect_segments ? "enabled;" : "disabled;");
+		info->max_indirect_segments ? "enabled;" : "disabled;",
+		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -962,7 +968,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 							  0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
@@ -976,7 +982,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	if (!list_empty(&info->indirect_pages)) {
 		struct page *indirect_page, *n;
 
-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -997,7 +1003,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
@@ -1057,7 +1063,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1293,6 +1299,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	struct xenbus_transaction xbt;
 	int err;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	/* Create shared ring, alloc event channel. */
 	err = setup_blkring(dev, info);
 	if (err)
@@ -1697,17 +1707,18 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 	if (err)
 		goto out_of_memory;
 
-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
 		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
 		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
 
 		BUG_ON(!list_empty(&info->indirect_pages));
 		for (i = 0; i < num; i++) {
-			struct page *indirect_page = alloc_page(GFP_NOIO);
+			struct page *indirect_page = alloc_page(GFP_NOIO |
+								__GFP_ZERO);
 			if (!indirect_page)
 				goto out_of_memory;
 			list_add(&indirect_page->lru, &info->indirect_pages);
@@ -1863,6 +1874,9 @@ static void blkfront_connect(struct blkfront_info *info)
 	else
 		info->feature_persistent = persistent;
 
+	if (info->feature_persistent)
+		info->bounce = true;
+
 	err = blkfront_setup_indirect(info);
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
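The net effect of the blkfront hunks: every path that used to key off feature_persistent now keys off the new info->bounce flag, which is computed in talk_to_blkback() and forced on whenever persistent grants are in use. The decision itself is small enough to model standalone. Below is a minimal, runnable userspace sketch of that boolean logic (the harness and helper names are hypothetical, not kernel code): the frontend bounces unless both the module parameter (e.g. xen_blkfront.trusted=0 flips it) and the backend's optional xenstore "trusted" node, which defaults to 1 when absent, report a trusted backend.

#include <stdbool.h>
#include <stdio.h>

/* mirrors the new xen_blkif_trusted module parameter (default true) */
static bool xen_blkif_trusted = true;

/* mirrors: info->bounce = !xen_blkif_trusted ||
 *                         !xenbus_read_unsigned(dev->nodename, "trusted", 1); */
static bool must_bounce(unsigned int trusted_node)
{
	return !xen_blkif_trusted || !trusted_node;
}

int main(void)
{
	printf("trusted node = 1 -> bounce = %d\n", must_bounce(1)); /* 0 */
	printf("trusted node = 0 -> bounce = %d\n", must_bounce(0)); /* 1 */
	return 0;
}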
53 changes: 50 additions & 3 deletions drivers/net/xen-netfront.c
@@ -64,6 +64,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
 		 "Maximum number of queues per virtual interface");
 
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
@@ -160,6 +164,9 @@ struct netfront_info {
 	struct netfront_stats __percpu *rx_stats;
 	struct netfront_stats __percpu *tx_stats;
 
+	/* Should skbs be bounced into a zeroed buffer? */
+	bool bounce;
+
 	atomic_t rx_gso_checksum_fixup;
 };
 
@@ -266,7 +273,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 	if (unlikely(!skb))
 		return NULL;
 
-	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 	if (!page) {
 		kfree_skb(skb);
 		return NULL;
@@ -516,6 +523,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return queue_idx;
 }
 
+struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+	unsigned int headerlen = skb_headroom(skb);
+	/* Align size to allocate full pages and avoid contiguous data leaks */
+	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+				  PAGE_SIZE);
+	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+	if (!n)
+		return NULL;
+
+	if (!IS_ALIGNED((uintptr_t)n->head, PAGE_SIZE)) {
+		WARN_ONCE(1, "misaligned skb allocated\n");
+		kfree_skb(n);
+		return NULL;
+	}
+
+	/* Set the data pointer */
+	skb_reserve(n, headerlen);
+	/* Set the tail pointer and length */
+	skb_put(n, skb->len);
+
+	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+	skb_copy_header(n, skb);
+	return n;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
@@ -563,9 +598,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* The first req should be at least ETH_HLEN size or the packet will be
 	 * dropped by netback.
+	 *
+	 * If the backend is not trusted bounce all data to zeroed pages to
+	 * avoid exposing contiguous data on the granted page not belonging to
+	 * the skb.
 	 */
-	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
-		nskb = skb_copy(skb, GFP_ATOMIC);
+	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+		nskb = bounce_skb(skb);
 		if (!nskb)
 			goto drop;
 		dev_kfree_skb_any(skb);
@@ -1774,6 +1813,10 @@ static int talk_to_netback(struct xenbus_device *dev,
 
 	info->netdev->irq = 0;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xennet_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	/* Check if backend supports multiple queues */
 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 			   "multi-queue-max-queues", "%u", &max_queues);
@@ -1936,6 +1979,10 @@ static int xennet_connect(struct net_device *dev)
 	if (err)
 		return err;
 
+	if (np->bounce)
+		dev_info(&np->xbdev->dev,
+			 "bouncing transmitted data to zeroed pages\n");
+
 	/* talk_to_netback() sets the correct number of queues */
 	num_queues = dev->real_num_tx_queues;
 
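Two details of bounce_skb() are worth calling out. First, the allocation is rounded up to a whole number of pages and requested with __GFP_ZERO, so a granted page can only ever expose packet bytes or zeros, never leftover data that happens to share the final page. Second, the IS_ALIGNED() check verifies the allocator actually returned page-aligned memory, since the scheme depends on page granularity. A runnable userspace sketch of just the sizing arithmetic (the PAGE_SIZE value and the skb geometry numbers are illustrative assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096u
/* same rounding as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int skb_end_off = 1664;  /* hypothetical linear buffer end */
	unsigned int data_len = 3000;     /* hypothetical paged (frag) data */
	unsigned int size = ALIGN(skb_end_off + data_len, PAGE_SIZE);

	/* 1664 + 3000 = 4664 -> rounded up to 8192 (2 pages) */
	printf("alloc %u bytes (%u pages)\n", size, size / PAGE_SIZE);
	return 0;
}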
15 changes: 15 additions & 0 deletions drivers/xen/xenbus/xenbus_xs.c
@@ -526,6 +526,21 @@ int xenbus_scanf(struct xenbus_transaction t,
 }
 EXPORT_SYMBOL_GPL(xenbus_scanf);
 
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+				  unsigned int default_val)
+{
+	unsigned int val;
+	int ret;
+
+	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
+	if (ret <= 0)
+		val = default_val;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
+
 /* Single printf and write: returns -errno or 0. */
 int xenbus_printf(struct xenbus_transaction t,
 		  const char *dir, const char *node, const char *fmt, ...)
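xenbus_scanf() returns the number of values matched on success or a negative errno, so the single ret <= 0 test above folds "node absent", "read failure", and "matched nothing" into one fallback to the caller's default. A runnable userspace model of that contract (sscanf stands in for xenbus_scanf, and the string argument is a hypothetical stand-in for the xenstore node contents):

#include <stdio.h>

/* models xenbus_read_unsigned(): any ret <= 0 falls back to the default */
static unsigned int read_unsigned_model(const char *node_contents,
					unsigned int default_val)
{
	unsigned int val;
	int ret = node_contents ? sscanf(node_contents, "%u", &val) : -1;

	return ret <= 0 ? default_val : val;
}

int main(void)
{
	printf("%u\n", read_unsigned_model("0", 1));  /* 0: explicit untrusted */
	printf("%u\n", read_unsigned_model(NULL, 1)); /* 1: node absent */
	printf("%u\n", read_unsigned_model("x", 1));  /* 1: parse failure */
	return 0;
}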
1 change: 1 addition & 0 deletions include/linux/skbuff.h
@@ -1030,6 +1030,7 @@ static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
 struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
 
4 changes: 4 additions & 0 deletions include/xen/xenbus.h
@@ -144,6 +144,10 @@ __scanf(4, 5)
 int xenbus_scanf(struct xenbus_transaction t,
 		 const char *dir, const char *node, const char *fmt, ...);
 
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+				  unsigned int default_val);
+
 /* Single printf and write: returns -errno or 0. */
 __printf(4, 5)
 int xenbus_printf(struct xenbus_transaction t,
9 changes: 5 additions & 4 deletions net/core/skbuff.c
@@ -1115,14 +1115,15 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
 	skb->inner_mac_header += off;
 }
 
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	__copy_skb_header(new, old);
 
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
+EXPORT_SYMBOL(skb_copy_header);
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 {
@@ -1166,7 +1167,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
 		BUG();
 
-	copy_skb_header(n, skb);
+	skb_copy_header(n, skb);
 	return n;
 }
 EXPORT_SYMBOL(skb_copy);
@@ -1225,7 +1226,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 		skb_clone_fraglist(n);
 	}
 
-	copy_skb_header(n, skb);
+	skb_copy_header(n, skb);
 out:
 	return n;
 }
@@ -1396,7 +1397,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 			  skb->len + head_copy_len))
 		BUG();
 
-	copy_skb_header(n, skb);
+	skb_copy_header(n, skb);
 
 	skb_headers_offset_update(n, newheadroom - oldheadroom);
 
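The rename-and-export exists because every skb copier follows the same recipe, and bounce_skb() in xen-netfront needs the final metadata step from module context, which the old static copy_skb_header() could not provide. A hedged kernel-style sketch of that shared shape (copy_like() is a hypothetical name; bounce_skb()'s page-alignment check and most error handling are elided, and only the size policy differs between callers):

#include <linux/skbuff.h>

/* skb_copy() sizes the allocation to just fit; bounce_skb() passes a
 * page-rounded, zero-filled size. Everything after the allocation is
 * the same four-step recipe. */
static struct sk_buff *copy_like(const struct sk_buff *skb, unsigned int size,
				 gfp_t gfp)
{
	unsigned int headerlen = skb_headroom(skb);
	struct sk_buff *n = alloc_skb(size, gfp);

	if (!n)
		return NULL;

	skb_reserve(n, headerlen);          /* restore the headroom        */
	skb_put(n, skb->len);               /* restore the data length     */
	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();                      /* copy headers plus payload   */
	skb_copy_header(n, skb);            /* copy the metadata (gso etc) */
	return n;
}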