[pull] master from torvalds:master #136

Merged — 29 commits merged into master from torvalds:master, Sep 26, 2020

Commits:
7f04839 scsi: lpfc: Fix initial FLOGI failure due to BBSCN not supported (Sep 11, 2020)
27ba3e8 scsi: sd: sd_zbc: Fix handling of host-aware ZBC disks (damien-lemoal, Sep 15, 2020)
6c5dee1 scsi: sd: sd_zbc: Fix ZBC disk initialization (damien-lemoal, Sep 15, 2020)
59e330f nvme: return errors for hwmon init (keithbusch, Sep 17, 2020)
50b7c24 nvme-pci: fix NULL req in completion handler (Sep 22, 2020)
9e0e8da nvme-fc: fail new connections to a deleted host or remote port (Sep 17, 2020)
46d2613 nvme-core: don't use NVME_NSID_ALL for command effects and supported log (ChaitanayaKulkarni, Sep 22, 2020)
f7e8098 s390/zcrypt: Fix ZCRYPT_PERDEV_REQCNT ioctl (borntraeger, Sep 21, 2020)
9754d6c Merge tag 'nvme-5.9-2020-09-24' of git://git.infradead.org/nvme into … (axboe, Sep 24, 2020)
f3cd485 io_uring: ensure open/openat2 name is cleaned on cancelation (axboe, Sep 24, 2020)
3aab917 block: remove unused BLK_QC_T_EAGAIN flag (lostjeffle, Sep 25, 2020)
62c774e io_uring: don't unconditionally set plug->nowait = true (axboe, Sep 25, 2020)
f38c7e3 io_uring: ensure async buffered read-retry is setup properly (axboe, Sep 25, 2020)
678ff6a mm: slab: fix potential double free in ___cache_free (shakeelb, Sep 26, 2020)
4166343 mm, THP, swap: fix allocating cluster for swapfile by mistake (Sep 26, 2020)
8d3fe09 mm: memcontrol: fix missing suffix of workingset_restore (Sep 26, 2020)
d3f7b1b mm/gup: fix gup_fast with dynamic page table folding (Sep 26, 2020)
6c5c7b9 mm/migrate: correct thp migration stats (x-y-z, Sep 26, 2020)
1e1b6d6 lib/string.c: implement stpcpy (nickdesaulniers, Sep 26, 2020)
ffa550c lib/memregion.c: include memregion.h (JasonYanHw, Sep 26, 2020)
a1cd6c2 arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback (Sep 26, 2020)
c1d0da8 mm: replace memmap_context by meminit_context (ldu4, Sep 26, 2020)
f85086f mm: don't rely on system state to detect hot-plug operations (ldu4, Sep 26, 2020)
ce26842 mm: validate pmd after splitting (minchank, Sep 15, 2020)
8fb1e91 Merge branch 'akpm' (patches from Andrew) (torvalds, Sep 26, 2020)
eeddbe6 Merge tag 's390-5.9-7' of git://git.kernel.org/pub/scm/linux/kernel/g… (torvalds, Sep 26, 2020)
9d2fbae Merge tag 'block-5.9-2020-09-25' of git://git.kernel.dk/linux-block (torvalds, Sep 26, 2020)
692495b Merge tag 'io_uring-5.9-2020-09-25' of git://git.kernel.dk/linux-block (torvalds, Sep 26, 2020)
a1bffa4 Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/g… (torvalds, Sep 26, 2020)
25 changes: 18 additions & 7 deletions Documentation/admin-guide/cgroup-v2.rst
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.
 	  pgmajfault
 		Number of major page faults incurred
 
-	  workingset_refault
-		Number of refaults of previously evicted pages
+	  workingset_refault_anon
+		Number of refaults of previously evicted anonymous pages.
+
+	  workingset_refault_file
+		Number of refaults of previously evicted file pages.
 
-	  workingset_activate
-		Number of refaulted pages that were immediately activated
+	  workingset_activate_anon
+		Number of refaulted anonymous pages that were immediately
+		activated.
+
+	  workingset_activate_file
+		Number of refaulted file pages that were immediately activated.
 
-	  workingset_restore
-		Number of restored pages which have been detected as an active
-		workingset before they got reclaimed.
+	  workingset_restore_anon
+		Number of restored anonymous pages which have been detected as
+		an active workingset before they got reclaimed.
+
+	  workingset_restore_file
+		Number of restored file pages which have been detected as an
+		active workingset before they got reclaimed.
 
 	  workingset_nodereclaim
 		Number of times a shadow node has been reclaimed
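The split counters above land in each cgroup's memory.stat as flat "key value" lines. A minimal userspace sketch for reading them (the cgroup path is hypothetical, not part of this change):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical cgroup; substitute a real cgroup v2 path */
	FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f)
		return 1;
	/* memory.stat is a flat list of "key value" pairs */
	while (fscanf(f, "%63s %llu", key, &val) == 2)
		if (!strncmp(key, "workingset_", 11))
			printf("%s = %llu\n", key, val);
	fclose(f);
	return 0;
}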
6 changes: 3 additions & 3 deletions arch/ia64/mm/init.c
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
 				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMMAP_EARLY, NULL);
+				 MEMINIT_EARLY, NULL);
 	return 0;
 }
 
@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
 	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-				 NULL);
+		memmap_init_zone(size, nid, zone, start_pfn,
+				 MEMINIT_EARLY, NULL);
 	} else {
 		struct page *start;
 		struct memmap_init_callback_data args;
42 changes: 30 additions & 12 deletions arch/s390/include/asm/pgtable.h
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-	return (p4d_t *) pgd;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
 }
+#define p4d_offset_lockless p4d_offset_lockless
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-	return (pud_t *) p4d;
+	return p4d_offset_lockless(pgdp, *pgdp, address);
 }
+
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+{
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
+}
 #define pud_offset pud_offset
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-	return (pmd_t *) pud;
+	return pmd_offset_lockless(pudp, *pudp, address);
 }
 #define pmd_offset pmd_offset
 
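The point of the new *_lockless helpers above: on s390 the upper page table levels can fold, so p4d_offset() may simply return the pgd pointer it was given. A lockless walker (the GUP fast path) must therefore load an entry once and pass that loaded value down, never re-dereferencing a pointer that a concurrent THP split may change. A minimal sketch of the calling convention (illustrative only, assuming the usual READ_ONCE()/pgd_none() helpers):

static int walk_addr_lockless(pgd_t *pgdp, unsigned long addr)
{
	pgd_t pgd = READ_ONCE(*pgdp);	/* snapshot the entry exactly once */
	p4d_t *p4dp;

	if (pgd_none(pgd))
		return 0;
	/* hand both the pointer and the snapshot down one level */
	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	/* ... continue the same way through pud and pmd ... */
	return p4dp != NULL;
}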
2 changes: 1 addition & 1 deletion arch/x86/lib/usercopy_64.c
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
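Why the one-byte flush was wrong, as a worked case (this assumes clean_cache_range(addr, size) writes back every cache line overlapping [addr, addr + size), consistent with the helper defined earlier in this file):

/*
 * With 64-byte cache lines, a copy with size == 4 landing at
 * dst == line_end - 1 dirties two lines. clean_cache_range(dst, 1)
 * only guarantees writeback of the first line, so the tail bytes may
 * never reach persistent memory; clean_cache_range(dst, size) covers
 * every line the copy touched.
 */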
46 changes: 46 additions & 0 deletions block/blk-settings.c
@@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+/**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk:	the gendisk of the queue to configure
+ * @model:	the zoned model to set
+ *
+ * Set the zoned model of the request queue of @disk according to @model.
+ * When @model is BLK_ZONED_HM (host managed), this should be called only
+ * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+ * If @model specifies BLK_ZONED_HA (host aware), the effective model used
+ * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+ * on the disk.
+ */
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+	switch (model) {
+	case BLK_ZONED_HM:
+		/*
+		 * Host managed devices are supported only if
+		 * CONFIG_BLK_DEV_ZONED is enabled.
+		 */
+		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+		break;
+	case BLK_ZONED_HA:
+		/*
+		 * Host aware devices can be treated either as regular block
+		 * devices (similar to drive managed devices) or as zoned block
+		 * devices to take advantage of the zone command set, similarly
+		 * to host managed devices. We try the latter if there are no
+		 * partitions and zoned block device support is enabled, else
+		 * we do nothing special as far as the block layer is concerned.
+		 */
+		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+		    disk_has_partitions(disk))
+			model = BLK_ZONED_NONE;
+		break;
+	case BLK_ZONED_NONE:
+	default:
+		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+			model = BLK_ZONED_NONE;
+		break;
+	}
+
+	disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
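A hypothetical caller of the new export (sketch only; in this series the real user is the sd/sd_zbc rework): a driver reports the model it probed and lets the block layer downgrade host-aware disks when zoned support is compiled out or the disk has partitions. The mydrv_* name is invented for illustration.

static void mydrv_set_zoned_model(struct gendisk *disk, bool host_managed,
				  bool host_aware)
{
	if (host_managed)
		blk_queue_set_zoned(disk, BLK_ZONED_HM);
	else if (host_aware)
		blk_queue_set_zoned(disk, BLK_ZONED_HA);	/* may become NONE */
	else
		blk_queue_set_zoned(disk, BLK_ZONED_NONE);
}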
85 changes: 55 additions & 30 deletions drivers/base/node.c
@@ -761,14 +761,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 	return pfn_to_nid(pfn);
 }
 
+static int do_register_memory_block_under_node(int nid,
+					       struct memory_block *mem_blk)
+{
+	int ret;
+
+	/*
+	 * If this memory block spans multiple nodes, we only indicate
+	 * the last processed node.
+	 */
+	mem_blk->nid = nid;
+
+	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+				       &mem_blk->dev.kobj,
+				       kobject_name(&mem_blk->dev.kobj));
+	if (ret)
+		return ret;
+
+	return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+					&node_devices[nid]->dev.kobj,
+					kobject_name(&node_devices[nid]->dev.kobj));
+}
+
 /* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
-					 void *arg)
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+					       void *arg)
 {
 	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
 	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
 	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
-	int ret, nid = *(int *)arg;
+	int nid = *(int *)arg;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
@@ -785,38 +807,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 		}
 
 		/*
-		 * We need to check if page belongs to nid only for the boot
-		 * case, during hotplug we know that all pages in the memory
-		 * block belong to the same node.
+		 * We need to check if page belongs to nid only at the boot
+		 * case because node's ranges can be interleaved.
 		 */
-		if (system_state == SYSTEM_BOOTING) {
-			page_nid = get_nid_for_pfn(pfn);
-			if (page_nid < 0)
-				continue;
-			if (page_nid != nid)
-				continue;
-		}
-
-		/*
-		 * If this memory block spans multiple nodes, we only indicate
-		 * the last processed node.
-		 */
-		mem_blk->nid = nid;
-
-		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
-					       &mem_blk->dev.kobj,
-					       kobject_name(&mem_blk->dev.kobj));
-		if (ret)
-			return ret;
+		page_nid = get_nid_for_pfn(pfn);
+		if (page_nid < 0)
+			continue;
+		if (page_nid != nid)
+			continue;
 
-		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
-						&node_devices[nid]->dev.kobj,
-						kobject_name(&node_devices[nid]->dev.kobj));
+		return do_register_memory_block_under_node(nid, mem_blk);
 	}
 	/* mem section does not span the specified node */
 	return 0;
 }
 
+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+						 void *arg)
+{
+	int nid = *(int *)arg;
+
+	return do_register_memory_block_under_node(nid, mem_blk);
+}
+
 /*
  * Unregister a memory block device under the node it spans. Memory blocks
  * with multiple nodes cannot be offlined and therefore also never be removed.
@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }
 
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+		      enum meminit_context context)
 {
+	walk_memory_blocks_func_t func;
+
+	if (context == MEMINIT_HOTPLUG)
+		func = register_mem_block_under_node_hotplug;
+	else
+		func = register_mem_block_under_node_early;
+
 	return walk_memory_blocks(PFN_PHYS(start_pfn),
 				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
-				  register_mem_sect_under_node);
+				  func);
 }
 
 #ifdef CONFIG_HUGETLBFS
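With the explicit context argument, hotplug callers no longer depend on system_state. A sketch of the hotplug side (an assumption mirroring add_memory_resource() in mm/memory_hotplug.c, which is not shown in this diff; the mydrv_* name is invented):

static int mydrv_link_hotplugged_memory(int nid, u64 start, u64 size)
{
	/* every page in a hotplugged block belongs to @nid, so the
	 * per-pfn node check of the early path is not needed */
	return link_mem_sections(nid, PFN_DOWN(start),
				 PFN_DOWN(start + size),
				 MEMINIT_HOTPLUG);
}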
9 changes: 6 additions & 3 deletions drivers/nvme/host/core.c
@@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 	if (!cel)
 		return -ENOMEM;
 
-	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
 			   &cel->log, sizeof(cel->log), 0);
 	if (ret) {
 		kfree(cel);
@@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	if (!ctrl->identified)
-		nvme_hwmon_init(ctrl);
+	if (!ctrl->identified) {
+		ret = nvme_hwmon_init(ctrl);
+		if (ret < 0)
+			return ret;
+	}
 
 	ctrl->identified = true;
 
6 changes: 4 additions & 2 deletions drivers/nvme/host/fc.c
@@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn)
+		    lport->localport.port_name != laddr.pn ||
+		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn)
+			    rport->remoteport.port_name != raddr.pn ||
+			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
14 changes: 6 additions & 8 deletions drivers/nvme/host/hwmon.c
@@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
-	int ret;
-
-	ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
-
-	return ret <= 0 ? ret : -EIO;
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
@@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 	.info	= nvme_hwmon_info,
 };
 
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	struct device *dev = ctrl->dev;
 	struct nvme_hwmon_data *data;
@@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return;
+		return 0;
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(ctrl->device,
 			 "Failed to read smart log (error %d)\n", err);
 		devm_kfree(dev, data);
-		return;
+		return err;
 	}
 
 	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
@@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		devm_kfree(dev, data);
 	}
+
+	return 0;
 }
7 changes: 5 additions & 2 deletions drivers/nvme/host/nvme.h
@@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 
 #ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
 #else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+	return 0;
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
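Taken together with the hwmon.c and core.c hunks above, the new contract reads: 0 means hwmon was registered, quietly skipped (allocation failure), or compiled out via this stub; a negative value means the SMART log could not be read and controller initialization should fail. Condensed caller's view (a restatement of the diff, not new kernel code):

	if (!ctrl->identified) {
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)	/* controller did not answer the log read */
			return ret;
		/* ret == 0: sensors available, or hwmon skipped */
	}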