satip-axe/kernel/mm/swapfile.c

/*
* linux/mm/swapfile.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
static int swap_overflow;
static int least_priority;
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";
static struct swap_list_t swap_list = {-1, -1};
static struct swap_info_struct swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
/* For reference count accounting in swap_map */
/* enum for swap_map[] handling. internal use only */
enum {
SWAP_MAP = 0, /* ops for reference from swap users */
SWAP_CACHE, /* ops for reference from swap cache */
};
static inline int swap_count(unsigned short ent)
{
return ent & SWAP_COUNT_MASK;
}
static inline bool swap_has_cache(unsigned short ent)
{
return !!(ent & SWAP_HAS_CACHE);
}
static inline unsigned short encode_swapmap(int count, bool has_cache)
{
unsigned short ret = count;
if (has_cache)
return SWAP_HAS_CACHE | ret;
return ret;
}
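/*
* Worked example of the encoding above (illustrative only, assuming
* just what the helpers imply: SWAP_HAS_CACHE lies outside the bits
* covered by SWAP_COUNT_MASK):
*
*   ent = encode_swapmap(3, true);
*   swap_count(ent)     == 3     (three swap-user references)
*   swap_has_cache(ent) == true  (plus one swap-cache reference)
*
* A slot is free only when both the count and the cache bit reach
* zero; that combined test is what swap_entry_free() below relies on.
*/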
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
int type = si - swap_info;
swp_entry_t entry = swp_entry(type, offset);
struct page *page;
int ret = 0;
page = find_get_page(&swapper_space, entry.val);
if (!page)
return 0;
/*
* This function is called from scan_swap_map(), which is reached
* from vmscan.c while reclaiming pages, so a page lock is already
* held at that point. We have to use trylock to avoid deadlock.
* This is a special case; in usual operations use try_to_free_swap()
* with an explicit lock_page().
*/
if (trylock_page(page)) {
ret = try_to_free_swap(page);
unlock_page(page);
}
page_cache_release(page);
return ret;
}
/*
* We need this because the bdev->unplug_fn can sleep and we cannot
* hold swap_lock while calling the unplug_fn. And swap_lock
* cannot be turned into a mutex.
*/
static DECLARE_RWSEM(swap_unplug_sem);
void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
swp_entry_t entry;
down_read(&swap_unplug_sem);
entry.val = page_private(page);
if (PageSwapCache(page)) {
struct block_device *bdev = swap_info[swp_type(entry)].bdev;
struct backing_dev_info *bdi;
/*
* If the page is removed from swapcache from under us (with a
* racy try_to_unuse/swapoff) we need an additional reference
* count to avoid reading garbage from page_private(page) above.
* If the WARN_ON triggers during a swapoff it may be the race
* condition and it's harmless. However, if it triggers without
* swapoff it signals a problem.
*/
WARN_ON(page_count(page) <= 1);
bdi = bdev->bd_inode->i_mapping->backing_dev_info;
blk_run_backing_dev(bdi, page);
}
up_read(&swap_unplug_sem);
}
/*
* swapon tells the device that all the old swap contents can be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
int err = 0;
list_for_each_entry(se, &si->extent_list, list) {
sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
if (se->start_page == 0) {
/* Do not discard the swap header page! */
start_block += 1 << (PAGE_SHIFT - 9);
nr_blocks -= 1 << (PAGE_SHIFT - 9);
if (!nr_blocks)
continue;
}
err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL,
DISCARD_FL_BARRIER);
if (err)
break;
cond_resched();
}
return err; /* That will often be -EOPNOTSUPP */
}
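/*
* Worked example of the sector arithmetic above, assuming 4 KiB pages
* (PAGE_SHIFT == 12): PAGE_SHIFT - 9 == 3, so each page spans
* 1 << 3 == 8 512-byte sectors. An extent whose start_page is 0 then
* skips sectors 0..7 (the swap header) and starts discarding at
* sector 8.
*/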
/*
* swap allocation tells the device that a cluster of swap can now be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static void discard_swap_cluster(struct swap_info_struct *si,
pgoff_t start_page, pgoff_t nr_pages)
{
struct swap_extent *se = si->curr_swap_extent;
int found_extent = 0;
while (nr_pages) {
struct list_head *lh;
if (se->start_page <= start_page &&
start_page < se->start_page + se->nr_pages) {
pgoff_t offset = start_page - se->start_page;
sector_t start_block = se->start_block + offset;
sector_t nr_blocks = se->nr_pages - offset;
if (nr_blocks > nr_pages)
nr_blocks = nr_pages;
start_page += nr_blocks;
nr_pages -= nr_blocks;
if (!found_extent++)
si->curr_swap_extent = se;
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_NOIO,
DISCARD_FL_BARRIER))
break;
}
lh = se->list.next;
if (lh == &si->extent_list)
lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
}
}
static int wait_for_discard(void *word)
{
schedule();
return 0;
}
#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256
static inline unsigned long scan_swap_map(struct swap_info_struct *si,
int cache)
{
unsigned long offset;
unsigned long scan_base;
unsigned long last_in_cluster = 0;
int latency_ration = LATENCY_LIMIT;
int found_free_cluster = 0;
/*
* We try to cluster swap pages by allocating them sequentially
* in swap. Once we've allocated SWAPFILE_CLUSTER pages this
* way, however, we resort to first-free allocation, starting
* a new cluster. This prevents us from scattering swap pages
* all over the entire swap partition, so that we reduce
* overall disk seek times between swap pages. -- sct
* But we do now try to find an empty cluster. -Andrea
* And we let swap pages go all over an SSD partition. Hugh
*/
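/*
* Rough illustration of the effect (not code): with SWAPFILE_CLUSTER
* == 256, a burst of 1024 swap-outs typically lands in four runs of
* 256 consecutive slots, costing roughly one seek per run instead of
* one per page on rotating media.
*/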
si->flags += SWP_SCANNING;
scan_base = offset = si->cluster_next;
if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
if (si->flags & SWP_DISCARDABLE) {
/*
* Start range check on racing allocations, in case
* they overlap the cluster we eventually decide on
* (we scan without swap_lock to allow preemption).
* It's hardly conceivable that cluster_nr could be
* wrapped during our scan, but don't depend on it.
*/
if (si->lowest_alloc)
goto checks;
si->lowest_alloc = si->max;
si->highest_alloc = 0;
}
spin_unlock(&swap_lock);
/*
* If seek is expensive, start searching for new cluster from
* start of partition, to minimize the span of allocated swap.
* But if seek is cheap, search from our current position, so
* that swap is allocated from all over the partition: if the
* Flash Translation Layer only remaps within limited zones,
* we don't want to wear out the first zone too quickly.
*/
if (!(si->flags & SWP_SOLIDSTATE))
scan_base = offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
/* Locate the first empty (unaligned) cluster */
for (; last_in_cluster <= si->highest_bit; offset++) {
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
spin_lock(&swap_lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
/* Locate the first empty (unaligned) cluster */
for (; last_in_cluster < scan_base; offset++) {
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
spin_lock(&swap_lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = scan_base;
spin_lock(&swap_lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
si->lowest_alloc = 0;
}
checks:
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
goto no_page;
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not hibernation. */
if (vm_swap_full()
&& cache == SWAP_CACHE
&& si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
spin_lock(&swap_lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed)
goto checks;
goto scan; /* check next one */
}
if (si->swap_map[offset])
goto scan;
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
si->inuse_pages++;
if (si->inuse_pages == si->pages) {
si->lowest_bit = si->max;
si->highest_bit = 0;
}
if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
si->swap_map[offset] = encode_swapmap(0, true);
else /* at suspend */
si->swap_map[offset] = encode_swapmap(1, false);
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
if (si->lowest_alloc) {
/*
* Only set when SWP_DISCARDABLE, and there's a scan
* for a free cluster in progress or just completed.
*/
if (found_free_cluster) {
/*
* To optimize wear-levelling, discard the
* old data of the cluster, taking care not to
* discard any of its pages that have already
* been allocated by racing tasks (offset has
* already stepped over any at the beginning).
*/
if (offset < si->highest_alloc &&
si->lowest_alloc <= last_in_cluster)
last_in_cluster = si->lowest_alloc - 1;
si->flags |= SWP_DISCARDING;
spin_unlock(&swap_lock);
if (offset < last_in_cluster)
discard_swap_cluster(si, offset,
last_in_cluster - offset + 1);
spin_lock(&swap_lock);
si->lowest_alloc = 0;
si->flags &= ~SWP_DISCARDING;
smp_mb(); /* wake_up_bit advises this */
wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
} else if (si->flags & SWP_DISCARDING) {
/*
* Delay using pages allocated by racing tasks
* until the whole discard has been issued. We
* could defer that delay until swap_writepage,
* but it's easier to keep this self-contained.
*/
spin_unlock(&swap_lock);
wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
wait_for_discard, TASK_UNINTERRUPTIBLE);
spin_lock(&swap_lock);
} else {
/*
* Note pages allocated by racing tasks while
* scan for a free cluster is in progress, so
* that its final discard can exclude them.
*/
if (offset < si->lowest_alloc)
si->lowest_alloc = offset;
if (offset > si->highest_alloc)
si->highest_alloc = offset;
}
}
return offset;
scan:
spin_unlock(&swap_lock);
while (++offset <= si->highest_bit) {
if (!si->swap_map[offset]) {
spin_lock(&swap_lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = si->lowest_bit;
while (++offset < scan_base) {
if (!si->swap_map[offset]) {
spin_lock(&swap_lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
spin_lock(&swap_lock);
no_page:
si->flags -= SWP_SCANNING;
return 0;
}
swp_entry_t get_swap_page(void)
{
struct swap_info_struct *si;
pgoff_t offset;
int type, next;
int wrapped = 0;
spin_lock(&swap_lock);
if (nr_swap_pages <= 0)
goto noswap;
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
si = swap_info + type;
next = si->next;
if (next < 0 ||
(!wrapped && si->prio != swap_info[next].prio)) {
next = swap_list.head;
wrapped++;
}
if (!si->highest_bit)
continue;
if (!(si->flags & SWP_WRITEOK))
continue;
swap_list.next = next;
/* This is called for allocating swap entry for cache */
offset = scan_swap_map(si, SWAP_CACHE);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
}
next = swap_list.next;
}
nr_swap_pages++;
noswap:
spin_unlock(&swap_lock);
return (swp_entry_t) {0};
}
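/*
* Minimal usage sketch (hypothetical caller, modelled on add_to_swap()
* in swap_state.c): a zero entry.val means no swap slot was available.
*
*   swp_entry_t entry = get_swap_page();
*   if (!entry.val)
*       return 0;       <- swap is full
*   ... add page to swap cache at entry; swap_free() it on failure ...
*/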
/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si;
pgoff_t offset;
spin_lock(&swap_lock);
si = swap_info + type;
if (si->flags & SWP_WRITEOK) {
nr_swap_pages--;
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, SWAP_MAP);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
}
nr_swap_pages++;
}
spin_unlock(&swap_lock);
return (swp_entry_t) {0};
}
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
struct swap_info_struct * p;
unsigned long offset, type;
if (!entry.val)
goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
p = & swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
if (offset >= p->max)
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
spin_lock(&swap_lock);
return p;
bad_free:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
bad_offset:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_device:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_nofile:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
return NULL;
}
static int swap_entry_free(struct swap_info_struct *p,
swp_entry_t ent, int cache)
{
unsigned long offset = swp_offset(ent);
int count = swap_count(p->swap_map[offset]);
bool has_cache;
has_cache = swap_has_cache(p->swap_map[offset]);
if (cache == SWAP_MAP) { /* dropping usage count of swap */
if (count < SWAP_MAP_MAX) {
count--;
p->swap_map[offset] = encode_swapmap(count, has_cache);
}
} else { /* dropping swap cache flag */
VM_BUG_ON(!has_cache);
p->swap_map[offset] = encode_swapmap(count, false);
}
/* return code. */
count = p->swap_map[offset];
/* free if no reference */
if (!count) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
if (p->prio > swap_info[swap_list.next].prio)
swap_list.next = p - swap_info;
nr_swap_pages++;
p->inuse_pages--;
}
if (!swap_count(count))
mem_cgroup_uncharge_swap(ent);
return count;
}
/*
* Caller has made sure that the swapdevice corresponding to entry
* is still around or has not been recycled.
*/
void swap_free(swp_entry_t entry)
{
struct swap_info_struct * p;
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, SWAP_MAP);
spin_unlock(&swap_lock);
}
}
/*
* Called after dropping swapcache to decrease refcnt to swap entries.
*/
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
int ret;
p = swap_info_get(entry);
if (p) {
ret = swap_entry_free(p, entry, SWAP_CACHE);
if (page) {
bool swapout;
if (ret)
swapout = true; /* the end of swap out */
else
swapout = false; /* no more swap users! */
mem_cgroup_uncharge_swapcache(page, entry, swapout);
}
spin_unlock(&swap_lock);
}
return;
}
/*
* How many references to page are currently swapped out?
*/
static inline int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
swp_entry_t entry;
entry.val = page_private(page);
p = swap_info_get(entry);
if (p) {
count = swap_count(p->swap_map[swp_offset(entry)]);
spin_unlock(&swap_lock);
}
return count;
}
/*
* We can write to an anon page without COW if there are no other references
* to it. And as a side-effect, free up its swap: because the old content
* on disk will never be read, and seeking back there to write new content
* later would only waste time away from clustering.
*/
int reuse_swap_page(struct page *page)
{
int count;
VM_BUG_ON(!PageLocked(page));
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
if (count == 1 && !PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
}
return count == 1;
}
/*
* If swap is getting full, or if there are no more mappings of this page,
* then try_to_free_swap is called to free its swap space.
*/
int try_to_free_swap(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
if (!PageSwapCache(page))
return 0;
if (PageWriteback(page))
return 0;
if (page_swapcount(page))
return 0;
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
}
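/*
* Usage sketch for the "usual operations" case noted above in
* __try_to_reclaim_swap() (illustrative only): callers outside page
* reclaim should take the page lock explicitly.
*
*   lock_page(page);
*   if (try_to_free_swap(page))
*       ...             <- swap slot freed, page left dirty
*   unlock_page(page);
*/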
/*
* Free the swap entry like above, but also try to
* free the page cache entry if it is the last user.
*/
int free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct *p;
struct page *page = NULL;
if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}
}
spin_unlock(&swap_lock);
}
if (page) {
/*
* Not mapped elsewhere, or swap space full? Free it!
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
(!page_mapped(page) || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
unlock_page(page);
page_cache_release(page);
}
return p != NULL;
}
#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
*
* @offset - number of the PAGE_SIZE-sized block of the device, starting
* from 0, in which the swap header is expected to be located.
*
* This is needed for the suspend to disk (aka swsusp).
*/
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
int i;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
for (i = 0; i < nr_swapfiles; i++) {
struct swap_info_struct *sis = swap_info + i;
if (!(sis->flags & SWP_WRITEOK))
continue;
if (!bdev) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
return i;
}
if (bdev == sis->bdev) {
struct swap_extent *se;
se = list_entry(sis->extent_list.next,
struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
return i;
}
}
}
spin_unlock(&swap_lock);
if (bdev)
bdput(bdev);
return -ENODEV;
}
/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
* This is needed for software suspend
*/
unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
if (type < nr_swapfiles) {
spin_lock(&swap_lock);
if (swap_info[type].flags & SWP_WRITEOK) {
n = swap_info[type].pages;
if (free)
n -= swap_info[type].inuse_pages;
}
spin_unlock(&swap_lock);
}
return n;
}
#endif
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
* force COW, vm_page_prot omits write permission from any private vma.
*/
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
struct mem_cgroup *ptr = NULL;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
ret = -ENOMEM;
goto out_nolock;
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
if (ret > 0)
mem_cgroup_cancel_charge_swapin(ptr);
ret = 0;
goto out;
}
inc_mm_counter(vma->vm_mm, anon_rss);
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
page_add_anon_rmap(page, vma, addr);
mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
/*
* Move the page to the active list so it is not
* immediately swapped out again after swapon.
*/
activate_page(page);
out:
pte_unmap_unlock(pte, ptl);
out_nolock:
return ret;
}
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
/*
* We don't actually need pte lock while scanning for swp_pte: since
* we hold page lock and mmap_sem, swp_pte cannot be inserted into the
* page table while we're scanning; though it could get zapped, and on
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
* preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before calling unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
goto out;
pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
out:
return ret;
}
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pmd_t *pmd;
unsigned long next;
int ret;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret)
return ret;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pud_t *pud;
unsigned long next;
int ret;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
if (ret)
return ret;
} while (pud++, addr = next, addr != end);
return 0;
}
static int unuse_vma(struct vm_area_struct *vma,
swp_entry_t entry, struct page *page)
{
pgd_t *pgd;
unsigned long addr, end, next;
int ret;
if (page->mapping) {
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
return 0;
else
end = addr + PAGE_SIZE;
} else {
addr = vma->vm_start;
end = vma->vm_end;
}
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
if (ret)
return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
static int unuse_mm(struct mm_struct *mm,
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
* Activate page so shrink_inactive_list is unlikely to unmap
* its ptes while lock is dropped, so swapoff can make progress.
*/
activate_page(page);
unlock_page(page);
down_read(&mm->mmap_sem);
lock_page(page);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
up_read(&mm->mmap_sem);
return (ret < 0)? ret: 0;
}
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
*/
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
unsigned int prev)
{
unsigned int max = si->max;
unsigned int i = prev;
int count;
/*
* No need for swap_lock here: we're just looking
* for whether an entry is in use, not modifying it; false
* hits are okay, and sys_swapoff() has already prevented new
* allocations from this area (while holding swap_lock).
*/
for (;;) {
if (++i >= max) {
if (!prev) {
i = 0;
break;
}
/*
* No entries in use at top of swap_map,
* loop back to start and recheck there.
*/
max = prev + 1;
prev = 0;
i = 1;
}
count = si->swap_map[i];
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
}
return i;
}
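/*
* Example walk (illustrative numbers): with max == 8 and only slot 2
* still in use, a call with prev == 5 scans slots 6 and 7, wraps (max
* becomes 6, i restarts at 1) and returns 2. After slot 2 is freed, a
* call with prev == 2 scans 3..7, wraps to recheck 1..2, finds
* nothing in use and returns 0.
*/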
/*
* We completely avoid races by reading each swap page in advance,
* and then search for the process using it. All the necessary
* page table adjustments can then be made atomically.
*/
static int try_to_unuse(unsigned int type)
{
struct swap_info_struct * si = &swap_info[type];
struct mm_struct *start_mm;
unsigned short *swap_map;
unsigned short swcount;
struct page *page;
swp_entry_t entry;
unsigned int i = 0;
int retval = 0;
int reset_overflow = 0;
int shmem;
/*
* When searching mms for an entry, a good strategy is to
* start at the first mm we freed the previous entry from
* (though actually we don't notice whether we or coincidence
* freed the entry). Initialize this start_mm with a hold.
*
* A simpler strategy would be to start at the last mm we
* freed the previous entry from; but that would take less
* advantage of mmlist ordering, which clusters forked mms
* together, child after parent. If we race with dup_mmap(), we
* prefer to resolve parent before child, lest we miss entries
* duplicated after we scanned child: using last mm would invert
* that. Though it's only a serious concern when an overflowed
* swap count is reset from SWAP_MAP_MAX, preventing a rescan.
*/
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
/*
* Keep on scanning until all entries have gone. Usually,
* one pass through swap_map is enough, but not necessarily:
* there are races when an instance of an entry might be missed.
*/
while ((i = find_next_to_unuse(si, i)) != 0) {
if (signal_pending(current)) {
retval = -EINTR;
break;
}
/*
* Get a page for the entry, using the existing swap
* cache page if there is one. Otherwise, get a clean
* page and read the swap into it.
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
page = read_swap_cache_async(entry,
GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page) {
/*
* Either swap_duplicate() failed because entry
* has been freed independently, and will not be
* reused since sys_swapoff() already disabled
* allocation from here, or alloc_page() failed.
*/
if (!*swap_map)
continue;
retval = -ENOMEM;
break;
}
/*
* Don't hold on to start_mm if it looks like exiting.
*/
if (atomic_read(&start_mm->mm_users) == 1) {
mmput(start_mm);
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
}
/*
* Wait for and lock page. When do_swap_page races with
* try_to_unuse, do_swap_page can handle the fault much
* faster than try_to_unuse can locate the entry. This
* apparently redundant "wait_on_page_locked" lets try_to_unuse
* defer to do_swap_page in such a case - in some tests,
* do_swap_page and try_to_unuse repeatedly compete.
*/
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
wait_on_page_writeback(page);
/*
* Remove all references to entry.
* Whenever we reach init_mm, there's no address space
* to search, but use it as a reminder to search shmem.
*/
shmem = 0;
swcount = *swap_map;
if (swap_count(swcount)) {
if (start_mm == &init_mm)
shmem = shmem_unuse(entry, page);
else
retval = unuse_mm(start_mm, entry, page);
}
if (swap_count(*swap_map)) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
struct mm_struct *new_start_mm = start_mm;
struct mm_struct *prev_mm = start_mm;
struct mm_struct *mm;
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
while (swap_count(*swap_map) && !retval && !shmem &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
continue;
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
cond_resched();
swcount = *swap_map;
if (!swap_count(swcount)) /* any usage ? */
;
else if (mm == &init_mm) {
set_start_mm = 1;
shmem = shmem_unuse(entry, page);
} else
retval = unuse_mm(mm, entry, page);
if (set_start_mm && *swap_map < swcount) {
mmput(new_start_mm);
atomic_inc(&mm->mm_users);
new_start_mm = mm;
set_start_mm = 0;
}
spin_lock(&mmlist_lock);
}
spin_unlock(&mmlist_lock);
mmput(prev_mm);
mmput(start_mm);
start_mm = new_start_mm;
}
if (shmem) {
/* page has already been unlocked and released */
if (shmem > 0)
continue;
retval = shmem;
break;
}
if (retval) {
unlock_page(page);
page_cache_release(page);
break;
}
/*
* How could swap count reach 0x7ffe ?
* There's no way to repeat a swap page within an mm
* (except in shmem, where it's the shared object which takes
* the reference count)?
* We believe SWAP_MAP_MAX cannot occur (if it did, unsigned
* short would be too small).
* If that's wrong, then we should worry more about
* exit_mmap() and do_munmap() cases described above:
* we might be resetting SWAP_MAP_MAX too early here.
* We know "Undead"s can happen, they're okay, so don't
* report them; but do report if we reset SWAP_MAP_MAX.
*/
/* We might release the lock_page() in unuse_mm(). */
if (!PageSwapCache(page) || page_private(page) != entry.val)
goto retry;
if (swap_count(*swap_map) == SWAP_MAP_MAX) {
spin_lock(&swap_lock);
*swap_map = encode_swapmap(0, true);
spin_unlock(&swap_lock);
reset_overflow = 1;
}
/*
* If a reference remains (rare), we would like to leave
* the page in the swap cache; but try_to_unmap could
* then re-duplicate the entry once we drop page lock,
* so we might loop indefinitely; also, that page could
* not be swapped out to other storage meanwhile. So:
* delete from cache even if there's another reference,
* after ensuring that the data has been saved to disk -
* since if the reference remains (rarer), it will be
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
*/
if (swap_count(*swap_map) &&
PageDirty(page) && PageSwapCache(page)) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
swap_writepage(page, &wbc);
lock_page(page);
wait_on_page_writeback(page);
}
/*
* It is conceivable that a racing task removed this page from
* swap cache just before we acquired the page lock at the top,
* or while we dropped it in unuse_mm(). The page might even
* be back in swap cache on another swap area: that we must not
* delete, since it may not have been written out to swap yet.
*/
if (PageSwapCache(page) &&
likely(page_private(page) == entry.val))
delete_from_swap_cache(page);
/*
* Since we could skip searching mms once the swap count went
* to 1, we did not mark any present ptes as dirty: we must
* mark the page dirty so shrink_page_list will preserve it.
*/
SetPageDirty(page);
retry:
unlock_page(page);
page_cache_release(page);
/*
* Make sure that we aren't completely killing
* interactive performance.
*/
cond_resched();
}
mmput(start_mm);
if (reset_overflow) {
printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
swap_overflow = 0;
}
return retval;
}
/*
* After a successful try_to_unuse, if no swap is now in use, we know
* we can empty the mmlist. swap_lock must be held on entry and exit.
* Note that mmlist_lock nests inside swap_lock, and an mm must be
* added to the mmlist just after page_duplicate - before would be racy.
*/
static void drain_mmlist(void)
{
struct list_head *p, *next;
unsigned int i;
for (i = 0; i < nr_swapfiles; i++)
if (swap_info[i].inuse_pages)
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
list_del_init(p);
spin_unlock(&mmlist_lock);
}
/*
* Use this swapdev's extent info to locate the (PAGE_SIZE) block which
* corresponds to page offset `offset'.
*/
sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
{
struct swap_extent *se = sis->curr_swap_extent;
struct swap_extent *start_se = se;
for ( ; ; ) {
struct list_head *lh;
if (se->start_page <= offset &&
offset < (se->start_page + se->nr_pages)) {
return se->start_block + (offset - se->start_page);
}
lh = se->list.next;
if (lh == &sis->extent_list)
lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
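/*
* Worked example (illustrative numbers): with a cached extent of
* { start_page = 100, nr_pages = 50, start_block = 5000 }, offset 120
* falls inside it and maps to block 5000 + (120 - 100) = 5020. An
* offset outside the cached extent walks se around the list until the
* covering extent is found, which then becomes the new cache.
*/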
#ifdef CONFIG_HIBERNATION
/*
* Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
* corresponding to given index in swap_info (swap type).
*/
sector_t swapdev_block(int swap_type, pgoff_t offset)
{
struct swap_info_struct *sis;
if (swap_type >= nr_swapfiles)
return 0;
sis = swap_info + swap_type;
return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
}
#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
while (!list_empty(&sis->extent_list)) {
struct swap_extent *se;
se = list_entry(sis->extent_list.next,
struct swap_extent, list);
list_del(&se->list);
kfree(se);
}
}
/*
* Add a block range (and the corresponding page range) into this swapdev's
* extent list. The extent list is kept sorted in page order.
*
* This function rather assumes that it is called in ascending page order.
*/
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
struct swap_extent *se;
struct swap_extent *new_se;
struct list_head *lh;
lh = sis->extent_list.prev; /* The highest page extent */
if (lh != &sis->extent_list) {
se = list_entry(lh, struct swap_extent, list);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
se->nr_pages += nr_pages;
return 0;
}
}
/*
* No merge. Insert a new extent, preserving ordering.
*/
new_se = kmalloc(sizeof(*se), GFP_KERNEL);
if (new_se == NULL)
return -ENOMEM;
new_se->start_page = start_page;
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
list_add_tail(&new_se->list, &sis->extent_list);
return 1;
}
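/*
* Merge example (illustrative numbers): after
* add_swap_extent(sis, 0, 10, 800), a following call
* add_swap_extent(sis, 10, 4, 810) merges, because 800 + 10 == 810
* keeps both ranges contiguous, leaving one extent of
* { start_page = 0, nr_pages = 14, start_block = 800 }. Had the new
* start_block differed from 810, a second extent would be appended.
*/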
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. An ordered list of swap extents
* is built at swapon time and is then used at swap_writepage/swap_readpage
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
* This is done so that the main operating code can treat S_ISBLK and S_ISREG
* swap files identically.
*
* Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
* extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
* swapfiles are handled *identically* after swapon time.
*
* For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
* and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
* some stray blocks are found which do not fall within the PAGE_SIZE alignment
* requirements, they are simply tossed out - we will never use those blocks
* for swapping.
*
* For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
* prevents root from shooting her foot off by ftruncating an in-use swapfile,
* which will scribble on the fs.
*
* The amount of disk space which a single swap extent represents varies.
* Typically it is in the 1-4 megabyte range. So we can have hundreds of
* extents in the list. To avoid much list walking, we cache the previous
* search location in `curr_swap_extent', and start new searches from there.
* This is extremely effective. The average number of iterations in
* map_swap_page() has been measured at about 0.3 per page. - akpm.
*/
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct inode *inode;
unsigned blocks_per_page;
unsigned long page_no;
unsigned blkbits;
sector_t probe_block;
sector_t last_block;
sector_t lowest_block = -1;
sector_t highest_block = 0;
int nr_extents = 0;
int ret;
inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
goto done;
}
blkbits = inode->i_blkbits;
blocks_per_page = PAGE_SIZE >> blkbits;
/*
* Map all the blocks into the extent list. This code doesn't try
* to be very smart.
*/
probe_block = 0;
page_no = 0;
last_block = i_size_read(inode) >> blkbits;
while ((probe_block + blocks_per_page) <= last_block &&
page_no < sis->max) {
unsigned block_in_page;
sector_t first_block;
first_block = bmap(inode, probe_block);
if (first_block == 0)
goto bad_bmap;
/*
* It must be PAGE_SIZE aligned on-disk
*/
if (first_block & (blocks_per_page - 1)) {
probe_block++;
goto reprobe;
}
for (block_in_page = 1; block_in_page < blocks_per_page;
block_in_page++) {
sector_t block;
block = bmap(inode, probe_block + block_in_page);
if (block == 0)
goto bad_bmap;
if (block != first_block + block_in_page) {
/* Discontiguity */
probe_block++;
goto reprobe;
}
}
first_block >>= (PAGE_SHIFT - blkbits);
if (page_no) { /* exclude the header page */
if (first_block < lowest_block)
lowest_block = first_block;
if (first_block > highest_block)
highest_block = first_block;
}
/*
* We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
*/
ret = add_swap_extent(sis, page_no, 1, first_block);
if (ret < 0)
goto out;
nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
ret = nr_extents;
*span = 1 + highest_block - lowest_block;
if (page_no == 0)
page_no = 1; /* force Empty message */
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
done:
sis->curr_swap_extent = list_entry(sis->extent_list.prev,
struct swap_extent, list);
goto out;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
out:
return ret;
}
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct * p = NULL;
unsigned short *swap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
char * pathname;
int i, type, prev;
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
pathname = getname(specialfile);
err = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
putname(pathname);
err = PTR_ERR(victim);
if (IS_ERR(victim))
goto out;
mapping = victim->f_mapping;
prev = -1;
spin_lock(&swap_lock);
for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
p = swap_info + type;
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping)
break;
}
prev = type;
}
if (type < 0) {
err = -EINVAL;
spin_unlock(&swap_lock);
goto out_dput;
}
if (!security_vm_enough_memory(p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
spin_unlock(&swap_lock);
goto out_dput;
}
if (prev < 0) {
swap_list.head = p->next;
} else {
swap_info[prev].next = p->next;
}
if (type == swap_list.next) {
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
if (p->prio < 0) {
for (i = p->next; i >= 0; i = swap_info[i].next)
swap_info[i].prio = p->prio--;
least_priority++;
}
nr_swap_pages -= p->pages;
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
spin_unlock(&swap_lock);
current->flags |= PF_OOM_ORIGIN;
err = try_to_unuse(type);
current->flags &= ~PF_OOM_ORIGIN;
if (err) {
/* re-insert swap space back into swap_list */
spin_lock(&swap_lock);
if (p->prio < 0)
p->prio = --least_priority;
prev = -1;
for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
if (p->prio >= swap_info[i].prio)
break;
prev = i;
}
p->next = i;
if (prev < 0)
swap_list.head = swap_list.next = p - swap_info;
else
swap_info[prev].next = p - swap_info;
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
spin_unlock(&swap_lock);
goto out_dput;
}
/* wait for any unplug function to finish */
down_write(&swap_unplug_sem);
up_write(&swap_unplug_sem);
destroy_swap_extents(p);
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
drain_mmlist();
/* wait for anyone still in scan_swap_map */
p->highest_bit = 0; /* cuts scans short */
while (p->flags >= SWP_SCANNING) {
spin_unlock(&swap_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&swap_lock);
}
swap_file = p->swap_file;
p->swap_file = NULL;
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
vfree(swap_map);
/* Destroy swap account information */
swap_cgroup_swapoff(type);
inode = mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
} else {
mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
err = 0;
out_dput:
filp_close(victim, NULL);
out:
return err;
}
#ifdef CONFIG_PROC_FS
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
struct swap_info_struct *ptr = swap_info;
int i;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
if (!l)
return SEQ_START_TOKEN;
for (i = 0; i < nr_swapfiles; i++, ptr++) {
if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
continue;
if (!--l)
return ptr;
}
return NULL;
}
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
struct swap_info_struct *ptr;
struct swap_info_struct *endptr = swap_info + nr_swapfiles;
if (v == SEQ_START_TOKEN)
ptr = swap_info;
else {
ptr = v;
ptr++;
}
for (; ptr < endptr; ptr++) {
if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
continue;
++*pos;
return ptr;
}
return NULL;
}
static void swap_stop(struct seq_file *swap, void *v)
{
mutex_unlock(&swapon_mutex);
}
static int swap_show(struct seq_file *swap, void *v)
{
struct swap_info_struct *ptr = v;
struct file *file;
int len;
if (ptr == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
file = ptr->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
ptr->pages << (PAGE_SHIFT - 10),
ptr->inuse_pages << (PAGE_SHIFT - 10),
ptr->prio);
return 0;
}
static const struct seq_operations swaps_op = {
.start = swap_start,
.next = swap_next,
.stop = swap_stop,
.show = swap_show
};
static int swaps_open(struct inode *inode, struct file *file)
{
return seq_open(file, &swaps_op);
}
static const struct file_operations proc_swaps_operations = {
.open = swaps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init procswaps_init(void)
{
proc_create("swaps", 0, NULL, &proc_swaps_operations);
return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */
#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
MAX_SWAPFILES_CHECK();
return 0;
}
late_initcall(max_swapfiles_check);
#endif
/*
* Written 01/25/92 by Simmule Turner, heavily changed by Linus.
*
* The swapon system call
*/
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct * p;
char *name = NULL;
struct block_device *bdev = NULL;
struct file *swap_file = NULL;
struct address_space *mapping;
unsigned int type;
int i, prev;
int error;
union swap_header *swap_header = NULL;
unsigned int nr_good_pages = 0;
int nr_extents = 0;
sector_t span;
unsigned long maxpages = 1;
unsigned long swapfilepages;
unsigned short *swap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
int did_down = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
spin_lock(&swap_lock);
p = swap_info;
for (type = 0 ; type < nr_swapfiles ; type++,p++)
if (!(p->flags & SWP_USED))
break;
error = -EPERM;
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
goto out;
}
if (type >= nr_swapfiles)
nr_swapfiles = type+1;
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->extent_list);
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
name = NULL;
goto bad_swap_2;
}
swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
error = PTR_ERR(swap_file);
if (IS_ERR(swap_file)) {
swap_file = NULL;
goto bad_swap_2;
}
p->swap_file = swap_file;
mapping = swap_file->f_mapping;
inode = mapping->host;
error = -EBUSY;
for (i = 0; i < nr_swapfiles; i++) {
struct swap_info_struct *q = &swap_info[i];
if (i == type || !q->swap_file)
continue;
if (mapping == q->swap_file->f_mapping)
goto bad_swap;
}
error = -EINVAL;
if (S_ISBLK(inode->i_mode)) {
bdev = I_BDEV(inode);
error = bd_claim(bdev, sys_swapon);
if (error < 0) {
bdev = NULL;
error = -EINVAL;
goto bad_swap;
}
p->old_block_size = block_size(bdev);
error = set_blocksize(bdev, PAGE_SIZE);
if (error < 0)
goto bad_swap;
p->bdev = bdev;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
mutex_lock(&inode->i_mutex);
did_down = 1;
if (IS_SWAPFILE(inode)) {
error = -EBUSY;
goto bad_swap;
}
} else {
goto bad_swap;
}
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
/*
* Read the swap header.
*/
if (!mapping->a_ops->readpage) {
error = -EINVAL;
goto bad_swap;
}
page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;
}
swap_header = kmap(page);
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
printk(KERN_ERR "Unable to find swap-space signature\n");
error = -EINVAL;
goto bad_swap;
}
/* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
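/*
* Example: a header written little-endian but read on a big-endian
* machine shows info.version == 0x01000000; swab32() of that is 1, so
* the block above byte-swaps every header field in place before use.
*/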
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
printk(KERN_WARNING
"Unable to handle swap header version %d\n",
swap_header->info.version);
error = -EINVAL;
goto bad_swap;
}
p->lowest_bit = 1;
p->cluster_next = 1;
/*
* Find out how many pages are allowed for a single swap
* device. There are two limiting factors: 1) the number of
* bits for the swap offset in the swp_entry_t type and
* 2) the number of bits in a swap pte as defined by
* the different architectures. In order to find the
* largest possible bit mask a swap entry with swap type 0
* and swap offset ~0UL is created, encoded to a swap pte,
* decoded to a swp_entry_t again and finally the swap
* offset is extracted. This will mask all the bits from
* the initial ~0UL mask that can't be encoded in either
* the swp_entry_t or the architecture definition of a
* swap pte.
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1;
if (maxpages > swap_header->info.last_page)
maxpages = swap_header->info.last_page;
p->highest_bit = maxpages - 1;
error = -EINVAL;
if (!maxpages)
goto bad_swap;
if (swapfilepages && maxpages > swapfilepages) {
printk(KERN_WARNING
"Swap area shorter than signature indicates\n");
goto bad_swap;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
goto bad_swap;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
/* OK, set up the swap map and apply the bad block list */
swap_map = vmalloc(maxpages * sizeof(short));
if (!swap_map) {
error = -ENOMEM;
goto bad_swap;
}
memset(swap_map, 0, maxpages * sizeof(short));
for (i = 0; i < swap_header->info.nr_badpages; i++) {
int page_nr = swap_header->info.badpages[i];
if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
error = -EINVAL;
goto bad_swap;
}
swap_map[page_nr] = SWAP_MAP_BAD;
}
error = swap_cgroup_swapon(type, maxpages);
if (error)
goto bad_swap;
nr_good_pages = swap_header->info.last_page -
swap_header->info.nr_badpages -
1 /* header page */;
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, &span);
if (nr_extents < 0) {
error = nr_extents;
goto bad_swap;
}
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
printk(KERN_WARNING "Empty swap-file\n");
error = -EINVAL;
goto bad_swap;
}
if (p->bdev) {
if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
p->flags |= SWP_SOLIDSTATE;
p->cluster_next = 1 + (random32() % p->highest_bit);
}
if (discard_swap(p) == 0)
p->flags |= SWP_DISCARDABLE;
}
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
if (swap_flags & SWAP_FLAG_PREFER)
p->prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
else
p->prio = --least_priority;
p->swap_map = swap_map;
p->flags |= SWP_WRITEOK;
nr_swap_pages += nr_good_pages;
total_swap_pages += nr_good_pages;
printk(KERN_INFO "Adding %uk swap on %s. "
"Priority:%d extents:%d across:%lluk %s%s\n",
nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
(p->flags & SWP_DISCARDABLE) ? "D" : "");
/* insert swap space into swap_list: */
prev = -1;
for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
if (p->prio >= swap_info[i].prio) {
break;
}
prev = i;
}
p->next = i;
if (prev < 0) {
swap_list.head = swap_list.next = p - swap_info;
} else {
swap_info[prev].next = p - swap_info;
}
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
error = 0;
goto out;
bad_swap:
if (bdev) {
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
}
destroy_swap_extents(p);
swap_cgroup_swapoff(type);
bad_swap_2:
spin_lock(&swap_lock);
p->swap_file = NULL;
p->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
if (swap_file)
filp_close(swap_file, NULL);
out:
if (page && !IS_ERR(page)) {
kunmap(page);
page_cache_release(page);
}
if (name)
putname(name);
if (did_down) {
if (!error)
inode->i_flags |= S_SWAPFILE;
mutex_unlock(&inode->i_mutex);
}
return error;
}
void si_swapinfo(struct sysinfo *val)
{
unsigned int i;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
for (i = 0; i < nr_swapfiles; i++) {
if (!(swap_info[i].flags & SWP_USED) ||
(swap_info[i].flags & SWP_WRITEOK))
continue;
nr_to_be_unused += swap_info[i].inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
/*
* Verify that a swap entry is valid and increment its swap map count.
*
* Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
* "permanent", but will be reclaimed by the next swapoff.
* Return values:
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
*/
static int __swap_duplicate(swp_entry_t entry, bool cache)
{
struct swap_info_struct * p;
unsigned long offset, type;
int result = -EINVAL;
int count;
bool has_cache;
if (non_swap_entry(entry))
return -EINVAL;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
p = type + swap_info;
offset = swp_offset(entry);
spin_lock(&swap_lock);
if (unlikely(offset >= p->max))
goto unlock_out;
count = swap_count(p->swap_map[offset]);
has_cache = swap_has_cache(p->swap_map[offset]);
if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
if (!has_cache && count) {
p->swap_map[offset] = encode_swapmap(count, true);
result = 0;
} else if (has_cache) /* someone added cache */
result = -EEXIST;
else if (!count) /* no users */
result = -ENOENT;
} else if (count || has_cache) {
if (count < SWAP_MAP_MAX - 1) {
p->swap_map[offset] = encode_swapmap(count + 1,
has_cache);
result = 0;
} else if (count <= SWAP_MAP_MAX) {
if (swap_overflow++ < 5)
printk(KERN_WARNING
"swap_dup: swap entry overflow\n");
p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
has_cache);
result = 0;
}
} else
result = -ENOENT; /* unused swap entry */
unlock_out:
spin_unlock(&swap_lock);
out:
return result;
bad_file:
printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
/*
* increase reference count of swap entry by 1.
*/
void swap_duplicate(swp_entry_t entry)
{
__swap_duplicate(entry, SWAP_MAP);
}
/*
* @entry: swap entry for which we allocate swap cache.
*
* Called when allocating swap cache for an existing swap entry.
* This can return error codes. Returns 0 on success.
* -EEXIST means there is already a swap cache for the entry.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
{
return __swap_duplicate(entry, SWAP_CACHE);
}
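/*
* Usage sketch (modelled on the swapin-readahead path in swap_state.c,
* illustrative only): the cache reference is claimed before the page
* is inserted, and the caller backs off on failure.
*
*   err = swapcache_prepare(entry);
*   if (err == -EEXIST)
*       ...             <- raced with another swapin, retry
*   else if (err)
*       ...             <- -ENOENT: entry freed under us, give up
*   else
*       ...             <- safe to add the page to the swap cache
*/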
struct swap_info_struct *
get_swap_info_struct(unsigned type)
{
return &swap_info[type];
}
/*
* swap_lock prevents swap_map being freed. Don't grab an extra
* reference on the swaphandle; it doesn't matter if it becomes unused.
*/
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
struct swap_info_struct *si;
int our_page_cluster = page_cluster;
pgoff_t target, toff;
pgoff_t base, end;
int nr_pages = 0;
if (!our_page_cluster) /* no readahead */
return 0;
si = &swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
if (!base) /* first page is swap header */
base++;
spin_lock(&swap_lock);
if (end > si->max) /* don't go beyond end of map */
end = si->max;
/* Count contiguous allocated slots above our target */
for (toff = target; ++toff < end; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
/* Count contiguous allocated slots below our target */
for (toff = target; --toff >= base; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
spin_unlock(&swap_lock);
/*
* Indicate starting offset, and return number of pages to get:
* if only 1, say 0, since there's then no readahead to be done.
*/
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
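/*
* Worked example, assuming page_cluster == 3: the window is
* 1 << 3 == 8 slots and base = (target >> 3) << 3. For target == 21
* that gives base == 16, end == 24; if slots 19..23 are allocated and
* 16..18 are free, the upward scan counts 22..23, the downward scan
* counts 20..19 and stops at 18, so *offset becomes 19 and the return
* value is 5 (pages 19..23, the ++nr_pages adding the target itself).
*/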