/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * NOTE(review): this structure is heavily overloaded via anonymous
 * unions — a page serves exactly one role at a time (pagecache/anon,
 * SLUB slab, page-table page, buddy free page, compound tail page),
 * and each role reinterprets the overlaid fields below.  Field order
 * and sizes are layout-critical: do not reorder or resize members.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		/*
		 * Count of ptes mapped in
		 * mms, to show when page is
		 * mapped & limit reverse map
		 * searches.
		 *
		 * Used also for tail pages
		 * refcounting instead of
		 * _count. Tail pages cannot
		 * be mapped and keeping the
		 * tail page _count zero at
		 * all times guarantees
		 * get_page_unless_zero() will
		 * never succeed on tail
		 * pages.
		 */
		atomic_t _mapcount;

		/*
		 * SLUB overlays its per-slab object accounting here;
		 * safe because slab pages are never mapped into user
		 * space, so _mapcount is unused for them.
		 */
		struct {		/* SLUB */
			u16 inuse;	/* objects currently allocated */
			u16 objects;	/* total objects in this slab */
		};
	};
	union {
	    /*
	     * Pagecache / anonymous-memory role: both fields of this
	     * inner struct are meaningful together, which is why they
	     * are grouped against the single-field alternatives below.
	     */
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if USE_SPLIT_PTLOCKS
	    spinlock_t ptl;		/* Page-table pages: per-page pte
					 * lock, only when split ptlocks
					 * are configured (and then only
					 * if the spinlock fits here —
					 * presumably enforced elsewhere;
					 * TODO confirm at build time). */
#endif
	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
	    struct page *first_page;	/* Compound tail pages: back-pointer
					 * to the head page */
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
};