The members of struct task_struct and struct mm_struct: just the tip of the iceberg

These structures are far too broad to cover in full; what follows looks at only a tiny corner of them.

task_struct




struct task_struct {
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags;    /* per process flags, defined below */
    unsigned int ptrace;

    int lock_depth;        /* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
    int oncpu;
#endif
#endif

    int prio, static_prio, normal_prio;
    struct list_head run_list;
    struct sched_entity se;

    unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
    unsigned int btrace_seq;
#endif

    unsigned int policy;
    cpumask_t cpus_allowed;
    unsigned int time_slice;
    struct sched_class *sched_class;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        atomic_t *rcu_flipctr1;
        atomic_t *rcu_flipctr2;
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
    struct sched_info sched_info;
#endif

    struct list_head tasks;
    /*
     * ptrace_list/ptrace_children forms the list of my children
     * that were stolen by a ptracer.
     */
    struct list_head ptrace_children;
    struct list_head ptrace_list;

    struct mm_struct *mm, *active_mm;

/* task state */
    struct linux_binfmt *binfmt;
    int exit_state;
    int exit_code, exit_signal;
    int pdeath_signal;  /*  The signal sent when the parent dies  */
    /* ??? */
    unsigned int personality;
    unsigned did_exec:1;
    pid_t pid;
    pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
    /* Canary value for the -fstack-protector gcc feature */
    unsigned long stack_canary;
#endif
    /*
     * pointers to (original) parent process, youngest child, younger sibling,
     * older sibling, respectively.  (p->father can be replaced with
     * p->parent->pid)
     */
    struct task_struct *real_parent; /* real parent process (when being debugged) */
    struct task_struct *parent;    /* parent process */
    /*
     * children/sibling forms the list of my children plus the
     * tasks I'm ptracing.
     */
    struct list_head children;    /* list of my children */
    struct list_head sibling;    /* linkage in my parent's children list */
    struct task_struct *group_leader;    /* threadgroup leader */

    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;

    struct completion *vfork_done;        /* for vfork() */
    int __user *set_child_tid;        /* CLONE_CHILD_SETTID */
    int __user *clear_child_tid;        /* CLONE_CHILD_CLEARTID */

    unsigned int rt_priority;
    cputime_t utime, stime;
    unsigned long nvcsw, nivcsw; /* context switch counts */
    struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
    unsigned long min_flt, maj_flt;

      cputime_t it_prof_expires, it_virt_expires;
    unsigned long long it_sched_expires;
    struct list_head cpu_timers[3];

    struct task_struct* posix_timer_list;

/* process credentials */
    uid_t uid,euid,suid,fsuid;
    gid_t gid,egid,sgid,fsgid;
    struct group_info *group_info;
    kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
    unsigned keep_capabilities:1;
    struct user_struct *user;
#ifdef CONFIG_KEYS
    struct key *request_key_auth;    /* assumed request_key authority */
    struct key *thread_keyring;    /* keyring private to this thread */
    unsigned char jit_keyring;    /* default keyring to attach requested keys to */
#endif
    /*
     * fpu_counter contains the number of consecutive context switches
     * that the FPU is used. If this is over a threshold, the lazy fpu
     * saving becomes unlazy to save the trap. This is an unsigned char
     * so that after 256 times the counter wraps and the behavior turns
     * lazy again; this to deal with bursty apps that only use FPU for
     * a short time
     */
    unsigned char fpu_counter;
    int oomkilladj; /* OOM kill score adjustment (bit shift). */
    char comm[TASK_COMM_LEN]; /* executable name excluding path
                     - access with [gs]et_task_comm (which lock
                       it with task_lock())
                     - initialized normally by flush_old_exec */
/* file system info */
    int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
    struct sysv_sem sysvsem;
#endif
/* CPU-specific state of this task */
    struct thread_struct thread;
/* filesystem information */
    struct fs_struct *fs;
/* open file information */
    struct files_struct *files;
/* namespaces */
    struct nsproxy *nsproxy;
/* signal handlers */
    struct signal_struct *signal;
    struct sighand_struct *sighand;

    sigset_t blocked, real_blocked;
    sigset_t saved_sigmask;        /* To be restored with TIF_RESTORE_SIGMASK */
    struct sigpending pending;

    unsigned long sas_ss_sp;
    size_t sas_ss_size;
    int (*notifier)(void *priv);
    void *notifier_data;
    sigset_t *notifier_mask;

    void *security;
    struct audit_context *audit_context;
    seccomp_t seccomp;

/* Thread group tracking */
       u32 parent_exec_id;
       u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
    spinlock_t alloc_lock;

    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
    /* PI waiters blocked on a rt_mutex held by this task */
    struct plist_head pi_waiters;
    /* Deadlock detection and priority inheritance handling */
    struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    /* mutex deadlock detection */
    struct mutex_waiter *blocked_on;
#endif
    int pagefault_disabled;
#ifdef CONFIG_TRACE_IRQFLAGS
    unsigned int irq_events;
    int hardirqs_enabled;
    unsigned long hardirq_enable_ip;
    unsigned int hardirq_enable_event;
    unsigned long hardirq_disable_ip;
    unsigned int hardirq_disable_event;
    int softirqs_enabled;
    unsigned long softirq_disable_ip;
    unsigned int softirq_disable_event;
    unsigned long softirq_enable_ip;
    unsigned int softirq_enable_event;
    int hardirq_context;
    int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
    u64 curr_chain_key;
    int lockdep_depth;
    struct held_lock held_locks[MAX_LOCK_DEPTH];
    unsigned int lockdep_recursion;
#endif

#define MAX_PREEMPT_TRACE 25

#ifdef CONFIG_PREEMPT_TRACE
    unsigned long preempt_trace_eip[MAX_PREEMPT_TRACE];
    unsigned long preempt_trace_parent_eip[MAX_PREEMPT_TRACE];
#endif

#define MAX_LOCK_STACK    MAX_PREEMPT_TRACE
#ifdef CONFIG_DEBUG_PREEMPT
    int lock_count;
# ifdef CONFIG_PREEMPT_RT
    struct rt_mutex *owned_lock[MAX_LOCK_STACK];
# endif
#endif
#ifdef CONFIG_DETECT_SOFTLOCKUP
    unsigned long    softlockup_count; /* Count to keep track how long the
                       *  thread is in the kernel without
                       *  sleeping.
                       */
#endif
    /* realtime bits */

#ifdef CONFIG_DEBUG_RT_MUTEXES
    void *last_kernel_lock;
#endif

/* journalling filesystem info */
    void *journal_info;

/* stacked block device info */
    struct bio *bio_list, **bio_tail;

/* VM state */
    struct reclaim_state *reclaim_state;

    struct backing_dev_info *backing_dev_info;

    struct io_context *io_context;

    unsigned long ptrace_message;
    siginfo_t *last_siginfo; /* For ptrace use.  */
/*
 * current io wait handle: wait queue entry to use for io waits
 * If this thread is processing aio, this points at the waitqueue
 * inside the currently handled kiocb. It may be NULL (i.e. default
 * to a stack based synchronous wait) if its doing sync IO.
 */
    wait_queue_t *io_wait;
#ifdef CONFIG_TASK_XACCT
/* i/o counters(bytes read/written, #syscalls */
    u64 rchar, wchar, syscr, syscw;
#endif
    struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
    u64 acct_rss_mem1;    /* accumulated rss usage */
    u64 acct_vm_mem1;    /* accumulated virtual memory usage */
    cputime_t acct_stimexpd;/* stime since last update */
#endif
#ifdef CONFIG_NUMA
      struct mempolicy *mempolicy;
    short il_next;
#endif
#ifdef CONFIG_CPUSETS
    struct cpuset *cpuset;
    nodemask_t mems_allowed;
    int cpuset_mems_generation;
    int cpuset_mem_spread_rotor;
#endif
    struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
    struct compat_robust_list_head __user *compat_robust_list;
#endif
    struct list_head pi_state_list;
    struct futex_pi_state *pi_state_cache;

    atomic_t fs_excl;    /* holding fs exclusive resources */
    struct rcu_head rcu;

    /*
     * cache last used pipe for splice
     */
    struct pipe_inode_info *splice_pipe;
#ifdef    CONFIG_TASK_DELAY_ACCT
    struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
    int make_it_fail;
#endif
#ifdef CONFIG_LTT_USERSPACE_GENERIC
    uint8_t ltt_facilities[LTT_FAC_PER_PROCESS];
#endif //CONFIG_LTT_USERSPACE_GENERIC
#ifdef CONFIG_PREEMPT_RT
    /*
     * Temporary hack, until we find a solution to
     * handle printk in atomic operations.
     */
    int in_printk;
#endif
#ifdef CONFIG_SUBSYSTEM
    int itron_tid;
    void (*hook)(void);
#endif
 };
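
Most of these fields can be read directly from a small kernel module. The following is a minimal sketch, not taken from any kernel source: the module and function names (taskwalk_init and so on) are made up, and it assumes a kernel of roughly the same vintage as the listing above. It walks the global task list with for_each_process and prints the pid, comm and parent members shown above.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>

static int __init taskwalk_init(void)
{
    struct task_struct *p;

    rcu_read_lock();                /* protect the traversal of the task list */
    for_each_process(p) {
        /* pid, comm and parent are all members shown in the listing above */
        printk(KERN_INFO "pid=%d comm=%s parent=%d\n",
               p->pid, p->comm, p->parent->pid);
    }
    rcu_read_unlock();
    return 0;
}

static void __exit taskwalk_exit(void)
{
}

module_init(taskwalk_init);
module_exit(taskwalk_exit);
MODULE_LICENSE("GPL");

Built as an ordinary out-of-tree module and loaded with insmod, it should print one line per process to the kernel log.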



An introduction to mm_struct

struct mm_struct {
    /*
        Head of the list of memory region (VMA) objects
    */
    struct vm_area_struct    *mmap;
    /*
        Red-black tree of the memory region objects
    */
    struct rb_root        mm_rb;
    /*
        The most recently referenced memory region object
    */
    struct vm_area_struct     *mmap_cache;
    /*
        Method that searches the process address space for a free
        interval of linear addresses
    */
    unsigned long (*get_unmapped_area)(struct file *filp,
            unsigned long addr,unsigned long len,
            unsigned long pgoff,unsigned long flags);
    /*
        Method invoked when a memory region is released
    */
    void (*unmap_area)(struct mm_struct *mm,unsigned long addr);
    /*
        Identifies the linear address of the first allocated anonymous
        memory region or file memory mapping
    */
    unsigned long mmap_base;
    /*
        Size of the task's virtual address space
    */
    unsigned long task_size;
    /*
        If non-zero, the largest memory hole below free_area_cache
    */
    unsigned long cached_hole_size;
    /*
        Address from which the kernel starts searching the process
        address space for a free interval of linear addresses
    */
    unsigned long free_area_cache;
    /*
        Pointer to the Page Global Directory
    */
    pgd_t *pgd;
    /*
        Number of lightweight processes that share this mm_struct
    */
    atomic_t mm_users;
    /*
        Main usage counter of the memory descriptor. Every time mm_count
        is decremented, the kernel checks whether it has become 0; if so,
        the memory descriptor is released, since no one is using it any more
    */
    atomic_t mm_count;
    /*
        Number of memory regions
    */
    int map_count;
    /*
        Read/write semaphore protecting the memory regions
    */
    struct rw_semaphore mmap_sem;
    /*
        Spin lock protecting the memory regions and the page tables
    */
    spinlock_t page_table_lock;
    /*
        Pointers to the adjacent elements in the list of memory descriptors;
        the first element is the mmlist field of init_mm
    */
    struct list_head mmlist;
    
    /*
        Number of resident file-backed and anonymous pages, respectively
    */
    mm_counter_t _file_rss;
    mm_counter_t _anon_rss;
    /*
        Maximum number of page frames ever owned by the process
    */
    unsigned long hiwater_rss;
    /*
        Maximum number of pages ever contained in the process's memory regions
    */
    unsigned long hiwater_vm;
    /*
        Size of the process address space in pages, number of "locked" pages
        that cannot be swapped out, number of pages in shared file memory
        mappings, and number of pages in executable memory mappings
    */
    unsigned long total_vm,locked_vm,shared_vm,exec_vm;
    /*
        Number of pages in the user-mode stack, number of pages in reserved
        or special memory regions, default access flags of memory regions,
        and number of page tables of this process
    */
    unsigned long stack_vm,reserved_vm,def_flags,nr_ptes;
    /*
        Start and end address of the executable code, and start and end
        address of the initialized data
    */
    unsigned long start_code,end_code,start_data,end_data;
    /*
        Start address of the heap, current final address of the heap,
        and start address of the user-mode stack
    */
    unsigned long start_brk,brk,start_stack;
    /*
        Start and end address of the command-line arguments, and start
        and end address of the environment variables
    */
    unsigned long arg_start,arg_end,env_start,env_end;
    /*
        saved_auxv is used when execution of an ELF program begins
    */
    unsigned long saved_auxv[AT_VECTOR_SIZE];
    /*
        Bit mask used for lazy TLB switching
    */
    cpumask_t    cpu_vm_mask;
    /*
        Architecture-specific memory management context
    */
    mm_context_t context;
    /*
        The following three fields belong to the swap-token (thrashing
        control) mechanism; see mm/thrash.c
    */
    unsigned int faultstamp;
    unsigned int token_priority;
    unsigned int last_interval;
    /*
        Flags of the memory descriptor (the bits must be accessed and set
        with atomic operations)
    */
    unsigned long flags;
    /*
        Number of lightweight processes that are dumping the contents of
        the process address space into the core dump file
    */
    int core_waiters;
    /*
        Completion primitives used when creating the core dump file
    */
    struct completion *core_startup_done,core_done;
    /*
        Lock protecting the list of asynchronous I/O contexts
    */
    rwlock_t ioctx_list_lock;
    /*
        List of asynchronous I/O contexts
    */
    struct kioctx *ioctx_list;
};
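
As with task_struct, the meaning of these fields is easiest to see by printing them for a live process. The sketch below is again only an illustration under the same assumptions (the module and function names such as mmdump_init are made up, and mmap_sem matches the kernel vintage of the listing). It runs in the context of the process that loads it, checks that current->mm is not NULL (kernel threads have no user address space), and takes mmap_sem for reading before examining the fields.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

static int __init mmdump_init(void)
{
    struct mm_struct *mm = current->mm;

    if (!mm)                        /* kernel threads have no user mm */
        return 0;

    down_read(&mm->mmap_sem);       /* protects the VMA list and the fields below */
    printk(KERN_INFO "code : %#lx-%#lx\n", mm->start_code, mm->end_code);
    printk(KERN_INFO "data : %#lx-%#lx\n", mm->start_data, mm->end_data);
    printk(KERN_INFO "heap : %#lx-%#lx\n", mm->start_brk, mm->brk);
    printk(KERN_INFO "stack: %#lx\n", mm->start_stack);
    printk(KERN_INFO "vmas : %d, total_vm=%lu pages\n",
           mm->map_count, mm->total_vm);
    up_read(&mm->mmap_sem);
    return 0;
}

static void __exit mmdump_exit(void)
{
}

module_init(mmdump_init);
module_exit(mmdump_exit);
MODULE_LICENSE("GPL");

The printed code, data, heap and stack ranges should correspond to what /proc/<pid>/maps reports for the insmod process that loaded the module.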


This is only a small part of what these two structures contain; much more content and many more details remain to be explored.
