一:task_struct結構體分析
1、進程有兩種特殊形式:沒有用戶虛擬地址空間的進程叫內核線程,共享用戶虛擬地址空間的進程叫作用戶線程。共享同一個用戶虛擬地址空間的所有用戶線程叫線程組。
C語言標準庫　　　　　　　　　Linux內核
包括多個線程的進程　　　　　　線程組
只有一個線程的進程　　　　　　任務或進程
線程　　　　　　　　　　　　　共享用戶虛擬地址空間的進程
2、Linux內核提供API函數來設置進程狀態:
TASK_RUNNING(可運行狀態或就緒狀態)
TASK_INTERRUPTIBLE(可中斷睡眠狀態,又叫淺睡眠狀態)
TASK_UNINTERRUPTIBLE(不可中斷狀態,又叫深度睡眠狀態,我們可以通過ps命令查看被標記為D狀態的進程)
TASK_STOPPED(停止狀態)
EXIT_ZOMBIE(僵尸狀態)
3、Linux內核目錄結構
arch:不同平臺體系結構的相關代碼
block:塊設備I/O層相關代碼
documentation:描述模塊功能和協議規范
drivers:驅動程序(USB總線、PCI總線、網卡驅動、顯卡等))
fs:虛擬文件系統VFS代碼
include:內核源碼依賴的大部分頭文件
init:內核初始化代碼,直接關聯到內核各個組件入口
ipc:進程間通信實現
kernel:內核核心代碼(進程管理、IPC管理)
lib:C標準庫的子集
LICENSES:Linux內核根據LICENSES/preferred/GPL-2.0中提供的GNU通用許可版本2
mm:內存管理相關實現操作
net:網絡協議代碼(TCP、IPv6、Wifi等)
samples:內核實例代碼
sound:聲卡驅動源碼
tools:與內核交互
usr:用戶打包和壓縮內核的實現的源碼
virt:/kvm虛擬化目錄相關實現
4、Linux進程描述符task_struct結構體類型來描述,具體源碼分析如下:5.6.18
include/linux/sched.h
// 進程描述符
// Process descriptor: one instance per task (process or thread).
// Source: include/linux/sched.h, Linux 5.6.18.
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long state;	/* task state flag */

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void *stack;		/* points to the kernel stack */
	refcount_t usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int cpu;
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;
	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int recent_used_cpu;
	int wake_cpu;
#endif
	int on_rq;

	/* The next four members hold scheduling policy and priority: */
	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_UCLAMP_TASK
	/* Clamp values requested for a scheduling entity */
	struct uclamp_se uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a scheduling entity */
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	const cpumask_t *cpus_ptr;
	cpumask_t cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	u8 rcu_tasks_holdout;
	u8 rcu_tasks_idx;
	int rcu_tasks_idle_cpu;
	struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info sched_info;
	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	/*
	 * Memory descriptors:
	 * - user process: mm and active_mm point to the same descriptor;
	 * - kernel thread: mm is NULL, and while it runs, active_mm points
	 *   to the descriptor borrowed from a user process.
	 */
	struct mm_struct *mm;
	struct mm_struct *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endif
	int exit_state;
	int exit_code;
	int exit_signal;
	/* The signal sent when the parent dies: */
	int pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned :0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	/* to be used once the psi infrastructure lands upstream. */
	unsigned use_memdelay:1;
#endif

	unsigned long atomic_flags; /* Flags requiring atomic access. */

	struct restart_block restart_block;

	pid_t pid;	/* global process id */
	pid_t tgid;	/* global thread-group id */

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child,
	 * younger sibling, older sibling, respectively. (p->father can be
	 * replaced with p->real_parent->pid)
	 */
	/* Real parent process: */
	struct task_struct __rcu *real_parent;
	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu *parent;
	/* Children/sibling form the list of natural children: */
	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;	/* leader of the thread group */

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid *thread_pid;
	struct hlist_node pid_links[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user *set_child_tid;
	/* CLONE_CHILD_CLEARTID: */
	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled;
	u64 stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime vtime;
#endif
#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long nvcsw;
	unsigned long nivcsw;

	/* Monotonic time in nsecs: */
	u64 start_time;
	/* Boot based time in nsecs: */
	u64 start_boottime;

	/* MM fault and swap info: this can arguably be seen as either
	   mm-specific or thread-specific: */
	unsigned long min_flt;
	unsigned long maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* Process credentials: */
	/* Tracer's credentials at attach: */
	const struct cred __rcu *ptracer_cred;
	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu *real_cred;
	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu *cred;
#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key *cached_requested_key;
#endif
	/*
	 * executable name, excluding path.
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

	/* System V IPC: semaphores and shared memory. */
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
	unsigned long last_switch_time;
#endif
	/* Filesystem information (root and current working directory): */
	struct fs_struct *fs;

	/* Open file information (table of open files): */
	struct files_struct *files;

	/* Namespaces: */
	struct nsproxy *nsproxy;

	/* Signal handlers: */
	struct signal_struct *signal;
	struct sighand_struct __rcu *sighand;
	sigset_t blocked;
	sigset_t real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context *audit_context;
#endif
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking: */
	u64 parent_exec_id;
	u64 self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty,
	   keyrings, mems_allowed, mempolicy: */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct *pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	/* Journalling filesystem info: */
	void *journal_info;

	/* Stacked block device info: */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug *plug;
#endif

	/* VM state: */
	struct reclaim_state *reclaim_state;
	struct backing_dev_info *backing_dev_info;
	struct io_context *io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control *capture_control;
#endif
	/* Ptrace state: */
	unsigned long ptrace_message;
	kernel_siginfo_t *last_siginfo;

	struct task_io_accounting ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state */
	unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64 acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64 acct_vm_mem1;
	/* stime + utime since last update: */
	u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32 closid;
	u32 rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
	struct mutex futex_exit_mutex;
	unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy *mempolicy;
	short il_prev;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	/* Migration stamp: */
	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	/*
	 * This pointer is only modified for current in syscall and
	 * pagefault context (and for tasks being destroyed), so it can be
	 * read from any of the following contexts:
	 * - RCU read-side critical section
	 * - current->numa_group from everywhere
	 * - task's runqueue locked, task not running
	 */
	struct numa_group __rcu *numa_group;

	/*
	 * numa_faults is an array split into four regions, in this precise
	 * order: faults_memory, faults_cpu, faults_memory_buffer,
	 * faults_cpu_buffer.  faults_memory/faults_cpu are decaying
	 * averages used for placement decisions; the two buffers record
	 * faults per node during the current scan window and are folded in
	 * when the scan completes.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate; the task
	 * scan period is adapted based on the locality of the faults.
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_RSEQ
	struct rseq __user *rseq;
	u32 rseq_sig;
	/*
	 * RmW on rseq_event_mask must be performed atomically
	 * with respect to preemption.
	 */
	unsigned long rseq_event_mask;
#endif

	struct tlbflush_unmap_batch tlb_ubc;

	union {
		refcount_t rcu_users;
		struct rcu_head rcu;
	};

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
	unsigned int fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int curr_ret_stack;
	int curr_ret_depth;
	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack *ret_stack;
	/* Timestamp for last schedule: */
	unsigned long long ftrace_timestamp;
	/* Number of functions that haven't been traced because of depth
	   overrun: */
	atomic_t trace_overrun;
	/* Pause tracing: */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long trace;
	/* Bitmask and counter of trace recursion: */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* See kernel/kcov.c for more details. */
	/* Coverage collection mode enabled for this task (0 if disabled): */
	unsigned int kcov_mode;
	/* Size of the kcov_area: */
	unsigned int kcov_size;
	/* Buffer for coverage collection: */
	void *kcov_area;
	/* KCOV descriptor wired with this task or NULL: */
	struct kcov *kcov;
	/* KCOV common handle for remote coverage collection: */
	u64 kcov_handle;
	/* KCOV sequence number: */
	int kcov_sequence;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;
	/* Number of pages to reclaim on returning to userland: */
	unsigned int memcg_nr_pages_over_high;
	/* Used by memcontrol for targeted memcg charge: */
	struct mem_cgroup *active_memcg;
#endif
#ifdef CONFIG_BLK_CGROUP
	struct request_queue *throttle_queue;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void *security;
#endif
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long lowest_stack;
	unsigned long prev_lowest_stack;
#endif
	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure. It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};
5、進程優先級
// 下面4個成員為:進程調度策略和優先級
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
優先級 | 限期進程 | 實時進程 | 普通進程 |
prio調度優先級(數值越小,優先級越高) | 大多數情況下prio等于normal_prio。特殊情況下,如果進程X占有實時互斥鎖,進程Y正在等待鎖,進程Y的優先級比進程X優先級高,那么把X的優先級臨時提高到進程Y的優先級,即進程X的prio的值等于進程Y的prio值 | ||
static_prio靜態優先級 | 總是為0(無意義) | 總是為0(無意義) | 120+nice值,數值越小,表示優先級越高 |
normal_prio正常優先級 | -1 | 99-rt_priority | static_prio |
rt_priority實時優先級 | 總是為0(無意義) | 實時進程的優先級,范圍1-99,數值越大優先級越高 | 總是為0(無意義) |
6、內核線程:它是獨立運行在內核空間的進程,與普通用戶進程區別在于內核線程沒有獨立的地址空間。task_struct數據結構里面有一個成員指針mm設置為NULL,它只能獨立運行在內核空間。
二、進程調度CFS及4個調度類
1、調度:就是按照某種調度的算法設計,從進程的就緒隊列當中選取進程分配CPU,主要是協調對CPU等等相關的資源使用。進程調度目的:最大限度利用CPU時間。如果調度器支持就緒狀態切換到執行狀態,同時支持執行狀態切換到就緒狀態,稱該調度器為搶占式調度器。
2、調度類sched_class結構體源碼分析:
kernel/sched/sched.h
// 調度類sched_class結構體類型
// Scheduler class: the table of hooks each scheduler implements.
// Source: kernel/sched/sched.h, Linux 5.6.18.
struct sched_class {
	/* The kernel has several scheduling classes; they are chained in a
	   list ordered by scheduling priority. */
	const struct sched_class *next;

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif

	/* Add p to the run queue (insert the scheduling entity into the
	   red-black tree) and increment nr_running. */
	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	/* Remove p from the run queue and decrement nr_running. */
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	/* Give up the CPU: effectively dequeue then enqueue, which places
	   the scheduling entity at the rightmost end of the red-black tree. */
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);

	/* Check whether the current task may be preempted by p. */
	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/* Pick the next task to execute. */
	struct task_struct *(*pick_next_task)(struct rq *rq);

	/* Put the previously running task back into the run queue. */
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	/* Select a suitable CPU for p. */
	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	/* Migrate the task to another CPU. */
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
	/* Called when the task is woken up. */
	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
	/* Change the task's CPU affinity. */
	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);
	/* Bring a run queue online / take it offline. */
	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP 0
#define TASK_MOVE_GROUP 1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
};
3、調度器類可分為五種:
/* The five scheduler classes, listed from highest to lowest priority: */
extern const struct sched_class stop_sched_class;	// stop-machine scheduling class
extern const struct sched_class dl_sched_class;		// deadline scheduling class
extern const struct sched_class rt_sched_class;		// real-time scheduling class
extern const struct sched_class fair_sched_class;	// completely fair (CFS) scheduling class
extern const struct sched_class idle_sched_class;	// idle scheduling class
這5種調度類的優先級從高到低依次為:停機調度類-->期限調度類-->實時調度類-->公平調度類-->空閑調度類。
4、進程優先級,Linux內核優先級源碼
include/linux/sched/prio.h
// Linux內核優先級
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
5、進程分類
實時進程:優先級高、需要立即被執行的進程
普通進程:優先級低、更長執行時間的進程
進程的優先級用一個0~139的整數直接表示,數字越小優先級越高,其中優先級0-99留給實時進程,100-139留給普通進程。
6、內核調度策略
Linux內核提供一些調度策略供用戶應用程序來選擇調度器,Linux內核調度策略源碼如下:
include/uapi/linux/sched.h
/*
 * Scheduling policies (include/uapi/linux/sched.h).
 */
#define SCHED_NORMAL		0	// policy for normal processes
#define SCHED_FIFO		1	// real-time policy, first-in first-out
#define SCHED_RR		2	// real-time policy, round-robin
#define SCHED_BATCH		3	// policy for normal (batch) processes
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5	// policy for normal low-priority processes
#define SCHED_DEADLINE		6	// policy for deadline processes
三、RCU機制及內存優化屏障
1、RCU機制:應用場景是鏈表,有效地提高遍歷讀取數據的效率,讀取鏈表成員數據的時候通常只需要rcu_read_lock(),允許多個線程同時讀取鏈表,并且同時只允許一個線程修改鏈表。
2、RCU意思是讀-復制-更新。讀拷貝更新(RCU)模式添加鏈表項對應函數list_add_rcu(...)。讀拷貝更新(RCU)模式刪除鏈表項對應函數list_del_rcu(...)。讀拷貝更新(RCU)模式更新鏈表項對應函數list_replace_rcu(...)。
在整個操作過程中,有時要防止編譯器和CPU優化代碼執行順序,smp_wmb()保證在它之前的兩行代碼執行完畢之后再執行后兩行。
3、編譯器優化:為提高系統性能,編譯器在不影響邏輯的情況下會調整指令的執行順序。
4、CPU執行優化:為提高流水線的性能,CPU的亂序執行會讓后面無依賴沖突的指令先于前面的指令完成。
5、內存屏障:
內存屏障是一種保證內存訪問順序的方法,解決內存訪問亂序問題。
假設使用禁止內核搶占方法保護臨界區:
preempt_disable();
臨界區
preempt_enable();
臨界區
preempt_disable();
preempt_enable();
preempt_disable();
preempt_enable();
臨界區
6、GCC編譯器定義的宏
include/linux/compiler-gcc.h
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
關鍵字__volatile__告訴編譯器:禁止優化代碼,不能改變barrier()前面的代碼塊、barrier()和后面代碼塊這3個代碼塊的順序。
7、處理器內存屏障
處理器內存屏障解決CPU 之間的內存訪問亂序問題和處理器訪問外圍設備的亂序問題。
內存屏障類型 | 強制性的內存屏障 | SMP內存屏障 |
通用內存屏障 | mb() | smp_mb() |
寫內存屏障 | wmb() | smp_wmb() |
讀內存屏障 | rmb() | smp_rmb() |
數據依賴屏障 | read_barrier_depends() | smp_read_barrier_depends() |
除數據依賴屏障之外,所有處理器內存屏障隱含編譯器優化屏障。
參考鏈接:https://github.com/0voice