[ SYSTEM ]: Linux srv.persadacompanies.com 4.18.0-553.56.1.el8_10.x86_64 #1 SMP Tue Jun 10 05:00:59 EDT 2025 x86_64
[ SERVER ]: Apache | PHP: 8.4.20
[ USER ]: persadamedika | IP: 45.64.1.108
GEFORCE FILE MANAGER
/
usr
/
src
/
kernels
/
4.18.0-553.56.1.el8_10.x86_64
/
include
/
linux
/
sched
/
UPLOAD:
NAME
SIZE
QUICK PERMS
ACTIONS
📄 autogroup.h
1,229 B
SET
[ EDIT ]
|
[ DEL ]
📄 clock.h
2,506 B
SET
[ EDIT ]
|
[ DEL ]
📄 coredump.h
3,241 B
SET
[ EDIT ]
|
[ DEL ]
📄 cpufreq.h
910 B
SET
[ EDIT ]
|
[ DEL ]
📄 cputime.h
5,349 B
SET
[ EDIT ]
|
[ DEL ]
📄 deadline.h
652 B
SET
[ EDIT ]
|
[ DEL ]
📄 debug.h
1,439 B
SET
[ EDIT ]
|
[ DEL ]
📄 hotplug.h
664 B
SET
[ EDIT ]
|
[ DEL ]
📄 idle.h
1,890 B
SET
[ EDIT ]
|
[ DEL ]
📄 init.h
240 B
SET
[ EDIT ]
|
[ DEL ]
📄 isolation.h
1,584 B
SET
[ EDIT ]
|
[ DEL ]
📄 jobctl.h
1,752 B
SET
[ EDIT ]
|
[ DEL ]
📄 loadavg.h
1,588 B
SET
[ EDIT ]
|
[ DEL ]
📄 mm.h
12,697 B
SET
[ EDIT ]
|
[ DEL ]
📄 nohz.h
907 B
SET
[ EDIT ]
|
[ DEL ]
📄 numa_balancing.h
1,292 B
SET
[ EDIT ]
|
[ DEL ]
📄 prio.h
1,754 B
SET
[ EDIT ]
|
[ DEL ]
📄 rt.h
1,259 B
SET
[ EDIT ]
|
[ DEL ]
📄 signal.h
21,112 B
SET
[ EDIT ]
|
[ DEL ]
📄 smt.h
415 B
SET
[ EDIT ]
|
[ DEL ]
📄 stat.h
967 B
SET
[ EDIT ]
|
[ DEL ]
📄 sysctl.h
2,860 B
SET
[ EDIT ]
|
[ DEL ]
📄 task.h
5,404 B
SET
[ EDIT ]
|
[ DEL ]
📄 task_stack.h
3,053 B
SET
[ EDIT ]
|
[ DEL ]
📄 topology.h
7,071 B
SET
[ EDIT ]
|
[ DEL ]
📄 types.h
683 B
SET
[ EDIT ]
|
[ DEL ]
📄 user.h
2,049 B
SET
[ EDIT ]
|
[ DEL ]
📄 wake_q.h
2,272 B
SET
[ EDIT ]
|
[ DEL ]
📄 xacct.h
854 B
SET
[ EDIT ]
|
[ DEL ]
DELETE SELECTED
[ CLOSE ]
EDIT: clock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * On architectures with a stable sched_clock(), the tick/stability hooks
 * are no-ops and both cpu_clock()/local_clock() collapse to sched_clock().
 */
static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(void)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */