From 4592cacfdb1961bd2fb9c28fea02ec564c77c975 Mon Sep 17 00:00:00 2001 From: Park Ju Hyung Date: Tue, 1 Oct 2019 14:02:19 +0900 Subject: [PATCH 01/15] ext4: remove additional tracings added by CAF Signed-off-by: Park Ju Hyung --- fs/ext4/inline.c | 14 ------------ fs/ext4/inode.c | 54 ---------------------------------------------- fs/ext4/readpage.c | 47 ++++------------------------------------ 3 files changed, 4 insertions(+), 111 deletions(-) diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 4b209479a468..3e2fabd53950 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -12,7 +12,6 @@ #include "ext4.h" #include "xattr.h" #include "truncate.h" -#include #define EXT4_XATTR_SYSTEM_DATA "data" #define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS)) @@ -509,17 +508,6 @@ int ext4_readpage_inline(struct inode *inode, struct page *page) return -EAGAIN; } - if (trace_android_fs_dataread_start_enabled()) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - inode); - trace_android_fs_dataread_start(inode, page_offset(page), - PAGE_SIZE, current->pid, - path, current->comm); - } - /* * Current inline data can only exist in the 1st page, * So for all the other pages, just set them uptodate. @@ -531,8 +519,6 @@ int ext4_readpage_inline(struct inode *inode, struct page *page) SetPageUptodate(page); } - trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE); - up_read(&EXT4_I(inode)->xattr_sem); unlock_page(page); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3c5b75c170f2..7f22b516b742 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -47,7 +47,6 @@ #include "truncate.h" #include -#include #define MPAGE_DA_EXTENT_TAIL 0x01 @@ -1288,16 +1287,6 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; - if (trace_android_fs_datawrite_start_enabled()) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - inode); - trace_android_fs_datawrite_start(inode, pos, len, - current->pid, path, - current->comm); - } trace_ext4_write_begin(inode, pos, len, flags); /* * Reserve one block more for addition to orphan list in case @@ -1440,7 +1429,6 @@ static int ext4_write_end(struct file *file, int inline_data = ext4_has_inline_data(inode); bool verity = ext4_verity_in_progress(inode); - trace_android_fs_datawrite_end(inode, pos, len); trace_ext4_write_end(inode, pos, len, copied); if (inline_data) { ret = ext4_write_inline_data_end(inode, pos, len, @@ -1553,7 +1541,6 @@ static int ext4_journalled_write_end(struct file *file, int inline_data = ext4_has_inline_data(inode); bool verity = ext4_verity_in_progress(inode); - trace_android_fs_datawrite_end(inode, pos, len); trace_ext4_journalled_write_end(inode, pos, len, copied); from = pos & (PAGE_SIZE - 1); to = from + len; @@ -3111,16 +3098,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, len, flags, pagep, fsdata); } *fsdata = (void *)0; - if (trace_android_fs_datawrite_start_enabled()) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - inode); - trace_android_fs_datawrite_start(inode, pos, len, - current->pid, - path, current->comm); - } trace_ext4_da_write_begin(inode, pos, len, flags); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { @@ -3239,7 +3216,6 @@ static int 
ext4_da_write_end(struct file *file, return ext4_write_end(file, mapping, pos, len, copied, page, fsdata); - trace_android_fs_datawrite_end(inode, pos, len); trace_ext4_da_write_end(inode, pos, len, copied); start = pos & (PAGE_SIZE - 1); end = start + copied - 1; @@ -3945,7 +3921,6 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) size_t count = iov_iter_count(iter); loff_t offset = iocb->ki_pos; ssize_t ret; - int rw = iov_iter_rw(iter); if (!fscrypt_dio_supported(iocb, iter)) return 0; @@ -3963,28 +3938,6 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) if (ext4_has_inline_data(inode)) return 0; - if (trace_android_fs_dataread_start_enabled() && - (rw == READ)) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - inode); - trace_android_fs_dataread_start(inode, offset, count, - current->pid, path, - current->comm); - } - if (trace_android_fs_datawrite_start_enabled() && - (rw == WRITE)) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - inode); - trace_android_fs_datawrite_start(inode, offset, count, - current->pid, path, - current->comm); - } trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); if (iov_iter_rw(iter) == READ) ret = ext4_direct_IO_read(iocb, iter); @@ -3992,13 +3945,6 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ret = ext4_direct_IO_write(iocb, iter); trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); - if (trace_android_fs_dataread_start_enabled() && - (rw == READ)) - trace_android_fs_dataread_end(inode, offset, count); - if (trace_android_fs_datawrite_start_enabled() && - (rw == WRITE)) - trace_android_fs_datawrite_end(inode, offset, count); - return ret; } diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index fd1a1e2e7df1..2d65c5ce7b14 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -46,7 +46,6 @@ #include #include "ext4.h" -#include #define NUM_PREALLOC_POST_READ_CTXS 128 @@ -160,17 +159,6 @@ static bool bio_post_read_required(struct bio *bio) return bio->bi_private && !bio->bi_status; } -static void -ext4_trace_read_completion(struct bio *bio) -{ - struct page *first_page = bio->bi_io_vec[0].bv_page; - - if (first_page != NULL) - trace_android_fs_dataread_end(first_page->mapping->host, - page_offset(first_page), - bio->bi_iter.bi_size); -} - /* * I/O completion handler for multipage BIOs. 
* @@ -185,9 +173,6 @@ ext4_trace_read_completion(struct bio *bio) */ static void mpage_end_io(struct bio *bio) { - if (trace_android_fs_dataread_start_enabled()) - ext4_trace_read_completion(bio); - if (bio_post_read_required(bio)) { struct bio_post_read_ctx *ctx = bio->bi_private; @@ -237,30 +222,6 @@ static inline loff_t ext4_readpage_limit(struct inode *inode) return i_size_read(inode); } -static void -ext4_submit_bio_read(struct bio *bio) -{ - if (trace_android_fs_dataread_start_enabled()) { - struct page *first_page = bio->bi_io_vec[0].bv_page; - - if (first_page != NULL) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - first_page->mapping->host); - trace_android_fs_dataread_start( - first_page->mapping->host, - page_offset(first_page), - bio->bi_iter.bi_size, - current->pid, - path, - current->comm); - } - } - submit_bio(bio); -} - int ext4_mpage_readpages(struct address_space *mapping, struct list_head *pages, struct page *page, unsigned nr_pages, bool is_readahead) @@ -409,7 +370,7 @@ int ext4_mpage_readpages(struct address_space *mapping, if (bio && (last_block_in_bio != blocks[0] - 1 || !fscrypt_mergeable_bio(bio, inode, next_block))) { submit_and_realloc: - ext4_submit_bio_read(bio); + submit_bio(bio); bio = NULL; } if (bio == NULL) { @@ -442,14 +403,14 @@ int ext4_mpage_readpages(struct address_space *mapping, if (((map.m_flags & EXT4_MAP_BOUNDARY) && (relative_block == map.m_len)) || (first_hole != blocks_per_page)) { - ext4_submit_bio_read(bio); + submit_bio(bio); bio = NULL; } else last_block_in_bio = blocks[blocks_per_page - 1]; goto next_page; confused: if (bio) { - ext4_submit_bio_read(bio); + submit_bio(bio); bio = NULL; } if (!PageUptodate(page)) @@ -462,7 +423,7 @@ int ext4_mpage_readpages(struct address_space *mapping, } BUG_ON(pages && !list_empty(pages)); if (bio) - ext4_submit_bio_read(bio); + submit_bio(bio); return 0; } From f00e7007ce8704fd477a792a90618890267e016e Mon Sep 17 00:00:00 2001 From: Park Ju Hyung Date: Tue, 1 Oct 2019 14:05:36 +0900 Subject: [PATCH 02/15] fs: remove remaining android_fs tracings Signed-off-by: Park Ju Hyung --- fs/mpage.c | 36 ------------ include/trace/events/android_fs.h | 66 ---------------------- include/trace/events/android_fs_template.h | 64 --------------------- 3 files changed, 166 deletions(-) delete mode 100644 include/trace/events/android_fs.h delete mode 100644 include/trace/events/android_fs_template.h diff --git a/fs/mpage.c b/fs/mpage.c index f48de1f9bd24..ccba3c4c4479 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -32,14 +32,6 @@ #include #include "internal.h" -#define CREATE_TRACE_POINTS -#include - -EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start); -EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end); -EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start); -EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end); - /* * I/O completion handler for multipage BIOs. 
* @@ -57,16 +49,6 @@ static void mpage_end_io(struct bio *bio) struct bio_vec *bv; struct bvec_iter_all iter_all; - if (trace_android_fs_dataread_end_enabled() && - (bio_data_dir(bio) == READ)) { - struct page *first_page = bio->bi_io_vec[0].bv_page; - - if (first_page != NULL) - trace_android_fs_dataread_end(first_page->mapping->host, - page_offset(first_page), - bio->bi_iter.bi_size); - } - bio_for_each_segment_all(bv, bio, iter_all) { struct page *page = bv->bv_page; page_endio(page, bio_op(bio), @@ -78,24 +60,6 @@ static void mpage_end_io(struct bio *bio) static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) { - if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) { - struct page *first_page = bio->bi_io_vec[0].bv_page; - - if (first_page != NULL) { - char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; - - path = android_fstrace_get_pathname(pathbuf, - MAX_TRACE_PATHBUF_LEN, - first_page->mapping->host); - trace_android_fs_dataread_start( - first_page->mapping->host, - page_offset(first_page), - bio->bi_iter.bi_size, - current->pid, - path, - current->comm); - } - } bio->bi_end_io = mpage_end_io; bio_set_op_attrs(bio, op, op_flags); guard_bio_eod(bio); diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h deleted file mode 100644 index 7edb6bcfe482..000000000000 --- a/include/trace/events/android_fs.h +++ /dev/null @@ -1,66 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM android_fs - -#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_ANDROID_FS_H - -#include -#include -#include - -DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start, - TP_PROTO(struct inode *inode, loff_t offset, int bytes, - pid_t pid, char *pathname, char *command), - TP_ARGS(inode, offset, bytes, pid, pathname, command)); - -DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end, - TP_PROTO(struct inode *inode, loff_t offset, int bytes), - TP_ARGS(inode, offset, bytes)); - -DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start, - TP_PROTO(struct inode *inode, loff_t offset, int bytes, - pid_t pid, char *pathname, char *command), - TP_ARGS(inode, offset, bytes, pid, pathname, command)); - -DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end, - TP_PROTO(struct inode *inode, loff_t offset, int bytes), - TP_ARGS(inode, offset, bytes)); - -#endif /* _TRACE_ANDROID_FS_H */ - -/* This part must be outside protection */ -#include - -#ifndef ANDROID_FSTRACE_GET_PATHNAME -#define ANDROID_FSTRACE_GET_PATHNAME - -/* Sizes an on-stack array, so careful if sizing this up ! */ -#define MAX_TRACE_PATHBUF_LEN 256 - -static inline char * -android_fstrace_get_pathname(char *buf, int buflen, struct inode *inode) -{ - char *path; - struct dentry *d; - - /* - * d_obtain_alias() will either iput() if it locates an existing - * dentry or transfer the reference to the new dentry created. - * So get an extra reference here. 
- */ - ihold(inode); - d = d_obtain_alias(inode); - if (likely(!IS_ERR(d))) { - path = dentry_path_raw(d, buf, buflen); - if (unlikely(IS_ERR(path))) { - strcpy(buf, "ERROR"); - path = buf; - } - dput(d); - } else { - strcpy(buf, "ERROR"); - path = buf; - } - return path; -} -#endif diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h deleted file mode 100644 index b23d17b56c63..000000000000 --- a/include/trace/events/android_fs_template.h +++ /dev/null @@ -1,64 +0,0 @@ -#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_ANDROID_FS_TEMPLATE_H - -#include - -DECLARE_EVENT_CLASS(android_fs_data_start_template, - TP_PROTO(struct inode *inode, loff_t offset, int bytes, - pid_t pid, char *pathname, char *command), - TP_ARGS(inode, offset, bytes, pid, pathname, command), - TP_STRUCT__entry( - __string(pathbuf, pathname); - __field(loff_t, offset); - __field(int, bytes); - __field(loff_t, i_size); - __string(cmdline, command); - __field(pid_t, pid); - __field(ino_t, ino); - ), - TP_fast_assign( - { - /* - * Replace the spaces in filenames and cmdlines - * because this screws up the tooling that parses - * the traces. - */ - __assign_str(pathbuf, pathname); - (void)strreplace(__get_str(pathbuf), ' ', '_'); - __entry->offset = offset; - __entry->bytes = bytes; - __entry->i_size = i_size_read(inode); - __assign_str(cmdline, command); - (void)strreplace(__get_str(cmdline), ' ', '_'); - __entry->pid = pid; - __entry->ino = inode->i_ino; - } - ), - TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s," - " pid %d, i_size %llu, ino %lu", - __get_str(pathbuf), __entry->offset, __entry->bytes, - __get_str(cmdline), __entry->pid, __entry->i_size, - (unsigned long) __entry->ino) -); - -DECLARE_EVENT_CLASS(android_fs_data_end_template, - TP_PROTO(struct inode *inode, loff_t offset, int bytes), - TP_ARGS(inode, offset, bytes), - TP_STRUCT__entry( - __field(ino_t, ino); - __field(loff_t, offset); - __field(int, bytes); - ), - TP_fast_assign( - { - __entry->ino = inode->i_ino; - __entry->offset = offset; - __entry->bytes = bytes; - } - ), - TP_printk("ino %lu, offset %llu, bytes %d", - (unsigned long) __entry->ino, - __entry->offset, __entry->bytes) -); - -#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */ From 940cb34e4e72cb14cc3f6a769c3d2e3c8679957d Mon Sep 17 00:00:00 2001 From: Kazuki Hashimoto Date: Mon, 14 Feb 2022 07:05:28 +0900 Subject: [PATCH 03/15] Makefile: Apply tuning flags from lazerl0rd Signed-off-by: Kazuki Hashimoto Signed-off-by: saikiran2001 --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index e6766da1a016..700db73b33ac 100644 --- a/Makefile +++ b/Makefile @@ -784,6 +784,11 @@ POLLY_FLAGS += -mllvm -polly-run-dce endif endif +ifdef CONFIG_CC_IS_CLANG +KBUILD_CFLAGS += -mllvm -inline-threshold=600 +KBUILD_CFLAGS += -mllvm -inlinehint-threshold=750 +endif + # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) From 7fd8ec3364ba33d9d84c7ef46f0bee32c2104435 Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Tue, 6 Aug 2019 03:06:40 +0000 Subject: [PATCH 04/15] Makefile: Use O3 optimization level for Clang LTO Signed-off-by: Danny Lin Signed-off-by: Kazuki Hashimoto --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 700db73b33ac..7b70b61ac276 100644 --- a/Makefile +++ b/Makefile @@ 
-957,6 +957,8 @@ KBUILD_LDFLAGS += --lto-O3 endif CC_FLAGS_LTO_CLANG += -fvisibility=default KBUILD_LDS_MODULE += $(srctree)/scripts/module-lto.lds +# Set O3 optimization level for LTO +KBUILD_LDFLAGS += --plugin-opt=O3 endif ifdef CONFIG_LTO From a3a80307e51e8c2ced2f79148cbd1135c65f50b0 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sat, 9 Jan 2021 01:16:28 -0800 Subject: [PATCH 05/15] clk: qcom: qcom-cpufreq-hw: Set each CPU clock to its max when waking up The default frequency on Qualcomm CPUs is the lowest frequency supported by the CPU. This hurts latency when waking from suspend, as each CPU coming online runs at its lowest frequency until the governor can take over later. To speed up waking from suspend, hijack the CPUHP_AP_ONLINE hook and use it to set the highest available frequency on each CPU as they come online. This is done behind the governor's back but it's fine because the governor isn't running at this point in time for a CPU that's coming online. This speeds up waking from suspend significantly. Signed-off-by: Sultan Alsawaf [Kazuki: Port to v5.4 qcom-cpufreq-hw.c] Signed-off-by: Kazuki Hashimoto Signed-off-by: saikiran2001 --- drivers/cpufreq/qcom-cpufreq-hw.c | 26 ++++++++++++++++++++++++++ kernel/cpu.c | 2 ++ 2 files changed, 28 insertions(+) diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index df75ce7c770c..5538f460a0eb 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -80,6 +80,13 @@ struct cpufreq_counter { spinlock_t lock; }; +struct cpufreq_qcom_boost { + struct cpufreq_qcom *c; + unsigned int max_index; +}; + +static DEFINE_PER_CPU(struct cpufreq_qcom_boost, cpufreq_boost_pcpu); + static const u16 cpufreq_qcom_std_offsets[REG_ARRAY_SIZE] = { [REG_ENABLE] = 0x0, [REG_FREQ_LUT] = 0x110, @@ -430,6 +437,16 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .resume = qcom_cpufreq_hw_resume, }; +static int cpuhp_qcom_online(unsigned int cpu) +{ + struct cpufreq_qcom_boost *b = &per_cpu(cpufreq_boost_pcpu, cpu); + struct cpufreq_qcom *c = b->c; + + /* Set the max frequency by default before the governor takes over */ + writel_relaxed(b->max_index, c->base + offsets[REG_PERF_STATE]); + return 0; +} + static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev, struct cpufreq_qcom *c, u32 max_cores) { @@ -485,6 +502,10 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev, } c->table[i].frequency = CPUFREQ_TABLE_END; + for_each_cpu(cpu, &c->related_cpus) { + per_cpu(cpufreq_boost_pcpu, cpu).c = c; + per_cpu(cpufreq_boost_pcpu, cpu).max_index = i - 1; + } if (cpu_dev) dev_pm_opp_set_sharing_cpus(cpu_dev, &c->related_cpus); @@ -705,6 +726,11 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) return rc; } + rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE, "qcom-cpufreq:online", + cpuhp_qcom_online, NULL); + if (rc) + dev_err(&pdev->dev, "CPUHP callback setup failed, rc=%d\n", rc); + of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n"); diff --git a/kernel/cpu.c b/kernel/cpu.c index f4308a83e63b..c95d9f93889e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1638,11 +1638,13 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = NULL, .teardown.single = smpcfd_dying_cpu, }, +#ifndef CONFIG_ARM_QCOM_CPUFREQ_HW /* Entry state on starting. Interrupts enabled from here on. 
Transient * state for synchronsization */ [CPUHP_AP_ONLINE] = { .name = "ap:online", }, +#endif /* * Handled on controll processor until the plugged processor manages * this itself. From 7f98fe9d9d14005d4a20d94715a38d16a45c22df Mon Sep 17 00:00:00 2001 From: Kazuki Hashimoto Date: Mon, 13 Jun 2022 04:36:26 +0900 Subject: [PATCH 06/15] cpuidle: lpm-levels: Take each CPU's QoS latency into account Current code assumes that all CPUs have the same QoS latency when it doesn't. Signed-off-by: Kazuki Hashimoto --- drivers/cpuidle/governor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index e9801f26c732..b183aba54ae6 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -109,7 +109,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) */ int cpuidle_governor_latency_req(unsigned int cpu) { - int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); + int global_req = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, cpu); struct device *device = get_cpu_device(cpu); int device_req = dev_pm_qos_raw_resume_latency(device); From 3310fe163e51924a06fb1233f943ddbfc6466e41 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Thu, 26 Jan 2017 16:37:08 -0700 Subject: [PATCH 07/15] kernel: tick-sched: Add API to get the next wakeup for a CPU Add get_next_event_cpu to get the next wakeup time for the CPU. This is used by the sleep driver if it has to query the next wakeup for a CPU other than the thread that its running on. Change-Id: I889de90928b9b1e51a51b2f9205d7865facfcc20 Signed-off-by: Mahesh Sivasubramanian --- include/linux/tick.h | 1 + kernel/time/tick-sched.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/include/linux/tick.h b/include/linux/tick.h index f92a10b5e112..614e2ff54545 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -292,4 +292,5 @@ static inline void tick_nohz_task_switch(void) __tick_nohz_task_switch(); } +ktime_t *get_next_event_cpu(unsigned int cpu); #endif diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 9acc0b7928fe..3baa55b164f7 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1438,3 +1438,8 @@ int tick_check_oneshot_change(int allow_nohz) tick_nohz_switch_to_nohz(); return 0; } + +ktime_t *get_next_event_cpu(unsigned int cpu) +{ + return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event); +} From 87ac2d6daba4cba37e6ea888e11a8f863af4ee3b Mon Sep 17 00:00:00 2001 From: Juhyung Park Date: Fri, 17 Jun 2022 16:19:33 +0900 Subject: [PATCH 08/15] block: import ssg from G998USQU5CVDB Signed-off-by: Juhyung Park --- block/Kconfig.iosched | 6 + block/Makefile | 3 +- block/ssg-iosched.c | 862 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 870 insertions(+), 1 deletion(-) create mode 100644 block/ssg-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index b89310a022ad..4412b0f0e96a 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -18,6 +18,12 @@ config MQ_IOSCHED_KYBER synchronous writes, it will self-tune queue depths to achieve that goal. +config MQ_IOSCHED_SSG + tristate "SamSung Generic I/O scheduler" + default n + ---help--- + SamSung Generic IO scheduler. 
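+	  When built in or as a module, the scheduler registers under the
+	  name "ssg" and can be selected for a block device at runtime via
+	  the queue's scheduler attribute in sysfs.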
+ config IOSCHED_BFQ tristate "BFQ I/O scheduler" ---help--- diff --git a/block/Makefile b/block/Makefile index 9bfd4c9f9e72..c6f678080d66 100644 --- a/block/Makefile +++ b/block/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o +obj-$(CONFIG_MQ_IOSCHED_SSG) += ssg-iosched.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o @@ -38,4 +39,4 @@ obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o obj-$(CONFIG_BLK_PM) += blk-pm.o obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \ blk-crypto.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o \ No newline at end of file +obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o diff --git a/block/ssg-iosched.c b/block/ssg-iosched.c new file mode 100644 index 000000000000..8759c1680d21 --- /dev/null +++ b/block/ssg-iosched.c @@ -0,0 +1,862 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SamSung Generic I/O scheduler + * for the blk-mq scheduling framework + * + * Copyright (C) 2021 Jisoo Oh + * Copyright (C) 2021 Manjong Lee + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "blk.h" +#include "blk-mq.h" +#include "blk-mq-debugfs.h" +#include "blk-mq-tag.h" +#include "blk-mq-sched.h" + +extern void blk_sec_account_process_IO(struct bio *bio); + +static const int read_expire = HZ / 2; /* max time before a read is submitted. */ +static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ +static const int max_write_starvation = 2; /* max times reads can starve a write */ +static const int async_write_percent = 25; /* max tags percentige for async write */ +static const unsigned int max_async_write_tags = 8; /* max tags for async write. */ + +struct ssg_data { + /* + * run time data + */ + + /* + * requests are present on both sort_list and fifo_list + */ + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + + /* + * next in sort order. read, write or both are NULL + */ + struct request *next_rq[2]; + unsigned int starved_writes; /* times reads have starved writes */ + + /* + * settings that change how the i/o scheduler behaves + */ + int fifo_expire[2]; + int max_write_starvation; + int front_merges; + int async_write_depth; /* async write depth for each tag map */ + atomic_t async_write_cnt; + + spinlock_t lock; + spinlock_t zone_lock; + struct list_head dispatch; +}; + +static inline struct rb_root *ssg_rb_root(struct ssg_data *ssg, struct request *rq) +{ + return &ssg->sort_list[rq_data_dir(rq)]; +} + +/* + * get the request after `rq' in sector-sorted order + */ +static inline struct request *ssg_latter_request(struct request *rq) +{ + struct rb_node *node = rb_next(&rq->rb_node); + + if (node) + return rb_entry_rq(node); + + return NULL; +} + +static void ssg_add_rq_rb(struct ssg_data *ssg, struct request *rq) +{ + struct rb_root *root = ssg_rb_root(ssg, rq); + + elv_rb_add(root, rq); +} + +static inline void ssg_del_rq_rb(struct ssg_data *ssg, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + if (ssg->next_rq[data_dir] == rq) + ssg->next_rq[data_dir] = ssg_latter_request(rq); + + elv_rb_del(ssg_rb_root(ssg, rq), rq); +} + +/* + * remove rq from rbtree and fifo. 
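+ * Also drops the request from the elevator merge hash and clears
+ * q->last_merge if it currently points at this request.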
+ */ +static void ssg_remove_request(struct request_queue *q, struct request *rq) +{ + struct ssg_data *ssg = q->elevator->elevator_data; + + list_del_init(&rq->queuelist); + + /* + * We might not be on the rbtree, if we are doing an insert merge + */ + if (!RB_EMPTY_NODE(&rq->rb_node)) + ssg_del_rq_rb(ssg, rq); + + elv_rqhash_del(q, rq); + if (q->last_merge == rq) + q->last_merge = NULL; +} + +static void ssg_request_merged(struct request_queue *q, struct request *req, + enum elv_merge type) +{ + struct ssg_data *ssg = q->elevator->elevator_data; + + /* + * if the merge was a front merge, we need to reposition request + */ + if (type == ELEVATOR_FRONT_MERGE) { + elv_rb_del(ssg_rb_root(ssg, req), req); + ssg_add_rq_rb(ssg, req); + } +} + +static void ssg_merged_requests(struct request_queue *q, struct request *req, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, + (unsigned long)req->fifo_time)) { + list_move(&req->queuelist, &next->queuelist); + req->fifo_time = next->fifo_time; + } + } + + /* + * kill knowledge of next, this one is a goner + */ + ssg_remove_request(q, next); +} + +/* + * move an entry to dispatch queue + */ +static void ssg_move_request(struct ssg_data *ssg, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + ssg->next_rq[READ] = NULL; + ssg->next_rq[WRITE] = NULL; + ssg->next_rq[data_dir] = ssg_latter_request(rq); + + /* + * take it off the sort and fifo list + */ + ssg_remove_request(rq->q, rq); +} + +/* + * ssg_check_fifo returns 0 if there are no expired requests on the fifo, + * 1 otherwise. Requires !list_empty(&ssg->fifo_list[data_dir]) + */ +static inline int ssg_check_fifo(struct ssg_data *ssg, int ddir) +{ + struct request *rq = rq_entry_fifo(ssg->fifo_list[ddir].next); + + /* + * rq is expired! + */ + if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) + return 1; + + return 0; +} + +/* + * For the specified data direction, return the next request to + * dispatch using arrival ordered lists. + */ +static struct request *ssg_fifo_request(struct ssg_data *ssg, int data_dir) +{ + struct request *rq; + unsigned long flags; + + if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE)) + return NULL; + + if (list_empty(&ssg->fifo_list[data_dir])) + return NULL; + + rq = rq_entry_fifo(ssg->fifo_list[data_dir].next); + if (data_dir == READ || !blk_queue_is_zoned(rq->q)) + return rq; + + /* + * Look for a write request that can be dispatched, that is one with + * an unlocked target zone. + */ + spin_lock_irqsave(&ssg->zone_lock, flags); + list_for_each_entry(rq, &ssg->fifo_list[WRITE], queuelist) { + if (blk_req_can_dispatch_to_zone(rq)) + goto out; + } + rq = NULL; +out: + spin_unlock_irqrestore(&ssg->zone_lock, flags); + + return rq; +} + +/* + * For the specified data direction, return the next request to + * dispatch using sector position sorted lists. + */ +static struct request *ssg_next_request(struct ssg_data *ssg, int data_dir) +{ + struct request *rq; + unsigned long flags; + + if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE)) + return NULL; + + rq = ssg->next_rq[data_dir]; + if (!rq) + return NULL; + + if (data_dir == READ || !blk_queue_is_zoned(rq->q)) + return rq; + + /* + * Look for a write request that can be dispatched, that is one with + * an unlocked target zone. 
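+ * Walk forward in sector order (ssg_latter_request()) until such a
+ * request is found; rq ends up NULL if every queued write targets a
+ * locked zone.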
+ */ + spin_lock_irqsave(&ssg->zone_lock, flags); + while (rq) { + if (blk_req_can_dispatch_to_zone(rq)) + break; + rq = ssg_latter_request(rq); + } + spin_unlock_irqrestore(&ssg->zone_lock, flags); + + return rq; +} + +/* + * ssg_dispatch_requests selects the best request according to + * read/write expire, etc + */ +static struct request *__ssg_dispatch_request(struct ssg_data *ssg) +{ + struct request *rq, *next_rq; + bool reads, writes; + int data_dir; + + if (!list_empty(&ssg->dispatch)) { + rq = list_first_entry(&ssg->dispatch, struct request, queuelist); + list_del_init(&rq->queuelist); + goto done; + } + + reads = !list_empty(&ssg->fifo_list[READ]); + writes = !list_empty(&ssg->fifo_list[WRITE]); + + /* + * select the appropriate data direction (read / write) + */ + + if (reads) { + BUG_ON(RB_EMPTY_ROOT(&ssg->sort_list[READ])); + + if (ssg_fifo_request(ssg, WRITE) && + (ssg->starved_writes++ >= ssg->max_write_starvation)) + goto dispatch_writes; + + data_dir = READ; + + goto dispatch_find_request; + } + + /* + * there are either no reads or writes have been starved + */ + + if (writes) { +dispatch_writes: + BUG_ON(RB_EMPTY_ROOT(&ssg->sort_list[WRITE])); + + ssg->starved_writes = 0; + + data_dir = WRITE; + + goto dispatch_find_request; + } + + return NULL; + +dispatch_find_request: + /* + * we are not running a batch, find best request for selected data_dir + */ + next_rq = ssg_next_request(ssg, data_dir); + if (ssg_check_fifo(ssg, data_dir) || !next_rq) { + /* + * A deadline has expired, the last request was in the other + * direction, or we have run out of higher-sectored requests. + * Start again from the request with the earliest expiry time. + */ + rq = ssg_fifo_request(ssg, data_dir); + } else { + /* + * The last req was the same dir and we have a next request in + * sort order. No expired requests so continue on from here. + */ + rq = next_rq; + } + + /* + * For a zoned block device, if we only have writes queued and none of + * them can be dispatched, rq will be NULL. + */ + if (!rq) + return NULL; + + /* + * rq is the selected appropriate request. + */ + ssg_move_request(ssg, rq); +done: + /* + * If the request needs its target zone locked, do it. + */ + blk_req_zone_write_lock(rq); + rq->rq_flags |= RQF_STARTED; + return rq; +} + +/* + * One confusing aspect here is that we get called for a specific + * hardware queue, but we may return a request that is for a + * different hardware queue. This is because ssg-iosched has shared + * state for all hardware queues, in terms of sorting, FIFOs, etc. 
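+ * All of that shared state is serialized by ssg->lock, which this
+ * function holds around __ssg_dispatch_request().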
+ */ +static struct request *ssg_dispatch_request(struct blk_mq_hw_ctx *hctx) +{ + struct ssg_data *ssg = hctx->queue->elevator->elevator_data; + struct request *rq; + + spin_lock(&ssg->lock); + rq = __ssg_dispatch_request(ssg); + spin_unlock(&ssg->lock); + + return rq; +} + +static unsigned int ssg_sched_tags_map_nr(struct request_queue *q) +{ + return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.map_nr; +} + +static unsigned int ssg_sched_tags_depth(struct request_queue *q) +{ + return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.depth; +} + +static void ssg_set_shallow_depth(struct request_queue *q) +{ + struct ssg_data *ssg = q->elevator->elevator_data; + unsigned int map_nr; + unsigned int depth; + unsigned int async_write_depth; + + depth = ssg_sched_tags_depth(q); + map_nr = ssg_sched_tags_map_nr(q); + + async_write_depth = depth * async_write_percent / 100U; + async_write_depth = min(async_write_depth, max_async_write_tags); + + ssg->async_write_depth = + (async_write_depth / map_nr) ? (async_write_depth / map_nr) : 1; +} + +static void ssg_depth_updated(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct ssg_data *ssg = q->elevator->elevator_data; + + ssg_set_shallow_depth(q); + sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, + ssg->async_write_depth); +} + +static inline bool ssg_op_is_async_write(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_WRITE && !op_is_sync(op); +} + +static void ssg_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) +{ + struct ssg_data *ssg = data->q->elevator->elevator_data; + + if (!ssg_op_is_async_write(op)) + return; + + if (atomic_read(&ssg->async_write_cnt) > max_async_write_tags) + data->shallow_depth = ssg->async_write_depth; +} + +static int ssg_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) +{ + struct request_queue *q = hctx->queue; + struct ssg_data *ssg = q->elevator->elevator_data; + + ssg_set_shallow_depth(q); + sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, + ssg->async_write_depth); + return 0; +} + +static void ssg_exit_queue(struct elevator_queue *e) +{ + struct ssg_data *ssg = e->elevator_data; + + BUG_ON(!list_empty(&ssg->fifo_list[READ])); + BUG_ON(!list_empty(&ssg->fifo_list[WRITE])); + + kfree(ssg); +} + +/* + * initialize elevator private data (ssg_data). 
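+ * Allocates the per-queue ssg_data, initializes the FIFO/sort lists and
+ * default tunables, and attaches the elevator queue to q->elevator.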
+ */ +static int ssg_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct ssg_data *ssg; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + ssg = kzalloc_node(sizeof(*ssg), GFP_KERNEL, q->node); + if (!ssg) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = ssg; + + INIT_LIST_HEAD(&ssg->fifo_list[READ]); + INIT_LIST_HEAD(&ssg->fifo_list[WRITE]); + ssg->sort_list[READ] = RB_ROOT; + ssg->sort_list[WRITE] = RB_ROOT; + ssg->fifo_expire[READ] = read_expire; + ssg->fifo_expire[WRITE] = write_expire; + ssg->max_write_starvation = max_write_starvation; + ssg->front_merges = 1; + atomic_set(&ssg->async_write_cnt, 0); + spin_lock_init(&ssg->lock); + spin_lock_init(&ssg->zone_lock); + INIT_LIST_HEAD(&ssg->dispatch); + + q->elevator = eq; + return 0; +} + +static int ssg_request_merge(struct request_queue *q, struct request **rq, + struct bio *bio) +{ + struct ssg_data *ssg = q->elevator->elevator_data; + sector_t sector = bio_end_sector(bio); + struct request *__rq; + + if (!ssg->front_merges) + return ELEVATOR_NO_MERGE; + + __rq = elv_rb_find(&ssg->sort_list[bio_data_dir(bio)], sector); + if (__rq) { + BUG_ON(sector != blk_rq_pos(__rq)); + + if (elv_bio_merge_ok(__rq, bio)) { + *rq = __rq; + return ELEVATOR_FRONT_MERGE; + } + } + + return ELEVATOR_NO_MERGE; +} + +static bool ssg_bio_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs) +{ + struct ssg_data *ssg = q->elevator->elevator_data; + struct request *free = NULL; + bool ret; + + spin_lock(&ssg->lock); + ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); + spin_unlock(&ssg->lock); + + if (free) + blk_mq_free_request(free); + + return ret; +} + +/* + * add rq to rbtree and fifo + */ +static void ssg_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, + bool at_head) +{ + struct request_queue *q = hctx->queue; + struct ssg_data *ssg = q->elevator->elevator_data; + const int data_dir = rq_data_dir(rq); + + /* + * This may be a requeue of a write request that has locked its + * target zone. If it is the case, this releases the zone lock. + */ + blk_req_zone_write_unlock(rq); + + if (blk_mq_sched_try_insert_merge(q, rq)) + return; + + blk_mq_sched_request_inserted(rq); + + if (at_head || blk_rq_is_passthrough(rq)) { + if (at_head) + list_add(&rq->queuelist, &ssg->dispatch); + else + list_add_tail(&rq->queuelist, &ssg->dispatch); + } else { + ssg_add_rq_rb(ssg, rq); + + if (rq_mergeable(rq)) { + elv_rqhash_add(q, rq); + if (!q->last_merge) + q->last_merge = rq; + } + + /* + * set expire time and add to fifo list + */ + rq->fifo_time = jiffies + ssg->fifo_expire[data_dir]; + list_add_tail(&rq->queuelist, &ssg->fifo_list[data_dir]); + } +} + +static void ssg_insert_requests(struct blk_mq_hw_ctx *hctx, + struct list_head *list, bool at_head) +{ + struct request_queue *q = hctx->queue; + struct ssg_data *ssg = q->elevator->elevator_data; + + spin_lock(&ssg->lock); + while (!list_empty(list)) { + struct request *rq; + + rq = list_first_entry(list, struct request, queuelist); + list_del_init(&rq->queuelist); + ssg_insert_request(hctx, rq, at_head); + } + spin_unlock(&ssg->lock); +} + +/* + * Nothing to do here. This is defined only to ensure that .finish_request + * method is called upon request completion. 
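+ * (In ssg the hook is not actually empty: it also counts in-flight
+ * async writes and accounts the bio via blk_sec_account_process_IO().)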
+ */ +static void ssg_prepare_request(struct request *rq, struct bio *bio) +{ + struct ssg_data *ssg = rq->q->elevator->elevator_data; + + if (ssg_op_is_async_write(rq->cmd_flags)) + atomic_inc(&ssg->async_write_cnt); + + blk_sec_account_process_IO(bio); +} + +/* + * For zoned block devices, write unlock the target zone of + * completed write requests. Do this while holding the zone lock + * spinlock so that the zone is never unlocked while ssg_fifo_request() + * or ssg_next_request() are executing. This function is called for + * all requests, whether or not these requests complete successfully. + * + * For a zoned block device, __ssg_dispatch_request() may have stopped + * dispatching requests if all the queued requests are write requests directed + * at zones that are already locked due to on-going write requests. To ensure + * write request dispatch progress in this case, mark the queue as needing a + * restart to ensure that the queue is run again after completion of the + * request and zones being unlocked. + */ +static void ssg_finish_request(struct request *rq) +{ + struct request_queue *q = rq->q; + struct ssg_data *ssg = q->elevator->elevator_data; + + if (blk_queue_is_zoned(q)) { + unsigned long flags; + + spin_lock_irqsave(&ssg->zone_lock, flags); + blk_req_zone_write_unlock(rq); + if (!list_empty(&ssg->fifo_list[WRITE])) + blk_mq_sched_mark_restart_hctx(rq->mq_hctx); + spin_unlock_irqrestore(&ssg->zone_lock, flags); + } + + if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) + return; + + if (ssg_op_is_async_write(rq->cmd_flags)) + atomic_dec(&ssg->async_write_cnt); +} + +static bool ssg_has_work(struct blk_mq_hw_ctx *hctx) +{ + struct ssg_data *ssg = hctx->queue->elevator->elevator_data; + + return !list_empty_careful(&ssg->dispatch) || + !list_empty_careful(&ssg->fifo_list[0]) || + !list_empty_careful(&ssg->fifo_list[1]); +} + +/* + * sysfs parts below + */ +static ssize_t ssg_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static void ssg_var_store(int *var, const char *page) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct ssg_data *ssg = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return ssg_var_show(__data, (page)); \ +} +SHOW_FUNCTION(ssg_read_expire_show, ssg->fifo_expire[READ], 1); +SHOW_FUNCTION(ssg_write_expire_show, ssg->fifo_expire[WRITE], 1); +SHOW_FUNCTION(ssg_max_write_starvation_show, ssg->max_write_starvation, 0); +SHOW_FUNCTION(ssg_front_merges_show, ssg->front_merges, 0); +SHOW_FUNCTION(ssg_async_write_depth_show, ssg->async_write_depth, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct ssg_data *ssg = e->elevator_data; \ + int __data; \ + ssg_var_store(&__data, (page)); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return count; \ +} +STORE_FUNCTION(ssg_read_expire_store, &ssg->fifo_expire[READ], 0, INT_MAX, 1); +STORE_FUNCTION(ssg_write_expire_store, &ssg->fifo_expire[WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(ssg_max_write_starvation_store, &ssg->max_write_starvation, INT_MIN, INT_MAX, 0); +STORE_FUNCTION(ssg_front_merges_store, &ssg->front_merges, 0, 1, 0); +#undef 
STORE_FUNCTION + +#define SSG_ATTR(name) \ + __ATTR(name, 0644, ssg_##name##_show, ssg_##name##_store) + +#define SSG_ATTR_RO(name) \ + __ATTR(name, 0444, ssg_##name##_show, NULL) + +static struct elv_fs_entry ssg_attrs[] = { + SSG_ATTR(read_expire), + SSG_ATTR(write_expire), + SSG_ATTR(max_write_starvation), + SSG_ATTR(front_merges), + SSG_ATTR_RO(async_write_depth), + __ATTR_NULL +}; + +#ifdef CONFIG_BLK_DEBUG_FS +#define SSG_DEBUGFS_DDIR_ATTRS(ddir, name) \ +static void *ssg_##name##_fifo_start(struct seq_file *m, \ + loff_t *pos) \ + __acquires(&ssg->lock) \ +{ \ + struct request_queue *q = m->private; \ + struct ssg_data *ssg = q->elevator->elevator_data; \ + \ + spin_lock(&ssg->lock); \ + return seq_list_start(&ssg->fifo_list[ddir], *pos); \ +} \ + \ +static void *ssg_##name##_fifo_next(struct seq_file *m, void *v, \ + loff_t *pos) \ +{ \ + struct request_queue *q = m->private; \ + struct ssg_data *ssg = q->elevator->elevator_data; \ + \ + return seq_list_next(v, &ssg->fifo_list[ddir], pos); \ +} \ + \ +static void ssg_##name##_fifo_stop(struct seq_file *m, void *v) \ + __releases(&ssg->lock) \ +{ \ + struct request_queue *q = m->private; \ + struct ssg_data *ssg = q->elevator->elevator_data; \ + \ + spin_unlock(&ssg->lock); \ +} \ + \ +static const struct seq_operations ssg_##name##_fifo_seq_ops = { \ + .start = ssg_##name##_fifo_start, \ + .next = ssg_##name##_fifo_next, \ + .stop = ssg_##name##_fifo_stop, \ + .show = blk_mq_debugfs_rq_show, \ +}; \ + \ +static int ssg_##name##_next_rq_show(void *data, \ + struct seq_file *m) \ +{ \ + struct request_queue *q = data; \ + struct ssg_data *ssg = q->elevator->elevator_data; \ + struct request *rq = ssg->next_rq[ddir]; \ + \ + if (rq) \ + __blk_mq_debugfs_rq_show(m, rq); \ + return 0; \ +} +SSG_DEBUGFS_DDIR_ATTRS(READ, read) +SSG_DEBUGFS_DDIR_ATTRS(WRITE, write) +#undef SSG_DEBUGFS_DDIR_ATTRS + +static int ssg_starved_writes_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + struct ssg_data *ssg = q->elevator->elevator_data; + + seq_printf(m, "%u\n", ssg->starved_writes); + return 0; +} + +static void *ssg_dispatch_start(struct seq_file *m, loff_t *pos) + __acquires(&ssg->lock) +{ + struct request_queue *q = m->private; + struct ssg_data *ssg = q->elevator->elevator_data; + + spin_lock(&ssg->lock); + return seq_list_start(&ssg->dispatch, *pos); +} + +static void *ssg_dispatch_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct request_queue *q = m->private; + struct ssg_data *ssg = q->elevator->elevator_data; + + return seq_list_next(v, &ssg->dispatch, pos); +} + +static void ssg_dispatch_stop(struct seq_file *m, void *v) + __releases(&ssg->lock) +{ + struct request_queue *q = m->private; + struct ssg_data *ssg = q->elevator->elevator_data; + + spin_unlock(&ssg->lock); +} + +static const struct seq_operations ssg_dispatch_seq_ops = { + .start = ssg_dispatch_start, + .next = ssg_dispatch_next, + .stop = ssg_dispatch_stop, + .show = blk_mq_debugfs_rq_show, +}; + +#define SSG_IOSCHED_QUEUE_DDIR_ATTRS(name) \ + {#name "_fifo_list", 0400, .seq_ops = &ssg_##name##_fifo_seq_ops}, \ + {#name "_next_rq", 0400, ssg_##name##_next_rq_show} +static const struct blk_mq_debugfs_attr ssg_queue_debugfs_attrs[] = { + SSG_IOSCHED_QUEUE_DDIR_ATTRS(read), + SSG_IOSCHED_QUEUE_DDIR_ATTRS(write), + {"starved_writes", 0400, ssg_starved_writes_show}, + {"dispatch", 0400, .seq_ops = &ssg_dispatch_seq_ops}, + {}, +}; +#undef SSG_IOSCHED_QUEUE_DDIR_ATTRS +#endif + +static struct elevator_type ssg_iosched = { + .ops = { + 
.insert_requests = ssg_insert_requests, + .dispatch_request = ssg_dispatch_request, + .prepare_request = ssg_prepare_request, + .finish_request = ssg_finish_request, + .next_request = elv_rb_latter_request, + .former_request = elv_rb_former_request, + .bio_merge = ssg_bio_merge, + .request_merge = ssg_request_merge, + .requests_merged = ssg_merged_requests, + .request_merged = ssg_request_merged, + .has_work = ssg_has_work, + .limit_depth = ssg_limit_depth, + .depth_updated = ssg_depth_updated, + .init_hctx = ssg_init_hctx, + .init_sched = ssg_init_queue, + .exit_sched = ssg_exit_queue, + }, + +#ifdef CONFIG_BLK_DEBUG_FS + .queue_debugfs_attrs = ssg_queue_debugfs_attrs, +#endif + .elevator_attrs = ssg_attrs, + .elevator_name = "ssg", + .elevator_alias = "ssg", + .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE, + .elevator_owner = THIS_MODULE, +}; +MODULE_ALIAS("ssg"); + +static int __init ssg_iosched_init(void) +{ + return elv_register(&ssg_iosched); +} + +static void __exit ssg_iosched_exit(void) +{ + elv_unregister(&ssg_iosched); +} + +module_init(ssg_iosched_init); +module_exit(ssg_iosched_exit); + +MODULE_AUTHOR("Jisoo Oh"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SSG IO Scheduler"); From 54aec5f56b5e81ec6e5e57e4dad78a7fcdb35575 Mon Sep 17 00:00:00 2001 From: Juhyung Park Date: Fri, 17 Jun 2022 17:00:49 +0900 Subject: [PATCH 09/15] block: update ssg from S908BXXU2AVF1 Signed-off-by: Juhyung Park --- block/Kconfig.iosched | 8 ++ block/Makefile | 4 +- block/ssg-cgroup.c | 263 +++++++++++++++++++++++++++++++++++++++ block/ssg-cgroup.h | 65 ++++++++++ block/ssg-iosched.c | 281 ++++++++++++++++++++++++++++++++++-------- 5 files changed, 572 insertions(+), 49 deletions(-) create mode 100644 block/ssg-cgroup.c create mode 100644 block/ssg-cgroup.h diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 4412b0f0e96a..dfcacc050cff 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -24,6 +24,14 @@ config MQ_IOSCHED_SSG ---help--- SamSung Generic IO scheduler. +config MQ_IOSCHED_SSG_CGROUP + tristate "Control Group for SamSung Generic I/O scheduler" + default n + depends on BLK_CGROUP + depends on MQ_IOSCHED_SSG + ---help--- + Control Group for SamSung Generic IO scheduler. 
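+	  Each blkcg group can then be assigned an ssg.max_available_ratio,
+	  capping the share of scheduler tags its I/O may occupy.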
+ config IOSCHED_BFQ tristate "BFQ I/O scheduler" ---help--- diff --git a/block/Makefile b/block/Makefile index c6f678080d66..a49a77b13491 100644 --- a/block/Makefile +++ b/block/Makefile @@ -21,7 +21,9 @@ obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o -obj-$(CONFIG_MQ_IOSCHED_SSG) += ssg-iosched.o +ssg-$(CONFIG_MQ_IOSCHED_SSG) := ssg-iosched.o +ssg-$(CONFIG_MQ_IOSCHED_SSG_CGROUP) += ssg-cgroup.o +obj-$(CONFIG_MQ_IOSCHED_SSG) += ssg.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o diff --git a/block/ssg-cgroup.c b/block/ssg-cgroup.c new file mode 100644 index 000000000000..df42f532f196 --- /dev/null +++ b/block/ssg-cgroup.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Control Group of SamSung Generic I/O scheduler + * + * Copyright (C) 2021 Changheun Lee + */ + +#include +#include + +#include "blk-mq.h" +#include "blk-mq-tag.h" +#include "ssg-cgroup.h" + + + +static struct blkcg_policy ssg_blkcg_policy; + + + +#define CPD_TO_SSG_BLKCG(_cpd) \ + container_of_safe((_cpd), struct ssg_blkcg, cpd) +#define BLKCG_TO_SSG_BLKCG(_blkcg) \ + CPD_TO_SSG_BLKCG(blkcg_to_cpd((_blkcg), &ssg_blkcg_policy)) + +#define PD_TO_SSG_BLKG(_pd) \ + container_of_safe((_pd), struct ssg_blkg, pd) +#define BLKG_TO_SSG_BLKG(_blkg) \ + PD_TO_SSG_BLKG(blkg_to_pd((_blkg), &ssg_blkcg_policy)) + +#define CSS_TO_SSG_BLKCG(css) BLKCG_TO_SSG_BLKCG(css_to_blkcg(css)) + + + +static struct blkcg_policy_data *ssg_blkcg_cpd_alloc(gfp_t gfp) +{ + struct ssg_blkcg *ssg_blkcg; + + ssg_blkcg = kzalloc(sizeof(struct ssg_blkcg), gfp); + if (ZERO_OR_NULL_PTR(ssg_blkcg)) + return NULL; + + return &ssg_blkcg->cpd; +} + +static void ssg_blkcg_cpd_init(struct blkcg_policy_data *cpd) +{ + struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd); + + if (IS_ERR_OR_NULL(ssg_blkcg)) + return; + + ssg_blkcg->max_available_ratio = 100; +} + +static void ssg_blkcg_cpd_free(struct blkcg_policy_data *cpd) +{ + struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd); + + if (IS_ERR_OR_NULL(ssg_blkcg)) + return; + + kfree(ssg_blkcg); +} + +static void ssg_blkcg_set_shallow_depth(struct ssg_blkcg *ssg_blkcg, + struct ssg_blkg *ssg_blkg, struct blk_mq_tags *tags) +{ + unsigned int depth = tags->bitmap_tags.sb.depth; + unsigned int map_nr = tags->bitmap_tags.sb.map_nr; + + ssg_blkg->max_available_rqs = + depth * ssg_blkcg->max_available_ratio / 100U; + ssg_blkg->shallow_depth = + max_t(unsigned int, 1, ssg_blkg->max_available_rqs / map_nr); +} + +static struct blkg_policy_data *ssg_blkcg_pd_alloc(gfp_t gfp, + struct request_queue *q, struct blkcg *blkcg) +{ + struct ssg_blkg *ssg_blkg; + + ssg_blkg = kzalloc_node(sizeof(struct ssg_blkg), gfp, q->node); + if (ZERO_OR_NULL_PTR(ssg_blkg)) + return NULL; + + return &ssg_blkg->pd; +} + +static void ssg_blkcg_pd_init(struct blkg_policy_data *pd) +{ + struct ssg_blkg *ssg_blkg; + struct ssg_blkcg *ssg_blkcg; + + ssg_blkg = PD_TO_SSG_BLKG(pd); + if (IS_ERR_OR_NULL(ssg_blkg)) + return; + + ssg_blkcg = BLKCG_TO_SSG_BLKCG(pd->blkg->blkcg); + if (IS_ERR_OR_NULL(ssg_blkcg)) + return; + + atomic_set(&ssg_blkg->current_rqs, 0); + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, + pd->blkg->q->queue_hw_ctx[0]->sched_tags); +} + +static void ssg_blkcg_pd_free(struct blkg_policy_data *pd) +{ + struct ssg_blkg *ssg_blkg = PD_TO_SSG_BLKG(pd); + + if (IS_ERR_OR_NULL(ssg_blkg)) + return; + + kfree(ssg_blkg); +} + +unsigned int 
ssg_blkcg_shallow_depth(struct request_queue *q) +{ + struct blkcg_gq *blkg; + struct ssg_blkg *ssg_blkg; + + rcu_read_lock(); + blkg = blkg_lookup(css_to_blkcg(blkcg_css()), q); + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); + rcu_read_unlock(); + + if (IS_ERR_OR_NULL(ssg_blkg)) + return 0; + + if (atomic_read(&ssg_blkg->current_rqs) < ssg_blkg->max_available_rqs) + return 0; + + return ssg_blkg->shallow_depth; +} + +void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct cgroup_subsys_state *pos_css; + struct blkcg_gq *blkg; + struct ssg_blkg *ssg_blkg; + struct ssg_blkcg *ssg_blkcg; + + rcu_read_lock(); + blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) { + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); + if (IS_ERR_OR_NULL(ssg_blkg)) + continue; + + ssg_blkcg = BLKCG_TO_SSG_BLKCG(blkg->blkcg); + if (IS_ERR_OR_NULL(ssg_blkcg)) + continue; + + atomic_set(&ssg_blkg->current_rqs, 0); + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, hctx->sched_tags); + } + rcu_read_unlock(); +} + +void ssg_blkcg_inc_rq(struct blkcg_gq *blkg) +{ + struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg); + + if (IS_ERR_OR_NULL(ssg_blkg)) + return; + + atomic_inc(&ssg_blkg->current_rqs); +} + +void ssg_blkcg_dec_rq(struct blkcg_gq *blkg) +{ + struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg); + + if (IS_ERR_OR_NULL(ssg_blkg)) + return; + + atomic_dec(&ssg_blkg->current_rqs); +} + +static int ssg_blkcg_show_max_available_ratio(struct seq_file *sf, void *v) +{ + struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(seq_css(sf)); + + if (IS_ERR_OR_NULL(ssg_blkcg)) + return -EINVAL; + + seq_printf(sf, "%d\n", ssg_blkcg->max_available_ratio); + + return 0; +} + +static int ssg_blkcg_set_max_available_ratio(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 ratio) +{ + struct blkcg *blkcg = css_to_blkcg(css); + struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(css); + struct blkcg_gq *blkg; + struct ssg_blkg *ssg_blkg; + + if (IS_ERR_OR_NULL(ssg_blkcg)) + return -EINVAL; + + if (ratio > 100) + return -EINVAL; + + spin_lock_irq(&blkcg->lock); + ssg_blkcg->max_available_ratio = ratio; + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); + if (IS_ERR_OR_NULL(ssg_blkg)) + continue; + + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, + blkg->q->queue_hw_ctx[0]->sched_tags); + } + spin_unlock_irq(&blkcg->lock); + + return 0; +} + +struct cftype ssg_blkg_files[] = { + { + .name = "ssg.max_available_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ssg_blkcg_show_max_available_ratio, + .write_u64 = ssg_blkcg_set_max_available_ratio, + }, + + {} /* terminate */ +}; + +static struct blkcg_policy ssg_blkcg_policy = { + .legacy_cftypes = ssg_blkg_files, + + .cpd_alloc_fn = ssg_blkcg_cpd_alloc, + .cpd_init_fn = ssg_blkcg_cpd_init, + .cpd_free_fn = ssg_blkcg_cpd_free, + + .pd_alloc_fn = ssg_blkcg_pd_alloc, + .pd_init_fn = ssg_blkcg_pd_init, + .pd_free_fn = ssg_blkcg_pd_free, +}; + +int ssg_blkcg_activate(struct request_queue *q) +{ + return blkcg_activate_policy(q, &ssg_blkcg_policy); +} + +void ssg_blkcg_deactivate(struct request_queue *q) +{ + blkcg_deactivate_policy(q, &ssg_blkcg_policy); +} + +int ssg_blkcg_init(void) +{ + return blkcg_policy_register(&ssg_blkcg_policy); +} + +void ssg_blkcg_exit(void) +{ + blkcg_policy_unregister(&ssg_blkcg_policy); +} diff --git a/block/ssg-cgroup.h b/block/ssg-cgroup.h new file mode 100644 index 000000000000..c8fde426e31e --- /dev/null +++ b/block/ssg-cgroup.h @@ -0,0 +1,65 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef SSG_CGROUP_H +#define SSG_CGROUP_H +#include + +#if IS_ENABLED(CONFIG_MQ_IOSCHED_SSG_CGROUP) +struct ssg_blkcg { + struct blkcg_policy_data cpd; /* must be the first member */ + + int max_available_ratio; +}; + +struct ssg_blkg { + struct blkg_policy_data pd; /* must be the first member */ + + atomic_t current_rqs; + int max_available_rqs; + unsigned int shallow_depth; /* shallow depth for each tag map to get sched tag */ +}; + +extern int ssg_blkcg_init(void); +extern void ssg_blkcg_exit(void); +extern int ssg_blkcg_activate(struct request_queue *q); +extern void ssg_blkcg_deactivate(struct request_queue *q); +extern unsigned int ssg_blkcg_shallow_depth(struct request_queue *q); +extern void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx); +extern void ssg_blkcg_inc_rq(struct blkcg_gq *blkg); +extern void ssg_blkcg_dec_rq(struct blkcg_gq *blkg); +#else +int ssg_blkcg_init(void) +{ + return 0; +} +void ssg_blkcg_exit(void) +{ +} + +int ssg_blkcg_activate(struct request_queue *q) +{ + return 0; +} + +void ssg_blkcg_deactivate(struct request_queue *q) +{ +} + +unsigned int ssg_blkcg_shallow_depth(struct request_queue *q) +{ + return 0; +} + +void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx) +{ +} + +void ssg_blkcg_inc_rq(struct blkcg_gq *blkg) +{ +} + +void ssg_blkcg_dec_rq(struct blkcg_gq *blkg) +{ +} +#endif + +#endif diff --git a/block/ssg-iosched.c b/block/ssg-iosched.c index 8759c1680d21..fd8695fbceee 100644 --- a/block/ssg-iosched.c +++ b/block/ssg-iosched.c @@ -5,6 +5,7 @@ * * Copyright (C) 2021 Jisoo Oh * Copyright (C) 2021 Manjong Lee + * Copyright (C) 2021 Changheun Lee */ #include #include @@ -24,19 +25,41 @@ #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" #include "blk-mq-sched.h" +#include "ssg-cgroup.h" + +#if IS_ENABLED(CONFIG_BLK_SEC_STATS) +extern void blk_sec_stats_account_init(struct request_queue *q); +extern void blk_sec_stats_account_exit(struct elevator_queue *eq); +extern void blk_sec_stats_account_io_done( + struct request *rq, unsigned int data_size, + pid_t tgid, const char *tg_name, u64 tg_start_time); +#else +#define blk_sec_stats_account_init(q) do {} while(0) +#define blk_sec_stats_account_exit(eq) do {} while(0) +#define blk_sec_stats_account_io_done(rq, size, tgid, name, time) do {} while(0) +#endif -extern void blk_sec_account_process_IO(struct bio *bio); +#define MAX_ASYNC_WRITE_RQS 8 static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ static const int max_write_starvation = 2; /* max times reads can starve a write */ -static const int async_write_percent = 25; /* max tags percentige for async write */ -static const unsigned int max_async_write_tags = 8; /* max tags for async write. */ +static const int congestion_threshold = 90; /* percentage of congestion threshold */ +static const int max_tgroup_io_ratio = 50; /* maximum service ratio for each thread group */ +static const int max_async_write_ratio = 25; /* maximum service ratio for async write */ + +struct ssg_request_info { + pid_t tgid; + char tg_name[TASK_COMM_LEN]; + u64 tg_start_time; + + struct blkcg_gq *blkg; + + unsigned int data_size; +}; struct ssg_data { - /* - * run time data - */ + struct request_queue *queue; /* * requests are present on both sort_list and fifo_list @@ -48,7 +71,7 @@ struct ssg_data { * next in sort order. 
read, write or both are NULL */ struct request *next_rq[2]; - unsigned int starved_writes; /* times reads have starved writes */ + unsigned int starved_writes; /* times reads have starved writes */ /* * settings that change how the i/o scheduler behaves @@ -56,8 +79,22 @@ struct ssg_data { int fifo_expire[2]; int max_write_starvation; int front_merges; - int async_write_depth; /* async write depth for each tag map */ - atomic_t async_write_cnt; + + /* + * to control request allocation + */ + atomic_t allocated_rqs; + atomic_t async_write_rqs; + int congestion_threshold_rqs; + int max_tgroup_rqs; + int max_async_write_rqs; + unsigned int tgroup_shallow_depth; /* thread group shallow depth for each tag map */ + unsigned int async_write_shallow_depth; /* async write shallow depth for each tag map */ + + /* + * I/O context information for each request + */ + struct ssg_request_info *rq_info; spinlock_t lock; spinlock_t zone_lock; @@ -99,6 +136,41 @@ static inline void ssg_del_rq_rb(struct ssg_data *ssg, struct request *rq) elv_rb_del(ssg_rb_root(ssg, rq), rq); } +static inline struct ssg_request_info *ssg_rq_info(struct ssg_data *ssg, + struct request *rq) +{ + if (unlikely(!ssg->rq_info)) + return NULL; + + if (unlikely(!rq)) + return NULL; + + if (unlikely(rq->internal_tag < 0)) + return NULL; + + if (unlikely(rq->internal_tag >= rq->q->nr_requests)) + return NULL; + + return &ssg->rq_info[rq->internal_tag]; +} + +static inline void set_thread_group_info(struct ssg_request_info *rqi) +{ + struct task_struct *gleader = current->group_leader; + + rqi->tgid = task_tgid_nr(gleader); + strncpy(rqi->tg_name, gleader->comm, TASK_COMM_LEN - 1); + rqi->tg_name[TASK_COMM_LEN - 1] = '\0'; + rqi->tg_start_time = gleader->start_time; +} + +static inline void clear_thread_group_info(struct ssg_request_info *rqi) +{ + rqi->tgid = 0; + rqi->tg_name[0] = '\0'; + rqi->tg_start_time = 0; +} + /* * remove rq from rbtree and fifo. 
*/ @@ -359,75 +431,132 @@ static struct request *ssg_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct ssg_data *ssg = hctx->queue->elevator->elevator_data; struct request *rq; + struct ssg_request_info *rqi; spin_lock(&ssg->lock); rq = __ssg_dispatch_request(ssg); spin_unlock(&ssg->lock); + rqi = ssg_rq_info(ssg, rq); + if (likely(rqi)) + rqi->data_size = blk_rq_bytes(rq); + return rq; } -static unsigned int ssg_sched_tags_map_nr(struct request_queue *q) +static void ssg_completed_request(struct request *rq, u64 now) { - return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.map_nr; + struct ssg_data *ssg = rq->q->elevator->elevator_data; + struct ssg_request_info *rqi; + + rqi = ssg_rq_info(ssg, rq); + if (likely(rqi)) + blk_sec_stats_account_io_done(rq, rqi->data_size, + rqi->tgid, rqi->tg_name, rqi->tg_start_time); } -static unsigned int ssg_sched_tags_depth(struct request_queue *q) +static void ssg_set_shallow_depth(struct ssg_data *ssg, struct blk_mq_tags *tags) { - return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.depth; + unsigned int depth = tags->bitmap_tags.sb.depth; + unsigned int map_nr = tags->bitmap_tags.sb.map_nr; + + ssg->max_async_write_rqs = depth * max_async_write_ratio / 100U; + ssg->max_async_write_rqs = + min_t(int, ssg->max_async_write_rqs, MAX_ASYNC_WRITE_RQS); + ssg->async_write_shallow_depth = + max_t(unsigned int, ssg->max_async_write_rqs / map_nr, 1); + + ssg->max_tgroup_rqs = depth * max_tgroup_io_ratio / 100U; + ssg->tgroup_shallow_depth = + max_t(unsigned int, ssg->max_tgroup_rqs / map_nr, 1); } -static void ssg_set_shallow_depth(struct request_queue *q) +static void ssg_depth_updated(struct blk_mq_hw_ctx *hctx) { + struct request_queue *q = hctx->queue; struct ssg_data *ssg = q->elevator->elevator_data; - unsigned int map_nr; - unsigned int depth; - unsigned int async_write_depth; + struct blk_mq_tags *tags = hctx->sched_tags; + unsigned int depth = tags->bitmap_tags.sb.depth; - depth = ssg_sched_tags_depth(q); - map_nr = ssg_sched_tags_map_nr(q); + ssg->congestion_threshold_rqs = depth * congestion_threshold / 100U; - async_write_depth = depth * async_write_percent / 100U; - async_write_depth = min(async_write_depth, max_async_write_tags); + kfree(ssg->rq_info); + ssg->rq_info = kmalloc(depth * sizeof(struct ssg_request_info), + GFP_KERNEL | __GFP_ZERO); + if (ZERO_OR_NULL_PTR(ssg->rq_info)) + ssg->rq_info = NULL; - ssg->async_write_depth = - (async_write_depth / map_nr) ? 
(async_write_depth / map_nr) : 1; + ssg_set_shallow_depth(ssg, tags); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, + ssg->async_write_shallow_depth); + + ssg_blkcg_depth_updated(hctx); } -static void ssg_depth_updated(struct blk_mq_hw_ctx *hctx) +static inline bool ssg_op_is_async_write(unsigned int op) { - struct request_queue *q = hctx->queue; - struct ssg_data *ssg = q->elevator->elevator_data; + return (op & REQ_OP_MASK) == REQ_OP_WRITE && !op_is_sync(op); +} + +static unsigned int ssg_async_write_shallow_depth(unsigned int op, + struct blk_mq_alloc_data *data) +{ + struct ssg_data *ssg = data->q->elevator->elevator_data; - ssg_set_shallow_depth(q); - sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, - ssg->async_write_depth); + if (!ssg_op_is_async_write(op)) + return 0; + + if (atomic_read(&ssg->async_write_rqs) < ssg->max_async_write_rqs) + return 0; + + return ssg->async_write_shallow_depth; } -static inline bool ssg_op_is_async_write(unsigned int op) +static unsigned int ssg_tgroup_shallow_depth(struct blk_mq_alloc_data *data) { - return (op & REQ_OP_MASK) == REQ_OP_WRITE && !op_is_sync(op); + struct ssg_data *ssg = data->q->elevator->elevator_data; + pid_t tgid = task_tgid_nr(current->group_leader); + int nr_requests = data->q->nr_requests; + int tgroup_rqs = 0; + int i; + + if (unlikely(!ssg->rq_info)) + return 0; + + for (i = 0; i < nr_requests; i++) + if (tgid == ssg->rq_info[i].tgid) + tgroup_rqs++; + + if (tgroup_rqs < ssg->max_tgroup_rqs) + return 0; + + return ssg->tgroup_shallow_depth; } static void ssg_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) { struct ssg_data *ssg = data->q->elevator->elevator_data; + unsigned int shallow_depth = ssg_blkcg_shallow_depth(data->q); - if (!ssg_op_is_async_write(op)) - return; + shallow_depth = min_not_zero(shallow_depth, + ssg_async_write_shallow_depth(op, data)); + + if (atomic_read(&ssg->allocated_rqs) > ssg->congestion_threshold_rqs) + shallow_depth = min_not_zero(shallow_depth, + ssg_tgroup_shallow_depth(data)); - if (atomic_read(&ssg->async_write_cnt) > max_async_write_tags) - data->shallow_depth = ssg->async_write_depth; + data->shallow_depth = shallow_depth; } static int ssg_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - struct request_queue *q = hctx->queue; - struct ssg_data *ssg = q->elevator->elevator_data; + struct ssg_data *ssg = hctx->queue->elevator->elevator_data; + struct blk_mq_tags *tags = hctx->sched_tags; + + ssg_set_shallow_depth(ssg, tags); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, + ssg->async_write_shallow_depth); - ssg_set_shallow_depth(q); - sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, - ssg->async_write_depth); return 0; } @@ -435,10 +564,15 @@ static void ssg_exit_queue(struct elevator_queue *e) { struct ssg_data *ssg = e->elevator_data; + ssg_blkcg_deactivate(ssg->queue); + BUG_ON(!list_empty(&ssg->fifo_list[READ])); BUG_ON(!list_empty(&ssg->fifo_list[WRITE])); + kfree(ssg->rq_info); kfree(ssg); + + blk_sec_stats_account_exit(e); } /* @@ -460,6 +594,7 @@ static int ssg_init_queue(struct request_queue *q, struct elevator_type *e) } eq->elevator_data = ssg; + ssg->queue = q; INIT_LIST_HEAD(&ssg->fifo_list[READ]); INIT_LIST_HEAD(&ssg->fifo_list[WRITE]); ssg->sort_list[READ] = RB_ROOT; @@ -468,12 +603,25 @@ static int ssg_init_queue(struct request_queue *q, struct elevator_type *e) ssg->fifo_expire[WRITE] = write_expire; ssg->max_write_starvation = max_write_starvation; ssg->front_merges = 1; - 
atomic_set(&ssg->async_write_cnt, 0); + + atomic_set(&ssg->allocated_rqs, 0); + atomic_set(&ssg->async_write_rqs, 0); + ssg->congestion_threshold_rqs = + q->nr_requests * congestion_threshold / 100U; + ssg->rq_info = kmalloc(q->nr_requests * sizeof(struct ssg_request_info), + GFP_KERNEL | __GFP_ZERO); + if (ZERO_OR_NULL_PTR(ssg->rq_info)) + ssg->rq_info = NULL; + spin_lock_init(&ssg->lock); spin_lock_init(&ssg->zone_lock); INIT_LIST_HEAD(&ssg->dispatch); + ssg_blkcg_activate(q); + q->elevator = eq; + + blk_sec_stats_account_init(q); return 0; } @@ -584,11 +732,22 @@ static void ssg_insert_requests(struct blk_mq_hw_ctx *hctx, static void ssg_prepare_request(struct request *rq, struct bio *bio) { struct ssg_data *ssg = rq->q->elevator->elevator_data; + struct ssg_request_info *rqi; - if (ssg_op_is_async_write(rq->cmd_flags)) - atomic_inc(&ssg->async_write_cnt); + atomic_inc(&ssg->allocated_rqs); + + rqi = ssg_rq_info(ssg, rq); + if (likely(rqi)) { + set_thread_group_info(rqi); + + rcu_read_lock(); + rqi->blkg = blkg_lookup(css_to_blkcg(blkcg_css()), rq->q); + ssg_blkcg_inc_rq(rqi->blkg); + rcu_read_unlock(); + } - blk_sec_account_process_IO(bio); + if (ssg_op_is_async_write(rq->cmd_flags)) + atomic_inc(&ssg->async_write_rqs); } /* @@ -609,6 +768,7 @@ static void ssg_finish_request(struct request *rq) { struct request_queue *q = rq->q; struct ssg_data *ssg = q->elevator->elevator_data; + struct ssg_request_info *rqi; if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -623,8 +783,17 @@ static void ssg_finish_request(struct request *rq) if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) return; + atomic_dec(&ssg->allocated_rqs); + + rqi = ssg_rq_info(ssg, rq); + if (likely(rqi)) { + clear_thread_group_info(rqi); + ssg_blkcg_dec_rq(rqi->blkg); + rqi->blkg = NULL; + } + if (ssg_op_is_async_write(rq->cmd_flags)) - atomic_dec(&ssg->async_write_cnt); + atomic_dec(&ssg->async_write_rqs); } static bool ssg_has_work(struct blk_mq_hw_ctx *hctx) @@ -664,7 +833,8 @@ SHOW_FUNCTION(ssg_read_expire_show, ssg->fifo_expire[READ], 1); SHOW_FUNCTION(ssg_write_expire_show, ssg->fifo_expire[WRITE], 1); SHOW_FUNCTION(ssg_max_write_starvation_show, ssg->max_write_starvation, 0); SHOW_FUNCTION(ssg_front_merges_show, ssg->front_merges, 0); -SHOW_FUNCTION(ssg_async_write_depth_show, ssg->async_write_depth, 0); +SHOW_FUNCTION(ssg_tgroup_shallow_depth_show, ssg->tgroup_shallow_depth, 0); +SHOW_FUNCTION(ssg_async_write_shallow_depth_show, ssg->async_write_shallow_depth, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -700,7 +870,8 @@ static struct elv_fs_entry ssg_attrs[] = { SSG_ATTR(write_expire), SSG_ATTR(max_write_starvation), SSG_ATTR(front_merges), - SSG_ATTR_RO(async_write_depth), + SSG_ATTR_RO(tgroup_shallow_depth), + SSG_ATTR_RO(async_write_shallow_depth), __ATTR_NULL }; @@ -817,6 +988,7 @@ static struct elevator_type ssg_iosched = { .ops = { .insert_requests = ssg_insert_requests, .dispatch_request = ssg_dispatch_request, + .completed_request = ssg_completed_request, .prepare_request = ssg_prepare_request, .finish_request = ssg_finish_request, .next_request = elv_rb_latter_request, @@ -846,11 +1018,24 @@ MODULE_ALIAS("ssg"); static int __init ssg_iosched_init(void) { - return elv_register(&ssg_iosched); + int ret; + + ret = elv_register(&ssg_iosched); + if (ret) + return ret; + + ret = ssg_blkcg_init(); + if (ret) { + elv_unregister(&ssg_iosched); + return ret; + } + + return ret; } static void __exit ssg_iosched_exit(void) { + ssg_blkcg_exit(); elv_unregister(&ssg_iosched); } 
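
The shallow depth values set up above come from a simple budget calculation in ssg_set_shallow_depth(): async writes may use at most max_async_write_ratio percent of the scheduler tags (further clamped to MAX_ASYNC_WRITE_RQS), a single thread group at most max_tgroup_io_ratio percent, and each budget is then divided by the number of sbitmap words with a floor of 1, since the resulting shallow depth is applied per word. The standalone sketch below only restates that arithmetic; the sample numbers (256 tags over 4 words) and the helper names are hypothetical, not part of the patch.

#include <stdio.h>

#define MAX_ASYNC_WRITE_RQS 8

static const unsigned int max_async_write_ratio = 25; /* % of sched tags usable by async writes */
static const unsigned int max_tgroup_io_ratio = 50;   /* % of sched tags usable by one thread group */

/* per-word shallow depth, never below 1 (mirrors the max_t(..., x / map_nr, 1) in the patch) */
static unsigned int per_word_depth(unsigned int total_rqs, unsigned int map_nr)
{
	unsigned int d = total_rqs / map_nr;

	return d ? d : 1;
}

int main(void)
{
	/* hypothetical tag map: 256 scheduler tags spread over 4 sbitmap words */
	unsigned int depth = 256, map_nr = 4;

	unsigned int max_async = depth * max_async_write_ratio / 100U;
	unsigned int max_tgroup = depth * max_tgroup_io_ratio / 100U;

	if (max_async > MAX_ASYNC_WRITE_RQS)	/* the min_t() clamp in the patch */
		max_async = MAX_ASYNC_WRITE_RQS;

	printf("async writes: at most %u rqs, shallow depth %u per word\n",
	       max_async, per_word_depth(max_async, map_nr));
	printf("thread group: at most %u rqs, shallow depth %u per word\n",
	       max_tgroup, per_word_depth(max_tgroup, map_nr));

	return 0;
}

ssg_limit_depth() applies these per-word limits only once the corresponding budget is used up (and, for thread groups, only when the queue is past its congestion threshold), so a burst of async writes or one busy thread group cannot monopolize the scheduler tags.
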
From 13ccf20ac4a98b6407a5caadef3602097d93af92 Mon Sep 17 00:00:00 2001
From: Kazuki Hashimoto
Date: Fri, 17 Jun 2022 19:36:41 +0900
Subject: [PATCH 10/15] defconfig: Use SSG

Signed-off-by: Kazuki Hashimoto
Signed-off-by: saikiran2001
---
 arch/arm64/configs/vendor/lahaina-qgki_defconfig | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/configs/vendor/lahaina-qgki_defconfig b/arch/arm64/configs/vendor/lahaina-qgki_defconfig
index 854ab1c87ef5..62ea58c3f874 100644
--- a/arch/arm64/configs/vendor/lahaina-qgki_defconfig
+++ b/arch/arm64/configs/vendor/lahaina-qgki_defconfig
@@ -146,8 +146,8 @@ CONFIG_MODVERSIONS=y
 # CONFIG_VMAP_STACK is not set
 CONFIG_BLK_INLINE_ENCRYPTION=y
 CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
-CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_MQ_IOSCHED_SSG=y
+CONFIG_MQ_IOSCHED_SSG_CGROUP=y
 CONFIG_GKI_HACKS_TO_FIX=y
 CONFIG_GKI_OPT_FEATURES=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set

From 8bdf6195287e87abcac09c5ccecbb24ed0001424 Mon Sep 17 00:00:00 2001
From: Juhyung Park
Date: Fri, 17 Jun 2022 17:45:57 +0900
Subject: [PATCH 11/15] block: use ssg by default

Signed-off-by: Juhyung Park
---
 block/elevator.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/elevator.c b/block/elevator.c
index 3ba826230c57..7c2c3e627355 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -632,7 +632,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
 	if (q->nr_hw_queues != 1)
 		return NULL;
 
-	return elevator_get(q, "mq-deadline", false);
+	return elevator_get(q, "ssg", false);
 }
 
 /*

From 3b632eda2ff0dba8c56839157bf3c93e2c408991 Mon Sep 17 00:00:00 2001
From: freak07
Date: Thu, 7 Oct 2021 12:24:42 +0200
Subject: [PATCH 12/15] drivers: scsi: ufs-qcom: set auto hibern8 back to 1ms

Previous generations of Qualcomm kernels used a value of 1ms for the
auto-hibernate idle timer. QCOM initially raised that value to 5ms on 5.4
kernels, and later to 10ms, to improve performance.

5ms: https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/commit/608ed7596857111c8538b6c7c1be939aa000c006
10ms: https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/commit/ef3f24dd1cf7252517ab5ba9e8a7c74718370d2e

According to those commits, there was only a negligible power impact. However,
shortly after the increase to 10ms, the change was reverted to 5ms because of
its power impact:
https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/commit/b16f5839ec0135aed413e86da9f69da25198901d

Since all my 5.4 devices performed worse than their 4.19 predecessors in
idle/suspend, I found this setting to be one of the causes.

Revert this value back to 1ms, as it was on previous QCOM generations, to
improve power consumption during idle/suspend.
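
For context on the diff that follows: the 5 ms and 1 ms values are not written to the controller as plain milliseconds; hba->ahit packs a (timer, scale) pair into the auto-hibernate register via FIELD_PREP(). The short standalone sketch below assumes the usual UFSHCI layout, with the timer count in the low bits and a scale field that selects a power-of-ten microsecond unit; the macro names and bit positions are illustrative assumptions, not taken from this patch. It only shows why (5, 3) decodes to 5 ms and (1, 3) to 1 ms.

#include <stdio.h>
#include <stdint.h>

#define AHIBERN8_TIMER_MASK	0x3ffu				/* assumed bits 9:0 */
#define AHIBERN8_SCALE_SHIFT	10
#define AHIBERN8_SCALE_MASK	(0x7u << AHIBERN8_SCALE_SHIFT)	/* assumed bits 12:10 */

/* analogous to FIELD_PREP(TIMER_MASK, timer) | FIELD_PREP(SCALE_MASK, scale) */
static uint32_t ahit_encode(uint32_t timer, uint32_t scale)
{
	return (timer & AHIBERN8_TIMER_MASK) |
	       ((scale << AHIBERN8_SCALE_SHIFT) & AHIBERN8_SCALE_MASK);
}

static uint64_t ahit_to_us(uint32_t ahit)
{
	uint32_t timer = ahit & AHIBERN8_TIMER_MASK;
	uint32_t scale = (ahit & AHIBERN8_SCALE_MASK) >> AHIBERN8_SCALE_SHIFT;
	uint64_t unit_us = 1;

	while (scale--)
		unit_us *= 10;	/* scale 0 = 1 us, 3 = 1000 us (1 ms), ... */

	return (uint64_t)timer * unit_us;
}

int main(void)
{
	printf("old ahit (timer 5, scale 3) = %llu us\n",
	       (unsigned long long)ahit_to_us(ahit_encode(5, 3)));
	printf("new ahit (timer 1, scale 3) = %llu us\n",
	       (unsigned long long)ahit_to_us(ahit_encode(1, 3)));
	return 0;
}

In other words, the patch shortens the idle window the controller waits for before entering hibernate from 5 ms to 1 ms; the rest of the register encoding is unchanged.
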
Change-Id: Ibcbeec83a007ec934081438c7dc11e7541173643 --- drivers/scsi/ufs/ufs-qcom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index be567f650dd4..8c341d927330 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1856,8 +1856,8 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) ufs_spin_lock_irqsave(hba->host->host_lock, flags); /* Set the rpm auto suspend delay to 3s */ hba->host->hostt->rpm_autosuspend_delay = UFS_QCOM_AUTO_SUSPEND_DELAY; - /* Set the default auto-hiberate idle timer value to 5ms */ - hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 5) | + /* Set the default auto-hibernate idle timer value to 1ms */ + hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 1) | FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); /* Set the clock gating delay to performance mode */ hba->clk_gating.delay_ms = UFS_QCOM_CLK_GATING_DELAY_MS_PERF; From 5adf8f8126ed16bc01098fced5c18764f7d72b5b Mon Sep 17 00:00:00 2001 From: Lecopzer Chen Date: Wed, 19 Jan 2022 15:10:32 +0800 Subject: [PATCH 13/15] ANDROID: kbuild: fix ld_flags missing for LTO commit dc5723b02e523b ("kbuild: add support for Clang LTO") from upstream had included scripts/Makefile.lib, but in android12-5.4 it seems missing and makes ld_flags always empty. Bug: 215292602 Fixes: 2e39b40dd2180 ("ANDROID: kbuild: add support for Clang LTO") Signed-off-by: Lecopzer Chen Change-Id: I4fcbf84e75df0dcd44b175d722fdf6869101e191 Signed-off-by: Divyanshu-Modi --- scripts/Makefile.modpost | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 7d530530384d..210bf8a86030 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -43,6 +43,9 @@ __modpost: include include/config/auto.conf include scripts/Kbuild.include +# for ld_flags +include scripts/Makefile.lib + kernelsymfile := $(objtree)/Module.symvers modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers From e3e4137e3311c7285f285f7425116a8ac42ef9f8 Mon Sep 17 00:00:00 2001 From: Dmitry <42121541+dkpost3@users.noreply.github.com> Date: Sat, 26 Nov 2022 05:47:26 +0300 Subject: [PATCH 14/15] Add files via upload auto build flashable kernel --- build.sh | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 build.sh diff --git a/build.sh b/build.sh new file mode 100644 index 000000000000..4ff864a80fd3 --- /dev/null +++ b/build.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# +# Compile script for QuicksilveR kernel +# Copyright (C) 2020-2021 Adithya R. 
+# (edits for CrystalCore kernel @dkpost3)
+
+SECONDS=0 # builtin bash timer
+TC_DIR="$HOME/clang-r416183b1"
+AK3_DIR="$HOME/AnyKernel3"
+DEFCONFIG="vendor/lahaina-qgki_defconfig vendor/xiaomi_QGKI.config vendor/lisa_QGKI.config"
+
+ZIPNAME="CrystalCore-lisa-$(date '+%Y%m%d-%H%M').zip"
+
+git clone https://github.com/aseelps/prebuilts_clang_host_linux-x86_r416183b1.git $TC_DIR
+
+if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
+	head=$(git rev-parse --verify HEAD 2>/dev/null); then
+	ZIPNAME="${ZIPNAME::-4}-$(echo $head | cut -c1-8).zip"
+fi
+
+MAKE_PARAMS="O=out ARCH=arm64 CC=clang CLANG_TRIPLE=aarch64-linux-gnu- LD=ld.lld LLVM=1 LLVM_IAS=1 \
+	CROSS_COMPILE=$TC_DIR/bin/llvm-"
+
+export PATH="$TC_DIR/bin:$PATH"
+
+if [[ $1 = "-r" || $1 = "--regen" ]]; then
+	make $MAKE_PARAMS $DEFCONFIG savedefconfig
+	cp out/defconfig arch/arm64/configs/$DEFCONFIG
+	echo -e "\nSuccessfully regenerated defconfig at $DEFCONFIG"
+	exit
+fi
+
+if [[ $1 = "-c" || $1 = "--clean" ]]; then
+	rm -rf out
+	echo "Cleaned output folder"
+fi
+
+mkdir -p out
+make $MAKE_PARAMS $DEFCONFIG
+
+echo -e "\nStarting compilation...\n"
+make -j$(nproc --all) $MAKE_PARAMS || exit $?
+make -j$(nproc --all) $MAKE_PARAMS INSTALL_MOD_PATH=modules INSTALL_MOD_STRIP=1 modules_install
+
+kernel="out/arch/arm64/boot/Image"
+dtb="out/arch/arm64/boot/dts/vendor/qcom/yupik.dtb"
+dtbo="out/arch/arm64/boot/dts/vendor/qcom/lisa-sm7325-overlay.dtbo"
+
+if [ -f "$kernel" ] && [ -f "$dtb" ] && [ -f "$dtbo" ]; then
+	echo -e "\nKernel compiled successfully! Zipping up...\n"
+	if [ -d "$AK3_DIR" ]; then
+		cp -r $AK3_DIR AnyKernel3
+		git -C AnyKernel3 checkout lisa &> /dev/null
+	elif ! git clone -q https://github.com/ghostrider-reborn/AnyKernel3 -b lisa; then
+		echo -e "\nAnyKernel3 repo not found locally and couldn't clone from GitHub! Aborting..."
+		exit 1
+	fi
+	cp $kernel AnyKernel3
+	cp $dtb AnyKernel3/dtb
+	wget -P scripts/dtc/libfdt/ https://raw.githubusercontent.com/Anonym3310/mkdtimg/master/mkdtboimg.py
+	python2 scripts/dtc/libfdt/mkdtboimg.py create AnyKernel3/dtbo.img --page_size=4096 $dtbo
+	cp $(find out/modules/lib/modules/5.4* -name '*.ko') AnyKernel3/modules/vendor/lib/modules/
+	cp out/modules/lib/modules/5.4*/modules.{alias,dep,softdep} AnyKernel3/modules/vendor/lib/modules
+	cp out/modules/lib/modules/5.4*/modules.order AnyKernel3/modules/vendor/lib/modules/modules.load
+	sed -i 's/\(kernel\/[^: ]*\/\)\([^: ]*\.ko\)/\/vendor\/lib\/modules\/\2/g' AnyKernel3/modules/vendor/lib/modules/modules.dep
+	sed -i 's/.*\///g' AnyKernel3/modules/vendor/lib/modules/modules.load
+	rm -rf out/arch/arm64/boot out/modules
+	cd AnyKernel3
+	zip -r9 "../$ZIPNAME" * -x .git README.md *placeholder
+	cd ..
+	rm -rf AnyKernel3
+	echo -e "\nCompleted in $((SECONDS / 60)) minute(s) and $((SECONDS % 60)) second(s) !"
+	echo "Zip: $ZIPNAME"
+	[ -x "$(command -v gdrive)" ] && gdrive upload --share "$ZIPNAME"
+else
+	echo -e "\nCompilation failed!"
+	exit 1
+fi
+
+rm -rf $TC_DIR scripts/dtc/libfdt/mkdtboimg.py

From d1d8c691ba7f3e69333c07fd2b0827772eb0fc24 Mon Sep 17 00:00:00 2001
From: Dmitry <42121541+dkpost3@users.noreply.github.com>
Date: Sat, 26 Nov 2022 07:01:25 +0300
Subject: [PATCH 15/15] Update build.sh

auto build flashable kernel (5.4.219-CrystalCore)
---
 build.sh | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/build.sh b/build.sh
index 4ff864a80fd3..b1ec39aa16d2 100644
--- a/build.sh
+++ b/build.sh
@@ -3,15 +3,15 @@
 # Compile script for QuicksilveR kernel
 # Copyright (C) 2020-2021 Adithya R.
# (edits for CrystalCore kernel @dkpost3) - + SECONDS=0 # builtin bash timer -TC_DIR="$HOME/clang-r416183b1" +TC_DIR="$HOME/clang/clang-r450784d" AK3_DIR="$HOME/AnyKernel3" DEFCONFIG="vendor/lahaina-qgki_defconfig vendor/xiaomi_QGKI.config vendor/lisa_QGKI.config" - -ZIPNAME="CrystalCore-lisa-$(date '+%Y%m%d-%H%M').zip" -git clone https://github.com/aseelps/prebuilts_clang_host_linux-x86_r416183b1.git $TC_DIR +git clone https://gitlab.com/lynnnnzx/clang-r450784e.git $TC_DIR + +ZIPNAME="CrystalCore-lisa-$(date '+%Y%m%d-%H%M').zip" if test -z "$(git rev-parse --show-cdup 2>/dev/null)" && head=$(git rev-parse --verify HEAD 2>/dev/null); then @@ -58,7 +58,7 @@ if [ -f "$kernel" ] && [ -f "$dtb" ] && [ -f "$dtbo" ]; then cp $kernel AnyKernel3 cp $dtb AnyKernel3/dtb wget -P scripts/dtc/libfdt/ https://raw.githubusercontent.com/Anonym3310/mkdtimg/master/mkdtboimg.py - python2 scripts/dtc/libfdt/mkdtboimg.py create AnyKernel3/dtbo.img --page_size=4096 $dtbo + python3 scripts/dtc/libfdt/mkdtboimg.py create AnyKernel3/dtbo.img --page_size=4096 $dtbo cp $(find out/modules/lib/modules/5.4* -name '*.ko') AnyKernel3/modules/vendor/lib/modules/ cp out/modules/lib/modules/5.4*/modules.{alias,dep,softdep} AnyKernel3/modules/vendor/lib/modules cp out/modules/lib/modules/5.4*/modules.order AnyKernel3/modules/vendor/lib/modules/modules.load @@ -77,4 +77,5 @@ else exit 1 fi -rm -rf $TC_DIR scripts/dtc/libfdt/mkdtboimg.py +rm -rf $TC_DIR +rm -rf scripts/dtc/libfdt/mkdtboimg.py