Openwrt/package/boot/uboot-mediatek/patches/002-nand-add-spi-nand-driver.patch
OpenWrt commit b9cf9ebdb7 ("mediatek: add uboot")
Signed-off-by: John Crispin <john@phrozen.org>
2020-05-04 16:31:19 +02:00

From de8b6cf615be20b25d0f3c817866de2c0d46a704 Mon Sep 17 00:00:00 2001
From: Sam Shih <sam.shih@mediatek.com>
Date: Mon, 20 Apr 2020 17:10:05 +0800
Subject: [PATCH 1/3] nand: add spi nand driver
Add spi nand driver support for mt7622 based on nfi controller
Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
---
drivers/mtd/Kconfig | 7 +
drivers/mtd/Makefile | 4 +
drivers/mtd/nand/raw/nand.c | 2 +
drivers/mtd/nandx/NOTICE | 52 +
drivers/mtd/nandx/Nandx.config | 17 +
drivers/mtd/nandx/Nandx.mk | 91 ++
drivers/mtd/nandx/README | 31 +
drivers/mtd/nandx/core/Nandx.mk | 38 +
drivers/mtd/nandx/core/core_io.c | 735 +++++++++
drivers/mtd/nandx/core/core_io.h | 39 +
drivers/mtd/nandx/core/nand/device_spi.c | 200 +++
drivers/mtd/nandx/core/nand/device_spi.h | 132 ++
drivers/mtd/nandx/core/nand/nand_spi.c | 526 +++++++
drivers/mtd/nandx/core/nand/nand_spi.h | 35 +
drivers/mtd/nandx/core/nand_base.c | 304 ++++
drivers/mtd/nandx/core/nand_base.h | 71 +
drivers/mtd/nandx/core/nand_chip.c | 272 ++++
drivers/mtd/nandx/core/nand_chip.h | 103 ++
drivers/mtd/nandx/core/nand_device.c | 285 ++++
drivers/mtd/nandx/core/nand_device.h | 608 ++++++++
drivers/mtd/nandx/core/nfi.h | 51 +
drivers/mtd/nandx/core/nfi/nfi_base.c | 1357 +++++++++++++++++
drivers/mtd/nandx/core/nfi/nfi_base.h | 95 ++
drivers/mtd/nandx/core/nfi/nfi_regs.h | 114 ++
drivers/mtd/nandx/core/nfi/nfi_spi.c | 689 +++++++++
drivers/mtd/nandx/core/nfi/nfi_spi.h | 44 +
drivers/mtd/nandx/core/nfi/nfi_spi_regs.h | 64 +
drivers/mtd/nandx/core/nfi/nfiecc.c | 510 +++++++
drivers/mtd/nandx/core/nfi/nfiecc.h | 90 ++
drivers/mtd/nandx/core/nfi/nfiecc_regs.h | 51 +
drivers/mtd/nandx/driver/Nandx.mk | 18 +
drivers/mtd/nandx/driver/bbt/bbt.c | 408 +++++
drivers/mtd/nandx/driver/uboot/driver.c | 574 +++++++
drivers/mtd/nandx/include/Nandx.mk | 16 +
drivers/mtd/nandx/include/internal/bbt.h | 62 +
.../mtd/nandx/include/internal/nandx_core.h | 250 +++
.../mtd/nandx/include/internal/nandx_errno.h | 40 +
.../mtd/nandx/include/internal/nandx_util.h | 221 +++
drivers/mtd/nandx/include/uboot/nandx_os.h | 78 +
include/configs/mt7622.h | 25 +
40 files changed, 8309 insertions(+)
create mode 100644 drivers/mtd/nandx/NOTICE
create mode 100644 drivers/mtd/nandx/Nandx.config
create mode 100644 drivers/mtd/nandx/Nandx.mk
create mode 100644 drivers/mtd/nandx/README
create mode 100644 drivers/mtd/nandx/core/Nandx.mk
create mode 100644 drivers/mtd/nandx/core/core_io.c
create mode 100644 drivers/mtd/nandx/core/core_io.h
create mode 100644 drivers/mtd/nandx/core/nand/device_spi.c
create mode 100644 drivers/mtd/nandx/core/nand/device_spi.h
create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.c
create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.h
create mode 100644 drivers/mtd/nandx/core/nand_base.c
create mode 100644 drivers/mtd/nandx/core/nand_base.h
create mode 100644 drivers/mtd/nandx/core/nand_chip.c
create mode 100644 drivers/mtd/nandx/core/nand_chip.h
create mode 100644 drivers/mtd/nandx/core/nand_device.c
create mode 100644 drivers/mtd/nandx/core/nand_device.h
create mode 100644 drivers/mtd/nandx/core/nfi.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.c
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_regs.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.c
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.c
create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.h
create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc_regs.h
create mode 100644 drivers/mtd/nandx/driver/Nandx.mk
create mode 100644 drivers/mtd/nandx/driver/bbt/bbt.c
create mode 100644 drivers/mtd/nandx/driver/uboot/driver.c
create mode 100644 drivers/mtd/nandx/include/Nandx.mk
create mode 100644 drivers/mtd/nandx/include/internal/bbt.h
create mode 100644 drivers/mtd/nandx/include/internal/nandx_core.h
create mode 100644 drivers/mtd/nandx/include/internal/nandx_errno.h
create mode 100644 drivers/mtd/nandx/include/internal/nandx_util.h
create mode 100644 drivers/mtd/nandx/include/uboot/nandx_os.h
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5e7571cf3d..34a59b44b9 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -101,6 +101,13 @@ config HBMC_AM654
This is the driver for HyperBus controller on TI's AM65x and
other SoCs
+config MTK_SPI_NAND
+ tristate "Mediatek SPI Nand"
+ depends on DM_MTD
+ help
+ This option enables support for SPI NAND devices via the MediaTek
+ NFI controller.
+
source "drivers/mtd/nand/Kconfig"
source "drivers/mtd/spi/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 318788c5e2..1df1031b23 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -41,3 +41,7 @@ obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPORT) += spi/
obj-$(CONFIG_SPL_UBI) += ubispl/
endif
+
+ifeq ($(CONFIG_MTK_SPI_NAND), y)
+include $(srctree)/drivers/mtd/nandx/Nandx.mk
+endif
diff --git a/drivers/mtd/nand/raw/nand.c b/drivers/mtd/nand/raw/nand.c
index 026419e4e6..4be0c7d8f3 100644
--- a/drivers/mtd/nand/raw/nand.c
+++ b/drivers/mtd/nand/raw/nand.c
@@ -91,8 +91,10 @@ static void nand_init_chip(int i)
if (board_nand_init(nand))
return;
+#ifndef CONFIG_MTK_SPI_NAND
if (nand_scan(mtd, maxchips))
return;
+#endif
nand_register(i, mtd);
}
diff --git a/drivers/mtd/nandx/NOTICE b/drivers/mtd/nandx/NOTICE
new file mode 100644
index 0000000000..1a06ca3867
--- /dev/null
+++ b/drivers/mtd/nandx/NOTICE
@@ -0,0 +1,52 @@
+
+/*
+ * Nandx - Mediatek Common Nand Driver
+ * Copyright (C) 2017 MediaTek Inc.
+ *
+ * Nandx is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+####################################################################################################
\ No newline at end of file
diff --git a/drivers/mtd/nandx/Nandx.config b/drivers/mtd/nandx/Nandx.config
new file mode 100644
index 0000000000..35705ee28d
--- /dev/null
+++ b/drivers/mtd/nandx/Nandx.config
@@ -0,0 +1,17 @@
+NANDX_SIMULATOR_SUPPORT := n
+NANDX_CTP_SUPPORT := n
+NANDX_DA_SUPPORT := n
+NANDX_PRELOADER_SUPPORT := n
+NANDX_LK_SUPPORT := n
+NANDX_KERNEL_SUPPORT := n
+NANDX_BROM_SUPPORT := n
+NANDX_UBOOT_SUPPORT := y
+NANDX_BBT_SUPPORT := y
+
+NANDX_NAND_SPI := y
+NANDX_NAND_SLC := n
+NANDX_NAND_MLC := n
+NANDX_NAND_TLC := n
+NANDX_NFI_BASE := y
+NANDX_NFI_ECC := y
+NANDX_NFI_SPI := y
diff --git a/drivers/mtd/nandx/Nandx.mk b/drivers/mtd/nandx/Nandx.mk
new file mode 100644
index 0000000000..f5a6f2a628
--- /dev/null
+++ b/drivers/mtd/nandx/Nandx.mk
@@ -0,0 +1,91 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
+include $(nandx_dir)/Nandx.config
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+sim-obj :=
+sim-inc :=
+nandx-obj := sim-obj
+nandx-prefix := .
+nandx-postfix := %.o
+sim-inc += -I$(nandx-prefix)/include/internal
+sim-inc += -I$(nandx-prefix)/include/simulator
+endif
+
+ifeq ($(NANDX_CTP_SUPPORT), y)
+nandx-obj := C_SRC_FILES
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+INC_DIRS += $(nandx_dir)/include/internal
+INC_DIRS += $(nandx_dir)/include/ctp
+endif
+
+ifeq ($(NANDX_DA_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.o
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
+endif
+
+ifeq ($(NANDX_PRELOADER_SUPPORT), y)
+nandx-obj := MOD_SRC
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
+endif
+
+ifeq ($(NANDX_LK_SUPPORT), y)
+nandx-obj := MODULE_SRCS
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+GLOBAL_INCLUDES += $(nandx_dir)/include/internal
+GLOBAL_INCLUDES += $(nandx_dir)/include/lk
+endif
+
+ifeq ($(NANDX_KERNEL_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := nandx
+nandx-postfix := %.o
+ccflags-y += -I$(nandx_dir)/include/internal
+ccflags-y += -I$(nandx_dir)/include/kernel
+endif
+
+ifeq ($(NANDX_UBOOT_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := nandx
+nandx-postfix := %.o
+ccflags-y += -I$(nandx_dir)/include/internal
+ccflags-y += -I$(nandx_dir)/include/uboot
+endif
+
+nandx-y :=
+include $(nandx_dir)/core/Nandx.mk
+nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+
+nandx-y :=
+include $(nandx_dir)/driver/Nandx.mk
+nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+cc := gcc
+CFLAGS += $(sim-inc)
+
+.PHONY:nandx
+nandx: $(sim-obj)
+ $(cc) $(sim-obj) -o nandx
+
+.PHONY:clean
+clean:
+ rm -rf $(sim-obj) nandx
+endif
diff --git a/drivers/mtd/nandx/README b/drivers/mtd/nandx/README
new file mode 100644
index 0000000000..0feaeaeb88
--- /dev/null
+++ b/drivers/mtd/nandx/README
@@ -0,0 +1,31 @@
+
+ NAND2.0
+ ===============================
+
+ NAND2.0 is a common NAND driver designed to access different
+types of NAND (SLC, SPI-NAND, MLC, TLC) under various operating
+systems. It works on most MediaTek SoCs.
+
+ Although a common NAND driver already exists, it does not cover
+SPI-NAND and does not meet our IC-verification requirements. We need
+a driver that can be extended or trimmed easily.
+
+ This driver is based on NANDX & SLC. We refactored the structures
+to make them inheritable, and reworked some operation flows, mainly
+to add SPI-NAND support.
+
+ The driver's architecture looks like this:
+
+ Driver @LK/Uboot/DA... |IC verify/other purposes
+ ----------------------------------------------------------------
+ partition | BBM |
+ -------------------------------------- | extend_core
+ nandx_core/core_io |
+ ----------------------------------------------------------------
+ nand_chip/nand_base |
+ -------------------------------------- | extend_nfi
+ nand_device | nfi/nfi_base |
+
+ Any block in the graph above can be extended as needed. If you
+want to add a new feature to this code, please make sure your code
+follows this framework; we would appreciate it.
diff --git a/drivers/mtd/nandx/core/Nandx.mk b/drivers/mtd/nandx/core/Nandx.mk
new file mode 100644
index 0000000000..7a5661c044
--- /dev/null
+++ b/drivers/mtd/nandx/core/Nandx.mk
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-y += nand_device.c
+nandx-y += nand_base.c
+nandx-y += nand_chip.c
+nandx-y += core_io.c
+
+nandx-header-y += nand_device.h
+nandx-header-y += nand_base.h
+nandx-header-y += nand_chip.h
+nandx-header-y += core_io.h
+nandx-header-y += nfi.h
+
+nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
+nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
+nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
+nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
+
+nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
+nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
+nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
+nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
+
+nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
+nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
+nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
+
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
diff --git a/drivers/mtd/nandx/core/core_io.c b/drivers/mtd/nandx/core/core_io.c
new file mode 100644
index 0000000000..716eeed38d
--- /dev/null
+++ b/drivers/mtd/nandx/core/core_io.c
@@ -0,0 +1,735 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+/*NOTE: switch cache/multi*/
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "core_io.h"
+
+static struct nandx_desc *g_nandx;
+
+static inline bool is_sector_align(u64 val)
+{
+ return reminder(val, g_nandx->chip->sector_size) ? false : true;
+}
+
+static inline bool is_page_align(u64 val)
+{
+ return reminder(val, g_nandx->chip->page_size) ? false : true;
+}
+
+static inline bool is_block_align(u64 val)
+{
+ return reminder(val, g_nandx->chip->block_size) ? false : true;
+}
+
+static inline u32 page_sectors(void)
+{
+ return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
+}
+
+static inline u32 sector_oob(void)
+{
+ return div_down(g_nandx->chip->oob_size, page_sectors());
+}
+
+static inline u32 sector_padded_size(void)
+{
+ return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
+}
+
+static inline u32 page_padded_size(void)
+{
+ return page_sectors() * sector_padded_size();
+}
+
+static inline u32 offset_to_padded_col(u64 offset)
+{
+ struct nandx_desc *nandx = g_nandx;
+ u32 col, sectors;
+
+ col = reminder(offset, nandx->chip->page_size);
+ sectors = div_down(col, nandx->chip->sector_size);
+
+ return col + sectors * nandx->chip->sector_spare_size;
+}
+
+static inline u32 offset_to_row(u64 offset)
+{
+ return div_down(offset, g_nandx->chip->page_size);
+}
+
+static inline u32 offset_to_col(u64 offset)
+{
+ return reminder(offset, g_nandx->chip->page_size);
+}
+
+static inline u32 oob_upper_size(void)
+{
+ return g_nandx->ecc_en ? g_nandx->chip->oob_size :
+ g_nandx->chip->sector_spare_size * page_sectors();
+}
+
+static inline bool is_upper_oob_align(u64 val)
+{
+ return reminder(val, oob_upper_size()) ? false : true;
+}
+
+#define prepare_op(_op, _row, _col, _len, _data, _oob) \
+ do { \
+ (_op).row = (_row); \
+ (_op).col = (_col); \
+ (_op).len = (_len); \
+ (_op).data = (_data); \
+ (_op).oob = (_oob); \
+ } while (0)
+
+static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
+ u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+ u32 row = offset_to_row(offset);
+ u32 col = offset_to_padded_col(offset);
+
+ if (nandx->mode == NANDX_IDLE) {
+ nandx->mode = mode;
+ nandx->ops_current = 0;
+ } else if (nandx->mode != mode) {
+ pr_info("forbid mixed operations.\n");
+ return -EOPNOTSUPP;
+ }
+
+ prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
+ nandx->ops_current++;
+
+ if (nandx->ops_current == nandx->ops_multi_len)
+ return nandx_sync();
+
+ return nandx->ops_multi_len - nandx->ops_current;
+}
+
+static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
+ u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+ struct nand_chip *chip = nandx->chip;
+ u32 row = offset_to_row(offset);
+ func_chip_ops chip_ops;
+ u8 *ref_data = data, *ref_oob = oob;
+ int align, ops, row_step;
+ int i, rem;
+
+ align = data ? chip->page_size : oob_upper_size();
+ ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
+ row_step = 1;
+
+ switch (mode) {
+ case NANDX_ERASE:
+ chip_ops = chip->erase_block;
+ align = chip->block_size;
+ ops = div_down(len, align);
+ row_step = chip->block_pages;
+ break;
+
+ case NANDX_READ:
+ chip_ops = chip->read_page;
+ break;
+
+ case NANDX_WRITE:
+ chip_ops = chip->write_page;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!data) {
+ ref_data = nandx->head_buf;
+ memset(ref_data, 0xff, chip->page_size);
+ }
+
+ if (!oob) {
+ ref_oob = nandx->head_buf + chip->page_size;
+ memset(ref_oob, 0xff, oob_upper_size());
+ }
+
+ for (i = 0; i < ops; i++) {
+ prepare_op(nandx->ops[nandx->ops_current],
+ row + i * row_step, 0, align, ref_data, ref_oob);
+ nandx->ops_current++;
+ /* If data or oob is NULL, nandx->head_buf or
+ * nandx->head_buf + chip->page_size will not be used by the
+ * caller, so it is safe to reuse these buffers here.
+ */
+ ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
+ ref_oob = oob ? ref_oob + oob_upper_size() :
+ nandx->head_buf + chip->page_size;
+ }
+
+ if (nandx->mode == NANDX_WRITE) {
+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
+ if (rem)
+ return nandx->min_write_pages - rem;
+ }
+
+ nandx->ops_current = 0;
+ return chip_ops(chip, nandx->ops, ops);
+}
+
+static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+ struct nand_chip *chip = nandx->chip;
+ struct nandx_split64 split = {0};
+ u8 *ref_data = data, *ref_oob;
+ u32 row, col;
+ int ret = 0, i, ops;
+ u32 head_offset = 0;
+ u64 val;
+
+ if (!data)
+ return operation_sequent(NANDX_READ, NULL, oob, offset, len);
+
+ ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
+
+ nandx_split(&split, offset, len, val, chip->page_size);
+
+ if (split.head_len) {
+ row = offset_to_row(split.head);
+ col = offset_to_col(split.head);
+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
+ chip->page_size,
+ nandx->head_buf, ref_oob);
+ nandx->ops_current++;
+
+ head_offset = col;
+
+ ref_data += split.head_len;
+ ref_oob = oob ? ref_oob + oob_upper_size() :
+ nandx->head_buf + chip->page_size;
+ }
+
+ if (split.body_len) {
+ ops = div_down(split.body_len, chip->page_size);
+ row = offset_to_row(split.body);
+ for (i = 0; i < ops; i++) {
+ prepare_op(nandx->ops[nandx->ops_current],
+ row + i, 0, chip->page_size,
+ ref_data, ref_oob);
+ nandx->ops_current++;
+ ref_data += chip->page_size;
+ ref_oob = oob ? ref_oob + oob_upper_size() :
+ nandx->head_buf + chip->page_size;
+ }
+ }
+
+ if (split.tail_len) {
+ row = offset_to_row(split.tail);
+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
+ chip->page_size, nandx->tail_buf, ref_oob);
+ nandx->ops_current++;
+ }
+
+ ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
+
+ if (split.head_len)
+ memcpy(data, nandx->head_buf + head_offset, split.head_len);
+ if (split.tail_len)
+ memcpy(ref_data, nandx->tail_buf, split.tail_len);
+
+ nandx->ops_current = 0;
+ return ret;
+}
+
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+
+ if (!len || len > nandx->info.total_size)
+ return -EINVAL;
+ if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
+ return -EINVAL;
+ if (!data && !oob)
+ return -EINVAL;
+ /*
+ * By design, OOB does not support partial reads, and the
+ * length of the OOB buffer must be aligned to the OOB size.
+ */
+ if (!data && !is_upper_oob_align(len))
+ return -EINVAL;
+
+ if (g_nandx->multi_en) {
+ /* By design, there are only two buffers for partial reads;
+ * if partial reads were allowed for multi reads, there
+ * would not be enough buffers.
+ */
+ if (!is_sector_align(offset))
+ return -EINVAL;
+ if (data && !is_sector_align(len))
+ return -EINVAL;
+ return operation_multi(NANDX_READ, data, oob, offset, len);
+ }
+
+ nandx->ops_current = 0;
+ nandx->mode = NANDX_IDLE;
+ return read_pages(data, oob, offset, len);
+}
+
+static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+ struct nand_chip *chip = nandx->chip;
+ struct nandx_split64 split = {0};
+ int ret, rem, i, ops;
+ u32 row, col;
+ u8 *ref_oob = oob;
+ u64 val;
+
+ nandx->mode = NANDX_WRITE;
+
+ if (!data)
+ return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
+
+ if (!oob) {
+ ref_oob = nandx->head_buf + chip->page_size;
+ memset(ref_oob, 0xff, oob_upper_size());
+ }
+
+ nandx_split(&split, offset, len, val, chip->page_size);
+
+ /* NOTE: SLC supports sector writes, so here we copy more data than necessary. */
+ if (split.head_len) {
+ row = offset_to_row(split.head);
+ col = offset_to_col(split.head);
+ memset(nandx->head_buf, 0xff, page_padded_size());
+ memcpy(nandx->head_buf + col, data, split.head_len);
+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
+ chip->page_size, nandx->head_buf, ref_oob);
+ nandx->ops_current++;
+
+ data += split.head_len;
+ ref_oob = oob ? ref_oob + oob_upper_size() :
+ nandx->head_buf + chip->page_size;
+ }
+
+ if (split.body_len) {
+ row = offset_to_row(split.body);
+ ops = div_down(split.body_len, chip->page_size);
+ for (i = 0; i < ops; i++) {
+ prepare_op(nandx->ops[nandx->ops_current],
+ row + i, 0, chip->page_size, data, ref_oob);
+ nandx->ops_current++;
+ data += chip->page_size;
+ ref_oob = oob ? ref_oob + oob_upper_size() :
+ nandx->head_buf + chip->page_size;
+ }
+ }
+
+ if (split.tail_len) {
+ row = offset_to_row(split.tail);
+ memset(nandx->tail_buf, 0xff, page_padded_size());
+ memcpy(nandx->tail_buf, data, split.tail_len);
+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
+ chip->page_size, nandx->tail_buf, ref_oob);
+ nandx->ops_current++;
+ }
+
+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
+ if (rem)
+ return nandx->min_write_pages - rem;
+
+ ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
+
+ nandx->ops_current = 0;
+ nandx->mode = NANDX_IDLE;
+ return ret;
+}
+
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+
+ if (!len || len > nandx->info.total_size)
+ return -EINVAL;
+ if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
+ return -EINVAL;
+ if (!data && !oob)
+ return -EINVAL;
+ if (!data && !is_upper_oob_align(len))
+ return -EINVAL;
+
+ if (nandx->multi_en) {
+ if (!is_page_align(offset))
+ return -EINVAL;
+ if (data && !is_page_align(len))
+ return -EINVAL;
+
+ return operation_multi(NANDX_WRITE, data, oob, offset, len);
+ }
+
+ return write_pages(data, oob, offset, len);
+}
+
+int nandx_erase(u64 offset, size_t len)
+{
+ struct nandx_desc *nandx = g_nandx;
+
+ if (!len || len > nandx->info.total_size)
+ return -EINVAL;
+ if (div_down(len, nandx->chip->block_size) > nandx->ops_len)
+ return -EINVAL;
+ if (!is_block_align(offset) || !is_block_align(len))
+ return -EINVAL;
+
+ if (g_nandx->multi_en)
+ return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
+
+ nandx->ops_current = 0;
+ nandx->mode = NANDX_IDLE;
+ return operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
+}
+
+int nandx_sync(void)
+{
+ struct nandx_desc *nandx = g_nandx;
+ struct nand_chip *chip = nandx->chip;
+ func_chip_ops chip_ops;
+ int ret, i, rem;
+
+ if (!nandx->ops_current)
+ return 0;
+
+ rem = reminder(nandx->ops_current, nandx->ops_multi_len);
+ if (nandx->multi_en && rem) {
+ ret = -EIO;
+ goto error;
+ }
+
+ switch (nandx->mode) {
+ case NANDX_IDLE:
+ return 0;
+ case NANDX_ERASE:
+ chip_ops = chip->erase_block;
+ break;
+ case NANDX_READ:
+ chip_ops = chip->read_page;
+ break;
+ case NANDX_WRITE:
+ chip_ops = chip->write_page;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
+ if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
+ /* In one program operation, only two pages may be partially
+ * written; we assume the 1st buffer has already been used
+ * and the 2nd buffer is still free.
+ */
+ memset(nandx->tail_buf, 0xff,
+ chip->page_size + oob_upper_size());
+ for (i = 0; i < rem; i++) {
+ prepare_op(nandx->ops[nandx->ops_current],
+ nandx->ops[nandx->ops_current - 1].row + 1,
+ 0, chip->page_size, nandx->tail_buf,
+ nandx->tail_buf + chip->page_size);
+ nandx->ops_current++;
+ }
+ }
+
+ ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
+
+error:
+ nandx->mode = NANDX_IDLE;
+ nandx->ops_current = 0;
+
+ return ret;
+}
+
+int nandx_ioctl(int cmd, void *arg)
+{
+ struct nandx_desc *nandx = g_nandx;
+ struct nand_chip *chip = nandx->chip;
+ int ret = 0;
+
+ switch (cmd) {
+ case CORE_CTRL_NAND_INFO:
+ *(struct nandx_info *)arg = nandx->info;
+ break;
+
+ case CHIP_CTRL_OPS_MULTI:
+ ret = chip->chip_ctrl(chip, cmd, arg);
+ if (!ret)
+ nandx->multi_en = *(bool *)arg;
+ break;
+
+ case NFI_CTRL_ECC:
+ ret = chip->chip_ctrl(chip, cmd, arg);
+ if (!ret)
+ nandx->ecc_en = *(bool *)arg;
+ break;
+
+ default:
+ ret = chip->chip_ctrl(chip, cmd, arg);
+ break;
+ }
+
+ return ret;
+}
+
+bool nandx_is_bad_block(u64 offset)
+{
+ struct nandx_desc *nandx = g_nandx;
+
+ prepare_op(nandx->ops[0], offset_to_row(offset), 0,
+ nandx->chip->page_size, nandx->head_buf,
+ nandx->head_buf + nandx->chip->page_size);
+
+ return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
+}
+
+int nandx_suspend(void)
+{
+ return g_nandx->chip->suspend(g_nandx->chip);
+}
+
+int nandx_resume(void)
+{
+ return g_nandx->chip->resume(g_nandx->chip);
+}
+
+int nandx_init(struct nfi_resource *res)
+{
+ struct nand_chip *chip;
+ struct nandx_desc *nandx;
+ int ret = 0;
+
+ if (!res)
+ return -EINVAL;
+
+ chip = nand_chip_init(res);
+ if (!chip) {
+ pr_info("nand chip init fail.\n");
+ return -EFAULT;
+ }
+
+ nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
+ if (!nandx)
+ return -ENOMEM;
+
+ g_nandx = nandx;
+
+ nandx->chip = chip;
+ nandx->min_write_pages = chip->min_program_pages;
+ nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
+ nandx->ops_len = chip->block_pages * chip->plane_num;
+ nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
+ if (!nandx->ops) {
+ ret = -ENOMEM;
+ goto ops_error;
+ }
+
+#if NANDX_BULK_IO_USE_DRAM
+ nandx->head_buf = NANDX_CORE_BUF_ADDR;
+#else
+ nandx->head_buf = mem_alloc(2, page_padded_size());
+#endif
+ if (!nandx->head_buf) {
+ ret = -ENOMEM;
+ goto buf_error;
+ }
+ nandx->tail_buf = nandx->head_buf + page_padded_size();
+ memset(nandx->head_buf, 0xff, 2 * page_padded_size());
+ nandx->multi_en = false;
+ nandx->ecc_en = false;
+ nandx->ops_current = 0;
+ nandx->mode = NANDX_IDLE;
+
+ nandx->info.max_io_count = nandx->ops_len;
+ nandx->info.min_write_pages = nandx->min_write_pages;
+ nandx->info.plane_num = chip->plane_num;
+ nandx->info.oob_size = chip->oob_size;
+ nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
+ nandx->info.page_size = chip->page_size;
+ nandx->info.block_size = chip->block_size;
+ nandx->info.total_size = chip->block_size * chip->block_num;
+ nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
+ nandx->info.fdm_reg_size = chip->fdm_reg_size;
+ nandx->info.ecc_strength = chip->ecc_strength;
+ nandx->info.sector_size = chip->sector_size;
+
+ return 0;
+
+buf_error:
+#if !NANDX_BULK_IO_USE_DRAM
+ mem_free(nandx->head_buf);
+#endif
+ops_error:
+ mem_free(nandx);
+
+ return ret;
+}
+
+void nandx_exit(void)
+{
+ nand_chip_exit(g_nandx->chip);
+#if !NANDX_BULK_IO_USE_DRAM
+ mem_free(g_nandx->head_buf);
+#endif
+ mem_free(g_nandx->ops);
+ mem_free(g_nandx);
+}
+
+#ifdef NANDX_UNIT_TEST
+static void dump_buf(u8 *buf, u32 len)
+{
+ u32 i;
+
+ pr_info("dump buf@0x%X start", (u32)buf);
+ for (i = 0; i < len; i++) {
+ if (!reminder(i, 16))
+ pr_info("\n0x");
+ pr_info("%x ", buf[i]);
+ }
+ pr_info("\ndump buf done.\n");
+}
+
+int nandx_unit_test(u64 offset, size_t len)
+{
+ u8 *src_buf, *dst_buf;
+ u32 i, j;
+ int ret;
+
+ if (!len || len > g_nandx->chip->block_size)
+ return -EINVAL;
+
+#if NANDX_BULK_IO_USE_DRAM
+ src_buf = NANDX_UT_SRC_ADDR;
+ dst_buf = NANDX_UT_DST_ADDR;
+
+#else
+ src_buf = mem_alloc(1, g_nandx->chip->page_size);
+ if (!src_buf)
+ return -ENOMEM;
+ dst_buf = mem_alloc(1, g_nandx->chip->page_size);
+ if (!dst_buf) {
+ mem_free(src_buf);
+ return -ENOMEM;
+ }
+#endif
+
+ pr_info("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
+ __func__, (int)((unsigned long)src_buf),
+ (int)((unsigned long)dst_buf));
+
+ memset(dst_buf, 0, g_nandx->chip->page_size);
+ pr_info("read page 0 data...!\n");
+ ret = nandx_read(dst_buf, NULL, 0, g_nandx->chip->page_size);
+ if (ret < 0) {
+ pr_info("read fail with ret %d\n", ret);
+ } else {
+ pr_info("read page success!\n");
+ }
+
+ for (i = 0; i < g_nandx->chip->page_size; i++) {
+ src_buf[i] = 0x5a;
+ }
+
+ ret = nandx_erase(offset, g_nandx->chip->block_size);
+ if (ret < 0) {
+ pr_info("erase fail with ret %d\n", ret);
+ goto error;
+ }
+
+ for (j = 0; j < g_nandx->chip->block_pages; j++) {
+ memset(dst_buf, 0, g_nandx->chip->page_size);
+ pr_info("check data after erase...!\n");
+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
+ if (ret < 0) {
+ pr_info("read fail with ret %d\n", ret);
+ goto error;
+ }
+
+ for (i = 0; i < g_nandx->chip->page_size; i++) {
+ if (dst_buf[i] != 0xff) {
+ pr_info("read after erase, check fail @%d\n", i);
+ pr_info("all data should be 0xff\n");
+ ret = -ENANDERASE;
+ dump_buf(dst_buf, 128);
+ //goto error;
+ break;
+ }
+ }
+
+ pr_info("write data...!\n");
+ ret = nandx_write(src_buf, NULL, offset, g_nandx->chip->page_size);
+ if (ret < 0) {
+ pr_info("write fail with ret %d\n", ret);
+ goto error;
+ }
+
+ memset(dst_buf, 0, g_nandx->chip->page_size);
+ pr_info("read data...!\n");
+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
+ if (ret < 0) {
+ pr_info("read fail with ret %d\n", ret);
+ goto error;
+ }
+
+ for (i = 0; i < g_nandx->chip->page_size; i++) {
+ if (dst_buf[i] != src_buf[i]) {
+ pr_info("read after write, check fail @%d\n", i);
+ pr_info("dst_buf should be same as src_buf\n");
+ ret = -EIO;
+ dump_buf(src_buf + i, 128);
+ dump_buf(dst_buf + i, 128);
+ break;
+ }
+ }
+
+ pr_err("%s %d %s@%d\n", __func__, __LINE__, ret?"Failed":"OK", j);
+ if (ret)
+ break;
+
+ offset += g_nandx->chip->page_size;
+ }
+
+ ret = nandx_erase(offset, g_nandx->chip->block_size);
+ if (ret < 0) {
+ pr_info("erase fail with ret %d\n", ret);
+ goto error;
+ }
+
+ memset(dst_buf, 0, g_nandx->chip->page_size);
+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
+ if (ret < 0) {
+ pr_info("read fail with ret %d\n", ret);
+ goto error;
+ }
+
+ for (i = 0; i < g_nandx->chip->page_size; i++) {
+ if (dst_buf[i] != 0xff) {
+ pr_info("read after erase, check fail\n");
+ pr_info("all data should be 0xff\n");
+ ret = -ENANDERASE;
+ dump_buf(dst_buf, 128);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+#if !NANDX_BULK_IO_USE_DRAM
+ mem_free(src_buf);
+ mem_free(dst_buf);
+#endif
+ return ret;
+}
+#endif
diff --git a/drivers/mtd/nandx/core/core_io.h b/drivers/mtd/nandx/core/core_io.h
new file mode 100644
index 0000000000..edcb60908a
--- /dev/null
+++ b/drivers/mtd/nandx/core/core_io.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __CORE_IO_H__
+#define __CORE_IO_H__
+
+typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
+ int);
+
+enum nandx_op_mode {
+ NANDX_IDLE,
+ NANDX_WRITE,
+ NANDX_READ,
+ NANDX_ERASE
+};
+
+struct nandx_desc {
+ struct nand_chip *chip;
+ struct nandx_info info;
+ enum nandx_op_mode mode;
+
+ bool multi_en;
+ bool ecc_en;
+
+ struct nand_ops *ops;
+ int ops_len;
+ int ops_multi_len;
+ int ops_current;
+ int min_write_pages;
+
+ u8 *head_buf;
+ u8 *tail_buf;
+};
+
+#endif /* __CORE_IO_H__ */
diff --git a/drivers/mtd/nandx/core/nand/device_spi.c b/drivers/mtd/nandx/core/nand/device_spi.c
new file mode 100644
index 0000000000..db338c28c2
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand/device_spi.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "../nand_device.h"
+#include "device_spi.h"
+
+/* spi nand basic commands */
+static struct nand_cmds spi_cmds = {
+ .reset = 0xff,
+ .read_id = 0x9f,
+ .read_status = 0x0f,
+ .read_param_page = 0x03,
+ .set_feature = 0x1f,
+ .get_feature = 0x0f,
+ .read_1st = 0x13,
+ .read_2nd = -1,
+ .random_out_1st = 0x03,
+ .random_out_2nd = -1,
+ .program_1st = 0x02,
+ .program_2nd = 0x10,
+ .erase_1st = 0xd8,
+ .erase_2nd = -1,
+ .read_cache = 0x30,
+ .read_cache_last = 0x3f,
+ .program_cache = 0x02
+};
+
+/* spi nand extend commands */
+static struct spi_extend_cmds spi_extend_cmds = {
+ .die_select = 0xc2,
+ .write_enable = 0x06
+};
+
+/* start bit of each addressing field */
+static struct nand_addressing spi_addressing = {
+ .row_bit_start = 0,
+ .block_bit_start = 0,
+ .plane_bit_start = 12,
+ .lun_bit_start = 0,
+};
+
+/* spi nand endurance */
+static struct nand_endurance spi_endurance = {
+ .pe_cycle = 100000,
+ .ecc_req = 1,
+ .max_bitflips = 1
+};
+
+/* array_busy, write_protect, erase_fail, program_fail */
+static struct nand_status spi_status[] = {
+ {.array_busy = BIT(0),
+ .write_protect = BIT(1),
+ .erase_fail = BIT(2),
+ .program_fail = BIT(3)}
+};
+
+/* array timings, measured in microseconds (us) */
+static struct nand_array_timing spi_array_timing = {
+ .tRST = 500,
+ .tWHR = 1,
+ .tR = 25,
+ .tRCBSY = 25,
+ .tFEAT = 1,
+ .tPROG = 600,
+ .tPCBSY = 600,
+ .tBERS = 10000,
+ .tDBSY = 1
+};
+
+/* spi nand device table */
+static struct device_spi spi_nand[] = {
+ {
+ NAND_DEVICE("W25N01GV",
+ NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
+ 3, 0, 3, 3,
+ 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ },
+ {
+ NAND_DEVICE("MX35LF1G",
+ NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
+ 2, 0, 3, 3,
+ 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ },
+ {
+ NAND_DEVICE("MT29F4G01ABAFDWB",
+ NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
+ 2, 0, 3, 3,
+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ },
+ {
+ NAND_DEVICE("GD5F4GQ4UB",
+ NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
+ 2, 0, 3, 3,
+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ },
+ {
+ NAND_DEVICE("TC58CVG2S0HRAIJ",
+ NAND_PACK_ID(0x98, 0xED, 0x51, 0, 0, 0, 0, 0),
+ 3, 0, 3, 3,
+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ },
+ {
+ NAND_DEVICE("NO-DEVICE",
+ NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ &spi_cmds, &spi_addressing, &spi_status[0],
+ &spi_endurance, &spi_array_timing),
+ {
+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+ NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+ NAND_SPI_STATUS(0xc0, 4, 5),
+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+ },
+ &spi_extend_cmds, 0xff, 0xff
+ }
+};
+
+u8 spi_replace_rx_cmds(u8 mode)
+{
+ u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
+
+ return rx_replace_cmds[mode];
+}
+
+u8 spi_replace_tx_cmds(u8 mode)
+{
+ u8 tx_replace_cmds[] = {0x02, 0x32};
+
+ return tx_replace_cmds[mode];
+}
+
+u8 spi_replace_rx_col_cycle(u8 mode)
+{
+ u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
+
+ return rx_replace_col_cycle[mode];
+}
+
+u8 spi_replace_tx_col_cycle(u8 mode)
+{
+ u8 tx_replace_col_cycle[] = {2, 2};
+
+ return tx_replace_col_cycle[mode];
+}
+
+struct nand_device *nand_get_device(int index)
+{
+ return &spi_nand[index].dev;
+}
+
diff --git a/drivers/mtd/nandx/core/nand/device_spi.h b/drivers/mtd/nandx/core/nand/device_spi.h
new file mode 100644
index 0000000000..1676b61fc8
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand/device_spi.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __DEVICE_SPI_H__
+#define __DEVICE_SPI_H__
+
+/*
+ * extended commands
+ * @die_select: command to select a NAND device die
+ * @write_enable: write-enable command issued before writing data to the
+ * SPI NAND; the device clears it automatically after the write completes
+ */
+struct spi_extend_cmds {
+ short die_select;
+ short write_enable;
+};
+
+/*
+ * protection feature register
+ * @addr: register address
+ * @wp_en_bit: write protection enable bit
+ * @bp_start_bit: block protection mask start bit
+ * @bp_end_bit: block protection mask end bit
+ */
+struct feature_protect {
+ u8 addr;
+ u8 wp_en_bit;
+ u8 bp_start_bit;
+ u8 bp_end_bit;
+};
+
+/*
+ * configuration feature register
+ * @addr: register address
+ * @ecc_en_bit: in-die ecc enable bit
+ * @otp_en_bit: enter otp access mode bit
+ * @need_qe: quad io enable bit
+ */
+struct feature_config {
+ u8 addr;
+ u8 ecc_en_bit;
+ u8 otp_en_bit;
+ u8 need_qe;
+};
+
+/*
+ * status feature register
+ * @addr: register address
+ * @ecc_start_bit: ecc status mask start bit for error bits number
+ * @ecc_end_bit: ecc status mask end bit for error bits number
+ * note that:
+ * operation status (e.g. array-busy status) can be found in struct nand_status
+ */
+struct feature_status {
+ u8 addr;
+ u8 ecc_start_bit;
+ u8 ecc_end_bit;
+};
+
+/*
+ * character feature register
+ * @addr: register address
+ * @die_sel_bit: die select bit
+ * @drive_start_bit: drive strength mask start bit
+ * @drive_end_bit: drive strength mask end bit
+ */
+struct feature_character {
+ u8 addr;
+ u8 die_sel_bit;
+ u8 drive_start_bit;
+ u8 drive_end_bit;
+};
+
+/*
+ * spi features
+ * @protect: protection feature register
+ * @config: configuration feature register
+ * @status: status feature register
+ * @character: character feature register
+ */
+struct spi_features {
+ struct feature_protect protect;
+ struct feature_config config;
+ struct feature_status status;
+ struct feature_character character;
+};
+
+/*
+ * device_spi
+ * configurations of spi nand device table
+ * @dev: base information of nand device
+ * @feature: feature information for spi nand
+ * @extend_cmds: extension of the base NAND commands
+ * @tx_mode_mask: TX mode mask for chip writes (transfers to the device)
+ * @rx_mode_mask: RX mode mask for chip reads (transfers from the device)
+ */
+struct device_spi {
+ struct nand_device dev;
+ struct spi_features feature;
+ struct spi_extend_cmds *extend_cmds;
+
+ u8 tx_mode_mask;
+ u8 rx_mode_mask;
+};
+
+#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
+ {addr, wp_en_bit, bp_start_bit, bp_end_bit}
+
+#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
+ {addr, ecc_en_bit, otp_en_bit, need_qe}
+
+#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
+ {addr, ecc_start_bit, ecc_end_bit}
+
+#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
+ {addr, die_sel_bit, drive_start_bit, drive_end_bit}
+
+static inline struct device_spi *device_to_spi(struct nand_device *dev)
+{
+ return container_of(dev, struct device_spi, dev);
+}
+
+u8 spi_replace_rx_cmds(u8 mode);
+u8 spi_replace_tx_cmds(u8 mode);
+u8 spi_replace_rx_col_cycle(u8 mode);
+u8 spi_replace_tx_col_cycle(u8 mode);
+
+#endif /* __DEVICE_SPI_H__ */
diff --git a/drivers/mtd/nandx/core/nand/nand_spi.c b/drivers/mtd/nandx/core/nand/nand_spi.c
new file mode 100644
index 0000000000..2ae03e1cf4
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand/nand_spi.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nand_chip.h"
+#include "../nand_device.h"
+#include "../nfi.h"
+#include "../nand_base.h"
+#include "device_spi.h"
+#include "nand_spi.h"
+
+#define READY_TIMEOUT 500000 /* us */
+
+static int nand_spi_read_status(struct nand_base *nand)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ u8 status;
+
+ nand->get_feature(nand, dev->feature.status.addr, &status, 1);
+
+ return status;
+}
+
+static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
+{
+ u64 now, end;
+ int status;
+
+ end = get_current_time_us() + timeout;
+
+ do {
+ status = nand_spi_read_status(nand);
+ status &= nand->dev->status->array_busy;
+ now = get_current_time_us();
+
+ if (now > end)
+ break;
+ } while (status);
+
+ return status ? -EBUSY : 0;
+}
+
+static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
+{
+ struct nand_spi *spi_nand = base_to_spi(nand);
+ struct nfi *nfi = nand->nfi;
+ int ret = 0;
+
+ if (spi_nand->op_mode != mode) {
+ ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_OP_MODE, (void *)&mode);
+ spi_nand->op_mode = mode;
+ }
+
+ return ret;
+}
+
+static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
+ bool en)
+{
+ u8 configs = 0;
+
+ nand->get_feature(nand, addr, &configs, 1);
+
+ if (en)
+ configs |= mask;
+ else
+ configs &= ~mask;
+
+ nand->set_feature(nand, addr, &configs, 1);
+
+ configs = 0;
+ nand->get_feature(nand, addr, &configs, 1);
+
+ return (configs & mask) == en ? 0 : -EFAULT;
+}
+
+static int nand_spi_die_select(struct nand_base *nand, int *row)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+ int lun_blocks, block_pages, lun, blocks;
+ int page = *row, ret = 0;
+ u8 param = 0, die_sel;
+
+ if (nand->dev->lun_num < 2)
+ return 0;
+
+ block_pages = nand_block_pages(nand->dev);
+ lun_blocks = nand_lun_blocks(nand->dev);
+ blocks = div_down(page, block_pages);
+ lun = div_down(blocks, lun_blocks);
+
+ if (dev->extend_cmds->die_select == -1) {
+ die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+ param |= die_sel;
+ nand->set_feature(nand, dev->feature.character.addr, &param, 1);
+ param = 0;
+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+ ret = (param & die_sel) ? 0 : -EFAULT;
+ } else {
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->extend_cmds->die_select);
+ nfi->send_addr(nfi, lun, 0, 1, 0);
+ nfi->trigger(nfi);
+ }
+
+ *row = page - (lun_blocks * block_pages) * lun;
+
+ return ret;
+}
+
+static int nand_spi_select_device(struct nand_base *nand, int cs)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return parent->select_device(nand, cs);
+}
+
+static int nand_spi_reset(struct nand_base *nand)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ parent->reset(nand);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return parent->read_id(nand, id, count);
+}
+
+static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
+ int count)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nfi *nfi = nand->nfi;
+ int sectors, value;
+ u8 param = 0;
+
+ sectors = div_round_up(count, nfi->sector_size);
+
+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+ param |= BIT(dev->feature.config.otp_en_bit);
+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+ param = 0;
+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+ if (param & BIT(dev->feature.config.otp_en_bit)) {
+ value = 0;
+ nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
+ nand->read_page(nand, 0x01);
+ nand->read_data(nand, 0x01, 0, sectors, data, NULL);
+ }
+
+ param &= ~BIT(dev->feature.config.otp_en_bit);
+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+ return 0;
+}
+
+static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand->write_enable(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return parent->set_feature(nand, addr, param, count);
+}
+
+static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return parent->get_feature(nand, addr, param, count);
+}
+
+static int nand_spi_addressing(struct nand_base *nand, int *row,
+ int *col)
+{
+ struct nand_device *dev = nand->dev;
+ int plane, block, block_pages;
+ int ret;
+
+ ret = nand_spi_die_select(nand, row);
+ if (ret)
+ return ret;
+
+ block_pages = nand_block_pages(dev);
+ block = div_down(*row, block_pages);
+
+ plane = block % dev->plane_num;
+ *col |= (plane << dev->addressing->plane_bit_start);
+
+ return 0;
+}
+
+static int nand_spi_read_page(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ if (spi->op_mode == SNFI_AUTO_MODE)
+ nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
+ else
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ parent->read_page(nand, row);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+static int nand_spi_read_data(struct nand_base *nand, int row, int col,
+ int sectors, u8 *data, u8 *oob)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+ int ret;
+
+ if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
+ dev->feature.config.need_qe)
+ nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(0), true);
+
+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
+
+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
+
+ ret = parent->read_data(nand, row, col, sectors, data, oob);
+ if (ret)
+ return -ENANDREAD;
+
+ if (spi->ondie_ecc) {
+ ret = nand_spi_read_status(nand);
+ ret &= GENMASK(dev->feature.status.ecc_end_bit,
+ dev->feature.status.ecc_start_bit);
+ ret >>= dev->feature.status.ecc_start_bit;
+ if (ret > nand->dev->endurance->ecc_req)
+ return -ENANDREAD;
+ else if (ret > nand->dev->endurance->max_bitflips)
+ return -ENANDFLIPS;
+ }
+
+ return 0;
+}
+
+static int nand_spi_write_enable(struct nand_base *nand)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+ int status;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
+
+ nfi->trigger(nfi);
+
+ status = nand_spi_read_status(nand);
+ status &= nand->dev->status->write_protect;
+
+ return !status;
+}
+
+static int nand_spi_program_data(struct nand_base *nand, int row,
+ int col,
+ u8 *data, u8 *oob)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+
+ if (spi->tx_mode == SNFI_TX_114 && dev->feature.config.need_qe)
+ nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(0), true);
+
+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
+
+ nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
+
+ return spi->parent->program_data(nand, row, col, data, oob);
+}
+
+static int nand_spi_program_page(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_device *dev = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ if (spi->op_mode == SNFI_AUTO_MODE)
+ nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
+ else
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->program_2nd);
+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+ nfi->trigger(nfi);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+static int nand_spi_erase_block(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ parent->erase_block(nand, row);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
+ void *args)
+{
+ struct nand_base *nand = chip->nand;
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nfi *nfi = nand->nfi;
+ int ret = 0, value = *(int *)args;
+
+ switch (cmd) {
+ case CHIP_CTRL_ONDIE_ECC:
+ spi->ondie_ecc = (bool)value;
+ ret = nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(dev->feature.config.ecc_en_bit),
+ spi->ondie_ecc);
+ break;
+
+ case SNFI_CTRL_TX_MODE:
+ if (value < 0 || value > SNFI_TX_114)
+ return -EOPNOTSUPP;
+
+ if (dev->tx_mode_mask & BIT(value)) {
+ spi->tx_mode = value;
+ nand->dev->cmds->random_out_1st = spi_replace_tx_cmds(
+ spi->tx_mode);
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ }
+
+ break;
+
+ case SNFI_CTRL_RX_MODE:
+ if (value < 0 || value > SNFI_RX_144)
+ return -EOPNOTSUPP;
+
+ if (dev->rx_mode_mask & BIT(value)) {
+ spi->rx_mode = value;
+ nand->dev->cmds->program_1st = spi_replace_rx_cmds(
+ spi->rx_mode);
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ }
+
+ break;
+
+ case CHIP_CTRL_OPS_CACHE:
+ case CHIP_CTRL_OPS_MULTI:
+ case CHIP_CTRL_PSLC_MODE:
+ case CHIP_CTRL_DDR_MODE:
+ case CHIP_CTRL_DRIVE_STRENGTH:
+ case CHIP_CTRL_TIMING_MODE:
+ ret = -EOPNOTSUPP;
+ break;
+
+ default:
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ break;
+ }
+
+ return ret;
+}
+
+int nand_chip_spi_resume(struct nand_chip *chip)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_spi *spi = base_to_spi(nand);
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+ struct nfi_format format;
+ u8 mask;
+
+ nand->reset(nand);
+
+ mask = GENMASK(dev->feature.protect.bp_end_bit,
+ dev->feature.protect.bp_start_bit);
+ nand_spi_set_config(nand, dev->feature.config.addr, mask, false);
+ mask = BIT(dev->feature.config.ecc_en_bit);
+ nand_spi_set_config(nand, dev->feature.config.addr, mask,
+ spi->ondie_ecc);
+
+ format.page_size = nand->dev->page_size;
+ format.spare_size = nand->dev->spare_size;
+ format.ecc_req = nand->dev->endurance->ecc_req;
+
+ return nfi->set_format(nfi, &format);
+}
+
+static int nand_spi_set_format(struct nand_base *nand)
+{
+ struct nfi_format format = {
+ nand->dev->page_size,
+ nand->dev->spare_size,
+ nand->dev->endurance->ecc_req
+ };
+
+ return nand->nfi->set_format(nand->nfi, &format);
+}
+
+struct nand_base *nand_device_init(struct nand_chip *chip)
+{
+ struct nand_base *nand;
+ struct nand_spi *spi;
+ struct device_spi *dev;
+ int ret;
+ u8 mask;
+
+ spi = mem_alloc(1, sizeof(struct nand_spi));
+ if (!spi) {
+ pr_info("alloc nand_spi fail\n");
+ return NULL;
+ }
+
+ spi->ondie_ecc = false;
+ spi->op_mode = SNFI_CUSTOM_MODE;
+ spi->rx_mode = SNFI_RX_114;
+ spi->tx_mode = SNFI_TX_114;
+
+ spi->parent = chip->nand;
+ nand = &spi->base;
+ nand->dev = spi->parent->dev;
+ nand->nfi = spi->parent->nfi;
+
+ nand->select_device = nand_spi_select_device;
+ nand->reset = nand_spi_reset;
+ nand->read_id = nand_spi_read_id;
+ nand->read_param_page = nand_spi_read_param_page;
+ nand->set_feature = nand_spi_set_feature;
+ nand->get_feature = nand_spi_get_feature;
+ nand->read_status = nand_spi_read_status;
+ nand->addressing = nand_spi_addressing;
+ nand->read_page = nand_spi_read_page;
+ nand->read_data = nand_spi_read_data;
+ nand->write_enable = nand_spi_write_enable;
+ nand->program_data = nand_spi_program_data;
+ nand->program_page = nand_spi_program_page;
+ nand->erase_block = nand_spi_erase_block;
+
+ chip->chip_ctrl = nand_chip_spi_ctrl;
+ chip->nand_type = NAND_SPI;
+ chip->resume = nand_chip_spi_resume;
+
+ ret = nand_detect_device(nand);
+ if (ret)
+ goto err;
+
+ nand->select_device(nand, 0);
+
+ ret = nand_spi_set_format(nand);
+ if (ret)
+ goto err;
+
+ dev = (struct device_spi *)nand->dev;
+
+ nand->dev->cmds->random_out_1st =
+ spi_replace_rx_cmds(spi->rx_mode);
+ nand->dev->cmds->program_1st =
+ spi_replace_tx_cmds(spi->tx_mode);
+
+ mask = GENMASK(dev->feature.protect.bp_end_bit,
+ dev->feature.protect.bp_start_bit);
+ ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
+ if (ret)
+ goto err;
+
+ mask = BIT(dev->feature.config.ecc_en_bit);
+ ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
+ spi->ondie_ecc);
+ if (ret)
+ goto err;
+
+ return nand;
+
+err:
+ mem_free(spi);
+ return NULL;
+}
+
+void nand_exit(struct nand_base *nand)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_base_exit(spi->parent);
+ mem_free(spi);
+}
diff --git a/drivers/mtd/nandx/core/nand/nand_spi.h b/drivers/mtd/nandx/core/nand/nand_spi.h
new file mode 100644
index 0000000000..e55e4de6f7
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand/nand_spi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_SPI_H__
+#define __NAND_SPI_H__
+
+/*
+ * spi nand handler
+ * @base: spi nand base functions
+ * @parent: common parent nand base functions
+ * @tx_mode: spi bus width of transfer to device
+ * @rx_mode: spi bus width of transfer from device
+ * @op_mode: spi nand controller (NFI) operation mode
+ * @ondie_ecc: spi nand on-die ecc flag
+ */
+
+struct nand_spi {
+ struct nand_base base;
+ struct nand_base *parent;
+ u8 tx_mode;
+ u8 rx_mode;
+ u8 op_mode;
+ bool ondie_ecc;
+};
+
+static inline struct nand_spi *base_to_spi(struct nand_base *base)
+{
+ return container_of(base, struct nand_spi, base);
+}
+
+#endif /* __NAND_SPI_H__ */
diff --git a/drivers/mtd/nandx/core/nand_base.c b/drivers/mtd/nandx/core/nand_base.c
new file mode 100644
index 0000000000..65998e5460
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_base.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+static int nand_base_select_device(struct nand_base *nand, int cs)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+
+ return nfi->select_chip(nfi, cs);
+}
+
+static int nand_base_reset(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->reset);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRST);
+}
+
+static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_id);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
+ nfi->send_addr(nfi, 0, 0, 1, 0);
+
+ return nfi->read_bytes(nfi, id, count);
+}
+
+static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_param_page);
+ nfi->send_addr(nfi, 0, 0, 1, 0);
+
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
+
+ return nfi->read_bytes(nfi, data, count);
+}
+
+static int nand_base_set_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->set_feature);
+ nfi->send_addr(nfi, addr, 0, 1, 0);
+
+ nfi->write_bytes(nfi, param, count);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tFEAT);
+}
+
+static int nand_base_get_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->get_feature);
+ nfi->send_addr(nfi, addr, 0, 1, 0);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tFEAT);
+
+ return nfi->read_bytes(nfi, param, count);
+}
+
+static int nand_base_read_status(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+ u8 status = 0;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_status);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
+ nfi->read_bytes(nfi, &status, 1);
+
+ return status;
+}
+
+static int nand_base_addressing(struct nand_base *nand, int *row,
+ int *col)
+{
+ struct nand_device *dev = nand->dev;
+ int lun, plane, block, page, cs = 0;
+ int block_pages, target_blocks, wl = 0;
+ int icol = *col;
+
+ if (dev->target_num > 1) {
+ block_pages = nand_block_pages(dev);
+ target_blocks = nand_target_blocks(dev);
+ cs = div_down(*row, block_pages * target_blocks);
+ *row -= cs * block_pages * target_blocks;
+ }
+
+ nand->select_device(nand, cs);
+
+ block_pages = nand_block_pages(dev);
+ block = div_down(*row, block_pages);
+ page = *row - block * block_pages;
+ plane = reminder(block, dev->plane_num);
+ lun = div_down(block, nand_lun_blocks(dev));
+
+ wl |= (page << dev->addressing->row_bit_start);
+ wl |= (block << dev->addressing->block_bit_start);
+ wl |= (plane << dev->addressing->plane_bit_start);
+ wl |= (lun << dev->addressing->lun_bit_start);
+
+ *row = wl;
+ *col = icol;
+
+ return 0;
+}
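+
+/*
+ * Worked example for the addressing above (hypothetical geometry, for
+ * illustration only): a single-target device with 64 pages per block,
+ * one plane, one lun, row_bit_start = 0 and block_bit_start = 6.
+ * For *row = 130:
+ *	block = 130 / 64 = 2, page = 130 - 2 * 64 = 2, plane = 0, lun = 0
+ *	wl = (2 << 0) | (2 << 6) = 0x82
+ * i.e. the linear page index is repacked into the device row address layout.
+ */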
+
+static int nand_base_read_page(struct nand_base *nand, int row)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_1st);
+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+ nfi->send_cmd(nfi, dev->cmds->read_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
+}
+
+static int nand_base_read_data(struct nand_base *nand, int row, int col,
+ int sectors, u8 *data, u8 *oob)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->random_out_1st);
+ nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
+ nfi->send_cmd(nfi, dev->cmds->random_out_2nd);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRCBSY);
+
+ return nfi->read_sectors(nfi, data, oob, sectors);
+}
+
+static int nand_base_write_enable(struct nand_base *nand)
+{
+ struct nand_device *dev = nand->dev;
+ int status;
+
+ status = nand_base_read_status(nand);
+ if (status & dev->status->write_protect)
+ return 0;
+
+ return -ENANDWP;
+}
+
+static int nand_base_program_data(struct nand_base *nand, int row,
+ int col,
+ u8 *data, u8 *oob)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->program_1st);
+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+
+ return nfi->write_page(nfi, data, oob);
+}
+
+static int nand_base_program_page(struct nand_base *nand, int row)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->program_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tPROG);
+}
+
+static int nand_base_erase_block(struct nand_base *nand, int row)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->erase_1st);
+ nfi->send_addr(nfi, 0, row, 0, dev->row_cycle);
+ nfi->send_cmd(nfi, dev->cmds->erase_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tBERS);
+}
+
+static int nand_base_read_cache(struct nand_base *nand, int row)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_1st);
+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+ nfi->send_cmd(nfi, dev->cmds->read_cache);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tRCBSY);
+}
+
+static int nand_base_read_last(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->read_cache_last);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tRCBSY);
+}
+
+static int nand_base_program_cache(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+ struct nand_device *dev = nand->dev;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->program_cache);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ dev->array_timing->tPCBSY);
+}
+
+struct nand_base *nand_base_init(struct nand_device *dev,
+ struct nfi *nfi)
+{
+ struct nand_base *nand;
+
+ nand = mem_alloc(1, sizeof(struct nand_base));
+ if (!nand)
+ return NULL;
+
+ nand->dev = dev;
+ nand->nfi = nfi;
+ nand->select_device = nand_base_select_device;
+ nand->reset = nand_base_reset;
+ nand->read_id = nand_base_read_id;
+ nand->read_param_page = nand_base_read_param_page;
+ nand->set_feature = nand_base_set_feature;
+ nand->get_feature = nand_base_get_feature;
+ nand->read_status = nand_base_read_status;
+ nand->addressing = nand_base_addressing;
+ nand->read_page = nand_base_read_page;
+ nand->read_data = nand_base_read_data;
+ nand->read_cache = nand_base_read_cache;
+ nand->read_last = nand_base_read_last;
+ nand->write_enable = nand_base_write_enable;
+ nand->program_data = nand_base_program_data;
+ nand->program_page = nand_base_program_page;
+ nand->program_cache = nand_base_program_cache;
+ nand->erase_block = nand_base_erase_block;
+
+ return nand;
+}
+
+void nand_base_exit(struct nand_base *base)
+{
+ nfi_exit(base->nfi);
+ mem_free(base);
+}
diff --git a/drivers/mtd/nandx/core/nand_base.h b/drivers/mtd/nandx/core/nand_base.h
new file mode 100644
index 0000000000..13217978e5
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_base.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_BASE_H__
+#define __NAND_BASE_H__
+
+/*
+ * nand base functions
+ * @dev: nand device information
+ * @nfi: nand host controller
+ * @select_device: select one nand device of the multiple nand devices on chip
+ * @reset: reset current nand device
+ * @read_id: read current nand id
+ * @read_param_page: read current nand parameters page
+ * @set_feature: configure the nand device feature
+ * @get_feature: get the nand device feature
+ * @read_status: read nand device status
+ * @addressing: translate the address to the nand device physical address
+ * @read_page: read page data to device cache register
+ * @read_data: read data from device cache register by bus protocol
+ * @read_cache: nand cache read operation for data output
+ * @read_last: nand cache read operation for last page output
+ * @write_enable: enable program/erase for nand, especially spi nand
+ * @program_data: program data to nand device cache register
+ * @program_page: program page data from nand device cache register to array
+ * @program_cache: nand cache program operation for data input
+ * @erase_block: erase nand block operation
+ */
+struct nand_base {
+ struct nand_device *dev;
+ struct nfi *nfi;
+ int (*select_device)(struct nand_base *nand, int cs);
+ int (*reset)(struct nand_base *nand);
+ int (*read_id)(struct nand_base *nand, u8 *id, int count);
+ int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
+ int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
+ int count);
+ int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
+ int count);
+ int (*read_status)(struct nand_base *nand);
+ int (*addressing)(struct nand_base *nand, int *row, int *col);
+
+ int (*read_page)(struct nand_base *nand, int row);
+ int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
+ u8 *data, u8 *oob);
+ int (*read_cache)(struct nand_base *nand, int row);
+ int (*read_last)(struct nand_base *nand);
+
+ int (*write_enable)(struct nand_base *nand);
+ int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
+ u8 *oob);
+ int (*program_page)(struct nand_base *nand, int row);
+ int (*program_cache)(struct nand_base *nand);
+
+ int (*erase_block)(struct nand_base *nand, int row);
+};
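+
+/*
+ * Typical read flow through these hooks (a sketch; nand_chip_read_page()
+ * in nand_chip.c follows exactly this sequence):
+ *
+ *	nand->addressing(nand, &row, &col);
+ *	nand->read_page(nand, row);
+ *	nand->read_data(nand, row, col, sectors, data, oob);
+ *
+ * Program is the mirror image: write_enable, program_data, program_page,
+ * then read_status to check dev->status->program_fail.
+ */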
+
+struct nand_base *nand_base_init(struct nand_device *device,
+ struct nfi *nfi);
+void nand_base_exit(struct nand_base *base);
+
+struct nand_base *nand_device_init(struct nand_chip *nand);
+void nand_exit(struct nand_base *nand);
+
+int nand_detect_device(struct nand_base *nand);
+
+#endif /* __NAND_BASE_H__ */
diff --git a/drivers/mtd/nandx/core/nand_chip.c b/drivers/mtd/nandx/core/nand_chip.c
new file mode 100644
index 0000000000..02adc6f52e
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_chip.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+static int nand_chip_read_page(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col, sectors;
+ u8 *data, *oob;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+ ops[i].status = nand->read_page(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ data = ops[i].data;
+ oob = ops[i].oob;
+ sectors = ops[i].len / chip->sector_size;
+ ops[i].status = nand->read_data(nand, row, col,
+ sectors, data, oob);
+ if (ops[i].status > 0)
+ ops[i].status = ops[i].status >=
+ dev->endurance->max_bitflips ?
+ -ENANDFLIPS : 0;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+static int nand_chip_write_page(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col;
+ u8 *data, *oob;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+
+ ops[i].status = nand->write_enable(nand);
+ if (ops[i].status) {
+ pr_debug("Write Protect at %x!\n", row);
+ ops[i].status = -ENANDWP;
+ return -ENANDWP;
+ }
+
+ data = ops[i].data;
+ oob = ops[i].oob;
+ ops[i].status = nand->program_data(nand, row, col, data, oob);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ ops[i].status = nand->program_page(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ ops[i].status = nand->read_status(nand);
+ if (ops[i].status & dev->status->program_fail)
+ ops[i].status = -ENANDWRITE;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+static int nand_chip_erase_block(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+
+ ops[i].status = nand->write_enable(nand);
+ if (ops[i].status) {
+ pr_debug("Write Protect at %x!\n", row);
+ ops[i].status = -ENANDWP;
+ return -ENANDWP;
+ }
+
+ ops[i].status = nand->erase_block(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ ops[i].status = nand->read_status(nand);
+ if (ops[i].status & dev->status->erase_fail)
+ ops[i].status = -ENANDERASE;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+/* read first bad mark on spare */
+static int nand_chip_is_bad_block(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ int i, ret, value;
+ int status = 0;
+ u8 *data, *tmp_buf;
+
+ tmp_buf = mem_alloc(1, chip->page_size);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ memset(tmp_buf, 0x00, chip->page_size);
+
+ /* Disable ECC */
+ value = 0;
+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+ if (ret)
+ goto out;
+
+ ret = chip->read_page(chip, ops, count);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ data = ops[i].data;
+
+ /* temp solution for mt7622, because of no bad mark swap */
+ if (!memcmp(data, tmp_buf, chip->page_size)) {
+ ops[i].status = -ENANDBAD;
+ status = -ENANDBAD;
+
+ } else {
+ ops[i].status = 0;
+ }
+ }
+
+ /* Enable ECC */
+ value = 1;
+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+ if (ret)
+ goto out;
+
+ mem_free(tmp_buf);
+ return status;
+
+out:
+ mem_free(tmp_buf);
+ return ret;
+}
+
+static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
+{
+ return -EOPNOTSUPP;
+}
+
+static int nand_chip_suspend(struct nand_chip *chip)
+{
+ return 0;
+}
+
+static int nand_chip_resume(struct nand_chip *chip)
+{
+ return 0;
+}
+
+struct nand_chip *nand_chip_init(struct nfi_resource *res)
+{
+ struct nand_chip *chip;
+ struct nand_base *nand;
+ struct nfi *nfi;
+
+ chip = mem_alloc(1, sizeof(struct nand_chip));
+ if (!chip) {
+ pr_info("nand chip alloc fail!\n");
+ return NULL;
+ }
+
+ nfi = nfi_init(res);
+ if (!nfi) {
+ pr_info("nfi init fail!\n");
+ goto nfi_err;
+ }
+
+ nand = nand_base_init(NULL, nfi);
+ if (!nand) {
+ pr_info("nand base init fail!\n");
+ goto base_err;
+ }
+
+ chip->nand = (void *)nand;
+ chip->read_page = nand_chip_read_page;
+ chip->write_page = nand_chip_write_page;
+ chip->erase_block = nand_chip_erase_block;
+ chip->is_bad_block = nand_chip_is_bad_block;
+ chip->chip_ctrl = nand_chip_ctrl;
+ chip->suspend = nand_chip_suspend;
+ chip->resume = nand_chip_resume;
+
+ nand = nand_device_init(chip);
+ if (!nand)
+ goto nand_err;
+
+ chip->nand = (void *)nand;
+ chip->plane_num = nand->dev->plane_num;
+ chip->block_num = nand_total_blocks(nand->dev);
+ chip->block_size = nand->dev->block_size;
+ chip->block_pages = nand_block_pages(nand->dev);
+ chip->page_size = nand->dev->page_size;
+ chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
+ nfi->sector_size);
+ chip->sector_size = nfi->sector_size;
+ chip->sector_spare_size = nfi->sector_spare_size;
+ chip->min_program_pages = nand->dev->min_program_pages;
+ chip->ecc_strength = nfi->ecc_strength;
+ chip->ecc_parity_size = nfi->ecc_parity_size;
+ chip->fdm_ecc_size = nfi->fdm_ecc_size;
+ chip->fdm_reg_size = nfi->fdm_size;
+
+ return chip;
+
+nand_err:
+ mem_free(nand);
+base_err:
+ nfi_exit(nfi);
+nfi_err:
+ mem_free(chip);
+ return NULL;
+}
+
+void nand_chip_exit(struct nand_chip *chip)
+{
+ nand_exit(chip->nand);
+ mem_free(chip);
+}
diff --git a/drivers/mtd/nandx/core/nand_chip.h b/drivers/mtd/nandx/core/nand_chip.h
new file mode 100644
index 0000000000..3e9c8e6ca3
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_chip.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_CHIP_H__
+#define __NAND_CHIP_H__
+
+enum nand_type {
+ NAND_SPI,
+ NAND_SLC,
+ NAND_MLC,
+ NAND_TLC
+};
+
+/*
+ * nand chip operation unit
+ * one nand_ops indicates one row operation
+ * @row: nand chip row address, i.e. the nand row
+ * @col: nand chip column address, i.e. the nand column
+ * @len: operated data length, minimum is sector_size,
+ *	maximum is page_size, and it must be sector_size aligned
+ * @status: result status of one operation
+ * @data: data buffer for the operation
+ * @oob: oob buffer for the operation, i.e. the nand spare area
+ */
+struct nand_ops {
+ int row;
+ int col;
+ int len;
+ int status;
+ void *data;
+ void *oob;
+};
+
+/*
+ * nand chip descriptions
+ * nand chip includes the nand controller and several identical nand devices
+ * @nand_type: the nand type on this chip,
+ *	the chip may have several nand devices and their type must be the same
+ * @plane_num: the whole plane number on the chip
+ * @block_num: the whole block number on the chip
+ * @block_size: nand device block size
+ * @block_pages: number of pages per nand device block
+ * @page_size: nand device page size
+ * @oob_size: chip out-of-band size, similar to the nand spare size,
+ *	but with this restriction:
+ *	the size is provided by the nand controller (NFI),
+ *	because the NFI uses part of the nand spare area
+ * @min_program_pages: minimum pages the chip needs per program operation,
+ *	one page as one nand_ops
+ * @sector_size: chip minimum read size
+ * @sector_spare_size: spare size per sector, i.e. spare_size/page_sectors
+ * @ecc_strength: ecc strength per sector_size, used for ecc calculation
+ * @ecc_parity_size: ecc parity size for one sector_size of data
+ * @nand: pointer to inherited struct nand_base
+ * @read_page: read %count pages on chip
+ * @write_page: write %count pages on chip
+ * @erase_block: erase %count blocks on chip, one block is one nand_ops,
+ *	it is better to set nand_ops.row to the block start row
+ * @is_bad_block: check whether the %count blocks on chip are bad,
+ *	by vendor specification
+ * @chip_ctrl: control the chip features by nandx_ctrl_cmd
+ * @suspend: suspend nand chip
+ * @resume: resume nand chip
+ */
+struct nand_chip {
+ int nand_type;
+ int plane_num;
+ int block_num;
+ int block_size;
+ int block_pages;
+ int page_size;
+ int oob_size;
+
+ int min_program_pages;
+ int sector_size;
+ int sector_spare_size;
+ int ecc_strength;
+ int ecc_parity_size;
+ u32 fdm_ecc_size;
+ u32 fdm_reg_size;
+
+ void *nand;
+
+ int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
+ int (*suspend)(struct nand_chip *chip);
+ int (*resume)(struct nand_chip *chip);
+};
+
+struct nand_chip *nand_chip_init(struct nfi_resource *res);
+void nand_chip_exit(struct nand_chip *chip);
+#endif /* __NAND_CHIP_H__ */
diff --git a/drivers/mtd/nandx/core/nand_device.c b/drivers/mtd/nandx/core/nand_device.c
new file mode 100644
index 0000000000..9f6764d1bc
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_device.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nand_base.h"
+
+#define MAX_CHIP_DEVICE 4
+#define PARAM_PAGE_LEN 2048
+#define ONFI_CRC_BASE 0x4f4e
+
+static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+
+ while (len--) {
+ crc ^= *p++ << 8;
+
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
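+
+/*
+ * The CRC-16 above uses polynomial 0x8005 seeded with ONFI_CRC_BASE
+ * (0x4f4e). A parameter page copy is accepted when, for example:
+ *
+ *	crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i],
+ *				sizeof(struct nand_onfi_params) - sizeof(u16));
+ * and
+ *	crc16 == onfi[i].crc16
+ *
+ * which is exactly the check done by detect_onfi()/detect_jedec() below.
+ */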
+
+static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
+ u8 *col_cycle)
+{
+ *row_cycle = addr_cycle & 0xf;
+ *col_cycle = (addr_cycle >> 4) & 0xf;
+}
+
+static int detect_onfi(struct nand_device *dev,
+ struct nand_onfi_params *onfi)
+{
+ struct nand_endurance *endurance = dev->endurance;
+ u16 size, i, crc16;
+ u8 *id;
+
+ size = sizeof(struct nand_onfi_params) - sizeof(u16);
+
+ for (i = 0; i < 3; i++) {
+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
+
+ if (onfi[i].signature[0] == 'O' &&
+ onfi[i].signature[1] == 'N' &&
+ onfi[i].signature[2] == 'F' &&
+ onfi[i].signature[3] == 'I' &&
+ onfi[i].crc16 == crc16)
+ break;
+
+		/* in some spi nand, the onfi signature may be "NAND" */
+ if (onfi[i].signature[0] == 'N' &&
+ onfi[i].signature[1] == 'A' &&
+ onfi[i].signature[2] == 'N' &&
+ onfi[i].signature[3] == 'D' &&
+ onfi[i].crc16 == crc16)
+ break;
+ }
+
+ if (i == 3)
+ return -ENODEV;
+
+ memcpy(dev->name, onfi[i].model, 20);
+ id = onfi[i].manufacturer;
+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+ id[7]);
+ dev->id_len = MAX_ID_NUM;
+ dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
+ decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
+ &dev->col_cycle);
+ dev->target_num = 1;
+ dev->lun_num = onfi[i].lun_num;
+ dev->plane_num = BIT(onfi[i].plane_address_bits);
+ dev->block_num = onfi[i].lun_blocks / dev->plane_num;
+ dev->block_size = onfi[i].block_pages * onfi[i].page_size;
+ dev->page_size = onfi[i].page_size;
+ dev->spare_size = onfi[i].spare_size;
+
+ endurance->ecc_req = onfi[i].ecc_req;
+ endurance->pe_cycle = onfi[i].valid_block_endurance;
+ endurance->max_bitflips = endurance->ecc_req >> 1;
+
+ return 0;
+}
+
+static int detect_jedec(struct nand_device *dev,
+ struct nand_jedec_params *jedec)
+{
+ struct nand_endurance *endurance = dev->endurance;
+ u16 size, i, crc16;
+ u8 *id;
+
+ size = sizeof(struct nand_jedec_params) - sizeof(u16);
+
+ for (i = 0; i < 3; i++) {
+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
+
+ if (jedec[i].signature[0] == 'J' &&
+ jedec[i].signature[1] == 'E' &&
+ jedec[i].signature[2] == 'S' &&
+ jedec[i].signature[3] == 'D' &&
+ jedec[i].crc16 == crc16)
+ break;
+ }
+
+ if (i == 3)
+ return -ENODEV;
+
+ memcpy(dev->name, jedec[i].model, 20);
+ id = jedec[i].manufacturer;
+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+ id[7]);
+ dev->id_len = MAX_ID_NUM;
+ dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
+ decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
+ &dev->col_cycle);
+ dev->target_num = 1;
+ dev->lun_num = jedec[i].lun_num;
+ dev->plane_num = BIT(jedec[i].plane_address_bits);
+ dev->block_num = jedec[i].lun_blocks / dev->plane_num;
+ dev->block_size = jedec[i].block_pages * jedec[i].page_size;
+ dev->page_size = jedec[i].page_size;
+ dev->spare_size = jedec[i].spare_size;
+
+ endurance->ecc_req = jedec[i].endurance_block0[0];
+ endurance->pe_cycle = jedec[i].valid_block_endurance;
+ endurance->max_bitflips = endurance->ecc_req >> 1;
+
+ return 0;
+}
+
+static struct nand_device *detect_parameters_page(struct nand_base *nand)
+{
+ struct nand_device *dev = nand->dev;
+ void *params;
+ int ret;
+
+ params = mem_alloc(1, PARAM_PAGE_LEN);
+ if (!params)
+ return NULL;
+
+ memset(params, 0, PARAM_PAGE_LEN);
+ ret = nand->read_param_page(nand, params, PARAM_PAGE_LEN);
+ if (ret < 0) {
+ pr_info("read parameters page fail!\n");
+ goto error;
+ }
+
+ ret = detect_onfi(dev, params);
+ if (ret) {
+ pr_info("detect onfi device fail! try to detect jedec\n");
+ ret = detect_jedec(dev, params);
+ if (ret) {
+ pr_info("detect jedec device fail!\n");
+ goto error;
+ }
+ }
+
+ mem_free(params);
+ return dev;
+
+error:
+ mem_free(params);
+ return NULL;
+}
+
+static int read_device_id(struct nand_base *nand, int cs, u8 *id)
+{
+ int i;
+
+ nand->select_device(nand, cs);
+ nand->reset(nand);
+ nand->read_id(nand, id, MAX_ID_NUM);
+ pr_info("device %d ID: ", cs);
+
+ for (i = 0; i < MAX_ID_NUM; i++)
+ pr_info("%x ", id[i]);
+
+ pr_info("\n");
+
+ return 0;
+}
+
+static int detect_more_device(struct nand_base *nand, u8 *id)
+{
+ u8 id_ext[MAX_ID_NUM];
+ int i, j, target_num = 0;
+
+ for (i = 1; i < MAX_CHIP_DEVICE; i++) {
+ memset(id_ext, 0xff, MAX_ID_NUM);
+ read_device_id(nand, i, id_ext);
+
+ for (j = 0; j < MAX_ID_NUM; j++) {
+ if (id_ext[j] != id[j])
+ goto out;
+ }
+
+ target_num += 1;
+ }
+
+out:
+ return target_num;
+}
+
+static struct nand_device *scan_device_table(const u8 *id, int id_len)
+{
+ struct nand_device *dev;
+ int i = 0, j;
+ u8 ids[MAX_ID_NUM] = {0};
+
+ while (1) {
+ dev = nand_get_device(i);
+
+ if (!strcmp(dev->name, "NO-DEVICE"))
+ break;
+
+ if (id_len < dev->id_len) {
+ i += 1;
+ continue;
+ }
+
+ NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
+ for (j = 0; j < dev->id_len; j++) {
+ if (ids[j] != id[j])
+ break;
+ }
+
+ if (j == dev->id_len)
+ break;
+
+ i += 1;
+ }
+
+ return dev;
+}
+
+int nand_detect_device(struct nand_base *nand)
+{
+ struct nand_device *dev;
+ u8 id[MAX_ID_NUM] = { 0 };
+ int target_num = 0;
+
+ /* Get nand device default setting for reset/read_id */
+ nand->dev = scan_device_table(NULL, -1);
+
+ read_device_id(nand, 0, id);
+ dev = scan_device_table(id, MAX_ID_NUM);
+
+ if (!strcmp(dev->name, "NO-DEVICE")) {
+ pr_info("device scan fail\n");
+ return -ENODEV;
+ }
+
+	/*
+	 * To be fixed: this fallback path has a null pointer issue, so the
+	 * early return above keeps it unreachable for now.
+	 */
+ if (!strcmp(dev->name, "NO-DEVICE")) {
+ pr_info("device scan fail, detect parameters page\n");
+ dev = detect_parameters_page(nand);
+ if (!dev) {
+ pr_info("detect parameters fail\n");
+ return -ENODEV;
+ }
+ }
+
+ if (dev->target_num > 1)
+ target_num = detect_more_device(nand, id);
+
+ target_num += 1;
+ pr_debug("chip has target device num: %d\n", target_num);
+
+ if (dev->target_num != target_num)
+ dev->target_num = target_num;
+
+ nand->dev = dev;
+
+ return 0;
+}
+
diff --git a/drivers/mtd/nandx/core/nand_device.h b/drivers/mtd/nandx/core/nand_device.h
new file mode 100644
index 0000000000..e142cf529d
--- /dev/null
+++ b/drivers/mtd/nandx/core/nand_device.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_DEVICE_H__
+#define __NAND_DEVICE_H__
+
+/* onfi 3.2 */
+struct nand_onfi_params {
+ /* Revision information and features block. 0 */
+ /*
+ * Byte 0: 4Fh,
+ * Byte 1: 4Eh,
+ * Byte 2: 46h,
+ * Byte 3: 49h,
+ */
+ u8 signature[4];
+ /*
+ * 9-15 Reserved (0)
+ * 8 1 = supports ONFI version 3.2
+ * 7 1 = supports ONFI version 3.1
+ * 6 1 = supports ONFI version 3.0
+ * 5 1 = supports ONFI version 2.3
+ * 4 1 = supports ONFI version 2.2
+ * 3 1 = supports ONFI version 2.1
+ * 2 1 = supports ONFI version 2.0
+ * 1 1 = supports ONFI version 1.0
+ * 0 Reserved (0)
+ */
+ u16 revision;
+ /*
+ * 13-15 Reserved (0)
+ * 12 1 = supports external Vpp
+ * 11 1 = supports Volume addressing
+ * 10 1 = supports NV-DDR2
+ * 9 1 = supports EZ NAND
+ * 8 1 = supports program page register clear enhancement
+ * 7 1 = supports extended parameter page
+ * 6 1 = supports multi-plane read operations
+ * 5 1 = supports NV-DDR
+ * 4 1 = supports odd to even page Copyback
+ * 3 1 = supports multi-plane program and erase operations
+ * 2 1 = supports non-sequential page programming
+ * 1 1 = supports multiple LUN operations
+ * 0 1 = supports 16-bit data bus width
+ */
+ u16 features;
+ /*
+ * 13-15 Reserved (0)
+ * 12 1 = supports LUN Get and LUN Set Features
+ * 11 1 = supports ODT Configure
+ * 10 1 = supports Volume Select
+ * 9 1 = supports Reset LUN
+ * 8 1 = supports Small Data Move
+ * 7 1 = supports Change Row Address
+ * 6 1 = supports Change Read Column Enhanced
+ * 5 1 = supports Read Unique ID
+ * 4 1 = supports Copyback
+ * 3 1 = supports Read Status Enhanced
+ * 2 1 = supports Get Features and Set Features
+ * 1 1 = supports Read Cache commands
+ * 0 1 = supports Page Cache Program command
+ */
+ u16 opt_cmds;
+ /*
+ * 4-7 Reserved (0)
+ * 3 1 = supports Multi-plane Block Erase
+ * 2 1 = supports Multi-plane Copyback Program
+ * 1 1 = supports Multi-plane Page Program
+ * 0 1 = supports Random Data Out
+ */
+ u8 advance_cmds;
+ u8 reserved0[1];
+ u16 extend_param_len;
+ u8 param_page_num;
+ u8 reserved1[17];
+
+ /* Manufacturer information block. 32 */
+ u8 manufacturer[12];
+ u8 model[20];
+ u8 jedec_id;
+ u16 data_code;
+ u8 reserved2[13];
+
+ /* Memory organization block. 80 */
+ u32 page_size;
+ u16 spare_size;
+ u32 partial_page_size; /* obsolete */
+ u16 partial_spare_size; /* obsolete */
+ u32 block_pages;
+ u32 lun_blocks;
+ u8 lun_num;
+ /*
+ * 4-7 Column address cycles
+ * 0-3 Row address cycles
+ */
+ u8 addr_cycle;
+ u8 cell_bits;
+ u16 lun_max_bad_blocks;
+ u16 block_endurance;
+ u8 target_begin_valid_blocks;
+ u16 valid_block_endurance;
+ u8 page_program_num;
+ u8 partial_program_attr; /* obsolete */
+ u8 ecc_req;
+ /*
+ * 4-7 Reserved (0)
+ * 0-3 Number of plane address bits
+ */
+ u8 plane_address_bits;
+ /*
+ * 6-7 Reserved (0)
+ * 5 1 = lower bit XNOR block address restriction
+ * 4 1 = read cache supported
+ * 3 Address restrictions for cache operations
+ * 2 1 = program cache supported
+ * 1 1 = no block address restrictions
+ * 0 Overlapped / concurrent multi-plane support
+ */
+ u8 multi_plane_attr;
+ u8 ez_nand_support;
+ u8 reserved3[12];
+
+ /* Electrical parameters block. 128 */
+ u8 io_pin_max_capacitance;
+ /*
+ * 6-15 Reserved (0)
+ * 5 1 = supports timing mode 5
+ * 4 1 = supports timing mode 4
+ * 3 1 = supports timing mode 3
+ * 2 1 = supports timing mode 2
+ * 1 1 = supports timing mode 1
+ * 0 1 = supports timing mode 0, shall be 1
+ */
+ u16 sdr_timing_mode;
+ u16 sdr_program_cache_timing_mode; /* obsolete */
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ /*
+ * 7 Reserved (0)
+ * 6 1 = supports NV-DDR2 timing mode 8
+ * 5 1 = supports NV-DDR timing mode 5
+ * 4 1 = supports NV-DDR timing mode 4
+ * 3 1 = supports NV-DDR timing mode 3
+ * 2 1 = supports NV-DDR timing mode 2
+ * 1 1 = supports NV-DDR timing mode 1
+ * 0 1 = supports NV-DDR timing mode 0
+ */
+ u8 nvddr_timing_mode;
+ /*
+ * 7 1 = supports timing mode 7
+ * 6 1 = supports timing mode 6
+ * 5 1 = supports timing mode 5
+ * 4 1 = supports timing mode 4
+ * 3 1 = supports timing mode 3
+ * 2 1 = supports timing mode 2
+ * 1 1 = supports timing mode 1
+ * 0 1 = supports timing mode 0
+ */
+ u8 nvddr2_timing_mode;
+ /*
+ * 4-7 Reserved (0)
+ * 3 1 = device requires Vpp enablement sequence
+ * 2 1 = device supports CLK stopped for data input
+ * 1 1 = typical capacitance
+ * 0 tCAD value to use
+ */
+ u8 nvddr_fetures;
+ u16 clk_pin_capacitance;
+ u16 io_pin_capacitance;
+ u16 input_pin_capacitance;
+ u8 input_pin_max_capacitance;
+ /*
+ * 3-7 Reserved (0)
+ * 2 1 = supports 18 Ohm drive strength
+ * 1 1 = supports 25 Ohm drive strength
+ * 0 1 = supports driver strength settings
+ */
+ u8 drive_strength;
+ u16 tR_multi_plane;
+ u16 tADL;
+ u16 tR_ez_nand;
+ /*
+ * 6-7 Reserved (0)
+ * 5 1 = external VREFQ required for >= 200 MT/s
+ * 4 1 = supports differential signaling for DQS
+ * 3 1 = supports differential signaling for RE_n
+ * 2 1 = supports ODT value of 30 Ohms
+ * 1 1 = supports matrix termination ODT
+ * 0 1 = supports self-termination ODT
+ */
+ u8 nvddr2_features;
+ u8 nvddr2_warmup_cycles;
+ u8 reserved4[4];
+
+ /* vendor block. 164 */
+ u16 vendor_revision;
+ u8 vendor_spec[88];
+
+ /* CRC for Parameter Page. 254 */
+ u16 crc16;
+} __packed;
+
+/* JESD230-B */
+struct nand_jedec_params {
+ /* Revision information and features block. 0 */
+ /*
+ * Byte 0:4Ah
+ * Byte 1:45h
+ * Byte 2:53h
+ * Byte 3:44h
+ */
+ u8 signature[4];
+ /*
+ * 3-15: Reserved (0)
+ * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
+ * 1: 1 = supports vendor specific parameter page
+ * 0: Reserved (0)
+ */
+ u16 revision;
+ /*
+ * 9-15 Reserved (0)
+ * 8: 1 = supports program page register clear enhancement
+ * 7: 1 = supports external Vpp
+ * 6: 1 = supports Toggle Mode DDR
+ * 5: 1 = supports Synchronous DDR
+ * 4: 1 = supports multi-plane read operations
+ * 3: 1 = supports multi-plane program and erase operations
+ * 2: 1 = supports non-sequential page programming
+ * 1: 1 = supports multiple LUN operations
+ * 0: 1 = supports 16-bit data bus width
+ */
+ u16 features;
+ /*
+ * 11-23: Reserved (0)
+ * 10: 1 = supports Synchronous Reset
+ * 9: 1 = supports Reset LUN (Primary)
+ * 8: 1 = supports Small Data Move
+ * 7: 1 = supports Multi-plane Copyback Program (Primary)
+ * 6: 1 = supports Random Data Out (Primary)
+ * 5: 1 = supports Read Unique ID
+ * 4: 1 = supports Copyback
+ * 3: 1 = supports Read Status Enhanced (Primary)
+ * 2: 1 = supports Get Features and Set Features
+ * 1: 1 = supports Read Cache commands
+ * 0: 1 = supports Page Cache Program command
+ */
+ u8 opt_cmds[3];
+ /*
+ * 8-15: Reserved (0)
+ * 7: 1 = supports secondary Read Status Enhanced
+ * 6: 1 = supports secondary Multi-plane Block Erase
+ * 5: 1 = supports secondary Multi-plane Copyback Program
+ * 4: 1 = supports secondary Multi-plane Program
+ * 3: 1 = supports secondary Random Data Out
+ * 2: 1 = supports secondary Multi-plane Copyback Read
+ * 1: 1 = supports secondary Multi-plane Read Cache Random
+ * 0: 1 = supports secondary Multi-plane Read
+ */
+ u16 secondary_cmds;
+ u8 param_page_num;
+ u8 reserved0[18];
+
+ /* Manufacturer information block. 32*/
+ u8 manufacturer[12];
+ u8 model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* Memory organization block. 80 */
+ u32 page_size;
+ u16 spare_size;
+ u8 reserved2[6];
+ u32 block_pages;
+ u32 lun_blocks;
+ u8 lun_num;
+ /*
+ * 4-7 Column address cycles
+ * 0-3 Row address cycles
+ */
+ u8 addr_cycle;
+ u8 cell_bits;
+ u8 page_program_num;
+ /*
+ * 4-7 Reserved (0)
+ * 0-3 Number of plane address bits
+ */
+ u8 plane_address_bits;
+ /*
+ * 3-7: Reserved (0)
+ * 2: 1= read cache supported
+ * 1: 1 = program cache supported
+ * 0: 1= No multi-plane block address restrictions
+ */
+ u8 multi_plane_attr;
+ u8 reserved3[38];
+
+ /* Electrical parameters block. 144 */
+ /*
+ * 6-15: Reserved (0)
+ * 5: 1 = supports 20 ns speed grade (50 MHz)
+ * 4: 1 = supports 25 ns speed grade (40 MHz)
+ * 3: 1 = supports 30 ns speed grade (~33 MHz)
+ * 2: 1 = supports 35 ns speed grade (~28 MHz)
+ * 1: 1 = supports 50 ns speed grade (20 MHz)
+ * 0: 1 = supports 100 ns speed grade (10 MHz)
+ */
+ u16 sdr_speed;
+ /*
+ * 8-15: Reserved (0)
+ * 7: 1 = supports 5 ns speed grade (200 MHz)
+ * 6: 1 = supports 6 ns speed grade (~166 MHz)
+ * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
+ * 4: 1 = supports 10 ns speed grade (100 MHz)
+ * 3: 1 = supports 12 ns speed grade (~83 MHz)
+ * 2: 1 = supports 15 ns speed grade (~66 MHz)
+ * 1: 1 = supports 25 ns speed grade (40 MHz)
+ * 0: 1 = supports 30 ns speed grade (~33 MHz)
+ */
+ u16 toggle_ddr_speed;
+ /*
+ * 6-15: Reserved (0)
+ * 5: 1 = supports 10 ns speed grade (100 MHz)
+ * 4: 1 = supports 12 ns speed grade (~83 MHz)
+ * 3: 1 = supports 15 ns speed grade (~66 MHz)
+ * 2: 1 = supports 20 ns speed grade (50 MHz)
+ * 1: 1 = supports 30 ns speed grade (~33 MHz)
+ * 0: 1 = supports 50 ns speed grade (20 MHz)
+ */
+ u16 sync_ddr_speed;
+ u8 sdr_features;
+ u8 toggle_ddr_features;
+ /*
+ * 2-7: Reserved (0)
+ * 1: Device supports CK stopped for data input
+ * 0: tCAD value to use
+ */
+ u8 sync_ddr_features;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tR_multi_plane;
+ u16 tCCS;
+ u16 io_pin_capacitance;
+ u16 input_pin_capacitance;
+ u16 ck_pin_capacitance;
+ /*
+ * 3-7: Reserved (0)
+ * 2: 1 = supports 18 ohm drive strength
+ * 1: 1 = supports 25 ohm drive strength
+ * 0: 1 = supports 35ohm/50ohm drive strength
+ */
+ u8 drive_strength;
+ u16 tADL;
+ u8 reserved4[36];
+
+ /* ECC and endurance block. 208 */
+ u8 target_begin_valid_blocks;
+ u16 valid_block_endurance;
+ /*
+ * Byte 0: Number of bits ECC correctability
+ * Byte 1: Codeword size
+ * Byte 2-3: Bad blocks maximum per LUN
+ * Byte 4-5: Block endurance
+ * Byte 6-7: Reserved (0)
+ */
+ u8 endurance_block0[8];
+ u8 endurance_block1[8];
+ u8 endurance_block2[8];
+ u8 endurance_block3[8];
+ u8 reserved5[29];
+
+ /* Reserved. 272 */
+ u8 reserved6[148];
+
+ /* Vendor specific block. 420 */
+ u16 vendor_revision;
+ u8 vendor_spec[88];
+
+ /* CRC for Parameter Page. 510 */
+ u16 crc16;
+} __packed;
+
+/* parallel nand io width */
+enum nand_io_width {
+ NAND_IO8,
+ NAND_IO16
+};
+
+/* all supported nand timing types */
+enum nand_timing_type {
+ NAND_TIMING_SDR,
+ NAND_TIMING_SYNC_DDR,
+ NAND_TIMING_TOGGLE_DDR,
+ NAND_TIMING_NVDDR2
+};
+
+/* nand basic commands */
+struct nand_cmds {
+ short reset;
+ short read_id;
+ short read_status;
+ short read_param_page;
+ short set_feature;
+ short get_feature;
+ short read_1st;
+ short read_2nd;
+ short random_out_1st;
+ short random_out_2nd;
+ short program_1st;
+ short program_2nd;
+ short erase_1st;
+ short erase_2nd;
+ short read_cache;
+ short read_cache_last;
+ short program_cache;
+};
+
+/*
+ * addressing for nand physical address
+ * @row_bit_start: row address start bit
+ * @block_bit_start: block address start bit
+ * @plane_bit_start: plane address start bit
+ * @lun_bit_start: lun address start bit
+ */
+struct nand_addressing {
+ u8 row_bit_start;
+ u8 block_bit_start;
+ u8 plane_bit_start;
+ u8 lun_bit_start;
+};
+
+/*
+ * nand operations status
+ * @array_busy: indicates device array operation busy
+ * @write_protect: indicates the device cannot be written or erased
+ * @erase_fail: indicates erase operation fail
+ * @program_fail: indicates program operation fail
+ */
+struct nand_status {
+ u8 array_busy;
+ u8 write_protect;
+ u8 erase_fail;
+ u8 program_fail;
+};
+
+/*
+ * nand endurance information
+ * @pe_cycle: max program/erase cycle for nand stored data stability
+ * @ecc_req: ecc strength required for the nand, measured per 1KB
+ * @max_bitflips: bitflips are the ecc corrected bits,
+ *	max_bitflips is the threshold for nand stored data stability;
+ *	if the corrected bits exceed max_bitflips, the stored data must be
+ *	moved to another good block
+ */
+struct nand_endurance {
+ int pe_cycle;
+ int ecc_req;
+ int max_bitflips;
+};
+
+/* wait for nand busy type */
+enum nand_wait_type {
+ NAND_WAIT_IRQ,
+ NAND_WAIT_POLLING,
+ NAND_WAIT_TWHR2,
+};
+
+/* each nand array operations time */
+struct nand_array_timing {
+ u16 tRST;
+ u16 tWHR;
+ u16 tR;
+ u16 tRCBSY;
+ u16 tFEAT;
+ u16 tPROG;
+ u16 tPCBSY;
+ u16 tBERS;
+ u16 tDBSY;
+};
+
+/* nand sdr interface timing required */
+struct nand_sdr_timing {
+ u16 tREA;
+ u16 tREH;
+ u16 tCR;
+ u16 tRP;
+ u16 tWP;
+ u16 tWH;
+ u16 tWHR;
+ u16 tCLS;
+ u16 tALS;
+ u16 tCLH;
+ u16 tALH;
+ u16 tWC;
+ u16 tRC;
+};
+
+/* nand onfi ddr (nvddr) interface timing required */
+struct nand_onfi_timing {
+ u16 tCAD;
+ u16 tWPRE;
+ u16 tWPST;
+ u16 tWRCK;
+ u16 tDQSCK;
+ u16 tWHR;
+};
+
+/* nand toggle ddr (toggle 1.0) interface timing required */
+struct nand_toggle_timing {
+ u16 tCS;
+ u16 tCH;
+ u16 tCAS;
+ u16 tCAH;
+ u16 tCALS;
+ u16 tCALH;
+ u16 tWP;
+ u16 tWPRE;
+ u16 tWPST;
+ u16 tWPSTH;
+ u16 tCR;
+ u16 tRPRE;
+ u16 tRPST;
+ u16 tRPSTH;
+ u16 tCDQSS;
+ u16 tWHR;
+};
+
+/* nand basic device information */
+struct nand_device {
+ u8 *name;
+ u64 id;
+ u8 id_len;
+ u8 io_width;
+ u8 row_cycle;
+ u8 col_cycle;
+ u8 target_num;
+ u8 lun_num;
+ u8 plane_num;
+ int block_num;
+ int block_size;
+ int page_size;
+ int spare_size;
+ int min_program_pages;
+ struct nand_cmds *cmds;
+ struct nand_addressing *addressing;
+ struct nand_status *status;
+ struct nand_endurance *endurance;
+ struct nand_array_timing *array_timing;
+};
+
+#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
+ _col_cycle, _target_num, _lun_num, _plane_num, \
+ _block_num, _block_size, _page_size, _spare_size, \
+ _min_program_pages, _cmds, _addressing, _status, \
+ _endurance, _array_timing) \
+{ \
+ _name, _id, _id_len, _io_width, _row_cycle, \
+ _col_cycle, _target_num, _lun_num, _plane_num, \
+ _block_num, _block_size, _page_size, _spare_size, \
+ _min_program_pages, _cmds, _addressing, _status, \
+ _endurance, _array_timing \
+}
+
+#define MAX_ID_NUM sizeof(u64)
+
+#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
+ ( \
+ id0 | id1 << 8 | id2 << 16 | id3 << 24 | \
+ (u64)id4 << 32 | (u64)id5 << 40 | \
+ (u64)id6 << 48 | (u64)id7 << 56 \
+ )
+
+#define NAND_UNPACK_ID(id, ids, len) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < len; _i++) \
+ ids[_i] = id >> (_i << 3) & 0xff; \
+ } while (0)
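+
+/*
+ * Example with illustrative id bytes: packing a two-byte id
+ *
+ *	u64 id = NAND_PACK_ID(0x2c, 0x24, 0, 0, 0, 0, 0, 0);
+ *	u8 ids[MAX_ID_NUM];
+ *
+ *	NAND_UNPACK_ID(id, ids, MAX_ID_NUM);
+ *
+ * yields ids[0] == 0x2c and ids[1] == 0x24; id byte 0 always lands in the
+ * lowest byte of the packed u64.
+ */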
+
+static inline int nand_block_pages(struct nand_device *device)
+{
+ return div_down(device->block_size, device->page_size);
+}
+
+static inline int nand_lun_blocks(struct nand_device *device)
+{
+ return device->plane_num * device->block_num;
+}
+
+static inline int nand_target_blocks(struct nand_device *device)
+{
+ return device->lun_num * device->plane_num * device->block_num;
+}
+
+static inline int nand_total_blocks(struct nand_device *device)
+{
+ return device->target_num * device->lun_num * device->plane_num *
+ device->block_num;
+}
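+
+/*
+ * The helpers above just multiply up the device hierarchy. For a
+ * hypothetical device with target_num = 1, lun_num = 1, plane_num = 2,
+ * block_num = 1024, block_size = 128KB and page_size = 2KB:
+ *	nand_block_pages()   = 128KB / 2KB      = 64
+ *	nand_lun_blocks()    = 2 * 1024         = 2048
+ *	nand_target_blocks() = 1 * 2 * 1024     = 2048
+ *	nand_total_blocks()  = 1 * 1 * 2 * 1024 = 2048
+ */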
+
+struct nand_device *nand_get_device(int index);
+#endif /* __NAND_DEVICE_H__ */
diff --git a/drivers/mtd/nandx/core/nfi.h b/drivers/mtd/nandx/core/nfi.h
new file mode 100644
index 0000000000..ba84e73ccc
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_H__
+#define __NFI_H__
+
+struct nfi_format {
+ int page_size;
+ int spare_size;
+ int ecc_req;
+};
+
+struct nfi {
+ int sector_size;
+ int sector_spare_size;
+ int fdm_size; /*for sector*/
+ int fdm_ecc_size;
+ int ecc_strength;
+ int ecc_parity_size; /*for sector*/
+
+ int (*select_chip)(struct nfi *nfi, int cs);
+ int (*set_format)(struct nfi *nfi, struct nfi_format *format);
+ int (*set_timing)(struct nfi *nfi, void *timing, int type);
+ int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
+
+ int (*reset)(struct nfi *nfi);
+ int (*send_cmd)(struct nfi *nfi, short cmd);
+ int (*send_addr)(struct nfi *nfi, int col, int row,
+ int col_cycle, int row_cycle);
+ int (*trigger)(struct nfi *nfi);
+
+ int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
+ int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
+ int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
+ int sectors);
+ int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
+
+ int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
+
+ int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
+ int (*disable_randomizer)(struct nfi *nfi);
+};
+
+struct nfi *nfi_init(struct nfi_resource *res);
+void nfi_exit(struct nfi *nfi);
+
+#endif /* __NFI_H__ */
diff --git a/drivers/mtd/nandx/core/nfi/nfi_base.c b/drivers/mtd/nandx/core/nfi/nfi_base.c
new file mode 100644
index 0000000000..d8679d7aa3
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_base.c
@@ -0,0 +1,1357 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+/**
+ * nfi_base.c - the base logic for nfi to access nand flash
+ *
+ * slc/mlc/tlc could use the same code to access nand;
+ * of course, there is still some work to do.
+ * even for spi nand, there should be a chance to integrate the code together
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "../nand_device.h"
+#include "nfi_regs.h"
+#include "nfiecc.h"
+#include "nfi_base.h"
+
+static const int spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
+#define RAND_SEED_SHIFT(op) \
+ ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
+#define RAND_EN(op) \
+ ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
+
+#define SS_SEED_NUM 128
+static u16 ss_randomizer_seed[SS_SEED_NUM] = {
+ 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
+ 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
+ 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
+ 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
+ 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
+ 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
+ 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
+ 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
+ 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
+ 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
+ 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
+ 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
+ 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
+ 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
+ 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
+ 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
+};
+
+#if 0
+static void dump_register(void *regs)
+{
+ int i;
+
+ pr_info("registers:\n");
+ for (i = 0; i < 0x600; i += 0x10) {
+ pr_info(" address 0x%X : %X %X %X %X\n",
+ (u32)((unsigned long)regs + i),
+ (u32)readl(regs + i),
+ (u32)readl(regs + i + 0x4),
+ (u32)readl(regs + i + 0x8),
+ (u32)readl(regs + i + 0xC));
+ }
+}
+#endif
+
+static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ enum randomizer_op op = RAND_ENCODE;
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ if (!encode)
+ op = RAND_DECODE;
+
+ /* randomizer type and reseed type setup */
+ val = readl(regs + NFI_CNFG);
+ val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
+ writel(val, regs + NFI_CNFG);
+
+ /* randomizer seed and type setup */
+ val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
+ val <<= RAND_SEED_SHIFT(op);
+ val |= RAND_EN(op);
+ writel(val, regs + NFI_RANDOM_CNFG);
+
+ return 0;
+}
+
+static int nfi_disable_randomizer(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
+
+ return 0;
+}
+
+static int nfi_irq_handler(int irq, void *data)
+{
+ struct nfi_base *nb = (struct nfi_base *) data;
+ void *regs = nb->res.nfi_regs;
+ u16 status, en;
+
+ status = readw(regs + NFI_INTR_STA);
+ en = readw(regs + NFI_INTR_EN);
+
+ if (!(status & en))
+ return NAND_IRQ_NONE;
+
+ writew(~status & en, regs + NFI_INTR_EN);
+
+ nandx_event_complete(nb->done);
+
+ return NAND_IRQ_HANDLED;
+}
+
+static int nfi_select_chip(struct nfi *nfi, int cs)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ writel(cs, nb->res.nfi_regs + NFI_CSEL);
+
+ return 0;
+}
+
+static inline void set_op_mode(void *regs, u32 mode)
+{
+ u32 val = readl(regs + NFI_CNFG);
+
+ val &= ~CNFG_OP_MODE_MASK;
+ val |= mode;
+
+ writel(val, regs + NFI_CNFG);
+}
+
+static int nfi_reset(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret, val;
+
+	/* Reset the NFI to clear all registers and force the NFI
+	 * master to terminate early
+	 */
+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+
+ /* check state of NFI internal FSM and NAND interface FSM */
+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
+ !(val & MASTER_BUS_BUSY),
+ 10, NFI_TIMEOUT);
+ if (ret)
+ pr_info("nfi reset timeout...\n");
+
+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+ writew(STAR_DE, regs + NFI_STRDATA);
+
+ return ret;
+}
+
+static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u32 start_sector = div_down(nb->col, nfi->sector_size);
+ u32 data_mark_pos;
+ u8 temp;
+
+ /* raw access, no need to do swap. */
+ if (!nb->ecc_en)
+ return;
+
+ if (!buf || !fdm)
+ return;
+
+ if (nb->bad_mark_ctrl.sector < start_sector ||
+ nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
+ return;
+
+ data_mark_pos = nb->bad_mark_ctrl.position +
+ (nb->bad_mark_ctrl.sector - start_sector) *
+ nfi->sector_size;
+
+ temp = *fdm;
+ *fdm = *(buf + data_mark_pos);
+ *(buf + data_mark_pos) = temp;
+}
+
+static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u8 *pos;
+
+ if (!fdm)
+ return NULL;
+
+	/* map the sector's FDM data to the free oob:
+	 * the beginning of the oob area stores the FDM data of the bad
+	 * mark sector
+	 */
+ if (sector < nb->bad_mark_ctrl.sector)
+ pos = fdm + (sector + 1) * nfi->fdm_size;
+ else if (sector == nb->bad_mark_ctrl.sector)
+ pos = fdm;
+ else
+ pos = fdm + sector * nfi->fdm_size;
+
+ return pos;
+
+}
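+
+/*
+ * Example of the mapping above (hypothetical layout): with fdm_size = 8
+ * and bad_mark_ctrl.sector = 3,
+ *	sector 3 (the bad mark sector) -> fdm + 0
+ *	sector 0                       -> fdm + 8
+ *	sector 2                       -> fdm + 24
+ *	sector 4                       -> fdm + 32
+ * i.e. the bad mark sector's FDM always sits at the start of the oob buffer.
+ */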
+
+static void set_bad_mark_ctrl(struct nfi_base *nb)
+{
+ int temp, page_size = nb->format.page_size;
+
+ nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
+ nb->bad_mark_ctrl.fdm_shift = fdm_shift;
+
+ temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
+ nb->bad_mark_ctrl.sector = div_down(page_size, temp);
+ nb->bad_mark_ctrl.position = reminder(page_size, temp);
+}
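+
+/*
+ * Worked example (assuming a 2KB page, sector_size = 512 and
+ * sector_spare_size = 16): temp = 512 + 16 = 528, so
+ *	bad_mark_ctrl.sector   = 2048 / 528 = 3
+ *	bad_mark_ctrl.position = 2048 % 528 = 464
+ * i.e. the factory bad block mark at column 2048 of the raw page falls
+ * into sector 3, at offset 464 of that sector's data, which is the byte
+ * bad_mark_swap() exchanges with the FDM.
+ */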
+
+/* NOTE: check whether page_size is valid in the future */
+static int setup_format(struct nfi_base *nb, int spare_idx)
+{
+ struct nfi *nfi = &nb->nfi;
+ u32 page_size = nb->format.page_size;
+ u32 val;
+
+ switch (page_size) {
+ case 512:
+ val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+
+ case KB(2):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_512_2K;
+
+ break;
+
+ case KB(4):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_2K_4K;
+
+ break;
+
+ case KB(8):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_4K_8K;
+
+ break;
+
+ case KB(16):
+ val = PAGEFMT_8K_16K;
+ break;
+
+ default:
+ pr_info("invalid page len: %d\n", page_size);
+ return -EINVAL;
+ }
+
+ val |= spare_idx << PAGEFMT_SPARE_SHIFT;
+ val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
+ val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
+
+ if (nb->custom_sector_en) {
+ val = nfi->sector_spare_size + nfi->sector_size;
+ val |= SECCUS_SIZE_EN;
+ writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
+ }
+
+ return 0;
+}
+
+static int adjust_spare(struct nfi_base *nb, int *spare)
+{
+ int multi = nb->nfi.sector_size == 512 ? 1 : 2;
+ int i, count = nb->caps->spare_size_num;
+
+ if (*spare >= nb->caps->spare_size[count - 1] * multi) {
+ *spare = nb->caps->spare_size[count - 1] * multi;
+ return count - 1;
+ }
+
+ if (*spare < nb->caps->spare_size[0] * multi)
+ return -EINVAL;
+
+ for (i = 1; i < count; i++) {
+ if (*spare < nb->caps->spare_size[i] * multi) {
+ *spare = nb->caps->spare_size[i - 1] * multi;
+ return i - 1;
+ }
+ }
+
+ return -EINVAL;
+}
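+
+/*
+ * adjust_spare() rounds the per-sector spare size down to the nearest
+ * supported entry. For example, with the mt7622 spare table above
+ * ({16, 26, 27, 28}) and 512-byte sectors (multi = 1): a requested 27
+ * stays 27 (index 2), 20 is rounded down to 16 (index 0), anything >= 28
+ * is clamped to 28 (index 3), and a request below 16 returns -EINVAL.
+ */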
+
+static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfiecc *ecc = nb->ecc;
+ int ecc_strength = format->ecc_req;
+ int min_fdm, min_ecc, max_ecc;
+ u32 temp, page_sectors;
+ int spare_idx = 0;
+
+ if (!nb->buf) {
+#if NANDX_BULK_IO_USE_DRAM
+ nb->buf = NANDX_NFI_BUF_ADDR;
+#else
+ nb->buf = mem_alloc(1, format->page_size + format->spare_size);
+#endif
+ if (!nb->buf)
+ return -ENOMEM;
+ }
+
+ nb->format = *format;
+
+	/* ToBeFixed: for spi nand, the sector size is now 512,
+	 * it should be the same as slc.
+ */
+ nfi->sector_size = 512;
+ /* format->ecc_req is the requirement per 1KB */
+ ecc_strength >>= 1;
+
+ page_sectors = div_down(format->page_size, nfi->sector_size);
+ nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
+
+ if (!nb->custom_sector_en) {
+ spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
+ if (spare_idx < 0)
+ return -EINVAL;
+ }
+
+ /* calculate ecc strength and fdm size */
+ temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
+ min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+ min_ecc = ecc->adjust_strength(ecc, min_ecc);
+ if (min_ecc < 0)
+ return -EINVAL;
+
+ temp = div_up(nb->res.min_oob_req, page_sectors);
+ temp = (nfi->sector_spare_size - temp) * 8;
+ max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+ max_ecc = ecc->adjust_strength(ecc, max_ecc);
+ if (max_ecc < 0)
+ return -EINVAL;
+
+ temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
+ temp = nfi->sector_spare_size - temp;
+ min_fdm = min_t(u32, temp, (u32)nb->caps->max_fdm_size);
+
+ if (ecc_strength > max_ecc) {
+ pr_info("required ecc strength %d, max supported %d\n",
+ ecc_strength, max_ecc);
+ nfi->ecc_strength = max_ecc;
+ nfi->fdm_size = min_fdm;
+ } else if (format->ecc_req < min_ecc) {
+ nfi->ecc_strength = min_ecc;
+ nfi->fdm_size = nb->caps->max_fdm_size;
+ } else {
+ ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
+ if (ecc_strength < 0)
+ return -EINVAL;
+
+ nfi->ecc_strength = ecc_strength;
+ temp = div_up(ecc_strength * nb->caps->ecc_parity_bits, 8);
+ nfi->fdm_size = nfi->sector_spare_size - temp;
+ }
+
+ nb->page_sectors = div_down(format->page_size, nfi->sector_size);
+
+	/* some ICs have a fixed fdm_ecc_size; if not assigned, set it to fdm_size */
+ nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
+
+ nfi->ecc_parity_size = div_up(nfi->ecc_strength *
+ nb->caps->ecc_parity_bits,
+ 8);
+ set_bad_mark_ctrl(nb);
+
+ pr_debug("sector_size: %d\n", nfi->sector_size);
+ pr_debug("sector_spare_size: %d\n", nfi->sector_spare_size);
+ pr_debug("fdm_size: %d\n", nfi->fdm_size);
+ pr_debug("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
+ pr_debug("ecc_strength: %d\n", nfi->ecc_strength);
+ pr_debug("ecc_parity_size: %d\n", nfi->ecc_parity_size);
+
+ return setup_format(nb, spare_idx);
+}
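+
+/*
+ * Sizing sketch for the routine above (assumed numbers; the real values
+ * come from caps and the ecc engine): with 512-byte sectors,
+ * format->ecc_req is halved because it is specified per 1KB, and each
+ * sector's spare is split between FDM bytes and parity bytes:
+ *	ecc_parity_size = div_up(ecc_strength * caps->ecc_parity_bits, 8)
+ *	fdm_size        = sector_spare_size - ecc_parity_size
+ * e.g. an assumed ecc_strength of 4 with caps->ecc_parity_bits = 13 costs
+ * div_up(4 * 13, 8) = 7 parity bytes per sector.
+ */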
+
+static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int ret = 0;
+
+ switch (cmd) {
+ case NFI_CTRL_DMA:
+ nb->dma_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_AUTOFORMAT:
+ nb->auto_format = *(bool *)args;
+ break;
+
+ case NFI_CTRL_NFI_IRQ:
+ nb->nfi_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_PAGE_IRQ:
+ nb->page_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_BAD_MARK_SWAP:
+ nb->bad_mark_swap_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC:
+ nb->ecc_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_MODE:
+ nb->ecc_mode = *(enum nfiecc_mode *)args;
+ break;
+
+ case NFI_CTRL_ECC_CLOCK:
+		/* NOTE: it seems there is nothing to do here;
+		 * if a new IC needs it, just add that logic
+		 */
+ nb->ecc_clk_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_IRQ:
+ nb->ecc_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_DECODE_MODE:
+ nb->ecc_deccon = *(enum nfiecc_deccon *)args;
+ break;
+
+ default:
+ pr_info("invalid arguments.\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ pr_debug("%s: set cmd(%d) to %d\n", __func__, cmd, *(int *)args);
+ return ret;
+}
+
+static int nfi_send_cmd(struct nfi *nfi, short cmd)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ pr_debug("%s: cmd 0x%x\n", __func__, cmd);
+
+ if (cmd < 0)
+ return -EINVAL;
+
+ set_op_mode(regs, nb->op_mode);
+
+ writel(cmd, regs + NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
+ val, !(val & STA_CMD),
+ 5, NFI_TIMEOUT);
+ if (ret)
+ pr_info("send cmd 0x%x timeout\n", cmd);
+
+ return ret;
+}
+
+static int nfi_send_addr(struct nfi *nfi, int col, int row,
+ int col_cycle, int row_cycle)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
+ __func__, col, row, col_cycle, row_cycle);
+
+ nb->col = col;
+ nb->row = row;
+
+ writel(col, regs + NFI_COLADDR);
+ writel(row, regs + NFI_ROWADDR);
+ writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
+ val, !(val & STA_ADDR),
+ 5, NFI_TIMEOUT);
+ if (ret)
+ pr_info("send address timeout\n");
+
+ return ret;
+}
+
+static int nfi_trigger(struct nfi *nfi)
+{
+	/* Nothing to do. */
+ return 0;
+}
+
+static inline int wait_io_ready(void *regs)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
+ val, val & PIO_DI_RDY,
+ 2, NFI_TIMEOUT);
+ if (ret)
+ pr_info("wait io ready timeout\n");
+
+ return ret;
+}
+
+static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ writel(0xf1, regs + NFI_CNRNB);
+ nandx_event_init(nb->done);
+
+ writel(INTR_BUSY_RETURN_EN, (void *)(regs + NFI_INTR_EN));
+
+	/*
+	 * Check whether the NAND is already ready, to avoid issues
+	 * caused by a missed IRQ event.
+	 */
+ val = readl(regs + NFI_STA);
+ if (val & STA_BUSY2READY) {
+ readl(regs + NFI_INTR_STA);
+ writel(0, (void *)(regs + NFI_INTR_EN));
+ return 0;
+ }
+
+ ret = nandx_event_wait_complete(nb->done, timeout);
+
+ writew(0, regs + NFI_CNRNB);
+ return ret;
+}
+
+static void wait_ready_twhr2(struct nfi_base *nb, u32 timeout)
+{
+	/* NOTE: this is for TLC */
+}
+
+static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ writel(0x21, regs + NFI_CNRNB);
+ ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
+ val & STA_BUSY2READY,
+ 2, timeout);
+ writew(0, regs + NFI_CNRNB);
+
+ return ret;
+}
+
+static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int ret;
+
+ switch (type) {
+ case NAND_WAIT_IRQ:
+ if (nb->nfi_irq_en)
+ ret = wait_ready_irq(nb, timeout);
+ else
+ ret = -EINVAL;
+
+ break;
+
+ case NAND_WAIT_POLLING:
+ ret = wait_ready_poll(nb, timeout);
+ break;
+
+ case NAND_WAIT_TWHR2:
+ wait_ready_twhr2(nb, timeout);
+ ret = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ pr_info("%s: type 0x%x, timeout 0x%x\n",
+ __func__, type, timeout);
+
+ return ret;
+}
+
+static int enable_ecc_decode(struct nfi_base *nb, int sectors)
+{
+ struct nfi *nfi = &nb->nfi;
+ struct nfiecc *ecc = nb->ecc;
+
+ ecc->config.op = ECC_DECODE;
+ ecc->config.mode = nb->ecc_mode;
+ ecc->config.deccon = nb->ecc_deccon;
+ ecc->config.sectors = sectors;
+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
+ ecc->config.strength = nfi->ecc_strength;
+
+ return ecc->enable(ecc);
+}
+
+static int enable_ecc_encode(struct nfi_base *nb)
+{
+ struct nfiecc *ecc = nb->ecc;
+ struct nfi *nfi = &nb->nfi;
+
+ ecc->config.op = ECC_ENCODE;
+ ecc->config.mode = nb->ecc_mode;
+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
+ ecc->config.strength = nfi->ecc_strength;
+
+ return ecc->enable(ecc);
+}
+
+static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
+ int sectors)
+{
+ void *regs = nb->res.nfi_regs;
+ int j, i = start_sector;
+ u32 vall, valm;
+ u8 *buf = fdm;
+
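+	/* Each sector has a pair of FDM registers: NFI_FDML(i) holds FDM
+	 * bytes 0-3 and NFI_FDMM(i) holds bytes 4-7 of that sector.
+	 */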
+ for (; i < start_sector + sectors; i++) {
+ if (nb->bad_mark_swap_en)
+ buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
+
+ vall = readl(regs + NFI_FDML(i));
+ valm = readl(regs + NFI_FDMM(i));
+
+ for (j = 0; j < nb->nfi.fdm_size; j++)
+ *buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
+ }
+}
+
+static void write_fdm(struct nfi_base *nb, u8 *fdm)
+{
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 vall, valm;
+ int i, j;
+ u8 *buf = fdm;
+
+ for (i = 0; i < nb->page_sectors; i++) {
+ if (nb->bad_mark_swap_en)
+ buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
+
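+		/* Pack this sector's FDM bytes into the FDML/FDMM register
+		 * pair, padding unused byte lanes with 0xff.
+		 */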
+ vall = 0;
+ for (j = 0; j < 4; j++)
+ vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
+ writel(vall, regs + NFI_FDML(i));
+
+ valm = 0;
+ for (j = 0; j < 4; j++)
+ valm |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
+ writel(valm, regs + NFI_FDMM(i));
+ }
+}
+
+/* NOTE: PIO does not use auto format */
+static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfiecc_status ecc_status;
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 val, bitflips = 0;
+ int len, ret, i;
+ u8 *buf;
+
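+	/* Switch the NFI FIFO to byte-wise access so the data register
+	 * can be drained one byte at a time.
+	 */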
+ val = readl(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ writel(val, regs + NFI_CNFG);
+
+ len = nfi->sector_size + nfi->sector_spare_size;
+ len *= sectors;
+
+ for (i = 0; i < len; i++) {
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+
+ nb->buf[i] = readb(regs + NFI_DATAR);
+ }
+
+	/* TODO: add error handling for the auto-format setting of PIO */
+ if (nb->ecc_en) {
+ for (i = 0; i < sectors; i++) {
+ buf = nb->buf + i * (nfi->sector_size +
+ nfi->sector_spare_size);
+ ret = nb->ecc->correct_data(nb->ecc, &ecc_status,
+ buf, i);
+ if (data)
+ memcpy(data + i * nfi->sector_size,
+ buf, nfi->sector_size);
+ if (fdm)
+ memcpy(fdm + i * nfi->fdm_size,
+ buf + nfi->sector_size, nfi->fdm_size);
+ if (ret) {
+ ret = nb->ecc->decode_status(nb->ecc, i, 1);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max_t(int, (int)bitflips, ret);
+ }
+ }
+
+ return bitflips;
+ }
+
+	/* raw read: only data is non-NULL, and its length should be len */
+ if (data)
+ memcpy(data, nb->buf, len);
+
+ return 0;
+}
+
+static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 i, val;
+ int len, ret;
+
+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ writew(val, regs + NFI_CNFG);
+
+ len = nb->ecc_en ? nfi->sector_size :
+ nfi->sector_size + nfi->sector_spare_size;
+ len *= sectors;
+
+	/* data must not be NULL;
+	 * if ECC is enabled, the FDM has already been written in the prepare step
+	 */
+ for (i = 0; i < len; i++) {
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+ writeb(data[i], regs + NFI_DATAW);
+ }
+
+ return 0;
+}
+
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ u32 empty = readl(nb->res.nfi_regs + NFI_STA) & STA_EMP_PAGE;
+
+ if (empty) {
+ pr_info("empty page!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+ u8 *fdm, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 len = nb->nfi.sector_size * sectors;
+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
+ void *dma_addr;
+ u32 val;
+ int ret;
+
+ nb->rw_sectors = sectors;
+
+ if (irq_en) {
+ nandx_event_init(nb->done);
+ writel(INTR_AHB_DONE_EN, regs + NFI_INTR_EN);
+ }
+
+ val = readw(regs + NFI_CNFG);
+ if (read)
+ val |= CNFG_READ_EN;
+ else
+ val &= ~CNFG_READ_EN;
+
+	/* By design, auto format is enabled whenever ECC is enabled */
+ if (nb->ecc_en) {
+ val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
+
+ if (read)
+ ret = enable_ecc_decode(nb, sectors);
+ else
+ ret = enable_ecc_encode(nb);
+
+ if (ret) {
+ pr_info("%s: ecc enable %s fail!\n", __func__,
+ read ? "decode" : "encode");
+ return ret;
+ }
+ } else {
+ val &= ~(CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN);
+ }
+
+ if (!read && nb->bad_mark_swap_en)
+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+ if (!nb->ecc_en && read)
+ len += sectors * nb->nfi.sector_spare_size;
+
+ if (nb->dma_en) {
+ val |= CNFG_DMA_BURST_EN | CNFG_AHB;
+
+ if (read) {
+ dma_addr = (void *)(unsigned long)nandx_dma_map(
+ nb->res.dev, nb->buf,
+ (u64)len, NDMA_FROM_DEV);
+ } else {
+ memcpy(nb->buf, data, len);
+ dma_addr = (void *)(unsigned long)nandx_dma_map(
+ nb->res.dev, nb->buf,
+ (u64)len, NDMA_TO_DEV);
+ }
+
+ writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
+
+ nb->access_len = len;
+ nb->dma_addr = dma_addr;
+ }
+
+ if (nb->ecc_en && !read && fdm)
+ write_fdm(nb, fdm);
+
+ writew(val, regs + NFI_CNFG);
+ /* setup R/W sector number */
+ writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
+
+ return 0;
+}
+
+static void rw_trigger(struct nfi_base *nb, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ val = read ? CON_BRD : CON_BWR;
+ val |= readl(regs + NFI_CON);
+ writel(val, regs + NFI_CON);
+
+ writel(STAR_EN, regs + NFI_STRDATA);
+}
+
+static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
+ int ret;
+ u32 val;
+
+ if (irq_en) {
+ ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
+ if (!ret) {
+ writew(0, regs + NFI_INTR_EN);
+ return ret;
+ }
+ }
+
+ if (read) {
+ ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
+ ADDRCNTR_SEC(val) >=
+ (u32)sectors,
+ 2, NFI_TIMEOUT);
+		/* HW issue: if we do not wait for AHB done, we must poll the bus-busy flag */
+ if (!ret && !irq_en)
+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
+ val,
+ !(val &
+ MASTER_BUS_BUSY),
+ 2, NFI_TIMEOUT);
+ } else {
+ ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
+ ADDRCNTR_SEC(val) >=
+ (u32)sectors,
+ 2, NFI_TIMEOUT);
+ }
+
+ if (ret) {
+ pr_info("do page %s timeout\n", read ? "read" : "write");
+ return ret;
+ }
+
+ if (read && nb->ecc_en) {
+ ret = nb->ecc->wait_done(nb->ecc);
+ if (ret)
+ return ret;
+
+ return nb->ecc->decode_status(nb->ecc, 0, sectors);
+ }
+
+ return 0;
+}
+
+static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+ bool read)
+{
+ if (read && nb->dma_en && nb->ecc_en && fdm)
+ read_fdm(nb, fdm, 0, sectors);
+
+ if (!nb->dma_en) {
+ if (read)
+ return pio_rx_data(nb, data, fdm, sectors);
+
+ return pio_tx_data(nb, data, fdm, sectors);
+ }
+
+ return 0;
+}
+
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+ bool read)
+{
+ int data_len = 0;
+ bool is_empty;
+
+ if (nb->dma_en) {
+ if (read) {
+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+ (u64)nb->access_len, NDMA_FROM_DEV);
+
+ if (data) {
+ data_len = nb->rw_sectors * nb->nfi.sector_size;
+ memcpy(data, nb->buf, data_len);
+ }
+
+ if (fdm)
+ memcpy(fdm, nb->buf + data_len,
+ nb->access_len - data_len);
+
+ if (nb->read_status == -ENANDREAD) {
+ is_empty = nb->is_page_empty(nb, data, fdm,
+ nb->rw_sectors);
+ if (is_empty)
+ nb->read_status = 0;
+ }
+ } else {
+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+ (u64)nb->access_len, NDMA_TO_DEV);
+ }
+ }
+
+	/* Whether reading or writing, check whether a bad-mark swap is needed;
+	 * for a write, the original data needs to be restored
+	 */
+ if (nb->bad_mark_swap_en)
+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+ if (nb->ecc_en)
+ nb->ecc->disable(nb->ecc);
+
+ writel(0, nb->res.nfi_regs + NFI_CNFG);
+ writel(0, nb->res.nfi_regs + NFI_CON);
+}
+
+static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int bitflips = 0, ret;
+
+ pr_debug("%s: read page#%d\n", __func__, nb->row);
+ pr_debug("%s: data address 0x%x, fdm address 0x%x, sectors 0x%x\n",
+ __func__, (u32)((unsigned long)data),
+ (u32)((unsigned long)fdm), sectors);
+
+ nb->read_status = 0;
+
+ ret = nb->rw_prepare(nb, sectors, data, fdm, true);
+ if (ret)
+ return ret;
+
+ nb->rw_trigger(nb, true);
+
+ if (nb->dma_en) {
+ ret = nb->rw_wait_done(nb, sectors, true);
+ if (ret > 0)
+ bitflips = ret;
+ else if (ret == -ENANDREAD)
+ nb->read_status = -ENANDREAD;
+ else if (ret < 0)
+ goto complete;
+
+ }
+
+ ret = nb->rw_data(nb, data, fdm, sectors, true);
+ if (ret > 0)
+ ret = max_t(int, ret, bitflips);
+
+complete:
+ nb->rw_complete(nb, data, fdm, true);
+
+ if (nb->read_status == -ENANDREAD)
+ return -ENANDREAD;
+
+ return ret;
+}
+
+int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
+ int ret;
+
+ pr_debug("%s: data address 0x%x, fdm address 0x%x\n",
+ __func__, (int)((unsigned long)data),
+ (int)((unsigned long)fdm));
+
+ ret = nb->rw_prepare(nb, sectors, data, fdm, false);
+ if (ret)
+ return ret;
+
+ nb->rw_trigger(nb, false);
+
+ ret = nb->rw_data(nb, data, fdm, sectors, false);
+ if (ret)
+ return ret;
+
+ ret = nb->rw_wait_done(nb, sectors, false);
+
+ nb->rw_complete(nb, data, fdm, false);
+
+ return ret;
+}
+
+static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int i, ret;
+ u32 val;
+
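+	/* If the NFI state machine is not already in the custom-data state,
+	 * (re)configure byte-wise access and kick off a transfer first.
+	 */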
+ for (i = 0; i < count; i++) {
+ val = readl(regs + NFI_STA) & NFI_FSM_MASK;
+ if (val != NFI_FSM_CUSTDATA) {
+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ if (read)
+ val |= CNFG_READ_EN;
+ writew(val, regs + NFI_CNFG);
+
+ val = div_up(count, nfi->sector_size);
+ val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
+ writel(val, regs + NFI_CON);
+
+ writew(STAR_EN, regs + NFI_STRDATA);
+ }
+
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+
+ if (read)
+ data[i] = readb(regs + NFI_DATAR);
+ else
+ writeb(data[i], regs + NFI_DATAW);
+ }
+
+ writel(0, nb->res.nfi_regs + NFI_CNFG);
+
+ return 0;
+}
+
+static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ return nfi_rw_bytes(nfi, data, count, true);
+}
+
+static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ return nfi_rw_bytes(nfi, data, count, false);
+}
+
+/* As the register map says, a SW reset or NAND interface change
+ * can only be issued when the flash macro is idle
+ */
+static inline int wait_flash_macro_idle(void *regs)
+{
+ u32 val;
+
+ return readl_poll_timeout_atomic(regs + NFI_STA, val,
+ val & FLASH_MACRO_IDLE, 2,
+ NFI_TIMEOUT);
+}
+
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
+
+static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
+{
+ struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
+ u32 rate, val;
+ int ret;
+
+ ret = wait_flash_macro_idle(regs);
+ if (ret)
+ return ret;
+
+	/* convert the clock rate to kHz */
+ rate = nb->res.clock_1x / 1000;
+
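+	/* The SDR timing values below are given in ns; with the clock rate in
+	 * kHz, cycles = div_up(t_ns * rate_khz, 1000000). For example, 10 ns
+	 * at a 26000 kHz clock rounds up to 1 cycle.
+	 */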
+ tpoecs = max_t(u16, sdr->tALH, sdr->tCLH);
+ tpoecs = div_up(tpoecs * rate, 1000000);
+ tpoecs &= 0xf;
+
+ tprecs = max_t(u16, sdr->tCLS, sdr->tALS);
+ tprecs = div_up(tprecs * rate, 1000000);
+ tprecs &= 0x3f;
+
+ /* tc2r is in unit of 2T */
+ tc2r = div_up(sdr->tCR * rate, 1000000);
+ tc2r = div_down(tc2r, 2);
+ tc2r &= 0x3f;
+
+ tw2r = div_up(sdr->tWHR * rate, 1000000);
+ tw2r = div_down(tw2r, 2);
+ tw2r &= 0xf;
+
+ twh = max_t(u16, sdr->tREH, sdr->tWH);
+ twh = div_up(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+ twst = div_up(sdr->tWP * rate, 1000000) - 1;
+ twst &= 0xf;
+
+ trlt = div_up(sdr->tRP * rate, 1000000) - 1;
+ trlt &= 0xf;
+
+	/* If tREA is bigger than tRP, set up the strobe select here */
+ if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
+ tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
+ tstrobe = div_up(tstrobe * rate, 1000000);
+ val = readl(regs + NFI_DEBUG_CON1);
+ val &= ~STROBE_MASK;
+ val |= tstrobe << STROBE_SHIFT;
+ writel(val, regs + NFI_DEBUG_CON1);
+ }
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: tpoecs, minimum required time for CS post pulling down after
+ * accessing the device
+ * 27:22: tprecs, minimum required time for CS pre pulling down before
+ * accessing the device
+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+ * 11:08: twh, write enable hold time
+ * 07:04: twst, write wait states
+ * 03:00: trlt, read wait states
+ */
+ val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+ pr_info("acctiming: 0x%x\n", val);
+ writel(val, regs + NFI_ACCCON);
+
+ /* set NAND type */
+ writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
+
+ return ret;
+}
+
+static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+ switch (type) {
+ case NAND_TIMING_SDR:
+ return nfi_set_sdr_timing(nfi, timing, type);
+
+ /* NOTE: for mlc/tlc */
+ case NAND_TIMING_SYNC_DDR:
+ case NAND_TIMING_TOGGLE_DDR:
+ case NAND_TIMING_NVDDR2:
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void set_nfi_funcs(struct nfi *nfi)
+{
+ nfi->select_chip = nfi_select_chip;
+ nfi->set_format = nfi_set_format;
+ nfi->nfi_ctrl = nfi_ctrl;
+ nfi->set_timing = nfi_set_timing;
+
+ nfi->reset = nfi_reset;
+ nfi->send_cmd = nfi_send_cmd;
+ nfi->send_addr = nfi_send_addr;
+ nfi->trigger = nfi_trigger;
+
+ nfi->write_page = nfi_write_page;
+ nfi->write_bytes = nfi_write_bytes;
+ nfi->read_sectors = nfi_read_sectors;
+ nfi->read_bytes = nfi_read_bytes;
+
+ nfi->wait_ready = nfi_wait_ready;
+
+ nfi->enable_randomizer = nfi_enable_randomizer;
+ nfi->disable_randomizer = nfi_disable_randomizer;
+}
+
+static struct nfi_caps nfi_caps_mt7622 = {
+ .max_fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .ecc_parity_bits = 13,
+ .spare_size = spare_size_mt7622,
+ .spare_size_num = 4,
+};
+
+static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
+{
+	/* NOTE: add other ICs' data here */
+ return &nfi_caps_mt7622;
+}
+
+static void set_nfi_base_params(struct nfi_base *nb)
+{
+ nb->ecc_en = false;
+ nb->dma_en = false;
+ nb->nfi_irq_en = false;
+ nb->ecc_irq_en = false;
+ nb->page_irq_en = false;
+ nb->ecc_clk_en = false;
+ nb->randomize_en = false;
+ nb->custom_sector_en = false;
+ nb->bad_mark_swap_en = false;
+
+ nb->op_mode = CNFG_CUSTOM_MODE;
+ nb->ecc_deccon = ECC_DEC_CORRECT;
+ nb->ecc_mode = ECC_NFI_MODE;
+
+ nb->done = nandx_event_create();
+ nb->caps = nfi_get_match_data(nb->res.ic_ver);
+
+ nb->set_op_mode = set_op_mode;
+ nb->is_page_empty = is_page_empty;
+
+ nb->rw_prepare = rw_prepare;
+ nb->rw_trigger = rw_trigger;
+ nb->rw_wait_done = rw_wait_done;
+ nb->rw_data = rw_data;
+ nb->rw_complete = rw_complete;
+}
+
+struct nfi *__weak nfi_extend_init(struct nfi_base *nb)
+{
+ return &nb->nfi;
+}
+
+void __weak nfi_extend_exit(struct nfi_base *nb)
+{
+ mem_free(nb);
+}
+
+struct nfi *nfi_init(struct nfi_resource *res)
+{
+ struct nfiecc_resource ecc_res;
+ struct nfi_base *nb;
+ struct nfiecc *ecc;
+ struct nfi *nfi;
+ int ret;
+
+ nb = mem_alloc(1, sizeof(struct nfi_base));
+ if (!nb) {
+ pr_info("nfi alloc memory fail @%s.\n", __func__);
+ return NULL;
+ }
+
+ nb->res = *res;
+
+ ret = nandx_irq_register(res->dev, res->nfi_irq_id, nfi_irq_handler,
+ "mtk_nand", nb);
+ if (ret) {
+ pr_info("nfi irq register failed!\n");
+ goto error;
+ }
+
+	/* fill ECC parameters and init ECC */
+ ecc_res.ic_ver = nb->res.ic_ver;
+ ecc_res.dev = nb->res.dev;
+ ecc_res.irq_id = nb->res.ecc_irq_id;
+ ecc_res.regs = nb->res.ecc_regs;
+ ecc = nfiecc_init(&ecc_res);
+ if (!ecc) {
+ pr_info("nfiecc init fail.\n");
+		goto error;
+ }
+
+ nb->ecc = ecc;
+
+ set_nfi_base_params(nb);
+ set_nfi_funcs(&nb->nfi);
+
+	/* Assign a temporary sector size for reading the ID & parameter page.
+	 * A new value may be assigned later.
+	 */
+ nb->nfi.sector_size = 512;
+
+	/* Apply a default timing; as discussed, this is the only thing
+	 * we need to do for NFI init. If more is needed later,
+	 * a dedicated function can be added.
+	 */
+ writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
+
+ nfi = nfi_extend_init(nb);
+ if (nfi)
+ return nfi;
+
+error:
+ mem_free(nb);
+ return NULL;
+}
+
+void nfi_exit(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ nandx_event_destroy(nb->done);
+ nfiecc_exit(nb->ecc);
+#if !NANDX_BULK_IO_USE_DRAM
+ mem_free(nb->buf);
+#endif
+ nfi_extend_exit(nb);
+}
+
diff --git a/drivers/mtd/nandx/core/nfi/nfi_base.h b/drivers/mtd/nandx/core/nfi/nfi_base.h
new file mode 100644
index 0000000000..ae894eaa31
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_base.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_BASE_H__
+#define __NFI_BASE_H__
+
+#define NFI_TIMEOUT 1000000
+
+enum randomizer_op {
+ RAND_ENCODE,
+ RAND_DECODE
+};
+
+struct bad_mark_ctrl {
+ void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
+ u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
+ u32 sector;
+ u32 position;
+};
+
+struct nfi_caps {
+ u8 max_fdm_size;
+ u8 fdm_ecc_size;
+ u8 ecc_parity_bits;
+ const int *spare_size;
+ u32 spare_size_num;
+};
+
+struct nfi_base {
+ struct nfi nfi;
+ struct nfi_resource res;
+ struct nfiecc *ecc;
+ struct nfi_format format;
+ struct nfi_caps *caps;
+ struct bad_mark_ctrl bad_mark_ctrl;
+
+ /* page_size + spare_size */
+ u8 *buf;
+
+ /* used for spi nand */
+ u8 cmd_mode;
+ u32 op_mode;
+
+ int page_sectors;
+
+ void *done;
+
+ /* for read/write */
+ int col;
+ int row;
+ int access_len;
+ int rw_sectors;
+ void *dma_addr;
+ int read_status;
+
+ bool dma_en;
+ bool nfi_irq_en;
+ bool page_irq_en;
+ bool auto_format;
+ bool ecc_en;
+ bool ecc_irq_en;
+ bool ecc_clk_en;
+ bool randomize_en;
+ bool custom_sector_en;
+ bool bad_mark_swap_en;
+
+ enum nfiecc_deccon ecc_deccon;
+ enum nfiecc_mode ecc_mode;
+
+ void (*set_op_mode)(void *regs, u32 mode);
+ bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors);
+
+ int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
+ bool read);
+ void (*rw_trigger)(struct nfi_base *nb, bool read);
+ int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
+ int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+ bool read);
+ void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
+};
+
+static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
+{
+ return container_of(nfi, struct nfi_base, nfi);
+}
+
+struct nfi *nfi_extend_init(struct nfi_base *nb);
+void nfi_extend_exit(struct nfi_base *nb);
+
+#endif /* __NFI_BASE_H__ */
diff --git a/drivers/mtd/nandx/core/nfi/nfi_regs.h b/drivers/mtd/nandx/core/nfi/nfi_regs.h
new file mode 100644
index 0000000000..ba4868acc8
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_regs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_REGS_H__
+#define __NFI_REGS_H__
+
+#define NFI_CNFG 0x000
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_RESEED_SEC_EN BIT(4)
+#define CNFG_RAND_SEL BIT(5)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_RAND_MASK GENMASK(5, 4)
+#define CNFG_OP_MODE_MASK GENMASK(14, 12)
+#define CNFG_IDLE_MOD 0
+#define CNFG_READ_MODE (1 << 12)
+#define CNFG_SINGLE_READ_MODE (2 << 12)
+#define CNFG_PROGRAM_MODE (3 << 12)
+#define CNFG_ERASE_MODE (4 << 12)
+#define CNFG_RESET_MODE (5 << 12)
+#define CNFG_CUSTOM_MODE (6 << 12)
+#define NFI_PAGEFMT 0x004
+#define PAGEFMT_SPARE_SHIFT 4
+#define PAGEFMT_FDM_ECC_SHIFT 12
+#define PAGEFMT_FDM_SHIFT 8
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K 0
+#define PAGEFMT_2K_4K 1
+#define PAGEFMT_4K_8K 2
+#define PAGEFMT_8K_16K 3
+#define NFI_CON 0x008
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8)
+#define CON_BWR BIT(9)
+#define CON_SEC_SHIFT 12
+#define NFI_ACCCON 0x00c
+#define NFI_INTR_EN 0x010
+#define INTR_BUSY_RETURN_EN BIT(4)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA 0x014
+#define NFI_CMD 0x020
+#define NFI_ADDRNOB 0x030
+#define ROW_SHIFT 4
+#define NFI_COLADDR 0x034
+#define NFI_ROWADDR 0x038
+#define NFI_STRDATA 0x040
+#define STAR_EN 1
+#define STAR_DE 0
+#define NFI_CNRNB 0x044
+#define NFI_DATAW 0x050
+#define NFI_DATAR 0x054
+#define NFI_PIO_DIRDY 0x058
+#define PIO_DI_RDY 1
+#define NFI_STA 0x060
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define FLASH_MACRO_IDLE BIT(5)
+#define STA_BUSY BIT(8)
+#define STA_BUSY2READY BIT(9)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK GENMASK(19, 16)
+#define NAND_FSM_MASK GENMASK(29, 23)
+#define NFI_ADDRCNTR 0x070
+#define CNTR_VALID_MASK GENMASK(16, 0)
+#define CNTR_MASK GENMASK(15, 12)
+#define ADDRCNTR_SEC_SHIFT 12
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR 0x080
+#define NFI_BYTELEN 0x084
+#define NFI_CSEL 0x090
+#define NFI_FDML(x) (0x0a0 + (x) * 8)
+#define NFI_FDMM(x) (0x0a4 + (x) * 8)
+#define NFI_DEBUG_CON1 0x220
+#define STROBE_MASK GENMASK(4, 3)
+#define STROBE_SHIFT 3
+#define ECC_CLK_EN BIT(11)
+#define AUTOC_SRAM_MODE BIT(12)
+#define BYPASS_MASTER_EN BIT(15)
+#define NFI_MASTER_STA 0x224
+#define MASTER_BUS_BUSY 0x3
+#define NFI_SECCUS_SIZE 0x22c
+#define SECCUS_SIZE_EN BIT(17)
+#define NFI_RANDOM_CNFG 0x238
+#define RAN_ENCODE_EN BIT(0)
+#define ENCODE_SEED_SHIFT 1
+#define RAN_DECODE_EN BIT(16)
+#define DECODE_SEED_SHIFT 17
+#define RAN_SEED_MASK 0x7fff
+#define NFI_EMPTY_THRESH 0x23c
+#define NFI_NAND_TYPE_CNFG 0x240
+#define NAND_TYPE_ASYNC 0
+#define NAND_TYPE_TOGGLE 1
+#define NAND_TYPE_SYNC 2
+#define NFI_ACCCON1 0x244
+#define NFI_DELAY_CTRL 0x248
+#define NFI_TLC_RD_WHR2 0x300
+#define TLC_RD_WHR2_EN BIT(12)
+#define TLC_RD_WHR2_MASK GENMASK(11, 0)
+#define SNF_SNF_CNFG 0x55c
+#define SPI_MODE_EN 1
+#define SPI_MODE_DIS 0
+
+#endif /* __NFI_REGS_H__ */
+
diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi.c b/drivers/mtd/nandx/core/nfi/nfi_spi.c
new file mode 100644
index 0000000000..67cd0aaad9
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_spi.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "nfiecc.h"
+#include "nfi_regs.h"
+#include "nfi_base.h"
+#include "nfi_spi_regs.h"
+#include "nfi_spi.h"
+
+#define NFI_CMD_DUMMY_RD 0x00
+#define NFI_CMD_DUMMY_WR 0x80
+
+static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
+ /*
+ * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
+ * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
+ */
+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
+};
+
+static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
+{
+ return container_of(nb, struct nfi_spi, base);
+}
+
+static void snfi_mac_enable(struct nfi_base *nb)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ val = readl(regs + SNF_MAC_CTL);
+ val &= ~MAC_XIO_SEL;
+ val |= SF_MAC_EN;
+
+ writel(val, regs + SNF_MAC_CTL);
+}
+
+static void snfi_mac_disable(struct nfi_base *nb)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ val = readl(regs + SNF_MAC_CTL);
+ val &= ~(SF_TRIG | SF_MAC_EN);
+ writel(val, regs + SNF_MAC_CTL);
+}
+
+static int snfi_mac_trigger(struct nfi_base *nb)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ val = readl(regs + SNF_MAC_CTL);
+ val |= SF_TRIG;
+ writel(val, regs + SNF_MAC_CTL);
+
+ ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+ val & WIP_READY, 10,
+ NFI_TIMEOUT);
+ if (ret) {
+ pr_info("polling wip ready for read timeout\n");
+ return ret;
+ }
+
+ return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+ !(val & WIP), 10,
+ NFI_TIMEOUT);
+}
+
+static int snfi_mac_op(struct nfi_base *nb)
+{
+ int ret;
+
+ snfi_mac_enable(nb);
+ ret = snfi_mac_trigger(nb);
+ snfi_mac_disable(nb);
+
+ return ret;
+}
+
+static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+ struct nandx_split32 split = {0};
+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+ void *regs = nfi_spi->base.res.nfi_regs;
+ u32 data_offset = 0, i, val;
+ u8 *p_val = (u8 *)(&val);
+
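+	/* The MAC GPRAM is only word-addressable, so split the byte stream
+	 * into an unaligned head (read-modify-write of the current word),
+	 * a whole-word body and a trailing partial word.
+	 */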
+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+ if (split.head_len) {
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.head_len; i++)
+ p_val[split.head + i] = data[i];
+
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ }
+
+ if (split.body_len) {
+ reg_offset = split.body;
+ data_offset = split.head_len;
+
+ for (i = 0; i < split.body_len; i++) {
+ p_val[i & 3] = data[data_offset + i];
+
+ if ((i & 3) == 3) {
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ reg_offset += 4;
+ }
+ }
+ }
+
+ if (split.tail_len) {
+ reg_offset = split.tail;
+ data_offset += split.body_len;
+
+ for (i = 0; i < split.tail_len; i++) {
+ p_val[i] = data[data_offset + i];
+
+ if (i == split.tail_len - 1)
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ }
+ }
+}
+
+static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+ void *regs = nfi_spi->base.res.nfi_regs;
+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+ struct nandx_split32 split = {0};
+ u32 data_offset = 0, i, val;
+ u8 *p_val = (u8 *)&val;
+
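+	/* Mirror of snfi_write_mac(): read the RX bytes back out of the
+	 * word-addressable GPRAM in head/body/tail chunks.
+	 */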
+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+ if (split.head_len) {
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.head_len; i++)
+ data[data_offset + i] = p_val[split.head + i];
+ }
+
+ if (split.body_len) {
+ reg_offset = split.body;
+ data_offset = split.head_len;
+
+ for (i = 0; i < split.body_len; i++) {
+ if ((i & 3) == 0) {
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+ reg_offset += 4;
+ }
+
+ data[data_offset + i] = p_val[i % 4];
+ }
+ }
+
+ if (split.tail_len) {
+ reg_offset = split.tail;
+ data_offset += split.body_len;
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.tail_len; i++)
+ data[data_offset + i] = p_val[i];
+ }
+}
+
+static int snfi_send_command(struct nfi *nfi, short cmd)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+
+ if (cmd == -1)
+ return 0;
+
+ if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
+ snfi_write_mac(nfi_spi, (u8 *)&cmd, 1);
+ nfi_spi->tx_count++;
+ return 0;
+ }
+
+ nfi_spi->cmd[nfi_spi->cur_cmd_idx++] = cmd;
+ return 0;
+}
+
+static int snfi_send_address(struct nfi *nfi, int col, int row,
+ int col_cycle,
+ int row_cycle)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ u32 addr, cycle, temp;
+
+ nb->col = col;
+ nb->row = row;
+
+ if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
+ addr = row;
+ cycle = row_cycle;
+
+ if (!row_cycle) {
+ addr = col;
+ cycle = col_cycle;
+ }
+
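+		/* SPI NAND expects the address MSB first: byte-swap to
+		 * big-endian and drop the unused high bytes so that only
+		 * 'cycle' address bytes are pushed into the GPRAM.
+		 */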
+ temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
+ snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
+ nfi_spi->tx_count += cycle;
+ } else {
+ nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
+ nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
+ }
+
+ return 0;
+}
+
+static int snfi_trigger(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+
+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+ writel(0, regs + SNF_MAC_INL);
+
+ nfi_spi->tx_count = 0;
+ nfi_spi->cur_cmd_idx = 0;
+ nfi_spi->cur_addr_idx = 0;
+
+ return snfi_mac_op(nb);
+}
+
+static int snfi_select_chip(struct nfi *nfi, int cs)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ val = readl(regs + SNF_MISC_CTL);
+
+ if (cs == 0) {
+ val &= ~SF2CS_SEL;
+ val &= ~SF2CS_EN;
+ } else if (cs == 1) {
+ val |= SF2CS_SEL;
+ val |= SF2CS_EN;
+ } else {
+ return -EIO;
+ }
+
+ writel(val, regs + SNF_MISC_CTL);
+
+ return 0;
+}
+
+static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
+{
+ void *regs = nb->res.nfi_regs;
+ struct nfi_spi_delay *delay;
+ u32 val;
+
+	/* delay_mode is unsigned; valid table indices are 0..SPI_NAND_MAX_DELAY - 1 */
+	if (delay_mode >= SPI_NAND_MAX_DELAY)
+ return -EINVAL;
+
+ delay = &spi_delay[delay_mode];
+
+ val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
+ delay->tIO_OUT_DLY[2] << 16 |
+ delay->tIO_OUT_DLY[3] << 24;
+ writel(val, regs + SNF_DLY_CTL1);
+
+ val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
+ delay->tIO_IN_DLY[2] << 16 |
+ delay->tIO_IN_DLY[3] << 24;
+ writel(val, regs + SNF_DLY_CTL2);
+
+ val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
+ delay->tCS_DLY << 16 |
+ delay->tWR_EN_DLY << 24;
+ writel(val, regs + SNF_DLY_CTL3);
+
+ writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
+
+ val = readl(regs + SNF_MISC_CTL);
+ val |= (delay->tREAD_LATCH_LATENCY) <<
+ LATCH_LAT_SHIFT;
+ writel(val, regs + SNF_MISC_CTL);
+
+ return 0;
+}
+
+static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+	/* Nothing needs to be done. */
+ return 0;
+}
+
+static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+	/* Nothing needs to be done. */
+ return 0;
+}
+
+static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ int ret = 0;
+
+ if (!args)
+ return -EINVAL;
+
+ switch (cmd) {
+ case NFI_CTRL_DMA:
+ nb->dma_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_NFI_IRQ:
+ nb->nfi_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_IRQ:
+ nb->ecc_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_PAGE_IRQ:
+ nb->page_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC:
+ nb->ecc_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_BAD_MARK_SWAP:
+ nb->bad_mark_swap_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_CLOCK:
+ nb->ecc_clk_en = *(bool *)args;
+ break;
+
+ case SNFI_CTRL_OP_MODE:
+ nfi_spi->snfi_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_RX_MODE:
+ nfi_spi->read_cache_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_TX_MODE:
+ nfi_spi->write_cache_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_DELAY_MODE:
+ ret = snfi_set_delay(nb, *(u8 *)args);
+ break;
+
+ default:
+		pr_info("operation not supported.\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ int ret;
+
+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+ writel(count, regs + SNF_MAC_INL);
+
+ ret = snfi_mac_op(nb);
+ if (ret)
+ return ret;
+
+ snfi_read_mac(nfi_spi, data, count);
+
+ nfi_spi->tx_count = 0;
+
+ return 0;
+}
+
+static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+
+ snfi_write_mac(nfi_spi, data, count);
+ nfi_spi->tx_count += count;
+
+ writel(0, regs + SNF_MAC_INL);
+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+
+ nfi_spi->tx_count = 0;
+
+ return snfi_mac_op(nb);
+}
+
+static int snfi_reset(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+ int ret;
+
+ ret = nfi_spi->parent->nfi.reset(nfi);
+ if (ret)
+ return ret;
+
+ val = readl(regs + SNF_MISC_CTL);
+ val |= SW_RST;
+ writel(val, regs + SNF_MISC_CTL);
+
+ ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
+ !(val & SPI_STATE), 50,
+ NFI_TIMEOUT);
+ if (ret) {
+ pr_info("spi state active in reset [0x%x] = 0x%x\n",
+ SNF_STA_CTL1, val);
+ return ret;
+ }
+
+ val = readl(regs + SNF_MISC_CTL);
+ val &= ~SW_RST;
+ writel(val, regs + SNF_MISC_CTL);
+
+ return 0;
+}
+
+static int snfi_config_for_write(struct nfi_base *nb, int count)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+
+ val = readl(regs + SNF_MISC_CTL);
+
+ if (nfi_spi->write_cache_mode == SNFI_TX_114)
+ val |= PG_LOAD_X4_EN;
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+ val |= PG_LOAD_CUSTOM_EN;
+
+ writel(val, regs + SNF_MISC_CTL);
+
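+	/* Program the total page-load length (data plus spare for every
+	 * sector) that the controller will shift out for this program
+	 * operation.
+	 */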
+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+ writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
+
+ val = readl(regs + SNF_PG_CTL1);
+
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+		val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
+	} else {
+ val |= nfi_spi->cmd[0] | nfi_spi->cmd[1] << PG_LOAD_CMD_SHIFT |
+ nfi_spi->cmd[2] << PG_EXE_CMD_SHIFT;
+
+ writel(nfi_spi->row_addr[1], regs + SNF_PG_CTL3);
+ writel(nfi_spi->cmd[3] << GF_CMD_SHIFT | nfi_spi->col_addr[2] <<
+ GF_ADDR_SHIFT, regs + SNF_GF_CTL1);
+ }
+
+ writel(val, regs + SNF_PG_CTL1);
+ writel(nfi_spi->col_addr[1], regs + SNF_PG_CTL2);
+
+ writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
+
+ return 0;
+}
+
+static int snfi_config_for_read(struct nfi_base *nb, int count)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+ int ret = 0;
+
+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+
+ val = readl(regs + SNF_MISC_CTL);
+ val &= ~DARA_READ_MODE_MASK;
+
+ switch (nfi_spi->read_cache_mode) {
+
+ case SNFI_RX_111:
+ break;
+
+ case SNFI_RX_112:
+ val |= X2_DATA_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_114:
+ val |= X4_DATA_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_122:
+ val |= DUAL_IO_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_144:
+ val |= QUAD_IO_MODE << READ_MODE_SHIFT;
+ break;
+
+ default:
+		pr_info("Unsupported read operation: %d!\n",
+ nfi_spi->read_cache_mode);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+ val |= DATARD_CUSTOM_EN;
+
+ writel(val, regs + SNF_MISC_CTL);
+
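+	/* Program the total read length (data plus spare for every sector)
+	 * that the controller will fetch from the device cache.
+	 */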
+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+ writel(val, regs + SNF_MISC_CTL2);
+
+ val = readl(regs + SNF_RD_CTL2);
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+ val |= nfi_spi->cmd[0];
+ writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
+ } else {
+ val |= nfi_spi->cmd[2];
+ writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
+ nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
+ writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
+ nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
+ regs + SNF_GF_CTL1);
+ writel(nfi_spi->col_addr[2], regs + SNF_RD_CTL3);
+ }
+
+ writel(val, regs + SNF_RD_CTL2);
+
+ writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
+
+ return ret;
+}
+
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ u32 *data32 = (u32 *)data;
+ u32 *fdm32 = (u32 *)fdm;
+ u32 i, count = 0;
+
+ for (i = 0; i < nb->format.page_size >> 2; i++) {
+		if (data32[i] != 0xffffffff) {
+ count += zero_popcount(data32[i]);
+ if (count > 10) {
+ pr_info("%s %d %d count:%d\n",
+ __func__, __LINE__, i, count);
+ return false;
+ }
+ }
+ }
+
+ if (fdm) {
+ for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
+			if (fdm32[i] != 0xffffffff) {
+ count += zero_popcount(fdm32[i]);
+ if (count > 10) {
+ pr_info("%s %d %d count:%d\n",
+ __func__, __LINE__, i, count);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+ u8 *fdm,
+ bool read)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ int ret;
+
+ ret = nfi_spi->parent->rw_prepare(nb, sectors, data, fdm, read);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = snfi_config_for_read(nb, sectors);
+ else
+ ret = snfi_config_for_write(nb, sectors);
+
+ return ret;
+}
+
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+ bool read)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ nfi_spi->parent->rw_complete(nb, data, fdm, read);
+
+ val = readl(regs + SNF_MISC_CTL);
+
+ if (read)
+ val &= ~DATARD_CUSTOM_EN;
+ else
+ val &= ~PG_LOAD_CUSTOM_EN;
+
+ writel(val, regs + SNF_MISC_CTL);
+
+ nfi_spi->tx_count = 0;
+ nfi_spi->cur_cmd_idx = 0;
+ nfi_spi->cur_addr_idx = 0;
+}
+
+static void set_nfi_base_funcs(struct nfi_base *nb)
+{
+ nb->nfi.reset = snfi_reset;
+ nb->nfi.set_timing = snfi_set_timing;
+ nb->nfi.wait_ready = snfi_wait_ready;
+
+ nb->nfi.send_cmd = snfi_send_command;
+ nb->nfi.send_addr = snfi_send_address;
+ nb->nfi.trigger = snfi_trigger;
+ nb->nfi.nfi_ctrl = snfi_ctrl;
+ nb->nfi.select_chip = snfi_select_chip;
+
+ nb->nfi.read_bytes = snfi_read_bytes;
+ nb->nfi.write_bytes = snfi_write_bytes;
+
+ nb->rw_prepare = rw_prepare;
+ nb->rw_complete = rw_complete;
+ nb->is_page_empty = is_page_empty;
+
+}
+
+struct nfi *nfi_extend_init(struct nfi_base *nb)
+{
+ struct nfi_spi *nfi_spi;
+
+ nfi_spi = mem_alloc(1, sizeof(struct nfi_spi));
+ if (!nfi_spi) {
+ pr_info("snfi alloc memory fail @%s.\n", __func__);
+ return NULL;
+ }
+
+ memcpy(&nfi_spi->base, nb, sizeof(struct nfi_base));
+ nfi_spi->parent = nb;
+
+ nfi_spi->read_cache_mode = SNFI_RX_114;
+ nfi_spi->write_cache_mode = SNFI_TX_114;
+
+ set_nfi_base_funcs(&nfi_spi->base);
+
+ /* Change nfi to spi mode */
+ writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
+
+ return &(nfi_spi->base.nfi);
+}
+
+void nfi_extend_exit(struct nfi_base *nb)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+
+ mem_free(nfi_spi->parent);
+ mem_free(nfi_spi);
+}
+
diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi.h b/drivers/mtd/nandx/core/nfi/nfi_spi.h
new file mode 100644
index 0000000000..a52255663a
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_spi.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_SPI_H__
+#define __NFI_SPI_H__
+
+#define SPI_NAND_MAX_DELAY 6
+#define SPI_NAND_MAX_OP 4
+
+/* TODO: add comments */
+struct nfi_spi_delay {
+ u8 tCLK_SAM_DLY;
+ u8 tCLK_OUT_DLY;
+ u8 tCS_DLY;
+ u8 tWR_EN_DLY;
+ u8 tIO_IN_DLY[4];
+ u8 tIO_OUT_DLY[4];
+ u8 tREAD_LATCH_LATENCY;
+};
+
+/* SPI NAND structure */
+struct nfi_spi {
+ struct nfi_base base;
+ struct nfi_base *parent;
+
+ u8 snfi_mode;
+ u8 tx_count;
+
+ u8 cmd[SPI_NAND_MAX_OP];
+ u8 cur_cmd_idx;
+
+ u32 row_addr[SPI_NAND_MAX_OP];
+ u32 col_addr[SPI_NAND_MAX_OP];
+ u8 cur_addr_idx;
+
+ u8 read_cache_mode;
+ u8 write_cache_mode;
+};
+
+#endif /* __NFI_SPI_H__ */
diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
new file mode 100644
index 0000000000..77adf46782
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_SPI_REGS_H__
+#define __NFI_SPI_REGS_H__
+
+#define SNF_MAC_CTL 0x500
+#define WIP BIT(0)
+#define WIP_READY BIT(1)
+#define SF_TRIG BIT(2)
+#define SF_MAC_EN BIT(3)
+#define MAC_XIO_SEL BIT(4)
+#define SNF_MAC_OUTL 0x504
+#define SNF_MAC_INL 0x508
+#define SNF_RD_CTL1 0x50c
+#define PAGE_READ_CMD_SHIFT 24
+#define SNF_RD_CTL2 0x510
+#define SNF_RD_CTL3 0x514
+#define SNF_GF_CTL1 0x518
+#define GF_ADDR_SHIFT 16
+#define GF_CMD_SHIFT 24
+#define SNF_GF_CTL3 0x520
+#define SNF_PG_CTL1 0x524
+#define PG_EXE_CMD_SHIFT 16
+#define PG_LOAD_CMD_SHIFT 8
+#define SNF_PG_CTL2 0x528
+#define SNF_PG_CTL3 0x52c
+#define SNF_ER_CTL 0x530
+#define SNF_ER_CTL2 0x534
+#define SNF_MISC_CTL 0x538
+#define SW_RST BIT(28)
+#define PG_LOAD_X4_EN BIT(20)
+#define X2_DATA_MODE 1
+#define X4_DATA_MODE 2
+#define DUAL_IO_MODE 5
+#define QUAD_IO_MODE 6
+#define READ_MODE_SHIFT 16
+#define LATCH_LAT_SHIFT 8
+#define LATCH_LAT_MASK GENMASK(9, 8)
+#define DARA_READ_MODE_MASK GENMASK(18, 16)
+#define SF2CS_SEL BIT(13)
+#define SF2CS_EN BIT(12)
+#define PG_LOAD_CUSTOM_EN BIT(7)
+#define DATARD_CUSTOM_EN BIT(6)
+#define SNF_MISC_CTL2 0x53c
+#define PG_LOAD_SHIFT 16
+#define SNF_DLY_CTL1 0x540
+#define SNF_DLY_CTL2 0x544
+#define SNF_DLY_CTL3 0x548
+#define SNF_DLY_CTL4 0x54c
+#define SNF_STA_CTL1 0x550
+#define SPI_STATE GENMASK(3, 0)
+#define SNF_STA_CTL2 0x554
+#define SNF_STA_CTL3 0x558
+#define SNF_SNF_CNFG 0x55c
+#define SPI_MODE BIT(0)
+#define SNF_DEBUG_SEL 0x560
+#define SPI_GPRAM_ADDR 0x800
+
+#endif /* __NFI_SPI_REGS_H__ */
diff --git a/drivers/mtd/nandx/core/nfi/nfiecc.c b/drivers/mtd/nandx/core/nfi/nfiecc.c
new file mode 100644
index 0000000000..14246fbc3e
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfiecc.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nfiecc_regs.h"
+#include "nfiecc.h"
+
+#define NFIECC_IDLE_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
+#define IDLE_MASK 1
+#define NFIECC_CTL_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
+#define NFIECC_IRQ_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
+#define NFIECC_ADDR(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
+
+#define ECC_TIMEOUT 500000
+
+/* ecc strength that each IP supports */
+static const int ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+static int nfiecc_irq_handler(void *data)
+{
+ struct nfiecc *ecc = data;
+ void *regs = ecc->res.regs;
+ u32 status;
+
+ status = readl(regs + NFIECC_DECIRQSTA) & DEC_IRQSTA_GEN;
+ if (status) {
+ status = readl(regs + NFIECC_DECDONE);
+ if (!(status & ecc->config.sectors))
+ return NAND_IRQ_NONE;
+
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readl(regs + NFIECC_DECIRQSTA);
+ ecc->config.sectors = 0;
+ nandx_event_complete(ecc->done);
+ } else {
+ status = readl(regs + NFIECC_ENCIRQSTA) & ENC_IRQSTA_GEN;
+ if (!status)
+ return NAND_IRQ_NONE;
+
+ nandx_event_complete(ecc->done);
+ }
+
+ return NAND_IRQ_HANDLED;
+}
+
+static inline int nfiecc_wait_idle(struct nfiecc *ecc)
+{
+ int op = ecc->config.op;
+ int ret, val;
+
+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
+ val, val & IDLE_MASK,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ pr_info("%s not idle\n",
+ op == ECC_ENCODE ? "encoder" : "decoder");
+
+ return ret;
+}
+
+static int nfiecc_wait_encode_done(struct nfiecc *ecc)
+{
+ int ret, val;
+
+ if (ecc->ecc_irq_en) {
+ /* poll one time to avoid missing irq event */
+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+ val, val & ENC_FSM_IDLE, 1, 1);
+ if (!ret)
+ return 0;
+
+		/* wait for the IRQ; if it did not arrive, fall back to polling the status for a while */
+ ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+ if (ret)
+ return 0;
+ }
+
+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+ val, val & ENC_FSM_IDLE,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ pr_info("encode timeout\n");
+
+ return ret;
+
+}
+
+static int nfiecc_wait_decode_done(struct nfiecc *ecc)
+{
+ u32 secbit = BIT(ecc->config.sectors - 1);
+ void *regs = ecc->res.regs;
+ int ret, val;
+
+ if (ecc->ecc_irq_en) {
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
+ val, val & secbit, 1, 1);
+ if (!ret)
+ return 0;
+
+ ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+ if (ret)
+ return 0;
+ }
+
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
+ val, val & secbit,
+ 10, ECC_TIMEOUT);
+ if (ret) {
+ pr_info("decode timeout\n");
+ return ret;
+ }
+
+	/* "Decode done" does not mean all ECC work is done:
+	 * we also need to check that syn, bma, chien and autoc are all idle.
+	 * Only check this when ECC_DECCNFG[13:12] is 3,
+	 * which means auto correct.
+	 */
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
+ val, (val & FSM_MASK) == FSM_IDLE,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ pr_info("decode fsm(0x%x) is not idle\n",
+ readl(regs + NFIECC_DECFSM));
+
+ return ret;
+}
+
+static int nfiecc_wait_done(struct nfiecc *ecc)
+{
+ if (ecc->config.op == ECC_ENCODE)
+ return nfiecc_wait_encode_done(ecc);
+
+ return nfiecc_wait_decode_done(ecc);
+}
+
+static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+ struct nfiecc_config *config = &ecc->config;
+ u32 val;
+
+ val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
+
+ if (config->mode == ECC_DMA_MODE)
+ val |= ENC_BURST_EN;
+
+ val |= (config->len << 3) << ENCCNFG_MS_SHIFT;
+ writel(val, ecc->res.regs + NFIECC_ENCCNFG);
+}
+
+static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+ struct nfiecc_config *config = &ecc->config;
+ u32 dec_sz = (config->len << 3) +
+ config->strength * ecc->caps->parity_bits;
+ u32 val;
+
+ val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
+
+ if (config->mode == ECC_DMA_MODE)
+ val |= DEC_BURST_EN;
+
+ val |= (dec_sz << DECCNFG_MS_SHIFT) |
+ (config->deccon << DEC_CON_SHIFT);
+ val |= DEC_EMPTY_EN;
+ writel(val, ecc->res.regs + NFIECC_DECCNFG);
+}
+
+static void nfiecc_config(struct nfiecc *ecc)
+{
+ u32 idx;
+
+ for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
+ if (ecc->config.strength == ecc->caps->ecc_strength[idx])
+ break;
+ }
+
+ if (ecc->config.op == ECC_ENCODE)
+ nfiecc_encode_config(ecc, idx);
+ else
+ nfiecc_decode_config(ecc, idx);
+}
+
+static int nfiecc_enable(struct nfiecc *ecc)
+{
+ enum nfiecc_operation op = ecc->config.op;
+ void *regs = ecc->res.regs;
+
+ nfiecc_config(ecc);
+
+ writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+ if (ecc->ecc_irq_en) {
+ writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
+
+ if (ecc->page_irq_en)
+ writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
+ regs + NFIECC_IRQ_REG(op));
+
+ nandx_event_init(ecc->done);
+ }
+
+ return 0;
+}
+
+static int nfiecc_disable(struct nfiecc *ecc)
+{
+ enum nfiecc_operation op = ecc->config.op;
+ void *regs = ecc->res.regs;
+
+ nfiecc_wait_idle(ecc);
+
+ writel(0, regs + NFIECC_IRQ_REG(op));
+ writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+ return 0;
+}
+
+static int nfiecc_correct_data(struct nfiecc *ecc,
+ struct nfiecc_status *status,
+ u8 *data, u32 sector)
+{
+ u32 err, offset, i;
+ u32 loc, byteloc, bitloc;
+
+ status->corrected = 0;
+ status->failed = 0;
+
+ offset = (sector >> 2);
+ err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
+ err >>= (sector % 4) * 8;
+ err &= ecc->caps->err_mask;
+
+ if (err == ecc->caps->err_mask) {
+ status->failed++;
+ return -ENANDREAD;
+ }
+
+ status->corrected += err;
+ status->bitflips = max_t(u32, status->bitflips, err);
+
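+	/* Each NFIECC_DECEL register packs two 16-bit error locations;
+	 * a location is a bit offset within the sector, so byte = loc >> 3
+	 * and bit = loc & 0x7, and the flipped bit is XORed back.
+	 */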
+ for (i = 0; i < err; i++) {
+ loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
+		loc = (loc >> ((i & 0x1) << 4)) & 0xffff;
+ byteloc = loc >> 3;
+ bitloc = loc & 0x7;
+ data[byteloc] ^= (1 << bitloc);
+ }
+
+ return 0;
+}
+
+static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
+{
+ struct nfiecc_config *config = &ecc->config;
+ void *regs = ecc->res.regs;
+ int size, ret, i;
+ u32 val;
+
+ if (config->mode == ECC_DMA_MODE) {
+ if ((unsigned long)config->dma_addr & 0x3)
+ pr_info("encode address is not 4B aligned: 0x%x\n",
+ (u32)(unsigned long)config->dma_addr);
+
+ writel((unsigned long)config->dma_addr,
+ regs + NFIECC_ADDR(config->op));
+ } else if (config->mode == ECC_PIO_MODE) {
+ if (config->op == ECC_ENCODE) {
+ size = (config->len + 3) >> 2;
+ } else {
+ size = config->strength * ecc->caps->parity_bits;
+ size = (size + 7) >> 3;
+ size += config->len;
+ size >>= 2;
+ }
+
+ for (i = 0; i < size; i++) {
+ ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
+ val, val & PIO_DI_RDY,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
+ }
+ }
+
+ return 0;
+}
+
+static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
+{
+ struct nfiecc_config *config = &ecc->config;
+ u32 len, i, val = 0;
+ u8 *p;
+ int ret;
+
+	/* Under NFI mode, nothing needs to be done */
+ if (config->mode == ECC_NFI_MODE)
+ return 0;
+
+ ret = nfiecc_fill_data(ecc, data);
+ if (ret)
+ return ret;
+
+ ret = nfiecc_wait_encode_done(ecc);
+ if (ret)
+ return ret;
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
+ p = data + config->len;
+
+ /* Write the parity bytes generated by the ECC back to the OOB region */
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0)
+ val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
+
+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+
+ return 0;
+}
+
+static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
+{
+ int ret;
+
+	/* Under NFI mode, nothing needs to be done */
+ if (ecc->config.mode == ECC_NFI_MODE)
+ return 0;
+
+ ret = nfiecc_fill_data(ecc, data);
+ if (ret)
+ return ret;
+
+ return nfiecc_wait_decode_done(ecc);
+}
+
+static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
+ u32 sectors)
+{
+ void *regs = ecc->res.regs;
+ u32 i, val = 0, err;
+ u32 bitflips = 0;
+
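+	/* Each NFIECC_DECENUM register carries the error counts of four
+	 * sectors; a count equal to err_mask means the sector is
+	 * uncorrectable.
+	 */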
+ for (i = start_sector; i < start_sector + sectors; i++) {
+ if ((i % 4) == 0)
+ val = readl(regs + NFIECC_DECENUM(i / 4));
+
+ err = val >> ((i % 4) * 5);
+ err &= ecc->caps->err_mask;
+
+ if (err == ecc->caps->err_mask)
+			pr_err("sector %d is uncorrectable\n", i);
+
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ if (bitflips == ecc->caps->err_mask)
+ return -ENANDREAD;
+
+ if (bitflips)
+		pr_info("%d bitflips corrected\n", bitflips);
+
+ return bitflips;
+}
+
+static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
+{
+ struct nfiecc_caps *caps = ecc->caps;
+ int i, count = caps->ecc_strength_num;
+
+ if (strength >= caps->ecc_strength[count - 1])
+ return caps->ecc_strength[count - 1];
+
+ if (strength < caps->ecc_strength[0])
+ return -EINVAL;
+
+ for (i = 1; i < count; i++) {
+ if (strength < caps->ecc_strength[i])
+ return caps->ecc_strength[i - 1];
+ }
+
+ return -EINVAL;
+}
+
+static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case NFI_CTRL_ECC_IRQ:
+ ecc->ecc_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_PAGE_IRQ:
+ ecc->page_irq_en = *(bool *)args;
+ break;
+
+ default:
+ pr_info("invalid arguments.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int nfiecc_hw_init(struct nfiecc *ecc)
+{
+ int ret;
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
+
+ return 0;
+}
+
+static struct nfiecc_caps nfiecc_caps_mt7622 = {
+ .err_mask = 0x1f,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_strength_num = 7,
+};
+
+static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
+{
+	/* NOTE: add other ICs' data here */
+ return &nfiecc_caps_mt7622;
+}
+
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
+{
+ struct nfiecc *ecc;
+ int ret;
+
+ ecc = mem_alloc(1, sizeof(struct nfiecc));
+ if (!ecc)
+ return NULL;
+
+ ecc->res = *res;
+
+ ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
+ "mtk-ecc", ecc);
+ if (ret) {
+ pr_info("ecc irq register failed!\n");
+ goto error;
+ }
+
+ ecc->ecc_irq_en = false;
+ ecc->page_irq_en = false;
+ ecc->done = nandx_event_create();
+ ecc->caps = nfiecc_get_match_data(res->ic_ver);
+
+ ecc->adjust_strength = nfiecc_adjust_strength;
+ ecc->enable = nfiecc_enable;
+ ecc->disable = nfiecc_disable;
+ ecc->decode = nfiecc_decode;
+ ecc->encode = nfiecc_encode;
+ ecc->wait_done = nfiecc_wait_done;
+ ecc->decode_status = nfiecc_decode_status;
+ ecc->correct_data = nfiecc_correct_data;
+ ecc->nfiecc_ctrl = nfiecc_ctrl;
+
+ ret = nfiecc_hw_init(ecc);
+ if (ret)
+ return NULL;
+
+ return ecc;
+
+error:
+ mem_free(ecc);
+
+ return NULL;
+}
+
+void nfiecc_exit(struct nfiecc *ecc)
+{
+ nandx_event_destroy(ecc->done);
+ mem_free(ecc);
+}
+
diff --git a/drivers/mtd/nandx/core/nfi/nfiecc.h b/drivers/mtd/nandx/core/nfi/nfiecc.h
new file mode 100644
index 0000000000..b02a5c3534
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfiecc.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFIECC_H__
+#define __NFIECC_H__
+
+enum nfiecc_mode {
+ ECC_DMA_MODE,
+ ECC_NFI_MODE,
+ ECC_PIO_MODE
+};
+
+enum nfiecc_operation {
+ ECC_ENCODE,
+ ECC_DECODE
+};
+
+enum nfiecc_deccon {
+ ECC_DEC_FER = 1,
+ ECC_DEC_LOCATE = 2,
+ ECC_DEC_CORRECT = 3
+};
+
+struct nfiecc_resource {
+ int ic_ver;
+ void *dev;
+ void *regs;
+ int irq_id;
+
+};
+
+struct nfiecc_status {
+ u32 corrected;
+ u32 failed;
+ u32 bitflips;
+};
+
+struct nfiecc_caps {
+ u32 err_mask;
+ u32 ecc_mode_shift;
+ u32 parity_bits;
+ const int *ecc_strength;
+ u32 ecc_strength_num;
+};
+
+struct nfiecc_config {
+ enum nfiecc_operation op;
+ enum nfiecc_mode mode;
+ enum nfiecc_deccon deccon;
+
+ void *dma_addr; /* DMA use only */
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+struct nfiecc {
+ struct nfiecc_resource res;
+ struct nfiecc_config config;
+ struct nfiecc_caps *caps;
+
+ bool ecc_irq_en;
+ bool page_irq_en;
+
+ void *done;
+
+ int (*adjust_strength)(struct nfiecc *ecc, int strength);
+ int (*enable)(struct nfiecc *ecc);
+ int (*disable)(struct nfiecc *ecc);
+
+ int (*decode)(struct nfiecc *ecc, u8 *data);
+ int (*encode)(struct nfiecc *ecc, u8 *data);
+
+ int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
+ int (*correct_data)(struct nfiecc *ecc,
+ struct nfiecc_status *status,
+ u8 *data, u32 sector);
+ int (*wait_done)(struct nfiecc *ecc);
+
+ int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
+};
+
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
+void nfiecc_exit(struct nfiecc *ecc);
+
+#endif /* __NFIECC_H__ */
diff --git a/drivers/mtd/nandx/core/nfi/nfiecc_regs.h b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
new file mode 100644
index 0000000000..96564cf872
--- /dev/null
+++ b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFIECC_REGS_H__
+#define __NFIECC_REGS_H__
+
+#define NFIECC_ENCCON 0x000
+/* NFIECC_DECCON has the same bit definitions */
+#define ECC_OP_EN BIT(0)
+#define NFIECC_ENCCNFG 0x004
+#define ENCCNFG_MS_SHIFT 16
+#define ENC_BURST_EN BIT(8)
+#define NFIECC_ENCDIADDR 0x008
+#define NFIECC_ENCIDLE 0x00c
+#define NFIECC_ENCSTA 0x02c
+#define ENC_FSM_IDLE 1
+#define NFIECC_ENCIRQEN 0x030
+/* NFIECC_DECIRQEN has same bit define */
+#define ECC_IRQEN BIT(0)
+#define ECC_PG_IRQ_SEL BIT(1)
+#define NFIECC_ENCIRQSTA 0x034
+#define ENC_IRQSTA_GEN BIT(0)
+#define NFIECC_PIO_DIRDY 0x080
+#define PIO_DI_RDY BIT(0)
+#define NFIECC_PIO_DI 0x084
+#define NFIECC_DECCON 0x100
+#define NFIECC_DECCNFG 0x104
+#define DEC_BURST_EN BIT(8)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CON_SHIFT 12
+#define DECCNFG_MS_SHIFT 16
+#define NFIECC_DECDIADDR 0x108
+#define NFIECC_DECIDLE 0x10c
+#define NFIECC_DECENUM(x) (0x114 + (x) * 4)
+#define NFIECC_DECDONE 0x11c
+#define NFIECC_DECIRQEN 0x140
+#define NFIECC_DECIRQSTA 0x144
+#define DEC_IRQSTA_GEN BIT(0)
+#define NFIECC_DECFSM 0x14c
+#define FSM_MASK 0x7f0f0f0f
+#define FSM_IDLE 0x01010101
+#define NFIECC_BYPASS 0x20c
+#define NFIECC_BYPASS_EN BIT(0)
+#define NFIECC_ENCPAR(x) (0x010 + (x) * 4)
+#define NFIECC_DECEL(x) (0x120 + (x) * 4)
+
+#endif /* __NFIECC_REGS_H__ */
diff --git a/drivers/mtd/nandx/driver/Nandx.mk b/drivers/mtd/nandx/driver/Nandx.mk
new file mode 100644
index 0000000000..3fb93d37c5
--- /dev/null
+++ b/drivers/mtd/nandx/driver/Nandx.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
+
+nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
+nandx-$(NANDX_CTP_SUPPORT) += ctp/nand_test.c
+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nand_test.h
+
+nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
+nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
+nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
+nandx-$(NANDX_LK_SUPPORT) += lk/driver.c
+nandx-$(NANDX_UBOOT_SUPPORT) += uboot/driver.c
diff --git a/drivers/mtd/nandx/driver/bbt/bbt.c b/drivers/mtd/nandx/driver/bbt/bbt.c
new file mode 100644
index 0000000000..c9d4823e09
--- /dev/null
+++ b/drivers/mtd/nandx/driver/bbt/bbt.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "bbt.h"
+
+/* Multi-chip is not supported */
+static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct bbt_manager g_bbt_manager = {
+ { {{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
+ {{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
+ },
+ NAND_BBT_SCAN_MAXBLOCKS, NULL
+};
+
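+/*
+ * Each block takes 2 bits in the in-memory bbt, so one byte holds the marks
+ * of 4 blocks. For example, block 5 lives in bbt[1] (GET_ENTRY(5) == 1) at
+ * bit offset 2 (GET_POSITION(5) == 2).
+ */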
+static inline void set_bbt_mark(u8 *bbt, int block, u8 mark)
+{
+ int index, offset;
+
+ index = GET_ENTRY(block);
+ offset = GET_POSITION(block);
+
+ bbt[index] &= ~(BBT_ENTRY_MASK << offset);
+ bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
+ pr_info("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
+ __func__, __LINE__, block, index, bbt[index], offset, mark);
+}
+
+static inline u8 get_bbt_mark(u8 *bbt, int block)
+{
+ int offset = GET_POSITION(block);
+ int index = GET_ENTRY(block);
+ u8 value = bbt[index];
+
+ return (value >> offset) & BBT_ENTRY_MASK;
+}
+
+static void mark_nand_bad(struct nandx_info *nand, int block)
+{
+ u8 *buf;
+
+ buf = mem_alloc(1, nand->page_size + nand->oob_size);
+ if (!buf) {
+ pr_info("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
+ __func__, __LINE__, nand->page_size, nand->oob_size);
+ return;
+ }
+ memset(buf, 0, nand->page_size + nand->oob_size);
+ nandx_erase(block * nand->block_size, nand->block_size);
+ nandx_write(buf, buf + nand->page_size, block * nand->block_size,
+ nand->page_size);
+ mem_free(buf);
+}
+
+static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
+{
+ int i;
+
+ for (i = 0; i < pattern->len; i++) {
+ if (buf[i] != pattern->data[i])
+ return false;
+ }
+
+ return true;
+}
+
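+/*
+ * Pick a block for storing a bbt: walk backwards through the last
+ * max_blocks blocks, skipping worn/factory-bad blocks and the block
+ * already used by the mirror table.
+ */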
+static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
+ u64 mirror_addr,
+ int max_blocks)
+{
+ u64 addr, end_addr;
+ u8 mark;
+
+ addr = nand->total_size;
+ end_addr = nand->total_size - nand->block_size * max_blocks;
+
+ while (addr > end_addr) {
+ addr -= nand->block_size;
+ mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
+
+ if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
+ continue;
+ if (addr != mirror_addr)
+ return addr;
+ }
+
+ return BBT_INVALID_ADDR;
+}
+
+static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
+{
+ int ret;
+
+ ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
+ len);
+ if (ret < 0)
+		pr_info("nand_bbt: error reading BBT page, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void create_bbt(struct nandx_info *nand, u8 *bbt)
+{
+ u32 offset = 0, block = 0;
+
+ do {
+ if (nandx_is_bad_block(offset)) {
+ pr_info("Create bbt at bad block:%d\n", block);
+ set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
+ }
+ block++;
+ offset += nand->block_size;
+ } while (offset < nand->total_size);
+}
+
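+/*
+ * Look for an existing bbt: read the first page of each of the last
+ * max_blocks blocks and match it against the descriptor pattern; the
+ * version byte is stored right after the pattern.
+ */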
+static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ int max_blocks)
+{
+ u64 addr, end_addr;
+ u8 *buf;
+ int ret;
+
+ buf = mem_alloc(1, nand->page_size);
+ if (!buf) {
+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, nand->page_size);
+ return -ENOMEM;
+ }
+
+ addr = nand->total_size;
+ end_addr = nand->total_size - max_blocks * nand->block_size;
+ while (addr > end_addr) {
+ addr -= nand->block_size;
+
+ nandx_read(buf, NULL, addr, nand->page_size);
+
+ if (is_bbt_data(buf, &desc->pattern)) {
+ desc->bbt_addr = addr;
+ desc->version = buf[desc->pattern.len];
+ pr_info("BBT is found at addr 0x%llx, version %d\n",
+ desc->bbt_addr, desc->version);
+ ret = 0;
+ break;
+ }
+ ret = -EFAULT;
+ }
+
+ mem_free(buf);
+ return ret;
+}
+
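+/*
+ * On-flash bbt layout: the pattern bytes, one version byte, then the packed
+ * 2-bit-per-block table, padded with 0xFF up to a page size multiple.
+ */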
+static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ u8 *bbt)
+{
+ u32 page_size_mask, total_block;
+ int write_len;
+ u8 *buf;
+ int ret;
+
+ ret = nandx_erase(desc->bbt_addr, nand->block_size);
+ if (ret) {
+ pr_info("erase addr 0x%llx fail !!!, ret %d\n",
+ desc->bbt_addr, ret);
+ return ret;
+ }
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
+ page_size_mask = nand->page_size - 1;
+ write_len = (write_len + page_size_mask) & (~page_size_mask);
+
+ buf = (u8 *)mem_alloc(1, write_len);
+ if (!buf) {
+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, write_len);
+ return -ENOMEM;
+ }
+ memset(buf, 0xFF, write_len);
+
+ memcpy(buf, desc->pattern.data, desc->pattern.len);
+ buf[desc->pattern.len] = desc->version;
+
+ memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
+
+ ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
+
+ if (ret)
+ pr_info("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
+ ret, desc->bbt_addr, write_len);
+ mem_free(buf);
+
+ return ret;
+}
+
+static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
+ struct bbt_desc *mirror, u8 *bbt, int max_blocks)
+{
+ int block;
+ int ret;
+
+ do {
+ if (main->bbt_addr == BBT_INVALID_ADDR) {
+ main->bbt_addr = get_bbt_address(nand, bbt,
+ mirror->bbt_addr, max_blocks);
+ if (main->bbt_addr == BBT_INVALID_ADDR)
+ return -ENOSPC;
+ }
+
+ ret = save_bbt(nand, main, bbt);
+ if (!ret)
+ break;
+
+ block = div_down(main->bbt_addr, nand->block_size);
+ set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
+ main->version++;
+ mark_nand_bad(nand, block);
+ main->bbt_addr = BBT_INVALID_ADDR;
+ } while (1);
+
+ return 0;
+}
+
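+/*
+ * Reserve the last bbt_blocks blocks (where the main/mirror tables may live)
+ * so normal erase/write cannot touch them; unmark_bbt_region() undoes this
+ * before the table is written back, since the reserved state is never
+ * stored in flash.
+ */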
+static void mark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
+{
+ int total_block;
+ int block;
+ u8 mark;
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ block = total_block - bbt_blocks;
+
+ while (bbt_blocks) {
+ mark = get_bbt_mark(bbt, block);
+ if (mark == BBT_BLOCK_GOOD)
+ set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
+ block++;
+ bbt_blocks--;
+ }
+}
+
+static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
+{
+ int total_block;
+ int block;
+ u8 mark;
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ block = total_block - bbt_blocks;
+
+ while (bbt_blocks) {
+ mark = get_bbt_mark(bbt, block);
+ if (mark == BBT_BLOCK_RESERVED)
+ set_bbt_mark(bbt, block, BBT_BLOCK_GOOD);
+ block++;
+ bbt_blocks--;
+ }
+}
+
+static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ u8 *bbt,
+ int max_blocks)
+{
+ int ret = 0, i;
+
+	/* The reserved info is not stored in NAND */
+ unmark_bbt_region(nand, bbt, max_blocks);
+
+ desc[0].version++;
+ for (i = 0; i < 2; i++) {
+ if (i > 0)
+ desc[i].version = desc[i - 1].version;
+
+ ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
+ if (ret)
+ break;
+ }
+ mark_bbt_region(nand, bbt, max_blocks);
+
+ return ret;
+}
+
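+/*
+ * scan_bbt() flow: search for the main and mirror tables, trust the newer
+ * version if both exist, rebuild the table from factory bad-block marks if
+ * neither is found, rewrite any missing copy, then reserve the bbt blocks
+ * in memory.
+ */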
+int scan_bbt(struct nandx_info *nand)
+{
+ struct bbt_manager *manager = &g_bbt_manager;
+ struct bbt_desc *pdesc;
+ int total_block, len, i;
+ int valid_desc = 0;
+ int ret = 0;
+ u8 *bbt;
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ len = GET_BBT_LENGTH(total_block);
+
+ if (!manager->bbt) {
+ manager->bbt = (u8 *)mem_alloc(1, len);
+ if (!manager->bbt) {
+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, len);
+ return -ENOMEM;
+ }
+ }
+ bbt = manager->bbt;
+ memset(bbt, 0xFF, len);
+
+ /* scan bbt */
+ for (i = 0; i < 2; i++) {
+ pdesc = &manager->desc[i];
+ pdesc->bbt_addr = BBT_INVALID_ADDR;
+ pdesc->version = 0;
+ ret = search_bbt(nand, pdesc, manager->max_blocks);
+ if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
+ valid_desc += 1 << i;
+ }
+
+ pdesc = &manager->desc[0];
+ if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
+ valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
+
+ /* read bbt */
+ for (i = 0; i < 2; i++) {
+ if (!(valid_desc & (1 << i)))
+ continue;
+ ret = read_bbt(&pdesc[i], bbt, len);
+ if (ret) {
+			pdesc[i].bbt_addr = BBT_INVALID_ADDR;
+			pdesc[i].version = 0;
+ valid_desc &= ~(1 << i);
+ }
+		/* If both versions are the same, only the first bbt needs to be read */
+ if ((valid_desc == 0x3) &&
+ (pdesc[0].version == pdesc[1].version))
+ break;
+ }
+
+ if (!valid_desc) {
+ create_bbt(nand, bbt);
+ pdesc[0].version = 1;
+ pdesc[1].version = 1;
+ }
+
+ pdesc[0].version = max_t(u8, pdesc[0].version, pdesc[1].version);
+ pdesc[1].version = pdesc[0].version;
+
+ for (i = 0; i < 2; i++) {
+ if (valid_desc & (1 << i))
+ continue;
+
+ ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
+ manager->max_blocks);
+ if (ret) {
+ pr_info("write bbt(%d) fail, ret:%d\n", i, ret);
+ manager->bbt = NULL;
+ return ret;
+ }
+ }
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(nand, manager->bbt, manager->max_blocks);
+
+ for (i = 0; i < total_block; i++) {
+ if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
+ pr_info("Checked WORN bad blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
+ pr_info("Checked Factory bad blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
+ pr_info("Checked Reserved blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
+ pr_info("Checked unknown blk: %d\n", i);
+ }
+
+ return 0;
+}
+
+int bbt_mark_bad(struct nandx_info *nand, off_t offset)
+{
+ struct bbt_manager *manager = &g_bbt_manager;
+ int block = div_down(offset, nand->block_size);
+ int ret = 0;
+
+ mark_nand_bad(nand, block);
+
+#if 0
+ set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ ret = update_bbt(nand, manager->desc, manager->bbt,
+ manager->max_blocks);
+#endif
+ pr_info("block %d, update result %d.\n", block, ret);
+
+ return ret;
+}
+
+int bbt_is_bad(struct nandx_info *nand, off_t offset)
+{
+ int block;
+
+ block = div_down(offset, nand->block_size);
+
+ return get_bbt_mark(g_bbt_manager.bbt, block) != BBT_BLOCK_GOOD;
+}
diff --git a/drivers/mtd/nandx/driver/uboot/driver.c b/drivers/mtd/nandx/driver/uboot/driver.c
new file mode 100644
index 0000000000..7bd3342452
--- /dev/null
+++ b/drivers/mtd/nandx/driver/uboot/driver.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include <common.h>
+#include <linux/io.h>
+#include <dm.h>
+#include <clk.h>
+#include <nand.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include "nandx_core.h"
+#include "nandx_util.h"
+#include "bbt.h"
+
+typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
+
+struct nandx_clk {
+ struct clk *nfi_clk;
+ struct clk *ecc_clk;
+ struct clk *snfi_clk;
+ struct clk *snfi_clk_sel;
+ struct clk *snfi_parent_50m;
+};
+
+struct nandx_nfc {
+ struct nandx_info info;
+ struct nandx_clk clk;
+ struct nfi_resource *res;
+
+ struct nand_chip *nand;
+ spinlock_t lock;
+};
+
+/* Default flash layout for MTK nand controller
+ * 64-byte oob format.
+ */
+static struct nand_ecclayout eccoob = {
+ .eccbytes = 42,
+ .eccpos = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41
+ },
+ .oobavail = 16,
+ .oobfree = {
+ {
+ .offset = 0,
+ .length = 16,
+ },
+ }
+};
+
+static struct nandx_nfc *mtd_to_nfc(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ return (struct nandx_nfc *)nand_get_controller_data(nand);
+}
+
+static int nandx_enable_clk(struct nandx_clk *clk)
+{
+ int ret;
+
+ ret = clk_enable(clk->nfi_clk);
+ if (ret) {
+ pr_info("failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_enable(clk->ecc_clk);
+ if (ret) {
+ pr_info("failed to enable ecc clk\n");
+ goto disable_nfi_clk;
+ }
+
+ ret = clk_enable(clk->snfi_clk);
+ if (ret) {
+ pr_info("failed to enable snfi clk\n");
+ goto disable_ecc_clk;
+ }
+
+ ret = clk_enable(clk->snfi_clk_sel);
+ if (ret) {
+ pr_info("failed to enable snfi clk sel\n");
+ goto disable_snfi_clk;
+ }
+
+ ret = clk_set_parent(clk->snfi_clk_sel, clk->snfi_parent_50m);
+ if (ret) {
+ pr_info("failed to set snfi parent 50MHz\n");
+ goto disable_snfi_clk;
+ }
+
+ return 0;
+
+disable_snfi_clk:
+ clk_disable(clk->snfi_clk);
+disable_ecc_clk:
+ clk_disable(clk->ecc_clk);
+disable_nfi_clk:
+ clk_disable(clk->nfi_clk);
+
+ return ret;
+}
+
+static void nandx_disable_clk(struct nandx_clk *clk)
+{
+ clk_disable(clk->ecc_clk);
+ clk_disable(clk->nfi_clk);
+ clk_disable(clk->snfi_clk);
+}
+
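+/*
+ * OOB layout: each ECC sector owns an fdm_reg_size byte FDM region; only
+ * the bytes after the first fdm_ecc_size bytes of a region are reported as
+ * free OOB, and the ECC parity occupies whatever is left of the OOB after
+ * all FDM regions.
+ */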
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
+ u32 eccsteps;
+
+ eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+
+ if (section >= eccsteps)
+ return -EINVAL;
+
+ oob_region->length = nfc->info.fdm_reg_size - nfc->info.fdm_ecc_size;
+ oob_region->offset = section * nfc->info.fdm_reg_size
+ + nfc->info.fdm_ecc_size;
+
+ return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
+ u32 eccsteps;
+
+ if (section)
+ return -EINVAL;
+
+ eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+ oob_region->offset = nfc->info.fdm_reg_size * eccsteps;
+ oob_region->length = mtd->oobsize - oob_region->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+ .rfree = mtk_nfc_ooblayout_free,
+ .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+struct nfc_compatible {
+ enum mtk_ic_version ic_ver;
+
+ u32 clock_1x;
+ u32 *clock_2x;
+ int clock_2x_num;
+
+ int min_oob_req;
+};
+
+static const struct nfc_compatible nfc_compats_mt7622 = {
+ .ic_ver = NANDX_MT7622,
+ .clock_1x = 26000000,
+ .clock_2x = NULL,
+ .clock_2x_num = 8,
+ .min_oob_req = 1,
+};
+
+static const struct udevice_id ic_of_match[] = {
+ {.compatible = "mediatek,mt7622-nfc", .data = &nfc_compats_mt7622},
+ {}
+};
+
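+/*
+ * nand_operation() splits the request into an unaligned head, a run of
+ * erase-block (or block-oob) sized chunks and a tail, and forwards each
+ * piece to nandx_read()/nandx_write(); data and oob accesses are mutually
+ * exclusive here (oob is only used when no data buffer is passed).
+ */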
+static int nand_operation(struct mtd_info *mtd, loff_t addr, size_t len,
+ size_t *retlen, uint8_t *data, uint8_t *oob, bool read)
+{
+ struct nandx_split64 split = {0};
+ func_nandx_operation operation;
+ u64 block_oobs, val, align;
+ uint8_t *databuf, *oobbuf;
+ struct nandx_nfc *nfc;
+ bool readoob;
+ int ret = 0;
+
+	nfc = mtd_to_nfc(mtd);
+ spin_lock(&nfc->lock);
+
+ databuf = data;
+ oobbuf = oob;
+
+ readoob = data ? false : true;
+ block_oobs = div_up(mtd->erasesize, mtd->writesize) * mtd->oobavail;
+ align = readoob ? block_oobs : mtd->erasesize;
+
+ operation = read ? nandx_read : nandx_write;
+
+ nandx_split(&split, addr, len, val, align);
+
+ if (split.head_len) {
+ ret = operation((u8 *) databuf, oobbuf, addr, split.head_len);
+
+ if (databuf)
+ databuf += split.head_len;
+
+ if (oobbuf)
+ oobbuf += split.head_len;
+
+ addr += split.head_len;
+ *retlen += split.head_len;
+ }
+
+ if (split.body_len) {
+ while (div_up(split.body_len, align)) {
+ ret = operation((u8 *) databuf, oobbuf, addr, align);
+
+ if (databuf) {
+ databuf += mtd->erasesize;
+ split.body_len -= mtd->erasesize;
+ *retlen += mtd->erasesize;
+ }
+
+ if (oobbuf) {
+ oobbuf += block_oobs;
+ split.body_len -= block_oobs;
+ *retlen += block_oobs;
+ }
+
+ addr += mtd->erasesize;
+ }
+
+ }
+
+ if (split.tail_len) {
+ ret = operation((u8 *) databuf, oobbuf, addr, split.tail_len);
+ *retlen += split.tail_len;
+ }
+
+ spin_unlock(&nfc->lock);
+
+ return ret;
+}
+
+static int mtk_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ return nand_operation(mtd, from, len, retlen, buf, NULL, true);
+}
+
+static int mtk_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return nand_operation(mtd, to, len, retlen, (uint8_t *)buf,
+ NULL, false);
+}
+
+int mtk_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ size_t retlen;
+
+ return nand_operation(mtd, from, ops->ooblen, &retlen, NULL,
+ ops->oobbuf, true);
+}
+
+int mtk_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ size_t retlen;
+
+ return nand_operation(mtd, to, ops->ooblen, &retlen, NULL,
+ ops->oobbuf, false);
+}
+
+static int mtk_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct nandx_nfc *nfc;
+ u64 erase_len, erase_addr;
+ u32 block_size;
+ int ret = 0;
+
+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
+ block_size = nfc->info.block_size;
+ erase_len = instr->len;
+ erase_addr = instr->addr;
+ spin_lock(&nfc->lock);
+ instr->state = MTD_ERASING;
+
+ while (erase_len) {
+ if (mtk_nand_is_bad(mtd, erase_addr)) {
+ pr_info("block(0x%llx) is bad, not erase\n",
+ erase_addr);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ } else {
+ ret = nandx_erase(erase_addr, block_size);
+ if (ret < 0) {
+ instr->state = MTD_ERASE_FAILED;
+				pr_info("erase fail at blk %llu, ret:%d\n",
+					erase_addr, ret);
+				instr->state = MTD_ERASE_FAILED;
+				goto erase_exit;
+ }
+ erase_addr += block_size;
+ erase_len -= block_size;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ spin_unlock(&nfc->lock);
+ /* Do mtd call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ return ret;
+}
+
+int mtk_nand_is_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nandx_nfc *nfc;
+ int ret;
+
+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
+ spin_lock(&nfc->lock);
+
+ /*ret = bbt_is_bad(&nfc->info, ofs);*/
+ ret = nandx_is_bad_block(ofs);
+ spin_unlock(&nfc->lock);
+
+ if (ret) {
+		pr_info("nand block 0x%llx is bad, ret %d!\n", ofs, ret);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int mtk_nand_mark_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nandx_nfc *nfc;
+ int ret;
+
+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
+ spin_lock(&nfc->lock);
+ pr_info("%s, %d\n", __func__, __LINE__);
+ ret = bbt_mark_bad(&nfc->info, ofs);
+
+ spin_unlock(&nfc->lock);
+
+ return ret;
+}
+
+void mtk_nand_sync(struct mtd_info *mtd)
+{
+ nandx_sync();
+}
+
+static struct mtd_info *mtd_info_create(struct udevice *pdev,
+ struct nandx_nfc *nfc, struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int ret;
+
+ nand_set_controller_data(nand, nfc);
+
+ nand->flash_node = dev_of_offset(pdev);
+ nand->ecc.layout = &eccoob;
+
+ ret = nandx_ioctl(CORE_CTRL_NAND_INFO, &nfc->info);
+	if (ret) {
+		pr_info("fail to get nand info (%d)!\n", ret);
+		return NULL;
+	}
+
+ mtd->owner = THIS_MODULE;
+
+ mtd->name = "MTK-SNand";
+ mtd->writesize = nfc->info.page_size;
+ mtd->erasesize = nfc->info.block_size;
+ mtd->oobsize = nfc->info.oob_size;
+ mtd->size = nfc->info.total_size;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = mtk_nand_erase;
+ mtd->_read = mtk_nand_read;
+ mtd->_write = mtk_nand_write;
+ mtd->_read_oob = mtk_nand_read_oob;
+ mtd->_write_oob = mtk_nand_write_oob;
+ mtd->_sync = mtk_nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_block_isbad = mtk_nand_is_bad;
+ mtd->_block_markbad = mtk_nand_mark_bad;
+ mtd->writebufsize = mtd->writesize;
+
+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+ mtd->ecc_strength = nfc->info.ecc_strength;
+ mtd->ecc_step_size = nfc->info.sector_size;
+
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
+ return mtd;
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ struct udevice *dev;
+ struct mtd_info *mtd;
+ struct nandx_nfc *nfc;
+ int arg = 1;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_GET_DRIVER(mtk_snand_drv),
+ &dev);
+ if (ret) {
+		pr_err("Failed to get mtk_snand_drv. (error %d)\n", ret);
+ return ret;
+ }
+
+ nfc = dev_get_priv(dev);
+
+ ret = nandx_enable_clk(&nfc->clk);
+ if (ret) {
+ pr_err("failed to enable nfi clk (error %d)\n", ret);
+ return ret;
+ }
+
+ ret = nandx_init(nfc->res);
+ if (ret) {
+ pr_err("nandx init error (%d)!\n", ret);
+ goto disable_clk;
+ }
+
+ arg = 1;
+ nandx_ioctl(NFI_CTRL_DMA, &arg);
+ nandx_ioctl(NFI_CTRL_ECC, &arg);
+
+#ifdef NANDX_UNIT_TEST
+ nandx_unit_test(0x780000, 0x800);
+#endif
+
+ mtd = mtd_info_create(dev, nfc, nand);
+ if (!mtd) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ spin_lock_init(&nfc->lock);
+#if 0
+ ret = scan_bbt(&nfc->info);
+ if (ret) {
+ pr_info("bbt init error (%d)!\n", ret);
+ goto disable_clk;
+ }
+#endif
+ return ret;
+
+disable_clk:
+ nandx_disable_clk(&nfc->clk);
+
+ return ret;
+}
+
+static int mtk_snand_ofdata_to_platdata(struct udevice *dev)
+{
+ struct nandx_nfc *nfc = dev_get_priv(dev);
+ struct nfc_compatible *compat;
+ struct nfi_resource *res;
+
+ int ret = 0;
+
+ res = mem_alloc(1, sizeof(struct nfi_resource));
+ if (!res)
+ return -ENOMEM;
+
+ nfc->res = res;
+
+ res->nfi_regs = (void *)dev_read_addr_index(dev, 0);
+ res->ecc_regs = (void *)dev_read_addr_index(dev, 1);
+	pr_debug("mtk snand nfi_regs:%p ecc_regs:%p\n",
+		 res->nfi_regs, res->ecc_regs);
+
+ compat = (struct nfc_compatible *)dev_get_driver_data(dev);
+
+ res->ic_ver = (enum mtk_ic_version)(compat->ic_ver);
+ res->clock_1x = compat->clock_1x;
+ res->clock_2x = compat->clock_2x;
+ res->clock_2x_num = compat->clock_2x_num;
+
+ memset(&nfc->clk, 0, sizeof(struct nandx_clk));
+ nfc->clk.nfi_clk =
+ kmalloc(sizeof(*nfc->clk.nfi_clk), GFP_KERNEL);
+ nfc->clk.ecc_clk =
+ kmalloc(sizeof(*nfc->clk.ecc_clk), GFP_KERNEL);
+	nfc->clk.snfi_clk =
+ kmalloc(sizeof(*nfc->clk.snfi_clk), GFP_KERNEL);
+ nfc->clk.snfi_clk_sel =
+ kmalloc(sizeof(*nfc->clk.snfi_clk_sel), GFP_KERNEL);
+ nfc->clk.snfi_parent_50m =
+ kmalloc(sizeof(*nfc->clk.snfi_parent_50m), GFP_KERNEL);
+
+ if (!nfc->clk.nfi_clk || !nfc->clk.ecc_clk || !nfc->clk.snfi_clk ||
+ !nfc->clk.snfi_clk_sel || !nfc->clk.snfi_parent_50m) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+	ret = clk_get_by_name(dev, "nfi_clk", nfc->clk.nfi_clk);
+	if (ret)
+		goto err;
+
+	ret = clk_get_by_name(dev, "ecc_clk", nfc->clk.ecc_clk);
+	if (ret)
+		goto err;
+
+	ret = clk_get_by_name(dev, "snfi_clk", nfc->clk.snfi_clk);
+	if (ret)
+		goto err;
+
+	ret = clk_get_by_name(dev, "spinfi_sel", nfc->clk.snfi_clk_sel);
+	if (ret)
+		goto err;
+
+	ret = clk_get_by_name(dev, "spinfi_parent_50m", nfc->clk.snfi_parent_50m);
+	if (ret)
+		pr_info("spinfi parent 50MHz is not configured\n");
+
+ return 0;
+err:
+ if (nfc->clk.nfi_clk)
+ kfree(nfc->clk.nfi_clk);
+ if (nfc->clk.snfi_clk)
+ kfree(nfc->clk.snfi_clk);
+ if (nfc->clk.ecc_clk)
+ kfree(nfc->clk.ecc_clk);
+ if (nfc->clk.snfi_clk_sel)
+ kfree(nfc->clk.snfi_clk_sel);
+ if (nfc->clk.snfi_parent_50m)
+ kfree(nfc->clk.snfi_parent_50m);
+
+ return ret;
+}
+
+U_BOOT_DRIVER(mtk_snand_drv) = {
+ .name = "mtk_snand",
+ .id = UCLASS_MTD,
+ .of_match = ic_of_match,
+ .ofdata_to_platdata = mtk_snand_ofdata_to_platdata,
+ .priv_auto_alloc_size = sizeof(struct nandx_nfc),
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
+MODULE_AUTHOR("MediaTek");
diff --git a/drivers/mtd/nandx/include/Nandx.mk b/drivers/mtd/nandx/include/Nandx.mk
new file mode 100644
index 0000000000..667402790e
--- /dev/null
+++ b/drivers/mtd/nandx/include/Nandx.mk
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-header-y += internal/nandx_core.h
+nandx-header-y += internal/nandx_errno.h
+nandx-header-y += internal/nandx_util.h
+nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
+nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
+nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
+nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
+nandx-header-$(NANDX_UBOOT_SUPPORT) += uboot/nandx_os.h
diff --git a/drivers/mtd/nandx/include/internal/bbt.h b/drivers/mtd/nandx/include/internal/bbt.h
new file mode 100644
index 0000000000..4676def1f5
--- /dev/null
+++ b/drivers/mtd/nandx/include/internal/bbt.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __BBT_H__
+#define __BBT_H__
+
+#define BBT_BLOCK_GOOD 0x03
+#define BBT_BLOCK_WORN 0x02
+#define BBT_BLOCK_RESERVED 0x01
+#define BBT_BLOCK_FACTORY_BAD 0x00
+
+#define BBT_INVALID_ADDR 0
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+#define NAND_BBT_USE_FLASH 0x00020000
+#define NAND_BBT_NO_OOB 0x00040000
+
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE 0x00010000
+
+#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
+
+struct bbt_pattern {
+ u8 *data;
+ int len;
+};
+
+struct bbt_desc {
+ struct bbt_pattern pattern;
+ u8 version;
+ u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
+};
+
+struct bbt_manager {
+ /* main bbt descriptor and mirror descriptor */
+ struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
+ int max_blocks;
+ u8 *bbt;
+};
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+#define GET_BBT_LENGTH(blocks) (blocks >> 2)
+#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
+#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
+#define GET_MARK_VALUE(block, mark) \
+ (((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
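+
+/*
+ * Two bits per block: GET_BBT_LENGTH() returns the table size in bytes,
+ * e.g. a 128MiB flash with 128KiB blocks has 1024 blocks and needs a
+ * 256-byte table.
+ */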
+
+int scan_bbt(struct nandx_info *nand);
+
+int bbt_mark_bad(struct nandx_info *nand, off_t offset);
+
+int bbt_is_bad(struct nandx_info *nand, off_t offset);
+
+#endif /*__BBT_H__*/
diff --git a/drivers/mtd/nandx/include/internal/nandx_core.h b/drivers/mtd/nandx/include/internal/nandx_core.h
new file mode 100644
index 0000000000..09aff72224
--- /dev/null
+++ b/drivers/mtd/nandx/include/internal/nandx_core.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_CORE_H__
+#define __NANDX_CORE_H__
+
+/**
+ * mtk_ic_version - indicates the specific IC; the IP needs this to load the related settings
+ */
+enum mtk_ic_version {
+ NANDX_MT7622,
+};
+
+/**
+ * nandx_ctrl_cmd - operations supported by nandx
+ *
+ * @NFI_CTRL_DMA: enable/disable DMA
+ * @NFI_CTRL_NFI_MODE: custom/read/program/erase...
+ * @NFI_CTRL_ECC: enable/disable ECC
+ * @NFI_CTRL_ECC_MODE: nfi/dma/pio
+ * @CHIP_CTRL_DRIVE_STRENGTH: enum chip_ctrl_drive_strength
+ */
+enum nandx_ctrl_cmd {
+ CORE_CTRL_NAND_INFO,
+
+ NFI_CTRL_DMA,
+ NFI_CTRL_NFI_MODE,
+ NFI_CTRL_AUTOFORMAT,
+ NFI_CTRL_NFI_IRQ,
+ NFI_CTRL_PAGE_IRQ,
+ NFI_CTRL_RANDOMIZE,
+ NFI_CTRL_BAD_MARK_SWAP,
+
+ NFI_CTRL_ECC,
+ NFI_CTRL_ECC_MODE,
+ NFI_CTRL_ECC_CLOCK,
+ NFI_CTRL_ECC_IRQ,
+ NFI_CTRL_ECC_PAGE_IRQ,
+ NFI_CTRL_ECC_DECODE_MODE,
+
+ SNFI_CTRL_OP_MODE,
+ SNFI_CTRL_RX_MODE,
+ SNFI_CTRL_TX_MODE,
+ SNFI_CTRL_DELAY_MODE,
+
+ CHIP_CTRL_OPS_CACHE,
+ CHIP_CTRL_OPS_MULTI,
+ CHIP_CTRL_PSLC_MODE,
+ CHIP_CTRL_DRIVE_STRENGTH,
+ CHIP_CTRL_DDR_MODE,
+ CHIP_CTRL_ONDIE_ECC,
+ CHIP_CTRL_TIMING_MODE
+};
+
+enum snfi_ctrl_op_mode {
+ SNFI_CUSTOM_MODE,
+ SNFI_AUTO_MODE,
+ SNFI_MAC_MODE
+};
+
+enum snfi_ctrl_rx_mode {
+ SNFI_RX_111,
+ SNFI_RX_112,
+ SNFI_RX_114,
+ SNFI_RX_122,
+ SNFI_RX_144
+};
+
+enum snfi_ctrl_tx_mode {
+ SNFI_TX_111,
+ SNFI_TX_114,
+};
+
+enum chip_ctrl_drive_strength {
+ CHIP_DRIVE_NORMAL,
+ CHIP_DRIVE_HIGH,
+ CHIP_DRIVE_MIDDLE,
+ CHIP_DRIVE_LOW
+};
+
+enum chip_ctrl_timing_mode {
+ CHIP_TIMING_MODE0,
+ CHIP_TIMING_MODE1,
+ CHIP_TIMING_MODE2,
+ CHIP_TIMING_MODE3,
+ CHIP_TIMING_MODE4,
+ CHIP_TIMING_MODE5,
+};
+
+/**
+ * nandx_info - basic information
+ */
+struct nandx_info {
+ u32 max_io_count;
+ u32 min_write_pages;
+ u32 plane_num;
+ u32 oob_size;
+ u32 page_parity_size;
+ u32 page_size;
+ u32 block_size;
+ u64 total_size;
+ u32 fdm_reg_size;
+ u32 fdm_ecc_size;
+ u32 ecc_strength;
+ u32 sector_size;
+};
+
+/**
+ * nfi_resource - the resource needed by nfi & ecc to do initialization
+ */
+struct nfi_resource {
+ int ic_ver;
+ void *dev;
+
+ void *ecc_regs;
+ int ecc_irq_id;
+
+ void *nfi_regs;
+ int nfi_irq_id;
+
+ u32 clock_1x;
+ u32 *clock_2x;
+ int clock_2x_num;
+
+ int min_oob_req;
+};
+
+/**
+ * nandx_init - init all related modules below
+ *
+ * @res: basic resource of the project
+ *
+ * return 0 if init success, otherwise return negative error code
+ */
+int nandx_init(struct nfi_resource *res);
+
+/**
+ * nandx_exit - release resource those that obtained in init flow
+ */
+void nandx_exit(void);
+
+/**
+ * nandx_read - read data from nand
+ * this function reads data and the related oob from the given address
+ * for multi_ops, queue one operation per call and call nandx_sync at the end
+ * partial page read is not supported in multi mode
+ * partial oob read is not supported
+ *
+ * @data: buf to receive data from nand
+ * @oob: buf to receive the oob data related to the data page,
+ * the length of @oob should be oob size aligned; partial oob read is not
+ * supported
+ * @offset: offset address on the whole flash
+ * @len: the length of @data that needs to be read
+ *
+ * return 0 on success, otherwise return a negative error code
+ */
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_write - write data to nand
+ * this function writes data and the related oob to the given address
+ * for multi_ops, queue one operation per call and call nandx_sync at the end
+ *
+ * @data: source data to be written to nand,
+ * for multi operation, the length of @data should be page size aligned
+ * @oob: source oob, related to the data page, to be written to nand,
+ * the length of @oob should be oob size aligned
+ * @offset: offset address on the whole flash, the value should be the start
+ * address of a page
+ * @len: the length of @data that needs to be written,
+ * for multi operation, the length should be page size aligned
+ *
+ * return 0 on success, otherwise return a negative error code
+ * a return value > 0 indicates how many pages still need to be written and
+ * that the data has not yet reached nand; call nandx_sync once the queued
+ * pages are aligned to nandx_info.min_write_pages
+ */
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_erase - erase an area of nand
+ * for multi_ops, queue one operation per call and call nandx_sync at the end
+ *
+ * @offset: offset address on the flash
+ * @len: erase length, which should be block size aligned
+ *
+ * return 0 on success, otherwise return a negative error code
+ */
+int nandx_erase(u64 offset, size_t len);
+
+/**
+ * nandx_sync - sync all operations to nand
+ * for multi_ops, this function is called after the last operation
+ * when writing data, if the number of pages is not aligned
+ * to nandx_info.min_write_pages, this interface can be called to force the
+ * write; 0xff is padded into the blank pages.
+ */
+int nandx_sync(void);
+
+/**
+ * nandx_is_bad_block - check if the block is bad
+ * only checks the bad block flag marked by the flash vendor
+ *
+ * @offset: offset address on the whole flash
+ *
+ * return true if the block is bad, otherwise return false
+ */
+bool nandx_is_bad_block(u64 offset);
+
+/**
+ * nandx_ioctl - set/get property of nand chip
+ *
+ * @cmd: command defined in enum nandx_ctrl_cmd
+ * @arg: operation parameter
+ *
+ * return 0 on success, otherwise return a negative error code
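+ *
+ * example, as in the u-boot driver init:
+ *   int en = 1;
+ *   nandx_ioctl(NFI_CTRL_ECC, &en);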
+ */
+int nandx_ioctl(int cmd, void *arg);
+
+/**
+ * nandx_suspend - suspend nand, and store some data
+ *
+ * return 0 on success, otherwise return a negative error code
+ */
+int nandx_suspend(void);
+
+/**
+ * nandx_resume - resume nand and restore the saved data
+ *
+ * return 0 on success, otherwise return a negative error code
+ */
+int nandx_resume(void);
+
+#ifdef NANDX_UNIT_TEST
+/**
+ * nandx_unit_test - unit test
+ *
+ * @offset: offset address on the whole flash
+ * @len: should not be larger than a block size; only one block is tested per call
+ *
+ * return 0 if test success, otherwise return negative error code
+ */
+int nandx_unit_test(u64 offset, size_t len);
+#endif
+
+#endif /* __NANDX_CORE_H__ */
diff --git a/drivers/mtd/nandx/include/internal/nandx_errno.h b/drivers/mtd/nandx/include/internal/nandx_errno.h
new file mode 100644
index 0000000000..51fb299c03
--- /dev/null
+++ b/drivers/mtd/nandx/include/internal/nandx_errno.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_ERRNO_H__
+#define __NANDX_ERRNO_H__
+
+#ifndef EIO
+#define EIO 5 /* I/O error */
+#define ENOMEM 12 /* Out of memory */
+#define EFAULT 14 /* Bad address */
+#define EBUSY 16 /* Device or resource busy */
+#define ENODEV 19 /* No such device */
+#define EINVAL 22 /* Invalid argument */
+#define ENOSPC 28 /* No space left on device */
+/* Operation not supported on transport endpoint */
+#define EOPNOTSUPP 95
+#define ETIMEDOUT 110 /* Connection timed out */
+#endif
+
+#define ENANDFLIPS 1024 /* Too many bitflips, uncorrected */
+#define ENANDREAD 1025 /* Read fail, can't correct */
+#define ENANDWRITE 1026 /* Write fail */
+#define ENANDERASE 1027 /* Erase fail */
+#define ENANDBAD 1028 /* Bad block */
+#define ENANDWP 1029
+
+#define IS_NAND_ERR(err) ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS)
+
+#ifndef MAX_ERRNO
+#define MAX_ERRNO 4096
+#define ERR_PTR(errno) ((void *)((long)errno))
+#define PTR_ERR(ptr) ((long)(ptr))
+#define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)-MAX_ERRNO)
+#endif
+
+#endif /* __NANDX_ERRNO_H__ */
diff --git a/drivers/mtd/nandx/include/internal/nandx_util.h b/drivers/mtd/nandx/include/internal/nandx_util.h
new file mode 100644
index 0000000000..1990b000ee
--- /dev/null
+++ b/drivers/mtd/nandx/include/internal/nandx_util.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_UTIL_H__
+#define __NANDX_UTIL_H__
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+enum nand_irq_return {
+ NAND_IRQ_NONE,
+ NAND_IRQ_HANDLED,
+};
+
+enum nand_dma_operation {
+ NDMA_FROM_DEV,
+ NDMA_TO_DEV,
+};
+
+
+/*
+ * Compatible function
+ * used for preloader/lk/kernel environment
+ */
+#include "nandx_os.h"
+#include "nandx_errno.h"
+
+#ifndef BIT
+#define BIT(a) (1 << (a))
+#endif
+
+#ifndef min_t
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+#endif
+
+#ifndef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
+#endif
+
+#ifndef __weak
+#define __weak __attribute__((__weak__))
+#endif
+
+#ifndef __packed
+#define __packed __attribute__((__packed__))
+#endif
+
+#ifndef KB
+#define KB(x) ((x) << 10)
+#define MB(x) (KB(x) << 10)
+#define GB(x) (MB(x) << 10)
+#endif
+
+#ifndef offsetof
+#define offsetof(type, member) ((size_t)&((type *)0)->member)
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+static inline u32 nandx_popcount(u32 x)
+{
+ x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x & 0x0F0F0F0F) + ((x >> 4) & 0x0F0F0F0F);
+ x = (x & 0x00FF00FF) + ((x >> 8) & 0x00FF00FF);
+ x = (x & 0x0000FFFF) + ((x >> 16) & 0x0000FFFF);
+
+ return x;
+}
+
+#ifndef zero_popcount
+#define zero_popcount(x) (32 - nandx_popcount(x))
+#endif
+
+#ifndef do_div
+#define do_div(n, base) \
+ ({ \
+ u32 __base = (base); \
+ u32 __rem; \
+ __rem = ((u64)(n)) % __base; \
+ (n) = ((u64)(n)) / __base; \
+ __rem; \
+ })
+#endif
+
+#define div_up(x, y) \
+ ({ \
+ u64 __temp = ((x) + (y) - 1); \
+ do_div(__temp, (y)); \
+ __temp; \
+ })
+
+#define div_down(x, y) \
+ ({ \
+ u64 __temp = (x); \
+ do_div(__temp, (y)); \
+ __temp; \
+ })
+
+#define div_round_up(x, y) (div_up(x, y) * (y))
+#define div_round_down(x, y) (div_down(x, y) * (y))
+
+#define reminder(x, y) \
+ ({ \
+ u64 __temp = (x); \
+ do_div(__temp, (y)); \
+ })
+
+#ifndef round_up
+#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)
+#define round_down(x, y) ((x) & ~((y) - 1))
+#endif
+
+#ifndef readx_poll_timeout_atomic
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+ ({ \
+ u64 end = get_current_time_us() + timeout_us; \
+ for (;;) { \
+ u64 now = get_current_time_us(); \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (now > end) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+#endif
+
+struct nandx_split64 {
+ u64 head;
+ size_t head_len;
+ u64 body;
+ size_t body_len;
+ u64 tail;
+ size_t tail_len;
+};
+
+struct nandx_split32 {
+ u32 head;
+ u32 head_len;
+ u32 body;
+ u32 body_len;
+ u32 tail;
+ u32 tail_len;
+};
+
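+/*
+ * Split an (offset, len) range into an unaligned head, an align-sized body
+ * and a tail. For example, offset 0x300, len 0x1000, align 0x800 gives
+ * head_len 0x500, body_len 0x800 and tail_len 0x300; val is scratch space
+ * provided by the caller.
+ */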
+#define nandx_split(split, offset, len, val, align) \
+ do { \
+ (split)->head = (offset); \
+ (val) = div_round_down((offset), (align)); \
+ (val) = (align) - ((offset) - (val)); \
+ if ((val) == (align)) \
+ (split)->head_len = 0; \
+ else if ((val) > (len)) \
+ (split)->head_len = len; \
+ else \
+ (split)->head_len = val; \
+ (split)->body = (offset) + (split)->head_len; \
+ (split)->body_len = div_round_down((len) - \
+ (split)->head_len,\
+ (align)); \
+ (split)->tail = (split)->body + (split)->body_len; \
+ (split)->tail_len = (len) - (split)->head_len - \
+ (split)->body_len; \
+ } while (0)
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ ({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
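+/* Convert to big-endian; the temp/p_temp probe detects the CPU byte order at run time. */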
+static inline u32 nandx_cpu_to_be32(u32 val)
+{
+ u32 temp = 1;
+ u8 *p_temp = (u8 *)&temp;
+
+ if (*p_temp)
+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
+ ((val >> 8) & 0xff00) | ((val >> 24) & 0xff);
+
+ return val;
+}
+
+static inline void nandx_set_bits32(unsigned long addr, u32 mask,
+ u32 val)
+{
+ u32 temp = readl((void *)addr);
+
+ temp &= ~(mask);
+ temp |= val;
+ writel(temp, (void *)addr);
+}
+
+#endif /* __NANDX_UTIL_H__ */
diff --git a/drivers/mtd/nandx/include/uboot/nandx_os.h b/drivers/mtd/nandx/include/uboot/nandx_os.h
new file mode 100644
index 0000000000..8ea53378bf
--- /dev/null
+++ b/drivers/mtd/nandx/include/uboot/nandx_os.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_OS_H__
+#define __NANDX_OS_H__
+
+#include <common.h>
+#include <dm.h>
+#include <clk.h>
+#include <asm/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/compiler-gcc.h>
+
+#define NANDX_BULK_IO_USE_DRAM 0
+
+#define nandx_event_create() NULL
+#define nandx_event_destroy(event)
+#define nandx_event_complete(event)
+#define nandx_event_init(event)
+#define nandx_event_wait_complete(event, timeout) true
+
+#define nandx_irq_register(dev, irq, irq_handler, name, data) (0)
+
+static inline void *mem_alloc(u32 count, u32 size)
+{
+ return kmalloc(count * size, GFP_KERNEL | __GFP_ZERO);
+}
+
+static inline void mem_free(void *mem)
+{
+ kfree(mem);
+}
+
+static inline u64 get_current_time_us(void)
+{
+ return timer_get_us();
+}
+
+static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
+ enum nand_dma_operation op)
+{
+ unsigned long addr = (unsigned long)buf;
+ u64 size;
+
+ size = ALIGN(len, ARCH_DMA_MINALIGN);
+
+ if (op == NDMA_FROM_DEV)
+ invalidate_dcache_range(addr, addr + size);
+ else
+ flush_dcache_range(addr, addr + size);
+
+ return addr;
+}
+
+static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
+ u64 len, enum nand_dma_operation op)
+{
+ u64 size;
+
+ size = ALIGN(len, ARCH_DMA_MINALIGN);
+
+	if (op != NDMA_FROM_DEV)
+		invalidate_dcache_range((unsigned long)addr,
+					(unsigned long)addr + size);
+	else
+		flush_dcache_range((unsigned long)addr,
+				   (unsigned long)addr + size);
+}
+
+#endif /* __NANDX_OS_H__ */
diff --git a/include/configs/mt7622.h b/include/configs/mt7622.h
index dfd506ed24..6d0c956484 100644
--- a/include/configs/mt7622.h
+++ b/include/configs/mt7622.h
@@ -11,6 +11,31 @@
#include <linux/sizes.h>
+/* SPI Nand */
+#if defined(CONFIG_MTD_RAW_NAND)
+#define CONFIG_SYS_MAX_NAND_DEVICE 1
+#define CONFIG_SYS_NAND_BASE 0x1100d000
+
+#define ENV_BOOT_READ_IMAGE \
+ "boot_rd_img=" \
+ "nand read 0x4007ff28 0x380000 0x1400000" \
+ ";iminfo 0x4007ff28 \0"
+
+#define ENV_BOOT_WRITE_IMAGE \
+ "boot_wr_img=" \
+ "nand write 0x4007ff28 0x380000 0x1400000" \
+ ";iminfo 0x4007ff28 \0"
+
+#define ENV_BOOT_CMD \
+ "mtk_boot=run boot_rd_img;bootm;\0"
+
+#define CONFIG_EXTRA_ENV_SETTINGS \
+ ENV_BOOT_READ_IMAGE \
+ ENV_BOOT_CMD \
+ "bootcmd=run mtk_boot;\0"
+
+#endif
+
#define CONFIG_SYS_MAXARGS 8
#define CONFIG_SYS_BOOTM_LEN SZ_64M
#define CONFIG_SYS_CBSIZE SZ_1K
--
2.17.1