blob: 792a4a1d7ee90579ad4219f6f88224434de47242 [file] [log] [blame]
Angel Pons182dbde2020-04-02 23:49:05 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Elyes HAOUAS400f9ca2019-06-23 07:01:22 +02002
Kyösti Mälkki13f66502019-03-03 08:01:05 +02003#include <device/mmio.h>
Kyösti Mälkki9f0a2be2014-06-30 07:34:36 +03004#include <console/console.h>
Furquan Shaikhc28984d2016-11-20 21:04:00 -08005#include <spi_flash.h>
Zheng Bao600784e2013-02-07 17:30:23 +08006#include <spi-generic.h>
zbao01bd79f2012-03-23 11:36:08 +08007#include <device/device.h>
Zheng Bao7bcffa52012-11-28 11:36:52 +08008#include <device/pci.h>
9#include <device/pci_ops.h>
Elyes HAOUAS400f9ca2019-06-23 07:01:22 +020010#include <types.h>
zbao01bd79f2012-03-23 11:36:08 +080011
Martin Roth3316cf22012-12-05 16:22:54 -070012#include "SBPLATFORM.h"
13#include <vendorcode/amd/cimx/sb800/ECfan.h>
14
/* Depth of the SB800 SPI controller's transmit FIFO, in bytes. */
#define AMD_SB_SPI_TX_LEN 8

/* MMIO base of the SPI controller; initialized by spi_init(). */
static uintptr_t spibar;
zbao01bd79f2012-03-23 11:36:08 +080018
Zheng Bao7bcffa52012-11-28 11:36:52 +080019static void reset_internal_fifo_pointer(void)
zbao01bd79f2012-03-23 11:36:08 +080020{
zbao01bd79f2012-03-23 11:36:08 +080021 do {
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080022 write8((void *)(spibar + 2),
23 read8((void *)(spibar + 2)) | 0x10);
24 } while (read8((void *)(spibar + 0xD)) & 0x7);
zbao01bd79f2012-03-23 11:36:08 +080025}
26
Zheng Bao7bcffa52012-11-28 11:36:52 +080027static void execute_command(void)
zbao01bd79f2012-03-23 11:36:08 +080028{
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080029 write8((void *)(spibar + 2), read8((void *)(spibar + 2)) | 1);
Zheng Bao7bcffa52012-11-28 11:36:52 +080030
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080031 while ((read8((void *)(spibar + 2)) & 1) &&
Elyes Haouas616be8c2022-07-16 09:50:29 +020032 (read8((void *)(spibar + 3)) & 0x80));
zbao01bd79f2012-03-23 11:36:08 +080033}
34
Elyes HAOUASeb9e63f2022-01-25 11:51:43 +010035void spi_init(void)
zbao01bd79f2012-03-23 11:36:08 +080036{
Elyes HAOUAS1a4abb72018-05-19 16:49:20 +020037 struct device *dev;
Zheng Bao7bcffa52012-11-28 11:36:52 +080038
Kyösti Mälkkic70eed12018-05-22 02:18:00 +030039 dev = pcidev_on_root(0x14, 3);
Zheng Bao7bcffa52012-11-28 11:36:52 +080040 spibar = pci_read_config32(dev, 0xA0) & ~0x1F;
zbao01bd79f2012-03-23 11:36:08 +080041}
42
Furquan Shaikh94f86992016-12-01 07:12:32 -080043static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
Furquan Shaikh0dba0252016-11-30 04:34:22 -080044 size_t bytesout, void *din, size_t bytesin)
zbao01bd79f2012-03-23 11:36:08 +080045{
Paul Menzeldee0f882018-11-10 11:24:38 +010046 /* First byte is cmd which can not be sent through FIFO. */
Zheng Bao7bcffa52012-11-28 11:36:52 +080047 u8 cmd = *(u8 *)dout++;
48 u8 readoffby1;
49 u8 readwrite;
Furquan Shaikh0dba0252016-11-30 04:34:22 -080050 size_t count;
zbao01bd79f2012-03-23 11:36:08 +080051
Gabe Black93d9f922014-03-27 21:52:43 -070052 bytesout--;
zbao01bd79f2012-03-23 11:36:08 +080053
Kyösti Mälkki9f0a2be2014-06-30 07:34:36 +030054 /*
55 * Check if this is a write command attempting to transfer more bytes
56 * than the controller can handle. Iterations for writes are not
57 * supported here because each SPI write command needs to be preceded
58 * and followed by other SPI commands, and this sequence is controlled
59 * by the SPI chip driver.
60 */
61 if (bytesout > AMD_SB_SPI_TX_LEN) {
62 printk(BIOS_DEBUG, "FCH SPI: Too much to write. Does your SPI chip driver use"
63 " spi_crop_chunk()?\n");
64 return -1;
65 }
66
Zheng Bao7bcffa52012-11-28 11:36:52 +080067 readoffby1 = bytesout ? 0 : 1;
zbao01bd79f2012-03-23 11:36:08 +080068
Zheng Bao7bcffa52012-11-28 11:36:52 +080069 readwrite = (bytesin + readoffby1) << 4 | bytesout;
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080070 write8((void *)(spibar + 1), readwrite);
71 write8((void *)(spibar + 0), cmd);
zbao01bd79f2012-03-23 11:36:08 +080072
Zheng Bao7bcffa52012-11-28 11:36:52 +080073 reset_internal_fifo_pointer();
74 for (count = 0; count < bytesout; count++, dout++) {
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080075 write8((void *)(spibar + 0x0C), *(u8 *)dout);
zbao01bd79f2012-03-23 11:36:08 +080076 }
Zheng Bao7bcffa52012-11-28 11:36:52 +080077
78 reset_internal_fifo_pointer();
79 execute_command();
80
81 reset_internal_fifo_pointer();
82 /* Skip the bytes we sent. */
83 for (count = 0; count < bytesout; count++) {
Paul Menzel9eb4d0a2018-11-10 11:27:02 +010084 read8((void *)(spibar + 0x0C));
Zheng Bao7bcffa52012-11-28 11:36:52 +080085 }
86
87 reset_internal_fifo_pointer();
88 for (count = 0; count < bytesin; count++, din++) {
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -080089 *(u8 *)din = read8((void *)(spibar + 0x0C));
Zheng Bao7bcffa52012-11-28 11:36:52 +080090 }
91
92 return 0;
93}
Martin Roth3316cf22012-12-05 16:22:54 -070094
Martin Roth3316cf22012-12-05 16:22:54 -070095static void ImcSleep(void)
96{
97 u8 cmd_val = 0x96; /* Kick off IMC Mailbox command 96 */
98 u8 reg0_val = 0; /* clear response register */
99 u8 reg1_val = 0xB4; /* request ownership flag */
100
Elyes Haouas616be8c2022-07-16 09:50:29 +0200101 WriteECmsg(MSG_REG0, AccWidthUint8, &reg0_val);
102 WriteECmsg(MSG_REG1, AccWidthUint8, &reg1_val);
103 WriteECmsg(MSG_SYS_TO_IMC, AccWidthUint8, &cmd_val);
Martin Roth3316cf22012-12-05 16:22:54 -0700104
105 WaitForEcLDN9MailboxCmdAck();
106}
107
Martin Roth3316cf22012-12-05 16:22:54 -0700108static void ImcWakeup(void)
109{
110 u8 cmd_val = 0x96; /* Kick off IMC Mailbox command 96 */
Idwer Volleringd26da9c2013-12-22 21:38:18 +0000111 u8 reg0_val = 0; /* clear response register */
Martin Roth3316cf22012-12-05 16:22:54 -0700112 u8 reg1_val = 0xB5; /* release ownership flag */
113
Elyes Haouas616be8c2022-07-16 09:50:29 +0200114 WriteECmsg(MSG_REG0, AccWidthUint8, &reg0_val);
115 WriteECmsg(MSG_REG1, AccWidthUint8, &reg1_val);
116 WriteECmsg(MSG_SYS_TO_IMC, AccWidthUint8, &cmd_val);
Martin Roth3316cf22012-12-05 16:22:54 -0700117
118 WaitForEcLDN9MailboxCmdAck();
119}
Martin Roth3316cf22012-12-05 16:22:54 -0700120
Furquan Shaikhc28984d2016-11-20 21:04:00 -0800121int chipset_volatile_group_begin(const struct spi_flash *flash)
122{
Julius Wernercd49cce2019-03-05 16:53:33 -0800123 if (!CONFIG(SB800_IMC_FWM))
Furquan Shaikhc28984d2016-11-20 21:04:00 -0800124 return 0;
125
126 ImcSleep();
127 return 0;
128}
129
130int chipset_volatile_group_end(const struct spi_flash *flash)
131{
Julius Wernercd49cce2019-03-05 16:53:33 -0800132 if (!CONFIG(SB800_IMC_FWM))
Furquan Shaikhc28984d2016-11-20 21:04:00 -0800133 return 0;
134
135 ImcWakeup();
136 return 0;
zbao01bd79f2012-03-23 11:36:08 +0800137}
138
/*
 * Run a vector of SPI operations by handing each one to spi_ctrlr_xfer()
 * via the generic spi_flash_vector_helper().
 */
static int xfer_vectors(const struct spi_slave *slave,
			struct spi_op vectors[], size_t count)
{
	return spi_flash_vector_helper(slave, vectors, count, spi_ctrlr_xfer);
}
144
/*
 * SB800 SPI controller operations.  Transfers are capped at the 8-byte
 * FIFO size, and SPI_CNTRLR_DEDUCT_CMD_LEN makes the core deduct the
 * command length from each chunk so payload + opcode fit the FIFO.
 */
static const struct spi_ctrlr spi_ctrlr = {
	.xfer_vector = xfer_vectors,
	.max_xfer_size = AMD_SB_SPI_TX_LEN,
	.flags = SPI_CNTRLR_DEDUCT_CMD_LEN,
};
150
/* Single SPI bus (bus 0) served by the FCH controller defined above. */
const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
	{
		.ctrlr = &spi_ctrlr,
		.bus_start = 0,
		.bus_end = 0,
	},
};
158
/* Number of entries in spi_ctrlr_bus_map, consumed by the SPI core. */
const size_t spi_ctrlr_bus_map_count = ARRAY_SIZE(spi_ctrlr_bus_map);