diff -urN linux-2.4.23.org/Documentation/Configure.help linux-2.4.23/Documentation/Configure.help --- linux-2.4.23.org/Documentation/Configure.help 2003-12-06 17:45:52.831977967 +0100 +++ linux-2.4.23/Documentation/Configure.help 2003-12-06 17:46:42.774500910 +0100 @@ -4226,6 +4226,13 @@ If unsure, say N. +IPMI sockets +CONFIG_IPMI_SOCKET + If you say Y here, you will include support for IPMI sockets. + This is the preferred way to establish and access IPMI devices. + + If unsure, say N. + # Choice: alphatype Alpha system type CONFIG_ALPHA_GENERIC @@ -29334,14 +29341,32 @@ generate an IPMI event describing the panic to each interface registered with the message handler. +Generate OEM events containing the panic string +CONFIG_IPMI_PANIC_STRING + When a panic occurs, this will cause the IPMI message handler to + generate IPMI OEM type f0 events holding the IPMB address of the + panic generator (byte 4 of the event), a sequence number for the + string (byte 5 of the event) and part of the string (the rest of the + event). Bytes 1, 2, and 3 are the normal usage for an OEM event. + You can fetch these events and use the sequence numbers to piece the + string together. + Device interface for IPMI CONFIG_IPMI_DEVICE_INTERFACE This provides an IOCTL interface to the IPMI message handler so userland processes may use IPMI. It supports poll() and select(). +IPMI SI handler +CONFIG_IPMI_SI + Provides a driver for System Interfaces (KCS, SMIC, BT). + Currently, only KCS and SMIC are supported. If + you are using IPMI, you should probably say "y" here. + IPMI KCS handler CONFIG_IPMI_KCS - Provides a driver for a KCS-style interface to a BMC. + Provides a driver for a KCS-style interface to a BMC. This + is deprecated; please use the IPMI System Interface handler + instead. IPMI Watchdog Timer CONFIG_IPMI_WATCHDOG diff -urN linux-2.4.23.org/Documentation/IPMI.txt linux-2.4.23/Documentation/IPMI.txt --- linux-2.4.23.org/Documentation/IPMI.txt 2003-12-06 12:55:11.829841204 +0100 +++ linux-2.4.23/Documentation/IPMI.txt 2003-12-06 17:46:42.792497135 +0100 @@ -41,18 +41,29 @@ driver, each open file for this device ties in to the message handler as an IPMI user. -ipmi_kcs_drv - A driver for the KCS SMI. Most system have a KCS -interface for IPMI. +ipmi_si_drv - A driver for various system interfaces. This supports +KCS, SMIC, and may support BT in the future. Unless you have your own +custom interface, you probably need to use this. + +ipmi_kcs_drv - A driver for the KCS SI. Most systems have a KCS +interface for IPMI. This is deprecated; ipmi_si_drv supports KCS and +SMIC interfaces. + +af_ipmi - A network socket interface to IPMI. This doesn't take up +a character device in your system. Much documentation for the interface is in the include files. The IPMI include files are: -ipmi.h - Contains the user interface and IOCTL interface for IPMI. +net/af_ipmi.h - Contains the socket interface. + +linux/ipmi.h - Contains the user interface and IOCTL interface for IPMI. -ipmi_smi.h - Contains the interface for SMI drivers to use. +linux/ipmi_smi.h - Contains the interface for system management interfaces +(things that interface to IPMI controllers) to use. -ipmi_msgdefs.h - General definitions for base IPMI messaging. +linux/ipmi_msgdefs.h - General definitions for base IPMI messaging. Addressing @@ -260,8 +271,66 @@ in the order they register, although if an SMI unregisters and then another one registers, all bets are off. -The ipmi_smi.h defines the interface for SMIs, see that for more -details.
+The ipmi_smi.h defines the interface for management interfaces; see +that for more details. + + +The SI Driver +------------- + +The SI driver allows up to 4 KCS or SMIC interfaces to be configured +in the system. By default, the driver scans the ACPI tables for interfaces, +and if it doesn't find any it will attempt to register one KCS +interface at the spec-specified I/O port 0xca2 without interrupts. +You can change this at module load time (for a module) with: + + insmod ipmi_si_drv.o si_type=<type1>,<type2>,... + si_ports=<port1>,<port2>,... si_addrs=<addr1>,<addr2>,... + si_irqs=<irq1>,<irq2>,... si_trydefaults=[0|1] + +Each of these except si_trydefaults is a list; the first item is for the +first interface, the second item for the second interface, etc. + +The si_type may be either "kcs" or "smic". If you leave it blank, it +defaults to "kcs". + +If you specify si_addrs as non-zero for an interface, the driver will +use the memory address given as the address of the device. This +overrides si_ports. + +If you specify si_ports as non-zero for an interface, the driver will +use the I/O port given as the device address. + +If you specify si_irqs as non-zero for an interface, the driver will +attempt to use the given interrupt for the device. + +si_trydefaults sets whether the standard IPMI interface at 0xca2 and +any interfaces specified by ACPI are tried. By default, the driver +tries them; set this value to zero to turn this off. + +When compiled into the kernel, the addresses can be specified on the +kernel command line as: + + ipmi_si=[<type1>,]<addr1>:<irq1>,[<type2>,]<addr2>:<irq2>....,[nodefault] + +The type is optional and may be either "kcs" for KCS or "smic" for +SMIC. If not specified, it defaults to KCS. The <addr> values are +prefixed with either "p" or "m" for port or memory addresses. So for +instance, a KCS interface at port 0xca2 using interrupt 9 and a SMIC +memory interface at address 0xf9827341 with no interrupt would be +specified "ipmi_si=k,p0xca2:9,s,m0xf9827341". If you specify zero +for an irq or don't specify it, the driver will run polled unless the +software can detect the interrupt to use in the ACPI tables. + +By default, the driver will attempt to detect a KCS device at the +spec-specified 0xca2 address and any address specified by ACPI. If +you want to turn this off, use the "nodefault" option. + +If you have high-res timers compiled into the kernel, the driver will +use them to provide much better performance. Note that if you do not +have high-res timers enabled in the kernel and you don't have +interrupts enabled, the driver will run VERY slowly. Don't blame me, +these interfaces suck. The KCS Driver @@ -323,7 +392,10 @@ The timeout is the number of seconds to the action, and the pretimeout is the amount of seconds before the reset that the pre-timeout panic will -occur (if pretimeout is zero, then pretimeout will not be enabled). +occur (if pretimeout is zero, then pretimeout will not be enabled). Note +that the pretimeout is the time before the final timeout. So if the +timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout +will occur in 40 seconds (10 seconds before the timeout).
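
[Editor's note, not part of the patch: a minimal userland sketch of the same timeout/pretimeout arithmetic, assuming the IPMI watchdog is exposed as the standard /dev/watchdog device and that your watchdog.h provides WDIOC_SETTIMEOUT and WDIOC_SETPRETIMEOUT (the pretimeout ioctl may be missing from older headers, hence the #ifdef). With timeout=50 and pretimeout=10, the pre-timeout action fires 40 seconds after the last pat and the reset action at 50 seconds.]

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int fd = open("/dev/watchdog", O_WRONLY);
            int timeout = 50;       /* reset fires 50 seconds after the last pat */

            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }
            if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) < 0)
                    perror("WDIOC_SETTIMEOUT");
    #ifdef WDIOC_SETPRETIMEOUT
            {
                    int pretimeout = 10;    /* pre-timeout action at 40 seconds */
                    if (ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout) < 0)
                            perror("WDIOC_SETPRETIMEOUT");
            }
    #endif
            write(fd, "\0", 1);     /* pat the dog so the timers restart */
            close(fd);              /* may disable the timer unless nowayout is set */
            return 0;
    }
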
The action may be "reset", "power_cycle", or "power_off", and specifies what to do when the timer times out, and defaults to diff -urN linux-2.4.23.org/drivers/char/Config.in linux-2.4.23/drivers/char/Config.in --- linux-2.4.23.org/drivers/char/Config.in 2003-12-06 12:53:22.102691137 +0100 +++ linux-2.4.23/drivers/char/Config.in 2003-12-06 17:46:42.890476581 +0100 @@ -213,7 +213,9 @@ tristate 'IPMI top-level message handler' CONFIG_IPMI_HANDLER dep_mbool ' Generate a panic event to all BMCs on a panic' CONFIG_IPMI_PANIC_EVENT $CONFIG_IPMI_HANDLER +dep_mbool ' Generate a OEM events holding the panic string' CONFIG_IPMI_PANIC_STRING $CONFIG_IPMI_PANIC_EVENT dep_tristate ' Device interface for IPMI' CONFIG_IPMI_DEVICE_INTERFACE $CONFIG_IPMI_HANDLER +dep_tristate ' IPMI SI handler' CONFIG_IPMI_SI $CONFIG_IPMI_HANDLER dep_tristate ' IPMI KCS handler' CONFIG_IPMI_KCS $CONFIG_IPMI_HANDLER dep_tristate ' IPMI Watchdog Timer' CONFIG_IPMI_WATCHDOG $CONFIG_IPMI_HANDLER diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_devintf.c linux-2.4.23/drivers/char/ipmi/ipmi_devintf.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_devintf.c 2003-12-06 12:53:23.036496678 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_devintf.c 2003-12-06 17:46:42.918470709 +0100 @@ -44,6 +44,8 @@ #include #include +#define IPMI_DEVINTF_VERSION "v27" + struct ipmi_file_private { ipmi_user_t user; @@ -53,6 +55,8 @@ struct fasync_struct *fasync_queue; wait_queue_head_t wait; struct semaphore recv_sem; + int default_retries; + unsigned int default_retry_time_ms; }; static void file_receive_handler(struct ipmi_recv_msg *msg, @@ -105,7 +109,7 @@ static struct ipmi_user_hndl ipmi_hndlrs = { - ipmi_recv_hndl : file_receive_handler + .ipmi_recv_hndl = file_receive_handler, }; static int ipmi_open(struct inode *inode, struct file *file) @@ -138,6 +142,10 @@ priv->fasync_queue = NULL; sema_init(&(priv->recv_sem), 1); + /* Use the low-level defaults. 
*/ + priv->default_retries = -1; + priv->default_retry_time_ms = 0; + return 0; } @@ -158,6 +166,47 @@ return 0; } +static int handle_send_req(ipmi_user_t user, + struct ipmi_req *req, + int retries, + unsigned int retry_time_ms) +{ + int rv; + struct ipmi_addr addr; + unsigned char msgdata[IPMI_MAX_MSG_LENGTH]; + + if (req->addr_len > sizeof(struct ipmi_addr)) + return -EINVAL; + + if (copy_from_user(&addr, req->addr, req->addr_len)) + return -EFAULT; + + rv = ipmi_validate_addr(&addr, req->addr_len); + if (rv) + return rv; + + if (req->msg.data != NULL) { + if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) + return -EMSGSIZE; + + if (copy_from_user(&msgdata, + req->msg.data, + req->msg.data_len)) + return -EFAULT; + } else { + req->msg.data_len = 0; + } + req->msg.data = msgdata; + + return ipmi_request_settime(user, + &addr, + req->msgid, + &(req->msg), + 0, + retries, + retry_time_ms); +} + static int ipmi_ioctl(struct inode *inode, struct file *file, unsigned int cmd, @@ -170,54 +219,33 @@ { case IPMICTL_SEND_COMMAND: { - struct ipmi_req req; - struct ipmi_addr addr; - unsigned char msgdata[IPMI_MAX_MSG_LENGTH]; + struct ipmi_req req; if (copy_from_user(&req, (void *) data, sizeof(req))) { rv = -EFAULT; break; } - if (req.addr_len > sizeof(struct ipmi_addr)) - { - rv = -EINVAL; - break; - } + rv = handle_send_req(priv->user, + &req, + priv->default_retries, + priv->default_retry_time_ms); + break; + } - if (copy_from_user(&addr, req.addr, req.addr_len)) { + case IPMICTL_SEND_COMMAND_SETTIME: + { + struct ipmi_req_settime req; + + if (copy_from_user(&req, (void *) data, sizeof(req))) { rv = -EFAULT; break; } - rv = ipmi_validate_addr(&addr, req.addr_len); - if (rv) - break; - - if (req.msg.data != NULL) { - if (req.msg.data_len > IPMI_MAX_MSG_LENGTH) { - rv = -EMSGSIZE; - break; - } - - if (copy_from_user(&msgdata, - req.msg.data, - req.msg.data_len)) - { - rv = -EFAULT; - break; - } - } else { - req.msg.data_len = 0; - } - - req.msg.data = msgdata; - - rv = ipmi_request(priv->user, - &addr, - req.msgid, - &(req.msg), - 0); + rv = handle_send_req(priv->user, + &req.req, + req.retries, + req.retry_time_ms); break; } @@ -416,7 +444,36 @@ rv = 0; break; } + case IPMICTL_SET_TIMING_PARMS_CMD: + { + struct ipmi_timing_parms parms; + + if (copy_from_user(&parms, (void *) data, sizeof(parms))) { + rv = -EFAULT; + break; + } + + priv->default_retries = parms.retries; + priv->default_retry_time_ms = parms.retry_time_ms; + rv = 0; + break; + } + + case IPMICTL_GET_TIMING_PARMS_CMD: + { + struct ipmi_timing_parms parms; + + parms.retries = priv->default_retries; + parms.retry_time_ms = priv->default_retry_time_ms; + if (copy_to_user((void *) data, &parms, sizeof(parms))) { + rv = -EFAULT; + break; + } + + rv = 0; + break; + } } return rv; @@ -424,12 +481,12 @@ static struct file_operations ipmi_fops = { - owner: THIS_MODULE, - ioctl: ipmi_ioctl, - open: ipmi_open, - release: ipmi_release, - fasync: ipmi_fasync, - poll: ipmi_poll + .owner = THIS_MODULE, + .ioctl = ipmi_ioctl, + .open = ipmi_open, + .release = ipmi_release, + .fasync = ipmi_fasync, + .poll = ipmi_poll, }; #define DEVICE_NAME "ipmidev" @@ -468,8 +525,9 @@ static struct ipmi_smi_watcher smi_watcher = { - new_smi : ipmi_new_smi, - smi_gone : ipmi_smi_gone + .owner = THIS_MODULE, + .new_smi = ipmi_new_smi, + .smi_gone = ipmi_smi_gone, }; static __init int init_ipmi_devintf(void) @@ -479,6 +537,9 @@ if (ipmi_major < 0) return -EINVAL; + printk(KERN_INFO "ipmi device interface version " + IPMI_DEVINTF_VERSION "\n"); + rv = 
register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops); if (rv < 0) { printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major); @@ -498,9 +559,6 @@ return rv; } - printk(KERN_INFO "ipmi: device interface at char major %d\n", - ipmi_major); - return 0; } module_init(init_ipmi_devintf); diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_kcs_intf.c linux-2.4.23/drivers/char/ipmi/ipmi_kcs_intf.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_kcs_intf.c 2003-12-06 12:53:22.989506463 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_kcs_intf.c 2003-12-06 17:46:42.974458964 +0100 @@ -54,7 +54,7 @@ #include #include #include -#include "ipmi_kcs_sm.h" +#include "ipmi_si_sm.h" #include /* Measure times between events in the driver. */ @@ -72,6 +72,8 @@ /* This forces a dependency to the config file for this option. */ #endif +extern struct si_sm_handlers kcs_smi_handlers; + enum kcs_intf_state { KCS_NORMAL, KCS_GETTING_FLAGS, @@ -87,7 +89,7 @@ struct kcs_info { ipmi_smi_t intf; - struct kcs_data *kcs_sm; + struct si_sm_data *kcs_sm; spinlock_t kcs_lock; spinlock_t msg_lock; struct list_head xmit_msgs; @@ -112,8 +114,10 @@ out. */ int run_to_completion; + struct si_sm_io io; + /* The I/O port of a KCS interface. */ - int port; + unsigned int port; /* zero if no irq; */ int irq; @@ -164,7 +168,7 @@ deliver_recv_msg(kcs_info, msg); } -static enum kcs_result start_next_msg(struct kcs_info *kcs_info) +static enum si_sm_result start_next_msg(struct kcs_info *kcs_info) { int rv; struct list_head *entry = NULL; @@ -185,7 +189,7 @@ if (!entry) { kcs_info->curr_msg = NULL; - rv = KCS_SM_IDLE; + rv = SI_SM_IDLE; } else { int err; @@ -197,14 +201,15 @@ do_gettimeofday(&t); printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif - err = start_kcs_transaction(kcs_info->kcs_sm, - kcs_info->curr_msg->data, - kcs_info->curr_msg->data_size); + err = kcs_smi_handlers.start_transaction( + kcs_info->kcs_sm, + kcs_info->curr_msg->data, + kcs_info->curr_msg->data_size); if (err) { return_hosed_msg(kcs_info); } - rv = KCS_CALL_WITHOUT_DELAY; + rv = SI_SM_CALL_WITHOUT_DELAY; } spin_unlock(&(kcs_info->msg_lock)); @@ -220,7 +225,7 @@ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - start_kcs_transaction(kcs_info->kcs_sm, msg, 2); + kcs_smi_handlers.start_transaction(kcs_info->kcs_sm, msg, 2); kcs_info->kcs_state = KCS_ENABLE_INTERRUPTS1; } @@ -233,7 +238,7 @@ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - start_kcs_transaction(kcs_info->kcs_sm, msg, 3); + kcs_smi_handlers.start_transaction(kcs_info->kcs_sm, msg, 3); kcs_info->kcs_state = KCS_CLEARING_FLAGS; } @@ -280,9 +285,10 @@ kcs_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; kcs_info->curr_msg->data_size = 2; - start_kcs_transaction(kcs_info->kcs_sm, - kcs_info->curr_msg->data, - kcs_info->curr_msg->data_size); + kcs_smi_handlers.start_transaction( + kcs_info->kcs_sm, + kcs_info->curr_msg->data, + kcs_info->curr_msg->data_size); kcs_info->kcs_state = KCS_GETTING_MESSAGES; } else if (kcs_info->msg_flags & EVENT_MSG_BUFFER_FULL) { /* Events available. 
*/ @@ -298,9 +304,10 @@ kcs_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; kcs_info->curr_msg->data_size = 2; - start_kcs_transaction(kcs_info->kcs_sm, - kcs_info->curr_msg->data, - kcs_info->curr_msg->data_size); + kcs_smi_handlers.start_transaction( + kcs_info->kcs_sm, + kcs_info->curr_msg->data, + kcs_info->curr_msg->data_size); kcs_info->kcs_state = KCS_GETTING_EVENTS; } else { kcs_info->kcs_state = KCS_NORMAL; @@ -322,9 +329,10 @@ break; kcs_info->curr_msg->rsp_size - = kcs_get_result(kcs_info->kcs_sm, - kcs_info->curr_msg->rsp, - IPMI_MAX_MSG_LENGTH); + = kcs_smi_handlers.get_result( + kcs_info->kcs_sm, + kcs_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); /* Do this here becase deliver_recv_msg() releases the lock, and a new message can be put in during the @@ -340,7 +348,7 @@ unsigned int len; /* We got the flags from the KCS, now handle them. */ - len = kcs_get_result(kcs_info->kcs_sm, msg, 4); + len = kcs_smi_handlers.get_result(kcs_info->kcs_sm, msg, 4); if (msg[2] != 0) { /* Error fetching flags, just give up for now. */ @@ -362,7 +370,7 @@ unsigned char msg[3]; /* We cleared the flags. */ - kcs_get_result(kcs_info->kcs_sm, msg, 3); + kcs_smi_handlers.get_result(kcs_info->kcs_sm, msg, 3); if (msg[2] != 0) { /* Error clearing flags */ printk(KERN_WARNING @@ -379,9 +387,9 @@ case KCS_GETTING_EVENTS: { kcs_info->curr_msg->rsp_size - = kcs_get_result(kcs_info->kcs_sm, - kcs_info->curr_msg->rsp, - IPMI_MAX_MSG_LENGTH); + = kcs_smi_handlers.get_result(kcs_info->kcs_sm, + kcs_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); /* Do this here becase deliver_recv_msg() releases the lock, and a new message can be put in during the @@ -404,9 +412,9 @@ case KCS_GETTING_MESSAGES: { kcs_info->curr_msg->rsp_size - = kcs_get_result(kcs_info->kcs_sm, - kcs_info->curr_msg->rsp, - IPMI_MAX_MSG_LENGTH); + = kcs_smi_handlers.get_result(kcs_info->kcs_sm, + kcs_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); /* Do this here becase deliver_recv_msg() releases the lock, and a new message can be put in during the @@ -431,7 +439,7 @@ unsigned char msg[4]; /* We got the flags from the KCS, now handle them. */ - kcs_get_result(kcs_info->kcs_sm, msg, 4); + kcs_smi_handlers.get_result(kcs_info->kcs_sm, msg, 4); if (msg[2] != 0) { printk(KERN_WARNING "ipmi_kcs: Could not enable interrupts" @@ -441,7 +449,8 @@ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = msg[3] | 1; /* enable msg queue int */ - start_kcs_transaction(kcs_info->kcs_sm, msg,3); + kcs_smi_handlers.start_transaction( + kcs_info->kcs_sm, msg,3); kcs_info->kcs_state = KCS_ENABLE_INTERRUPTS2; } break; @@ -452,7 +461,7 @@ unsigned char msg[4]; /* We got the flags from the KCS, now handle them. */ - kcs_get_result(kcs_info->kcs_sm, msg, 4); + kcs_smi_handlers.get_result(kcs_info->kcs_sm, msg, 4); if (msg[2] != 0) { printk(KERN_WARNING "ipmi_kcs: Could not enable interrupts" @@ -466,9 +475,10 @@ /* Called on timeouts and events. Timeouts should pass the elapsed time, interrupts should pass in zero. */ -static enum kcs_result kcs_event_handler(struct kcs_info *kcs_info, int time) +static enum si_sm_result kcs_event_handler(struct kcs_info *kcs_info, + int time) { - enum kcs_result kcs_result; + enum si_sm_result kcs_result; restart: /* There used to be a loop here that waited a little while @@ -477,19 +487,19 @@ range, which is far too long to wait in an interrupt. So we just run until the state machine tells us something happened or it needs a delay. 
*/ - kcs_result = kcs_event(kcs_info->kcs_sm, time); + kcs_result = kcs_smi_handlers.event(kcs_info->kcs_sm, time); time = 0; - while (kcs_result == KCS_CALL_WITHOUT_DELAY) + while (kcs_result == SI_SM_CALL_WITHOUT_DELAY) { - kcs_result = kcs_event(kcs_info->kcs_sm, 0); + kcs_result = kcs_smi_handlers.event(kcs_info->kcs_sm, 0); } - if (kcs_result == KCS_TRANSACTION_COMPLETE) + if (kcs_result == SI_SM_TRANSACTION_COMPLETE) { handle_transaction_done(kcs_info); - kcs_result = kcs_event(kcs_info->kcs_sm, 0); + kcs_result = kcs_smi_handlers.event(kcs_info->kcs_sm, 0); } - else if (kcs_result == KCS_SM_HOSED) + else if (kcs_result == SI_SM_HOSED) { if (kcs_info->curr_msg != NULL) { /* If we were handling a user message, format @@ -497,12 +507,12 @@ tell it about the error. */ return_hosed_msg(kcs_info); } - kcs_result = kcs_event(kcs_info->kcs_sm, 0); + kcs_result = kcs_smi_handlers.event(kcs_info->kcs_sm, 0); kcs_info->kcs_state = KCS_NORMAL; } /* We prefer handling attn over new messages. */ - if (kcs_result == KCS_ATTN) + if (kcs_result == SI_SM_ATTN) { unsigned char msg[2]; @@ -514,19 +524,19 @@ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; - start_kcs_transaction(kcs_info->kcs_sm, msg, 2); + kcs_smi_handlers.start_transaction(kcs_info->kcs_sm, msg, 2); kcs_info->kcs_state = KCS_GETTING_FLAGS; goto restart; } /* If we are currently idle, try to start the next message. */ - if (kcs_result == KCS_SM_IDLE) { + if (kcs_result == SI_SM_IDLE) { kcs_result = start_next_msg(kcs_info); - if (kcs_result != KCS_SM_IDLE) + if (kcs_result != SI_SM_IDLE) goto restart; } - if ((kcs_result == KCS_SM_IDLE) + if ((kcs_result == SI_SM_IDLE) && (atomic_read(&kcs_info->req_events))) { /* We are idle and the upper layer requested that I fetch @@ -537,7 +547,7 @@ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; - start_kcs_transaction(kcs_info->kcs_sm, msg, 2); + kcs_smi_handlers.start_transaction(kcs_info->kcs_sm, msg, 2); kcs_info->kcs_state = KCS_GETTING_FLAGS; goto restart; } @@ -549,11 +559,11 @@ struct ipmi_smi_msg *msg, int priority) { - struct kcs_info *kcs_info = (struct kcs_info *) send_info; - enum kcs_result result; - unsigned long flags; + struct kcs_info *kcs_info = (struct kcs_info *) send_info; + enum si_sm_result result; + unsigned long flags; #ifdef DEBUG_TIMING - struct timeval t; + struct timeval t; #endif spin_lock_irqsave(&(kcs_info->msg_lock), flags); @@ -574,7 +584,7 @@ spin_lock_irqsave(&(kcs_info->kcs_lock), flags); result = kcs_event_handler(kcs_info, 0); - while (result != KCS_SM_IDLE) { + while (result != SI_SM_IDLE) { udelay(KCS_SHORT_TIMEOUT_USEC); result = kcs_event_handler(kcs_info, KCS_SHORT_TIMEOUT_USEC); @@ -602,16 +612,16 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) { - struct kcs_info *kcs_info = (struct kcs_info *) send_info; - enum kcs_result result; - unsigned long flags; + struct kcs_info *kcs_info = (struct kcs_info *) send_info; + enum si_sm_result result; + unsigned long flags; spin_lock_irqsave(&(kcs_info->kcs_lock), flags); kcs_info->run_to_completion = i_run_to_completion; if (i_run_to_completion) { result = kcs_event_handler(kcs_info, 0); - while (result != KCS_SM_IDLE) { + while (result != SI_SM_IDLE) { udelay(KCS_SHORT_TIMEOUT_USEC); result = kcs_event_handler(kcs_info, KCS_SHORT_TIMEOUT_USEC); @@ -676,13 +686,13 @@ static void kcs_timeout(unsigned long data) { - struct kcs_info *kcs_info = (struct kcs_info *) data; - enum kcs_result kcs_result; - unsigned long flags; - unsigned 
long jiffies_now; - unsigned long time_diff; + struct kcs_info *kcs_info = (struct kcs_info *) data; + enum si_sm_result kcs_result; + unsigned long flags; + unsigned long jiffies_now; + unsigned long time_diff; #ifdef DEBUG_TIMING - struct timeval t; + struct timeval t; #endif if (kcs_info->stop_operation) { @@ -712,7 +722,7 @@ /* If the state machine asks for a short delay, then shorten the timer timeout. */ #ifdef CONFIG_HIGH_RES_TIMERS - if (kcs_result == KCS_CALL_WITH_DELAY) { + if (kcs_result == SI_SM_CALL_WITH_DELAY) { kcs_info->kcs_timer.sub_expires += usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC); while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) { @@ -725,7 +735,7 @@ } #else /* If requested, take the shortest delay possible */ - if (kcs_result == KCS_CALL_WITH_DELAY) { + if (kcs_result == SI_SM_CALL_WITH_DELAY) { kcs_info->kcs_timer.expires = jiffies + 1; } else { kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES; @@ -776,12 +786,12 @@ extern int kcs_dbg; static int ipmi_kcs_detect_hardware(unsigned int port, unsigned char *addr, - struct kcs_data *data) + struct si_sm_data *data) { - unsigned char msg[2]; - unsigned char resp[IPMI_MAX_MSG_LENGTH]; - unsigned long resp_len; - enum kcs_result kcs_result; + unsigned char msg[2]; + unsigned char resp[IPMI_MAX_MSG_LENGTH]; + unsigned long resp_len; + enum si_sm_result kcs_result; /* It's impossible for the KCS status register to be all 1's, (assuming a properly functioning, self-initialized BMC) @@ -798,29 +808,30 @@ useful info. */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_DEVICE_ID_CMD; - start_kcs_transaction(data, msg, 2); + kcs_smi_handlers.start_transaction(data, msg, 2); - kcs_result = kcs_event(data, 0); + kcs_result = kcs_smi_handlers.event(data, 0); for (;;) { - if (kcs_result == KCS_CALL_WITH_DELAY) { - udelay(100); - kcs_result = kcs_event(data, 100); + if (kcs_result == SI_SM_CALL_WITH_DELAY) { + schedule_timeout(1); + kcs_result = kcs_smi_handlers.event(data, 100); } - else if (kcs_result == KCS_CALL_WITHOUT_DELAY) + else if (kcs_result == SI_SM_CALL_WITHOUT_DELAY) { - kcs_result = kcs_event(data, 0); + kcs_result = kcs_smi_handlers.event(data, 0); } else break; } - if (kcs_result == KCS_SM_HOSED) { + if (kcs_result == SI_SM_HOSED) { /* We couldn't get the state machine to run, so whatever's at the port is probably not an IPMI KCS interface. */ return -ENODEV; } /* Otherwise, we got some data. */ - resp_len = kcs_get_result(data, resp, IPMI_MAX_MSG_LENGTH); + resp_len = kcs_smi_handlers.get_result(data, resp, + IPMI_MAX_MSG_LENGTH); if (resp_len < 6) /* That's odd, it should be longer. */ return -EINVAL; @@ -860,6 +871,36 @@ MODULE_PARM(kcs_irqs, "1-4i"); MODULE_PARM(kcs_ports, "1-4i"); +static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) +{ + struct kcs_info *info = io->info; + + return inb(info->port+offset); +} + +static void port_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + struct kcs_info *info = io->info; + + outb(b, info->port+offset); +} + +static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset) +{ + struct kcs_info *info = io->info; + + return readb(info->addr+offset); +} + +static void mem_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + struct kcs_info *info = io->info; + + writeb(b, info->addr+offset); +} + /* Returns 0 if initialized, or negative on an error. 
*/ static int init_one_kcs(int kcs_port, int irq, @@ -902,6 +943,9 @@ kcs_port); return -EIO; } + new_kcs->io.outputb = port_outb; + new_kcs->io.inputb = port_inb; + new_kcs->io.info = new_kcs; } else { if (request_mem_region(kcs_physaddr, 2, DEVICE_NAME) == NULL) { kfree(new_kcs); @@ -917,15 +961,18 @@ kcs_physaddr); return -EIO; } + new_kcs->io.outputb = mem_outb; + new_kcs->io.inputb = mem_inb; + new_kcs->io.info = new_kcs; } - new_kcs->kcs_sm = kmalloc(kcs_size(), GFP_KERNEL); + new_kcs->kcs_sm = kmalloc(kcs_smi_handlers.size(), GFP_KERNEL); if (!new_kcs->kcs_sm) { printk(KERN_ERR "ipmi_kcs: out of memory\n"); rv = -ENOMEM; goto out_err; } - init_kcs_data(new_kcs->kcs_sm, kcs_port, new_kcs->addr); + kcs_smi_handlers.init_data(new_kcs->kcs_sm, &(new_kcs->io)); spin_lock_init(&(new_kcs->kcs_lock)); spin_lock_init(&(new_kcs->msg_lock)); @@ -1027,11 +1074,7 @@ #ifdef CONFIG_ACPI_INTERPRETER -/* Retrieve the base physical address from ACPI tables. Originally - from Hewlett-Packard simple bmc.c, a GPL KCS driver. */ - #include -/* A real hack, but everything's not there yet in 2.4. */ #include #include #include @@ -1046,37 +1089,69 @@ s8 OEMRevision[4]; s8 CreatorID[4]; s8 CreatorRevision[4]; - s16 InterfaceType; + u8 InterfaceType[2]; s16 SpecificationRevision; + + /* + * Bit 0 - SCI interrupt supported + * Bit 1 - I/O APIC/SAPIC + */ u8 InterruptType; + + /* If bit 0 of InterruptType is set, then this is the SCI + interrupt in the GPEx_STS register. */ u8 GPE; + s16 Reserved; - u64 GlobalSystemInterrupt; - u8 BaseAddress[12]; + + /* If bit 1 of InterruptType is set, then this is the I/O + APIC/SAPIC interrupt. */ + u32 GlobalSystemInterrupt; + + /* The actual register address. */ + struct acpi_generic_address addr; + u8 UID[4]; -} __attribute__ ((packed)); -static unsigned long acpi_find_bmc(void) + s8 spmi_id[1]; /* A '\0' terminated array starts here. */ +}; + +static int acpi_find_bmc(unsigned long *physaddr, int *port) { acpi_status status; - struct acpi_table_header *spmi; - static unsigned long io_base = 0; - - if (io_base != 0) - return io_base; + struct SPMITable *spmi; status = acpi_get_firmware_table("SPMI", 1, - ACPI_LOGICAL_ADDRESSING, &spmi); + ACPI_LOGICAL_ADDRESSING, + (struct acpi_table_header **) &spmi); + if (status != AE_OK) + goto not_found; + + if (spmi->InterfaceType[0] != 1) + /* Not IPMI. */ + goto not_found; + + if (spmi->InterfaceType[1] != 1) + /* Not KCS. */ + goto not_found; + + if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + *physaddr = spmi->addr.address; + printk("ipmi_kcs_intf: Found ACPI-specified state machine" + " at memory address 0x%lx\n", + (unsigned long) spmi->addr.address); + } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + *port = spmi->addr.address; + printk("ipmi_kcs_intf: Found ACPI-specified state machine" + " at I/O address 0x%x\n", + (int) spmi->addr.address); + } else + goto not_found; /* Not an address type we recognise. */ - if (status != AE_OK) { - printk(KERN_ERR "ipmi_kcs: SPMI table not found.\n"); - return 0; - } + return 0; - memcpy(&io_base, ((struct SPMITable *)spmi)->BaseAddress, - sizeof(io_base)); - - return io_base; + not_found: + return -ENODEV; } #endif @@ -1087,6 +1162,7 @@ int i = 0; #ifdef CONFIG_ACPI_INTERPRETER unsigned long physaddr = 0; + int port = 0; #endif if (initialized) @@ -1114,26 +1190,25 @@ /* Only try the defaults if enabled and resources are available (because they weren't already specified above). 
*/ - if (kcs_trydefaults) { + if (kcs_trydefaults && (pos == 0)) { + rv = -EINVAL; #ifdef CONFIG_ACPI_INTERPRETER - if ((physaddr = acpi_find_bmc())) { - if (!check_mem_region(physaddr, 2)) { - rv = init_one_kcs(0, - 0, - physaddr, - &(kcs_infos[pos])); - if (rv == 0) - pos++; - } + if (rv && (acpi_find_bmc(&physaddr, &port) == 0)) { + rv = init_one_kcs(port, + 0, + physaddr, + &(kcs_infos[pos])); + if (rv == 0) + pos++; } #endif - if (!check_region(DEFAULT_IO_PORT, 2)) { + if (rv) { rv = init_one_kcs(DEFAULT_IO_PORT, 0, 0, &(kcs_infos[pos])); if (rv == 0) - pos++; + pos++; } } diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_kcs_sm.c linux-2.4.23/drivers/char/ipmi/ipmi_kcs_sm.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_kcs_sm.c 2003-12-06 12:53:23.035496886 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_kcs_sm.c 2003-12-06 17:46:42.982457286 +0100 @@ -37,10 +37,11 @@ * that document. */ -#include -#include /* Gets rid of memcpy warning */ +#include /* For printk. */ +#include +#include "ipmi_si_sm.h" -#include "ipmi_kcs_sm.h" +#define IPMI_KCS_VERSION "v27" /* Set this if you want a printout of why the state machine was hosed when it gets hosed. */ @@ -95,29 +96,28 @@ #define IPMI_ERR_MSG_TRUNCATED 0xc6 #define IPMI_ERR_UNSPECIFIED 0xff -struct kcs_data +struct si_sm_data { - enum kcs_states state; - unsigned int port; - unsigned char *addr; - unsigned char write_data[MAX_KCS_WRITE_SIZE]; - int write_pos; - int write_count; - int orig_write_count; - unsigned char read_data[MAX_KCS_READ_SIZE]; - int read_pos; - int truncated; + enum kcs_states state; + struct si_sm_io *io; + unsigned char write_data[MAX_KCS_WRITE_SIZE]; + int write_pos; + int write_count; + int orig_write_count; + unsigned char read_data[MAX_KCS_READ_SIZE]; + int read_pos; + int truncated; unsigned int error_retries; long ibf_timeout; long obf_timeout; }; -void init_kcs_data(struct kcs_data *kcs, unsigned int port, unsigned char *addr) +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) { kcs->state = KCS_IDLE; - kcs->port = port; - kcs->addr = addr; + kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; kcs->orig_write_count = 0; @@ -126,40 +126,29 @@ kcs->truncated = 0; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; -} -/* Remember, init_one_kcs() insured port and addr can't both be set */ + /* Reserve 2 I/O bytes. */ + return 2; +} -static inline unsigned char read_status(struct kcs_data *kcs) +static inline unsigned char read_status(struct si_sm_data *kcs) { - if (kcs->port) - return inb(kcs->port + 1); - else - return readb(kcs->addr + 1); + return kcs->io->inputb(kcs->io, 1); } -static inline unsigned char read_data(struct kcs_data *kcs) +static inline unsigned char read_data(struct si_sm_data *kcs) { - if (kcs->port) - return inb(kcs->port + 0); - else - return readb(kcs->addr + 0); + return kcs->io->inputb(kcs->io, 0); } -static inline void write_cmd(struct kcs_data *kcs, unsigned char data) +static inline void write_cmd(struct si_sm_data *kcs, unsigned char data) { - if (kcs->port) - outb(data, kcs->port + 1); - else - writeb(data, kcs->addr + 1); + kcs->io->outputb(kcs->io, 1, data); } -static inline void write_data(struct kcs_data *kcs, unsigned char data) +static inline void write_data(struct si_sm_data *kcs, unsigned char data) { - if (kcs->port) - outb(data, kcs->port + 0); - else - writeb(data, kcs->addr + 0); + kcs->io->outputb(kcs->io, 0, data); } /* Control codes. 
*/ @@ -179,14 +168,14 @@ #define GET_STATUS_OBF(status) ((status) & 0x01) -static inline void write_next_byte(struct kcs_data *kcs) +static inline void write_next_byte(struct si_sm_data *kcs) { write_data(kcs, kcs->write_data[kcs->write_pos]); (kcs->write_pos)++; (kcs->write_count)--; } -static inline void start_error_recovery(struct kcs_data *kcs, char *reason) +static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) { (kcs->error_retries)++; if (kcs->error_retries > MAX_ERROR_RETRIES) { @@ -199,7 +188,7 @@ } } -static inline void read_next_byte(struct kcs_data *kcs) +static inline void read_next_byte(struct si_sm_data *kcs) { if (kcs->read_pos >= MAX_KCS_READ_SIZE) { /* Throw the data away and mark it truncated. */ @@ -212,9 +201,8 @@ write_data(kcs, KCS_READ_BYTE); } -static inline int check_ibf(struct kcs_data *kcs, - unsigned char status, - long time) +static inline int check_ibf(struct si_sm_data *kcs, unsigned char status, + long time) { if (GET_STATUS_IBF(status)) { kcs->ibf_timeout -= time; @@ -229,9 +217,8 @@ return 1; } -static inline int check_obf(struct kcs_data *kcs, - unsigned char status, - long time) +static inline int check_obf(struct si_sm_data *kcs, unsigned char status, + long time) { if (! GET_STATUS_OBF(status)) { kcs->obf_timeout -= time; @@ -245,13 +232,13 @@ return 1; } -static void clear_obf(struct kcs_data *kcs, unsigned char status) +static void clear_obf(struct si_sm_data *kcs, unsigned char status) { if (GET_STATUS_OBF(status)) read_data(kcs); } -static void restart_kcs_transaction(struct kcs_data *kcs) +static void restart_kcs_transaction(struct si_sm_data *kcs) { kcs->write_count = kcs->orig_write_count; kcs->write_pos = 0; @@ -262,7 +249,8 @@ write_cmd(kcs, KCS_WRITE_START); } -int start_kcs_transaction(struct kcs_data *kcs, char *data, unsigned int size) +static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, + unsigned int size) { if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) { return -1; @@ -284,7 +272,8 @@ return 0; } -int kcs_get_result(struct kcs_data *kcs, unsigned char *data, int length) +static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, + unsigned int length) { if (length < kcs->read_pos) { kcs->read_pos = length; @@ -313,7 +302,7 @@ /* This implements the state machine defined in the IPMI manual, see that for details on how this works. Divide that flowchart into sections delimited by "Wait for IBF" and this will become clear. */ -enum kcs_result kcs_event(struct kcs_data *kcs, long time) +static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) { unsigned char status; unsigned char state; @@ -325,7 +314,7 @@ #endif /* All states wait for ibf, so just do it here. */ if (!check_ibf(kcs, status, time)) - return KCS_CALL_WITH_DELAY; + return SI_SM_CALL_WITH_DELAY; /* Just about everything looks at the KCS state, so grab that, too. */ state = GET_STATUS_STATE(status); @@ -336,9 +325,9 @@ clear_obf(kcs, status); if (GET_STATUS_ATN(status)) - return KCS_ATTN; + return SI_SM_ATTN; else - return KCS_SM_IDLE; + return SI_SM_IDLE; case KCS_START_OP: if (state != KCS_IDLE) { @@ -405,7 +394,7 @@ if (state == KCS_READ_STATE) { if (! 
check_obf(kcs, status, time)) - return KCS_CALL_WITH_DELAY; + return SI_SM_CALL_WITH_DELAY; read_next_byte(kcs); } else { /* We don't implement this exactly like the state @@ -418,7 +407,7 @@ clear_obf(kcs, status); kcs->orig_write_count = 0; kcs->state = KCS_IDLE; - return KCS_TRANSACTION_COMPLETE; + return SI_SM_TRANSACTION_COMPLETE; } break; @@ -441,7 +430,7 @@ break; } if (! check_obf(kcs, status, time)) - return KCS_CALL_WITH_DELAY; + return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); write_data(kcs, KCS_READ_BYTE); @@ -456,14 +445,14 @@ } if (! check_obf(kcs, status, time)) - return KCS_CALL_WITH_DELAY; + return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); if (kcs->orig_write_count) { restart_kcs_transaction(kcs); } else { kcs->state = KCS_IDLE; - return KCS_TRANSACTION_COMPLETE; + return SI_SM_TRANSACTION_COMPLETE; } break; @@ -472,14 +461,42 @@ } if (kcs->state == KCS_HOSED) { - init_kcs_data(kcs, kcs->port, kcs->addr); - return KCS_SM_HOSED; + init_kcs_data(kcs, kcs->io); + return SI_SM_HOSED; } - return KCS_CALL_WITHOUT_DELAY; + return SI_SM_CALL_WITHOUT_DELAY; } -int kcs_size(void) +static int kcs_size(void) { - return sizeof(struct kcs_data); + return sizeof(struct si_sm_data); +} + +static int kcs_detect(struct si_sm_data *kcs) +{ + /* It's impossible for the KCS status register to be all 1's, + (assuming a properly functioning, self-initialized BMC) + but that's what you get from reading a bogus address, so we + test that first. */ + if (read_status(kcs) == 0xff) + return 1; + + return 0; } + +static void kcs_cleanup(struct si_sm_data *kcs) +{ +} + +struct si_sm_handlers kcs_smi_handlers = +{ + .version = IPMI_KCS_VERSION, + .init_data = init_kcs_data, + .start_transaction = start_kcs_transaction, + .get_result = get_kcs_result, + .event = kcs_event, + .detect = kcs_detect, + .cleanup = kcs_cleanup, + .size = kcs_size, +}; diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_msghandler.c linux-2.4.23/drivers/char/ipmi/ipmi_msghandler.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_msghandler.c 2003-12-06 12:53:23.012501675 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_msghandler.c 2003-12-06 17:46:42.995454560 +0100 @@ -44,16 +44,21 @@ #include #include #include +#include + +#define IPMI_MSGHANDLER_VERSION "v27" struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); static int initialized = 0; +static struct proc_dir_entry *proc_ipmi_root = NULL; + #define MAX_EVENTS_IN_QUEUE 25 /* Don't let a message sit in a queue forever, always time it with at lest - the max message timer. */ + the max message timer. This is in milliseconds. */ #define MAX_MSG_TIMEOUT 60000 struct ipmi_user @@ -82,7 +87,8 @@ struct seq_table { - int inuse : 1; + unsigned int inuse : 1; + unsigned int broadcast : 1; unsigned long timeout; unsigned long orig_timeout; @@ -169,6 +175,72 @@ /* My LUN. This should generally stay the SMS LUN, but just in case... */ unsigned char my_lun; + + /* The event receiver for my BMC, only really used at panic + shutdown as a place to store this. */ + unsigned char event_receiver; + unsigned char event_receiver_lun; + unsigned char local_sel_device; + unsigned char local_event_generator; + + /* A cheap hack, if this is non-null and a message to an + interface comes in with a NULL user, call this routine with + it. Note that the message will still be freed by the + caller. This only works on the system interface. */ + void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg); + + /* Proc FS stuff. 
*/ + struct proc_dir_entry *proc_dir; + char proc_dir_name[10]; + + spinlock_t counter_lock; /* For making counters atomic. */ + + /* Commands we got that were invalid. */ + unsigned int sent_invalid_commands; + + /* Commands we sent to the MC. */ + unsigned int sent_local_commands; + /* Responses from the MC that were delivered to a user. */ + unsigned int handled_local_responses; + /* Responses from the MC that were not delivered to a user. */ + unsigned int unhandled_local_responses; + + /* Commands we sent out to the IPMB bus. */ + unsigned int sent_ipmb_commands; + /* Commands sent on the IPMB that had errors on the SEND CMD */ + unsigned int sent_ipmb_command_errs; + /* Each retransmit increments this count. */ + unsigned int retransmitted_ipmb_commands; + /* When a message times out (runs out of retransmits) this is + incremented. */ + unsigned int timed_out_ipmb_commands; + + /* This is like above, but for broadcasts. Broadcasts are + *not* included in the above count (they are expected to + time out). */ + unsigned int timed_out_ipmb_broadcasts; + + /* Responses I have sent to the IPMB bus. */ + unsigned int sent_ipmb_responses; + + /* The response was delivered to the user. */ + unsigned int handled_ipmb_responses; + /* The response had invalid data in it. */ + unsigned int invalid_ipmb_responses; + /* The response didn't have anyone waiting for it. */ + unsigned int unhandled_ipmb_responses; + + /* The command was delivered to the user. */ + unsigned int handled_commands; + /* The command had invalid data in it. */ + unsigned int invalid_commands; + /* The command didn't have anyone waiting for it. */ + unsigned int unhandled_commands; + + /* Invalid data in an event. */ + unsigned int invalid_events; + /* Events that were received with the proper format. */ + unsigned int events; }; int @@ -328,7 +400,7 @@ static void deliver_response(struct ipmi_recv_msg *msg) { - msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data); + msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data); } /* Find the next sequence number not being used and add the given @@ -338,6 +410,7 @@ struct ipmi_recv_msg *recv_msg, unsigned long timeout, int retries, + int broadcast, unsigned char *seq, long *seqid) { @@ -360,6 +433,7 @@ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; intf->seq_table[i].orig_timeout = timeout; intf->seq_table[i].retries_left = retries; + intf->seq_table[i].broadcast = broadcast; intf->seq_table[i].inuse = 1; intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); *seq = i; @@ -412,8 +486,8 @@ /* Start the timer for a specific sequence table entry. */ -static int intf_start_seq_timer(ipmi_smi_t intf, - long msgid) +static int intf_start_seq_timer(ipmi_smi_t intf, + long msgid) { int rv = -ENODEV; unsigned long flags; @@ -431,9 +505,50 @@ { struct seq_table *ent = &(intf->seq_table[seq]); ent->timeout = ent->orig_timeout; + rv = 0; + } + spin_unlock_irqrestore(&(intf->seq_lock), flags); + + return rv; +} + +/* Got an error for the send message for a specific sequence number. */ +static int intf_err_seq(ipmi_smi_t intf, + long msgid, + unsigned int err) +{ + int rv = -ENODEV; + unsigned long flags; + unsigned char seq; + unsigned long seqid; + struct ipmi_recv_msg *msg = NULL; + + + GET_SEQ_FROM_MSGID(msgid, seq, seqid); + + spin_lock_irqsave(&(intf->seq_lock), flags); + /* We do this verification because the user can be deleted + while a message is outstanding. 
*/ + if ((intf->seq_table[seq].inuse) + && (intf->seq_table[seq].seqid == seqid)) + { + struct seq_table *ent = &(intf->seq_table[seq]); + + ent->inuse = 0; + msg = ent->recv_msg; + rv = 0; } spin_unlock_irqrestore(&(intf->seq_lock), flags); + if (msg) { + msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + msg->msg_data[0] = err; + msg->msg.netfn |= 1; /* Convert to a response. */ + msg->msg.data_len = 1; + msg->msg.data = msg->msg_data; + deliver_response(msg); + } + return rv; } @@ -769,7 +884,9 @@ struct ipmi_recv_msg *supplied_recv, int priority, unsigned char source_address, - unsigned char source_lun) + unsigned char source_lun, + int retries, + unsigned int retry_time_ms) { int rv = 0; struct ipmi_smi_msg *smi_msg; @@ -797,8 +914,11 @@ } if (addr->channel > IPMI_NUM_CHANNELS) { - rv = -EINVAL; - goto out_err; + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); + rv = -EINVAL; + goto out_err; } recv_msg->user = user; @@ -812,8 +932,12 @@ smi_addr = (struct ipmi_system_interface_addr *) addr; - if (smi_addr->lun > 3) + if (smi_addr->lun > 3) { + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); return -EINVAL; + } memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); @@ -824,11 +948,17 @@ { /* We don't let the user do these, since we manage the sequence numbers. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); rv = -EINVAL; goto out_err; } if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); rv = -EMSGSIZE; goto out_err; } @@ -840,41 +970,59 @@ if (msg->data_len > 0) memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 2; + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_local_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { struct ipmi_ipmb_addr *ipmb_addr; unsigned char ipmb_seq; long seqid; - int broadcast; - int retries; + int broadcast = 0; if (addr == NULL) { + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); rv = -EINVAL; goto out_err; } + if (retries < 0) { + if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) + retries = 0; /* Don't retry broadcasts. */ + else + retries = 4; + } if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { /* Broadcasts add a zero at the beginning of the message, but otherwise is the same as an IPMB address. */ addr->addr_type = IPMI_IPMB_ADDR_TYPE; broadcast = 1; - retries = 0; /* Don't retry broadcasts. */ - } else { - broadcast = 0; - retries = 4; } + + /* Default to 1 second retries. */ + if (retry_time_ms == 0) + retry_time_ms = 1000; + /* 9 for the header and 1 for the checksum, plus possibly one for the broadcast. 
*/ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); rv = -EMSGSIZE; goto out_err; } ipmb_addr = (struct ipmi_ipmb_addr *) addr; if (ipmb_addr->lun > 3) { + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); rv = -EINVAL; goto out_err; } @@ -884,6 +1032,9 @@ if (recv_msg->msg.netfn & 0x1) { /* It's a response, so use the user's sequence from msgid. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_ipmb_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, msgid, broadcast, source_address, source_lun); @@ -892,13 +1043,17 @@ spin_lock_irqsave(&(intf->seq_lock), flags); + spin_lock(&intf->counter_lock); + intf->sent_ipmb_commands++; + spin_unlock(&intf->counter_lock); + /* Create a sequence number with a 1 second timeout and 4 retries. */ - /* FIXME - magic number for the timeout. */ rv = intf_next_seq(intf, recv_msg, - 1000, + retry_time_ms, retries, + broadcast, &ipmb_seq, &seqid); if (rv) { @@ -934,16 +1089,19 @@ } } else { /* Unknown address type. */ - rv = -EINVAL; - goto out_err; + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); + rv = -EINVAL; + goto out_err; } #if DEBUG_MSGING { - int m; - for (m=0; mdata_size; m++) - printk(" %2.2x", smi_msg->data[m]); - printk("\n"); + int m; + for (m=0; mdata_size; m++) + printk(" %2.2x", smi_msg->data[m]); + printk("\n"); } #endif intf->handlers->sender(intf->send_info, smi_msg, priority); @@ -970,7 +1128,29 @@ NULL, NULL, priority, user->intf->my_address, - user->intf->my_lun); + user->intf->my_lun, + -1, 0); +} + +int ipmi_request_settime(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct ipmi_msg *msg, + int priority, + int retries, + unsigned int retry_time_ms) +{ + return i_ipmi_request(user, + user->intf, + addr, + msgid, + msg, + NULL, NULL, + priority, + user->intf->my_address, + user->intf->my_lun, + retries, + retry_time_ms); } int ipmi_request_supply_msgs(ipmi_user_t user, @@ -990,7 +1170,8 @@ supplied_recv, priority, user->intf->my_address, - user->intf->my_lun); + user->intf->my_lun, + -1, 0); } int ipmi_request_with_source(ipmi_user_t user, @@ -1009,7 +1190,124 @@ NULL, NULL, priority, source_address, - source_lun); + source_lun, + -1, 0); +} + +static int ipmb_file_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + char *out = (char *) page; + ipmi_smi_t intf = data; + + return sprintf(out, "%x\n", intf->my_address); +} + +static int version_file_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + char *out = (char *) page; + ipmi_smi_t intf = data; + + return sprintf(out, "%d.%d\n", + intf->version_major, intf->version_minor); +} + +static int stat_file_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + char *out = (char *) page; + ipmi_smi_t intf = data; + + out += sprintf(out, "sent_invalid_commands: %d\n", + intf->sent_invalid_commands); + out += sprintf(out, "sent_local_commands: %d\n", + intf->sent_local_commands); + out += sprintf(out, "handled_local_responses: %d\n", + intf->handled_local_responses); + out += sprintf(out, "unhandled_local_responses: %d\n", + intf->unhandled_local_responses); + out += sprintf(out, 
"sent_ipmb_commands: %d\n", + intf->sent_ipmb_commands); + out += sprintf(out, "sent_ipmb_command_errs: %d\n", + intf->sent_ipmb_command_errs); + out += sprintf(out, "retransmitted_ipmb_commands: %d\n", + intf->retransmitted_ipmb_commands); + out += sprintf(out, "timed_out_ipmb_commands: %d\n", + intf->timed_out_ipmb_commands); + out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n", + intf->timed_out_ipmb_broadcasts); + out += sprintf(out, "sent_ipmb_responses: %d\n", + intf->sent_ipmb_responses); + out += sprintf(out, "handled_ipmb_responses: %d\n", + intf->handled_ipmb_responses); + out += sprintf(out, "invalid_ipmb_responses: %d\n", + intf->invalid_ipmb_responses); + out += sprintf(out, "unhandled_ipmb_responses: %d\n", + intf->unhandled_ipmb_responses); + out += sprintf(out, "handled_commands: %d\n", + intf->handled_commands); + out += sprintf(out, "invalid_commands: %d\n", + intf->invalid_commands); + out += sprintf(out, "unhandled_commands: %d\n", + intf->unhandled_commands); + out += sprintf(out, "invalid_events: %d\n", + intf->invalid_events); + out += sprintf(out, "events: %d\n", + intf->events); + + return (out - ((char *) page)); +} + +int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, + read_proc_t *read_proc, write_proc_t *write_proc, + void *data, struct module *owner) +{ + struct proc_dir_entry *file; + int rv = 0; + + file = create_proc_entry(name, 0, smi->proc_dir); + if (!file) + rv = -ENOMEM; + else { + file->nlink = 1; + file->data = data; + file->read_proc = read_proc; + file->write_proc = write_proc; + file->owner = owner; + } + + return rv; +} + +static int add_proc_entries(ipmi_smi_t smi, int num) +{ + int rv = 0; + + sprintf(smi->proc_dir_name, "%d", num); + smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); + if (!smi->proc_dir) + rv = -ENOMEM; + else { + smi->proc_dir->owner = THIS_MODULE; + } + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "stats", + stat_file_read_proc, NULL, + smi, THIS_MODULE); + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "ipmb", + ipmb_file_read_proc, NULL, + smi, THIS_MODULE); + + if (rv == 0) + rv = ipmi_smi_add_proc_entry(smi, "version", + version_file_read_proc, NULL, + smi, THIS_MODULE); + + return rv; } int ipmi_register_smi(struct ipmi_smi_handlers *handlers, @@ -1040,6 +1338,9 @@ new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL); if (!new_intf) return -ENOMEM; + memset(new_intf, 0, sizeof(*new_intf)); + + new_intf->proc_dir = NULL; rv = -ENOMEM; @@ -1069,6 +1370,8 @@ INIT_LIST_HEAD(&(new_intf->cmd_rcvrs)); new_intf->all_cmd_rcvr = NULL; + spin_lock_init(&(new_intf->counter_lock)); + spin_lock_irqsave(&interfaces_lock, flags); ipmi_interfaces[i] = new_intf; spin_unlock_irqrestore(&interfaces_lock, flags); @@ -1089,6 +1392,9 @@ /* Well, it went away. Just return. */ goto out; + if (rv == 0) + rv = add_proc_entries(*intf, i); + if (rv == 0) { /* Call all the watcher interfaces to tell them that a new interface is available. 
*/ @@ -1096,7 +1402,11 @@ list_for_each(entry, &smi_watchers) { struct ipmi_smi_watcher *w; w = list_entry(entry, struct ipmi_smi_watcher, link); - w->new_smi(i); + if (try_inc_mod_count(w->owner)) { + w->new_smi(i); + if (w->owner) + __MOD_DEC_USE_COUNT(w->owner); + } } up_read(&smi_watchers_sem); } @@ -1104,8 +1414,12 @@ out: up_read(&interfaces_sem); - if (rv) + if (rv) { + if (new_intf->proc_dir) + remove_proc_entry(new_intf->proc_dir_name, + proc_ipmi_root); kfree(new_intf); + } return rv; } @@ -1163,6 +1477,8 @@ { for (i=0; iproc_dir_name, + proc_ipmi_root); spin_lock_irqsave(&interfaces_lock, flags); ipmi_interfaces[i] = NULL; clean_up_interface_data(intf); @@ -1204,15 +1520,21 @@ { struct ipmi_ipmb_addr ipmb_addr; struct ipmi_recv_msg *recv_msg; + unsigned long flags; - if (msg->rsp_size < 11) + if (msg->rsp_size < 11) { /* Message not big enough, just ignore it. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->invalid_ipmb_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); return 0; + } - if (msg->rsp[2] != 0) + if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; + } ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; ipmb_addr.slave_addr = msg->rsp[6]; @@ -1231,6 +1553,9 @@ { /* We were unable to find the sequence number, so just nuke the message. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->unhandled_ipmb_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); return 0; } @@ -1244,6 +1569,9 @@ recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 10; recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + spin_lock_irqsave(&intf->counter_lock, flags); + intf->handled_ipmb_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); deliver_response(recv_msg); return 0; @@ -1252,18 +1580,23 @@ static int handle_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { - struct list_head *entry; + struct list_head *entry; struct cmd_rcvr *rcvr; - int rv = 0; - unsigned char netfn; - unsigned char cmd; - ipmi_user_t user = NULL; + int rv = 0; + unsigned char netfn; + unsigned char cmd; + ipmi_user_t user = NULL; struct ipmi_ipmb_addr *ipmb_addr; struct ipmi_recv_msg *recv_msg; + unsigned long flags; - if (msg->rsp_size < 10) + if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->invalid_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); return 0; + } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ @@ -1291,6 +1624,10 @@ if (user == NULL) { /* We didn't find a user, deliver an error response. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->unhandled_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); msg->data[1] = IPMI_SEND_MSG_CMD; msg->data[2] = msg->rsp[3]; @@ -1311,6 +1648,10 @@ causes it to not be freed or queued. */ } else { /* Deliver the message to the user. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->handled_commands++; + spin_unlock_irqrestore(&intf->counter_lock, flags); + recv_msg = ipmi_alloc_recv_msg(); if (! recv_msg) { /* We couldn't allocate memory for the @@ -1374,6 +1715,9 @@ if (msg->rsp_size < 19) { /* Message is too small to be an IPMB event. 
*/ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->invalid_events++; + spin_unlock_irqrestore(&intf->counter_lock, flags); return 0; } @@ -1386,6 +1730,10 @@ spin_lock_irqsave(&(intf->events_lock), flags); + spin_lock(&intf->counter_lock); + intf->events++; + spin_unlock(&intf->counter_lock); + /* Allocate and fill in one message for every user that is getting events. */ list_for_each(entry, &(intf->users)) { @@ -1459,6 +1807,7 @@ struct ipmi_recv_msg *recv_msg; int found = 0; struct list_head *entry; + unsigned long flags; recv_msg = (struct ipmi_recv_msg *) msg->user_data; @@ -1474,11 +1823,20 @@ } if (!found) { + /* Special handling for NULL users. */ + if (!recv_msg->user && intf->null_user_handler) + intf->null_user_handler(intf, msg); /* The user for the message went away, so give up. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->unhandled_local_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); ipmi_free_recv_msg(recv_msg); } else { struct ipmi_system_interface_addr *smi_addr; + spin_lock_irqsave(&intf->counter_lock, flags); + intf->handled_local_responses++; + spin_unlock_irqrestore(&intf->counter_lock, flags); recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; recv_msg->msgid = msg->msgid; smi_addr = ((struct ipmi_system_interface_addr *) @@ -1505,7 +1863,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { - int requeue; + int requeue; if (msg->rsp_size < 2) { /* Message is too small to be correct. */ @@ -1551,10 +1909,30 @@ working on it. */ read_lock(&(intf->users_lock)); - if ((msg->data_size >= 2) && (msg->data[1] == IPMI_SEND_MSG_CMD)) { + if ((msg->data_size >= 2) + && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) + && (msg->data[1] == IPMI_SEND_MSG_CMD)) { /* This is the local response to a send, start the timer for these. */ - intf_start_seq_timer(intf, msg->msgid); + + /* Check for errors, if we get certain errors (ones + that mean basically we can try again later), we + ignore them and start the timer. Otherwise we + report the error immediately. */ + if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) + && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) + && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)) + { + /* Got an error sending the message, handle it. */ + spin_lock_irqsave(&intf->counter_lock, flags); + intf->sent_ipmb_command_errs++; + spin_unlock_irqrestore(&intf->counter_lock, flags); + intf_err_seq(intf, msg->msgid, msg->rsp[2]); + } else { + /* The message was sent, start the timer. */ + intf_start_seq_timer(intf, msg->msgid); + } + ipmi_free_smi_msg(msg); goto out_unlock; } @@ -1699,6 +2077,12 @@ ent->inuse = 0; msg = ent->recv_msg; list_add_tail(&(msg->link), &timeouts); + spin_lock(&intf->counter_lock); + if (ent->broadcast) + intf->timed_out_ipmb_broadcasts++; + else + intf->timed_out_ipmb_commands++; + spin_unlock(&intf->counter_lock); } else { /* More retries, send again. */ @@ -1708,6 +2092,9 @@ ent->retries_left--; send_from_recv_msg(intf, ent->recv_msg, NULL, j, ent->seqid); + spin_lock(&intf->counter_lock); + intf->retransmitted_ipmb_commands++; + spin_unlock(&intf->counter_lock); } } spin_unlock_irqrestore(&(intf->seq_lock), flags); @@ -1740,13 +2127,16 @@ static struct timer_list ipmi_timer; -/* Call every 100 ms. */ +/* Call every ~100 ms. */ #define IPMI_TIMEOUT_TIME 100 -#define IPMI_TIMEOUT_JIFFIES (IPMI_TIMEOUT_TIME/(1000/HZ)) -/* Request events from the queue every second. Hopefully, in the - future, IPMI will add a way to know immediately if an event is - in the queue. 
*/ +/* How many jiffies does it take to get to the timeout time. */ +#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) + +/* Request events from the queue every second (this is the number of + IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the + future, IPMI will add a way to know immediately if an event is in + the queue and this silliness can go away. */ #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) static volatile int stop_operation = 0; @@ -1822,18 +2212,48 @@ { } -static void send_panic_events(void) +#ifdef CONFIG_IPMI_PANIC_STRING +static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) +{ + if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2)) + && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD) + && (msg->rsp[2] == IPMI_CC_NO_ERROR)) + { + /* A get event receiver command, save it. */ + intf->event_receiver = msg->rsp[3]; + intf->event_receiver_lun = msg->rsp[4] & 0x3; + } +} + +static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) +{ + if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2)) + && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD) + && (msg->rsp[2] == IPMI_CC_NO_ERROR)) + { + /* A get device id command, save if we are an event + receiver or generator. */ + intf->local_sel_device = (msg->rsp[8] >> 2) & 1; + intf->local_event_generator = (msg->rsp[8] >> 5) & 1; + } +} +#endif + +static void send_panic_events(char *str) { struct ipmi_msg msg; ipmi_smi_t intf; - unsigned char data[8]; + unsigned char data[16]; int i; - struct ipmi_system_interface_addr addr; + struct ipmi_system_interface_addr *si; + struct ipmi_addr addr; struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; - addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - addr.channel = IPMI_BMC_CHANNEL; + si = (struct ipmi_system_interface_addr *) &addr; + si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si->channel = IPMI_BMC_CHANNEL; + si->lun = 0; /* Fill in an event telling that we have failed. */ msg.netfn = 0x04; /* Sensor or Event. */ @@ -1846,12 +2266,13 @@ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ - /* These used to have the first three bytes of the panic string, - but not only is that not terribly useful, it's not available - any more. */ - data[3] = 0; - data[6] = 0; - data[7] = 0; + /* Put a few breadcrums in. Hopefully later we can add more things + to make the panic events more useful. */ + if (str) { + data[3] = str[0]; + data[6] = str[1]; + data[7] = str[2]; + } smi_msg.done = dummy_smi_done_handler; recv_msg.done = dummy_recv_done_handler; @@ -1862,18 +2283,147 @@ if (intf == NULL) continue; + /* Send the event announcing the panic. */ intf->handlers->set_run_to_completion(intf->send_info, 1); i_ipmi_request(NULL, intf, - (struct ipmi_addr *) &addr, + &addr, 0, &msg, &smi_msg, &recv_msg, 0, intf->my_address, - intf->my_lun); + intf->my_lun, + 0, 1); /* Don't retry, and don't wait. */ } + +#ifdef CONFIG_IPMI_PANIC_STRING + /* On every interface, dump a bunch of OEM event holding the + string. */ + if (!str) + return; + + for (i=0; ilocal_sel_device = 0; + intf->local_event_generator = 0; + intf->event_receiver = 0; + + /* Request the device info from the local MC. 
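+	   The response is parsed by device_id_fetcher() above to learn whether this MC is an SEL device and/or an event generator.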
*/ + msg.netfn = IPMI_NETFN_APP_REQUEST; + msg.cmd = IPMI_GET_DEVICE_ID_CMD; + msg.data = NULL; + msg.data_len = 0; + intf->null_user_handler = device_id_fetcher; + i_ipmi_request(NULL, + intf, + &addr, + 0, + &msg, + &smi_msg, + &recv_msg, + 0, + intf->my_address, + intf->my_lun, + 0, 1); /* Don't retry, and don't wait. */ + + if (intf->local_event_generator) { + /* Request the event receiver from the local MC. */ + msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; + msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; + msg.data = NULL; + msg.data_len = 0; + intf->null_user_handler = event_receiver_fetcher; + i_ipmi_request(NULL, + intf, + &addr, + 0, + &msg, + &smi_msg, + &recv_msg, + 0, + intf->my_address, + intf->my_lun, + 0, 1); /* no retry, and no wait. */ + } + intf->null_user_handler = NULL; + + /* Validate the event receiver. The low bit must not + be 1 (it must be a valid IPMB address), it cannot + be zero, and it must not be my address. */ + if (((intf->event_receiver & 1) == 0) + && (intf->event_receiver != 0) + && (intf->event_receiver != intf->my_address)) + { + /* The event receiver is valid, send an IPMB + message. */ + ipmb = (struct ipmi_ipmb_addr *) &addr; + ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb->channel = 0; /* FIXME - is this right? */ + ipmb->lun = intf->event_receiver_lun; + ipmb->slave_addr = intf->event_receiver; + } else if (intf->local_sel_device) { + /* The event receiver was not valid (or was + me), but I am an SEL device, just dump it + in my SEL. */ + si = (struct ipmi_system_interface_addr *) &addr; + si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si->channel = IPMI_BMC_CHANNEL; + si->lun = 0; + } else + continue; /* No where to send the event. */ + + + msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ + msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; + msg.data = data; + msg.data_len = 16; + + j = 0; + while (*p) { + int size = strlen(p); + + if (size > 11) + size = 11; + data[0] = 0; + data[1] = 0; + data[2] = 0xf0; /* OEM event without timestamp. */ + data[3] = intf->my_address; + data[4] = j++; /* sequence # */ + /* Always give 11 bytes, so strncpy will fill + it with zeroes for me. */ + strncpy(data+5, p, 11); + p += size; + + i_ipmi_request(NULL, + intf, + &addr, + 0, + &msg, + &smi_msg, + &recv_msg, + 0, + intf->my_address, + intf->my_lun, + 0, 1); /* no retry, and no wait. 
*/ + } + } +#endif /* CONFIG_IPMI_PANIC_STRING */ } #endif /* CONFIG_IPMI_PANIC_EVENT */ @@ -1900,7 +2450,7 @@ } #ifdef CONFIG_IPMI_PANIC_EVENT - send_panic_events(); + send_panic_events(ptr); #endif return NOTIFY_DONE; @@ -1912,7 +2462,6 @@ 200 /* priority: INT_MAX >= x >= 0 */ }; - static __init int ipmi_init_msghandler(void) { int i; @@ -1920,10 +2469,21 @@ if (initialized) return 0; + printk(KERN_INFO "ipmi message handler version " + IPMI_MSGHANDLER_VERSION "\n"); + for (i=0; iowner = THIS_MODULE; + init_timer(&ipmi_timer); ipmi_timer.data = 0; ipmi_timer.function = ipmi_timeout; @@ -1934,8 +2494,6 @@ initialized = 1; - printk(KERN_INFO "ipmi: message handler initialized\n"); - return 0; } @@ -1980,6 +2538,7 @@ EXPORT_SYMBOL(ipmi_destroy_user); EXPORT_SYMBOL(ipmi_get_version); EXPORT_SYMBOL(ipmi_request); +EXPORT_SYMBOL(ipmi_request_settime); EXPORT_SYMBOL(ipmi_request_supply_msgs); EXPORT_SYMBOL(ipmi_request_with_source); EXPORT_SYMBOL(ipmi_register_smi); @@ -2001,3 +2560,4 @@ EXPORT_SYMBOL(ipmi_get_my_address); EXPORT_SYMBOL(ipmi_set_my_LUN); EXPORT_SYMBOL(ipmi_get_my_LUN); +EXPORT_SYMBOL(ipmi_smi_add_proc_entry); diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_si.c linux-2.4.23/drivers/char/ipmi/ipmi_si.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_si.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_si.c 2003-12-06 17:46:43.002453092 +0100 @@ -0,0 +1,1729 @@ +/* + * ipmi_si.c + * + * The interface to the IPMI driver for the system interfaces (KCS, SMIC, + * BT in the future). + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * This file holds the "policy" for the interface to the SMI state + * machine. It does the configuration, handles timers and interrupts, + * and drives the real SMI state machine. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HIGH_RES_TIMERS +#include +# if defined(schedule_next_int) +/* Old high-res timer code, do translations. 
*/ +# define get_arch_cycles(a) quick_update_jiffies_sub(a) +# define arch_cycles_per_jiffy cycles_per_jiffies +# endif +static inline void add_usec_to_timer(struct timer_list *t, long v) +{ + t->sub_expires += nsec_to_arch_cycle(v * 1000); + while (t->sub_expires >= arch_cycles_per_jiffy) + { + t->expires++; + t->sub_expires -= arch_cycles_per_jiffy; + } +} +#endif +#include +#include +#include +#include "ipmi_si_sm.h" +#include + +#define IPMI_SI_VERSION "v27" + +/* Measure times between events in the driver. */ +#undef DEBUG_TIMING + +/* Call every 10 ms when nothing is going on. */ +#define SI_TIMEOUT_TIME_USEC 10000 +#define SI_USEC_PER_JIFFY (1000000/HZ) +#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) +#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a + short timeout */ + +enum si_intf_state { + SI_NORMAL, + SI_GETTING_FLAGS, + SI_GETTING_EVENTS, + SI_CLEARING_FLAGS, + SI_CLEARING_FLAGS_THEN_SET_IRQ, + SI_GETTING_MESSAGES, + SI_ENABLE_INTERRUPTS1, + SI_ENABLE_INTERRUPTS2 + /* FIXME - add watchdog stuff. */ +}; + +enum si_type { + SI_KCS, SI_SMIC, SI_BT +}; + +struct smi_info +{ + ipmi_smi_t intf; + struct si_sm_data *si_sm; + struct si_sm_handlers *handlers; + enum si_type si_type; + spinlock_t si_lock; + spinlock_t msg_lock; + struct list_head xmit_msgs; + struct list_head hp_xmit_msgs; + struct ipmi_smi_msg *curr_msg; + enum si_intf_state si_state; + + /* Used to handle the various types of I/O that can occur with + IPMI */ + struct si_sm_io io; + int (*io_setup)(struct smi_info *info); + void (*io_cleanup)(struct smi_info *info); + int (*irq_setup)(struct smi_info *info); + void (*irq_cleanup)(struct smi_info *info); + unsigned int io_size; + + /* Flags from the last GET_MSG_FLAGS command, used when an ATTN + is set to hold the flags until we are done handling everything + from the flags. */ +#define RECEIVE_MSG_AVAIL 0x01 +#define EVENT_MSG_BUFFER_FULL 0x02 +#define WDT_PRE_TIMEOUT_INT 0x08 + unsigned char msg_flags; + + /* If set to true, this will request events the next time the + state machine is idle. */ + atomic_t req_events; + + /* If true, run the state machine to completion on every send + call. Generally used after a panic to make sure stuff goes + out. */ + int run_to_completion; + + /* The I/O port of an SI interface. */ + int port; + + /* zero if no irq; */ + int irq; + + /* The physical and remapped memory addresses of a SI interface. */ + unsigned long physaddr; + unsigned char *addr; + + /* The timer for this si. */ + struct timer_list si_timer; + + /* The time (in jiffies) the last timeout occurred at. */ + unsigned long last_timeout_jiffies; + + /* Used to gracefully stop the timer without race conditions. */ + volatile int stop_operation; + volatile int timer_stopped; + + /* The driver will disable interrupts when it gets into a + situation where it cannot handle messages due to lack of + memory. Once that situation clears up, it will re-enable + interrupts. */ + int interrupt_disabled; + + unsigned char ipmi_si_dev_rev; + unsigned char ipmi_si_fw_rev_major; + unsigned char ipmi_si_fw_rev_minor; + unsigned char ipmi_version_major; + unsigned char ipmi_version_minor; + + /* Counters and things for the proc filesystem. 
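+	   These are protected by count_lock and reported read-only through the interface's "si_stats" proc entry (see stat_file_read_proc() below).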
*/ + spinlock_t count_lock; + unsigned long short_timeouts; + unsigned long long_timeouts; + unsigned long timeout_restarts; + unsigned long idles; + unsigned long interrupts; + unsigned long attentions; + unsigned long flag_fetches; + unsigned long hosed_count; + unsigned long complete_transactions; + unsigned long events; + unsigned long watchdog_pretimeouts; + unsigned long incoming_messages; +}; + +static void si_restart_short_timer(struct smi_info *smi_info); + +static void deliver_recv_msg(struct smi_info *smi_info, + struct ipmi_smi_msg *msg) +{ + /* Deliver the message to the upper layer with the lock + released. */ + spin_unlock(&(smi_info->si_lock)); + ipmi_smi_msg_received(smi_info->intf, msg); + spin_lock(&(smi_info->si_lock)); +} + +static void return_hosed_msg(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg = smi_info->curr_msg; + + /* Make it a reponse */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = 0xFF; /* Unknown error. */ + msg->rsp_size = 3; + + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); +} + +static enum si_sm_result start_next_msg(struct smi_info *smi_info) +{ + int rv; + struct list_head *entry = NULL; +#ifdef DEBUG_TIMING + struct timeval t; +#endif + + /* No need to save flags, we aleady have interrupts off and we + already hold the SMI lock. */ + spin_lock(&(smi_info->msg_lock)); + + /* Pick the high priority queue first. */ + if (! list_empty(&(smi_info->hp_xmit_msgs))) { + entry = smi_info->hp_xmit_msgs.next; + } else if (! list_empty(&(smi_info->xmit_msgs))) { + entry = smi_info->xmit_msgs.next; + } + + if (!entry) { + smi_info->curr_msg = NULL; + rv = SI_SM_IDLE; + } else { + int err; + + list_del(entry); + smi_info->curr_msg = list_entry(entry, + struct ipmi_smi_msg, + link); +#ifdef DEBUG_TIMING + do_gettimeofday(&t); + printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + err = smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + if (err) { + return_hosed_msg(smi_info); + } + + rv = SI_SM_CALL_WITHOUT_DELAY; + } + spin_unlock(&(smi_info->msg_lock)); + + return rv; +} + +static void start_enable_irq(struct smi_info *smi_info) +{ + unsigned char msg[2]; + + /* If we are enabling interrupts, we have to tell the + BMC to use them. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + smi_info->si_state = SI_ENABLE_INTERRUPTS1; +} + +static void start_clear_flags(struct smi_info *smi_info) +{ + unsigned char msg[3]; + + /* Make sure the watchdog pre-timeout flag is not set at startup. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + smi_info->si_state = SI_CLEARING_FLAGS; +} + +/* When we have a situtaion where we run out of memory and cannot + allocate messages, we just leave them in the BMC and run the system + polled until we can allocate some memory. Once we have some + memory, we will re-enable the interrupt. 
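+   The interrupt_disabled flag keeps disable_si_irq() and enable_si_irq() below from toggling the IRQ more than once.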
*/ +static inline void disable_si_irq(struct smi_info *smi_info) +{ + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { + disable_irq_nosync(smi_info->irq); + smi_info->interrupt_disabled = 1; + } +} + +static inline void enable_si_irq(struct smi_info *smi_info) +{ + if ((smi_info->irq) && (smi_info->interrupt_disabled)) { + enable_irq(smi_info->irq); + smi_info->interrupt_disabled = 0; + } +} + +static void handle_flags(struct smi_info *smi_info) +{ + if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { + /* Watchdog pre-timeout */ + spin_lock(&smi_info->count_lock); + smi_info->watchdog_pretimeouts++; + spin_unlock(&smi_info->count_lock); + + start_clear_flags(smi_info); + smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; + spin_unlock(&(smi_info->si_lock)); + ipmi_smi_watchdog_pretimeout(smi_info->intf); + spin_lock(&(smi_info->si_lock)); + } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { + /* Messages available. */ + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + disable_si_irq(smi_info); + smi_info->si_state = SI_NORMAL; + return; + } + enable_si_irq(smi_info); + + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_MESSAGES; + } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { + /* Events available. */ + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + disable_si_irq(smi_info); + smi_info->si_state = SI_NORMAL; + return; + } + enable_si_irq(smi_info); + + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_EVENTS; + } else { + smi_info->si_state = SI_NORMAL; + } +} + +static void handle_transaction_done(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg; +#ifdef DEBUG_TIMING + struct timeval t; + + do_gettimeofday(&t); + printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + switch (smi_info->si_state) { + case SI_NORMAL: + if (!smi_info->curr_msg) + break; + + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* Do this here becase deliver_recv_msg() releases the + lock, and a new message can be put in during the + time the lock is released. */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); + break; + + case SI_GETTING_FLAGS: + { + unsigned char msg[4]; + unsigned int len; + + /* We got the flags from the SMI, now handle them. */ + len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) { + /* Error fetching flags, just give up for + now. */ + smi_info->si_state = SI_NORMAL; + } else if (len < 3) { + /* Hmm, no flags. That's technically illegal, but + don't use uninitialized data. */ + smi_info->si_state = SI_NORMAL; + } else { + smi_info->msg_flags = msg[3]; + handle_flags(smi_info); + } + break; + } + + case SI_CLEARING_FLAGS: + case SI_CLEARING_FLAGS_THEN_SET_IRQ: + { + unsigned char msg[3]; + + /* We cleared the flags. 
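+	   msg[2] holds the completion code of the clear-flags response; a non-zero value is only logged.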
*/ + smi_info->handlers->get_result(smi_info->si_sm, msg, 3); + if (msg[2] != 0) { + /* Error clearing flags */ + printk(KERN_WARNING + "ipmi_smi: Error clearing flags: %2.2x\n", + msg[2]); + } + if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) + start_enable_irq(smi_info); + else + smi_info->si_state = SI_NORMAL; + break; + } + + case SI_GETTING_EVENTS: + { + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* Do this here becase deliver_recv_msg() releases the + lock, and a new message can be put in during the + time the lock is released. */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + if (msg->rsp[2] != 0) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the event flag. */ + smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + } else { + spin_lock(&smi_info->count_lock); + smi_info->events++; + spin_unlock(&smi_info->count_lock); + + deliver_recv_msg(smi_info, msg); + } + handle_flags(smi_info); + break; + } + + case SI_GETTING_MESSAGES: + { + smi_info->curr_msg->rsp_size + = smi_info->handlers->get_result( + smi_info->si_sm, + smi_info->curr_msg->rsp, + IPMI_MAX_MSG_LENGTH); + + /* Do this here becase deliver_recv_msg() releases the + lock, and a new message can be put in during the + time the lock is released. */ + msg = smi_info->curr_msg; + smi_info->curr_msg = NULL; + if (msg->rsp[2] != 0) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the msg flag. */ + smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + } else { + spin_lock(&smi_info->count_lock); + smi_info->incoming_messages++; + spin_unlock(&smi_info->count_lock); + + deliver_recv_msg(smi_info, msg); + } + handle_flags(smi_info); + break; + } + + case SI_ENABLE_INTERRUPTS1: + { + unsigned char msg[4]; + + /* We got the flags from the SMI, now handle them. */ + smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) { + printk(KERN_WARNING + "ipmi_smi: Could not enable interrupts" + ", failed get, using polled mode.\n"); + smi_info->si_state = SI_NORMAL; + } else { + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = msg[3] | 1; /* enable msg queue int */ + smi_info->handlers->start_transaction( + smi_info->si_sm, msg, 3); + smi_info->si_state = SI_ENABLE_INTERRUPTS2; + } + break; + } + + case SI_ENABLE_INTERRUPTS2: + { + unsigned char msg[4]; + + /* We got the flags from the SMI, now handle them. */ + smi_info->handlers->get_result(smi_info->si_sm, msg, 4); + if (msg[2] != 0) { + printk(KERN_WARNING + "ipmi_smi: Could not enable interrupts" + ", failed set, using polled mode.\n"); + } + smi_info->si_state = SI_NORMAL; + break; + } + } +} + +/* Called on timeouts and events. Timeouts should pass the elapsed + time, interrupts should pass in zero. */ +static enum si_sm_result smi_event_handler(struct smi_info *smi_info, + int time) +{ + enum si_sm_result si_sm_result; + + restart: + /* There used to be a loop here that waited a little while + (around 25us) before giving up. That turned out to be + pointless, the minimum delays I was seeing were in the 300us + range, which is far too long to wait in an interrupt. So + we just run until the state machine tells us something + happened or it needs a delay. 
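+	   Only the first event() call is given the elapsed time; the follow-up calls below pass zero so the same interval is not counted twice.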
*/ + si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); + time = 0; + while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) + { + si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); + } + + if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) + { + spin_lock(&smi_info->count_lock); + smi_info->complete_transactions++; + spin_unlock(&smi_info->count_lock); + + handle_transaction_done(smi_info); + si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); + } + else if (si_sm_result == SI_SM_HOSED) + { + spin_lock(&smi_info->count_lock); + smi_info->hosed_count++; + spin_unlock(&smi_info->count_lock); + + if (smi_info->curr_msg != NULL) { + /* If we were handling a user message, format + a response to send to the upper layer to + tell it about the error. */ + return_hosed_msg(smi_info); + } + si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); + smi_info->si_state = SI_NORMAL; + } + + /* We prefer handling attn over new messages. */ + if (si_sm_result == SI_SM_ATTN) + { + unsigned char msg[2]; + + spin_lock(&smi_info->count_lock); + smi_info->attentions++; + spin_unlock(&smi_info->count_lock); + + /* Got a attn, send down a get message flags to see + what's causing it. It would be better to handle + this in the upper layer, but due to the way + interrupts work with the SMI, that's not really + possible. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; + + smi_info->handlers->start_transaction( + smi_info->si_sm, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; + goto restart; + } + + /* If we are currently idle, try to start the next message. */ + if (si_sm_result == SI_SM_IDLE) { + spin_lock(&smi_info->count_lock); + smi_info->idles++; + spin_unlock(&smi_info->count_lock); + + si_sm_result = start_next_msg(smi_info); + if (si_sm_result != SI_SM_IDLE) + goto restart; + } + + if ((si_sm_result == SI_SM_IDLE) + && (atomic_read(&smi_info->req_events))) + { + /* We are idle and the upper layer requested that I fetch + events, so do so. */ + unsigned char msg[2]; + + spin_lock(&smi_info->count_lock); + smi_info->flag_fetches++; + spin_unlock(&smi_info->count_lock); + + atomic_set(&smi_info->req_events, 0); + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; + + smi_info->handlers->start_transaction( + smi_info->si_sm, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; + goto restart; + } + + return si_sm_result; +} + +static void sender(void *send_info, + struct ipmi_smi_msg *msg, + int priority) +{ + struct smi_info *smi_info = (struct smi_info *) send_info; + enum si_sm_result result; + unsigned long flags; +#ifdef DEBUG_TIMING + struct timeval t; +#endif + + spin_lock_irqsave(&(smi_info->msg_lock), flags); +#ifdef DEBUG_TIMING + do_gettimeofday(&t); + printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + + if (smi_info->run_to_completion) { + /* If we are running to completion, then throw it in + the list and run transactions until everything is + clear. Priority doesn't matter here. */ + list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); + + /* We have to release the msg lock and claim the smi + lock in this case, because of race conditions. 
*/ + spin_unlock_irqrestore(&(smi_info->msg_lock), flags); + + spin_lock_irqsave(&(smi_info->si_lock), flags); + result = smi_event_handler(smi_info, 0); + while (result != SI_SM_IDLE) { + udelay(SI_SHORT_TIMEOUT_USEC); + result = smi_event_handler(smi_info, + SI_SHORT_TIMEOUT_USEC); + } + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + return; + } else { + if (priority > 0) { + list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs)); + } else { + list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); + } + } + spin_unlock_irqrestore(&(smi_info->msg_lock), flags); + + spin_lock_irqsave(&(smi_info->si_lock), flags); + if ((smi_info->si_state == SI_NORMAL) + && (smi_info->curr_msg == NULL)) + { + start_next_msg(smi_info); + si_restart_short_timer(smi_info); + } + spin_unlock_irqrestore(&(smi_info->si_lock), flags); +} + +static void set_run_to_completion(void *send_info, int i_run_to_completion) +{ + struct smi_info *smi_info = (struct smi_info *) send_info; + enum si_sm_result result; + unsigned long flags; + + spin_lock_irqsave(&(smi_info->si_lock), flags); + + smi_info->run_to_completion = i_run_to_completion; + if (i_run_to_completion) { + result = smi_event_handler(smi_info, 0); + while (result != SI_SM_IDLE) { + udelay(SI_SHORT_TIMEOUT_USEC); + result = smi_event_handler(smi_info, + SI_SHORT_TIMEOUT_USEC); + } + } + + spin_unlock_irqrestore(&(smi_info->si_lock), flags); +} + +static void request_events(void *send_info) +{ + struct smi_info *smi_info = (struct smi_info *) send_info; + + atomic_set(&smi_info->req_events, 1); +} + +static int new_user(void *send_info) +{ + if (!try_inc_mod_count(THIS_MODULE)) + return -EBUSY; + return 0; +} + +static void user_left(void *send_info) +{ + MOD_DEC_USE_COUNT; +} + +static int initialized = 0; + +/* Must be called with interrupts off and with the si_lock held. */ +static void si_restart_short_timer(struct smi_info *smi_info) +{ +#if defined(CONFIG_HIGH_RES_TIMERS) + unsigned long flags; + unsigned long jiffies_now; + + if (del_timer(&(smi_info->si_timer))) { + /* If we don't delete the timer, then it will go off + immediately, anyway. So we only process if we + actually delete the timer. */ + + /* We already have irqsave on, so no need for it + here. */ + read_lock(&xtime_lock); + jiffies_now = jiffies; + smi_info->si_timer.expires = jiffies_now; + smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now); + read_unlock(&xtime_lock); + + add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); + + add_timer(&(smi_info->si_timer)); + spin_lock_irqsave(&smi_info->count_lock, flags); + smi_info->timeout_restarts++; + spin_unlock_irqrestore(&smi_info->count_lock, flags); + } +#endif +} + +static void smi_timeout(unsigned long data) +{ + struct smi_info *smi_info = (struct smi_info *) data; + enum si_sm_result smi_result; + unsigned long flags; + unsigned long jiffies_now; + unsigned long time_diff; +#ifdef DEBUG_TIMING + struct timeval t; +#endif + + if (smi_info->stop_operation) { + smi_info->timer_stopped = 1; + return; + } + + spin_lock_irqsave(&(smi_info->si_lock), flags); +#ifdef DEBUG_TIMING + do_gettimeofday(&t); + printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + jiffies_now = jiffies; + time_diff = ((jiffies_now - smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_result = smi_event_handler(smi_info, time_diff); + + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + + smi_info->last_timeout_jiffies = jiffies_now; + + if ((smi_info->irq) && (! 
smi_info->interrupt_disabled)) { + /* Running with interrupts, only do long timeouts. */ + smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + spin_lock_irqsave(&smi_info->count_lock, flags); + smi_info->long_timeouts++; + spin_unlock_irqrestore(&smi_info->count_lock, flags); + goto do_add_timer; + } + + /* If the state machine asks for a short delay, then shorten + the timer timeout. */ + if (smi_result == SI_SM_CALL_WITH_DELAY) { + spin_lock_irqsave(&smi_info->count_lock, flags); + smi_info->short_timeouts++; + spin_unlock_irqrestore(&smi_info->count_lock, flags); +#if defined(CONFIG_HIGH_RES_TIMERS) + add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); +#else + smi_info->si_timer.expires = jiffies + 1; +#endif + } else { + spin_lock_irqsave(&smi_info->count_lock, flags); + smi_info->long_timeouts++; + spin_unlock_irqrestore(&smi_info->count_lock, flags); + smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; +#if defined(CONFIG_HIGH_RES_TIMERS) + smi_info->si_timer.sub_expires = 0; +#endif + } + + do_add_timer: + add_timer(&(smi_info->si_timer)); +} + +static void si_irq_handler(int irq, void *data, struct pt_regs *regs) +{ + struct smi_info *smi_info = (struct smi_info *) data; + unsigned long flags; +#ifdef DEBUG_TIMING + struct timeval t; +#endif + + spin_lock_irqsave(&(smi_info->si_lock), flags); + + spin_lock(&smi_info->count_lock); + smi_info->interrupts++; + spin_unlock(&smi_info->count_lock); + + if (smi_info->stop_operation) + goto out; + +#ifdef DEBUG_TIMING + do_gettimeofday(&t); + printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + smi_event_handler(smi_info, 0); + out: + spin_unlock_irqrestore(&(smi_info->si_lock), flags); +} + +static struct ipmi_smi_handlers handlers = +{ + sender: sender, + request_events: request_events, + new_user: new_user, + user_left: user_left, + set_run_to_completion: set_run_to_completion +}; + +/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, + a default IO port, and 1 ACPI/SPMI address. 
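+   That is 4 + 4 + 1 + 1 = 10 possible interfaces, which is what the SI_MAX_DRIVERS definition below works out to.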
That sets SI_MAX_DRIVERS */ + +#define SI_MAX_PARMS 4 +#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2) +static struct smi_info *smi_infos[SI_MAX_DRIVERS] = +{ NULL, NULL, NULL, NULL }; + +#define DEVICE_NAME "ipmi_si" + +#define DEFAULT_KCS_IO_PORT 0xca2 +#define DEFAULT_SMIC_IO_PORT 0xca9 + +static int si_trydefaults = 1; +static char *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL }; +static unsigned long si_addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 }; +static unsigned int si_ports[SI_MAX_PARMS] = { 0, 0, 0, 0 }; +static int si_irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 }; + + +MODULE_PARM(si_trydefaults, "i"); +MODULE_PARM(si_type, "1-4s"); +MODULE_PARM(si_addrs, "1-4l"); +MODULE_PARM(si_irqs, "1-4i"); +MODULE_PARM(si_ports, "1-4i"); + + +static int std_irq_setup(struct smi_info *info) +{ + int rv; + + if (!info->irq) + return 0; + + rv = request_irq(info->irq, + si_irq_handler, + SA_INTERRUPT, + DEVICE_NAME, + info); + if (rv) { + printk(KERN_WARNING + "ipmi_smi: %s unable to claim interrupt %d," + " running polled\n", + DEVICE_NAME, info->irq); + info->irq = 0; + } else { + printk(" Using irq %d\n", info->irq); + } + + return rv; +} + +static void std_irq_cleanup(struct smi_info *info) +{ + if (!info->irq) + return; + + free_irq(info->irq, info); +} + +static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) +{ + unsigned int *addr = io->info; + + return inb((*addr)+offset); +} + +static void port_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + unsigned int *addr = io->info; + + outb(b, (*addr)+offset); +} + +static int port_setup(struct smi_info *info) +{ + unsigned int *addr = info->io.info; + + if (!addr || (!*addr)) + return -ENODEV; + + if (request_region(*addr, info->io_size, DEVICE_NAME) == NULL) + return -EIO; + return 0; +} + +static void port_cleanup(struct smi_info *info) +{ + unsigned int *addr = info->io.info; + + if (addr && (*addr)) + release_region (*addr, info->io_size); + kfree(info); +} + +static int try_init_port(int intf_num, struct smi_info **new_info) +{ + struct smi_info *info; + + if (!si_ports[intf_num]) + return -ENODEV; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + printk("ipmi_smi: Could not allocate SMI data\n"); + return -ENOMEM; + } + memset(info, 0, sizeof(*info)); + + info->io_setup = port_setup; + info->io_cleanup = port_cleanup; + info->io.inputb = port_inb; + info->io.outputb = port_outb; + info->io.info = &(si_ports[intf_num]); + info->irq = 0; + info->irq_setup = NULL; + *new_info = info; + + printk("ipmi_smi: Trying state machine at I/O address 0x%x\n", + si_ports[intf_num]); + return 0; +} + +static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset) +{ + return readb((io->addr)+offset); +} + +static void mem_outb(struct si_sm_io *io, unsigned int offset, + unsigned char b) +{ + writeb(b, (io->addr)+offset); +} + +static int mem_setup(struct smi_info *info) +{ + unsigned long *addr = info->io.info; + + if (!addr || (!*addr)) + return -ENODEV; + + if (request_mem_region(*addr, info->io_size, DEVICE_NAME) == NULL) + return -EIO; + info->io.addr = ioremap(*addr, info->io_size); + if (info->io.addr == NULL) { + release_mem_region(*addr, info->io_size); + return -EIO; + } + return 0; +} + +static void mem_cleanup(struct smi_info *info) +{ + unsigned int *addr = info->io.info; + + if (addr) { + iounmap(info->addr); + release_mem_region(*addr, info->io_size); + } + kfree(info); +} + +static int try_init_mem(int intf_num, struct smi_info **new_info) +{ + struct smi_info *info; + + if 
(!si_addrs[intf_num]) + return -ENODEV; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + printk("ipmi_smi: Could not allocate SMI data\n"); + return -ENOMEM; + } + memset(info, 0, sizeof(*info)); + + info->io_setup = mem_setup; + info->io_cleanup = mem_cleanup; + info->io.inputb = mem_inb; + info->io.outputb = mem_outb; + info->io.info = &(si_addrs[intf_num]); + info->irq = 0; + info->irq_setup = NULL; + *new_info = info; + + printk("ipmi_smi: Trying state machine at memory address 0x%lx\n", + si_addrs[intf_num]); + return 0; +} + + +#ifdef CONFIG_ACPI_INTERPRETER + +#include +#include +#include +#include + +/* Once we get an ACPI failure, we don't try any more, because we go + through the tables sequentially. Once we don't find a table, there + are no more. */ +static int acpi_failure = 0; + +/* For GPE-type interrupts. */ +void ipmi_acpi_gpe(void *context) +{ + struct smi_info *smi_info = context; + unsigned long flags; +#ifdef DEBUG_TIMING + struct timeval t; +#endif + + spin_lock_irqsave(&(smi_info->si_lock), flags); + + spin_lock(&smi_info->count_lock); + smi_info->interrupts++; + spin_unlock(&smi_info->count_lock); + + if (smi_info->stop_operation) + goto out; + +#ifdef DEBUG_TIMING + do_gettimeofday(&t); + printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); +#endif + smi_event_handler(smi_info, 0); + out: + spin_unlock_irqrestore(&(smi_info->si_lock), flags); +} + +static int acpi_gpe_irq_setup(struct smi_info *info) +{ + acpi_status status; + + if (!info->irq) + return 0; + + /* FIXME - is level triggered right? */ + status = acpi_install_gpe_handler(NULL, + info->irq, + ACPI_EVENT_LEVEL_TRIGGERED, + ipmi_acpi_gpe, + info); + if (status != AE_OK) { + printk(KERN_WARNING + "ipmi_smi: %s unable to claim ACPI GPE %d," + " running polled\n", + DEVICE_NAME, info->irq); + info->irq = 0; + return -EINVAL; + } else { + printk(" Using ACPI GPE %d\n", info->irq); + return 0; + } + +} + +static void acpi_gpe_irq_cleanup(struct smi_info *info) +{ + if (!info->irq) + return; + + acpi_remove_gpe_handler(NULL, info->irq, ipmi_acpi_gpe); +} + +/* + * Defined at + * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf + */ +struct SPMITable { + s8 Signature[4]; + u32 Length; + u8 Revision; + u8 Checksum; + s8 OEMID[6]; + s8 OEMTableID[8]; + s8 OEMRevision[4]; + s8 CreatorID[4]; + s8 CreatorRevision[4]; + u8 InterfaceType[2]; + s16 SpecificationRevision; + + /* + * Bit 0 - SCI interrupt supported + * Bit 1 - I/O APIC/SAPIC + */ + u8 InterruptType; + + /* If bit 0 of InterruptType is set, then this is the SCI + interrupt in the GPEx_STS register. */ + u8 GPE; + + s16 Reserved; + + /* If bit 1 of InterruptType is set, then this is the I/O + APIC/SAPIC interrupt. */ + u32 GlobalSystemInterrupt; + + /* The actual register address. */ + struct acpi_generic_address addr; + + u8 UID[4]; + + s8 spmi_id[1]; /* A '\0' terminated array starts here. */ +}; + +static int try_init_acpi(int intf_num, struct smi_info **new_info) +{ + struct smi_info *info; + acpi_status status; + struct SPMITable *spmi; + char *io_type; + + if (acpi_failure) + return -ENODEV; + + status = acpi_get_firmware_table("SPMI", intf_num+1, + ACPI_LOGICAL_ADDRESSING, + (struct acpi_table_header **) &spmi); + if (status != AE_OK) { + acpi_failure = 1; + return -ENODEV; + } + + if (spmi->InterfaceType[0] != 1) + return -ENODEV; + + /* Figure out the interface type. 
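+	   InterfaceType[1] of the SPMI table is mapped below: 1 is KCS, 2 is SMIC, 3 is BT.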
*/ + switch (spmi->InterfaceType[1]) + { + case 1: /* KCS */ + si_type[intf_num] = "kcs"; + break; + + case 2: /* SMIC */ + si_type[intf_num] = "smic"; + break; + + case 3: /* BT */ + si_type[intf_num] = "bt"; + break; + + default: + printk("ipmi_smi: Unknown ACPI SMI type.\n"); + return -EIO; + } + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + printk("ipmi_smi: Could not allocate SMI data\n"); + return -ENOMEM; + } + memset(info, 0, sizeof(*info)); + + if (spmi->InterruptType & 1) { + /* We've got a GPE interrupt. */ + info->irq = spmi->GPE; + info->irq_setup = acpi_gpe_irq_setup; + info->irq_cleanup = acpi_gpe_irq_cleanup; + } else if (spmi->InterruptType & 2) { + /* We've got an APIC/SAPIC interrupt. */ + info->irq = spmi->GlobalSystemInterrupt; + info->irq_setup = std_irq_setup; + info->irq_cleanup = std_irq_cleanup; + } else { + /* Use the default interrupt setting. */ + info->irq = 0; + info->irq_setup = NULL; + } + + if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + io_type = "memory"; + info->io_setup = mem_setup; + info->io_cleanup = mem_cleanup; + si_addrs[intf_num] = spmi->addr.address; + info->io.inputb = mem_inb; + info->io.outputb = mem_outb; + info->io.info = &(si_addrs[intf_num]); + } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + io_type = "I/O"; + info->io_setup = port_setup; + info->io_cleanup = port_cleanup; + si_ports[intf_num] = spmi->addr.address; + info->io.inputb = port_inb; + info->io.outputb = port_outb; + info->io.info = &(si_ports[intf_num]); + } else { + kfree(info); + printk("ipmi_smi: Unknown ACPI I/O Address type.\n"); + return -EIO; + } + + *new_info = info; + + printk("ipmi_smi: Found ACPI-specified state machine at %s" + " address 0x%lx\n", + io_type, (unsigned long) spmi->addr.address); + return 0; +} +#endif + +static int try_get_dev_id(struct smi_info *smi_info) +{ + unsigned char msg[2]; + unsigned char resp[IPMI_MAX_MSG_LENGTH]; + unsigned long resp_len; + enum si_sm_result smi_result; + + /* Do a Get Device ID command, since it comes back with some + useful info. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + + smi_result = smi_info->handlers->event(smi_info->si_sm, 0); + for (;;) + { + if (smi_result == SI_SM_CALL_WITH_DELAY) { + schedule_timeout(1); + smi_result = smi_info->handlers->event( + smi_info->si_sm, 100); + } + else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) + { + smi_result = smi_info->handlers->event( + smi_info->si_sm, 0); + } + else + break; + } + if (smi_result == SI_SM_HOSED) + /* We couldn't get the state machine to run, so whatever's at + the port is probably not an IPMI SMI interface. */ + return -ENODEV; + + /* Otherwise, we got some data. */ + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + if (resp_len < 6) + /* That's odd, it should be longer. */ + return -EINVAL; + + if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) + /* That's odd, it shouldn't be able to fail. */ + return -EINVAL; + + /* Record info from the get device id, in case we need it. 
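+	   resp[4] carries the device revision, resp[5] and resp[6] the firmware revision, and resp[7] the IPMI version with the major number in the low nibble.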
*/ + smi_info->ipmi_si_dev_rev = resp[4] & 0xf; + smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f; + smi_info->ipmi_si_fw_rev_minor = resp[6]; + smi_info->ipmi_version_major = resp[7] & 0xf; + smi_info->ipmi_version_minor = resp[7] >> 4; + + return 0; +} + +extern struct si_sm_handlers kcs_smi_handlers; +extern struct si_sm_handlers smic_smi_handlers; + +static int type_file_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + char *out = (char *) page; + struct smi_info *smi = data; + + switch (smi->si_type) { + case SI_KCS: + return sprintf(out, "kcs\n"); + case SI_SMIC: + return sprintf(out, "smic\n"); + case SI_BT: + return sprintf(out, "bt\n"); + default: + return 0; + } +} + +static int stat_file_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + char *out = (char *) page; + struct smi_info *smi = data; + + out += sprintf(out, "interrupts_enabled: %d\n", + smi->irq && !smi->interrupt_disabled); + out += sprintf(out, "short_timeouts: %ld\n", + smi->short_timeouts); + out += sprintf(out, "long_timeouts: %ld\n", + smi->long_timeouts); + out += sprintf(out, "timeout_restarts: %ld\n", + smi->timeout_restarts); + out += sprintf(out, "idles: %ld\n", + smi->idles); + out += sprintf(out, "interrupts: %ld\n", + smi->interrupts); + out += sprintf(out, "attentions: %ld\n", + smi->attentions); + out += sprintf(out, "flag_fetches: %ld\n", + smi->flag_fetches); + out += sprintf(out, "hosed_count: %ld\n", + smi->hosed_count); + out += sprintf(out, "complete_transactions: %ld\n", + smi->complete_transactions); + out += sprintf(out, "events: %ld\n", + smi->events); + out += sprintf(out, "watchdog_pretimeouts: %ld\n", + smi->watchdog_pretimeouts); + out += sprintf(out, "incoming_messages: %ld\n", + smi->incoming_messages); + + return (out - ((char *) page)); +} + +/* Returns 0 if initialized, or negative on an error. */ +static int init_one_smi(int intf_num, struct smi_info **smi) +{ + int rv; + struct smi_info *new_smi; + + + rv = try_init_mem(intf_num, &new_smi); + if (rv) + rv = try_init_port(intf_num, &new_smi); +#ifdef CONFIG_ACPI_INTERPRETER + if ((rv) && (si_trydefaults)) { + rv = try_init_acpi(intf_num, &new_smi); + } +#endif + + if (rv) + return rv; + + /* So we know not to free it unless we have allocated one. */ + new_smi->intf = NULL; + new_smi->si_sm = NULL; + new_smi->handlers = 0; + + if (!new_smi->irq_setup) { + new_smi->irq = si_irqs[intf_num]; + new_smi->irq_setup = std_irq_setup; + new_smi->irq_cleanup = std_irq_cleanup; + } + + /* Default to KCS if no type is specified. */ + if (si_type[intf_num] == NULL) { + if (si_trydefaults) + si_type[intf_num] = "kcs"; + else { + rv = -EINVAL; + goto out_err; + } + } + + /* Set up the state machine to use. */ + if (strcmp(si_type[intf_num], "kcs") == 0) { + new_smi->handlers = &kcs_smi_handlers; + new_smi->si_type = SI_KCS; + } else if (strcmp(si_type[intf_num], "smic") == 0) { + new_smi->handlers = &smic_smi_handlers; + new_smi->si_type = SI_SMIC; + } else { + /* No support for anything else yet. */ + rv = -EIO; + goto out_err; + } + + /* Allocate the state machine's data and initialize it. */ + new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); + if (!new_smi->si_sm) { + printk(" Could not allocate state machine memory\n"); + rv = -ENOMEM; + goto out_err; + } + new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, + &new_smi->io); + + /* Now that we know the I/O size, we can set up the I/O. 
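+	   io_setup points at port_setup() or mem_setup(), depending on how this interface was configured.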
*/ + rv = new_smi->io_setup(new_smi); + if (rv) { + printk(" Could not set up I/O space\n"); + goto out_err; + } + + spin_lock_init(&(new_smi->si_lock)); + spin_lock_init(&(new_smi->msg_lock)); + spin_lock_init(&(new_smi->count_lock)); + + /* Do low-level detection first. */ + if (new_smi->handlers->detect(new_smi->si_sm)) { + rv = -ENODEV; + goto out_err; + } + + /* Attempt a get device id command. If it fails, we probably + don't have a SMI here. */ + rv = try_get_dev_id(new_smi); + if (rv) + goto out_err; + + /* Try to claim any interrupts. */ + new_smi->irq_setup(new_smi); + + INIT_LIST_HEAD(&(new_smi->xmit_msgs)); + INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); + new_smi->curr_msg = NULL; + atomic_set(&new_smi->req_events, 0); + new_smi->run_to_completion = 0; + + rv = ipmi_register_smi(&handlers, + new_smi, + new_smi->ipmi_version_major, + new_smi->ipmi_version_minor, + &(new_smi->intf)); + if (rv) { + printk(KERN_ERR + "ipmi_smi: Unable to register device: error %d\n", + rv); + goto out_err; + } + + rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", + type_file_read_proc, NULL, + new_smi, THIS_MODULE); + if (rv) { + printk(KERN_ERR + "ipmi_smi: Unable to create proc entry: %d\n", + rv); + goto out_err; + } + + rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", + stat_file_read_proc, NULL, + new_smi, THIS_MODULE); + if (rv) { + printk(KERN_ERR + "ipmi_smi: Unable to create proc entry: %d\n", + rv); + goto out_err; + } + + start_clear_flags(new_smi); + + /* IRQ is defined to be set when non-zero. */ + if (new_smi->irq) + new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; + + new_smi->interrupt_disabled = 0; + new_smi->timer_stopped = 0; + new_smi->stop_operation = 0; + + init_timer(&(new_smi->si_timer)); + new_smi->si_timer.data = (long) new_smi; + new_smi->si_timer.function = smi_timeout; + new_smi->last_timeout_jiffies = jiffies; + new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + add_timer(&(new_smi->si_timer)); + + *smi = new_smi; + + printk(" IPMI %s interface initialized\n", si_type[intf_num]); + + return 0; + + out_err: + if (new_smi->intf) + ipmi_unregister_smi(new_smi->intf); + + new_smi->irq_cleanup(new_smi); + if (new_smi->si_sm) { + if (new_smi->handlers) + new_smi->handlers->cleanup(new_smi->si_sm); + kfree(new_smi->si_sm); + } + new_smi->io_cleanup(new_smi); + return rv; +} + +static __init int init_ipmi_si(void) +{ + int rv = 0; + int pos = 0; + int i = 0; + + if (initialized) + return 0; + initialized = 1; + + printk(KERN_INFO "IPMI System Interface driver version " + IPMI_SI_VERSION); + if (kcs_smi_handlers.version) + printk(", KCS version %s", kcs_smi_handlers.version); + if (smic_smi_handlers.version) + printk(", SMIC version %s", smic_smi_handlers.version); + printk("\n"); + + rv = init_one_smi(0, &(smi_infos[pos])); + if (rv && !si_ports[0] && si_trydefaults) { + /* If we are trying defaults and the initial port is + not set, then set it. 
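+	   Try KCS at the default port 0xca2 first and fall back to SMIC at 0xca9 if that fails.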
*/ + si_type[0] = "kcs"; + si_ports[0] = DEFAULT_KCS_IO_PORT; + rv = init_one_smi(0, &(smi_infos[pos])); + if (rv) { + /* No KCS - try SMIC */ + si_type[0] = "smic"; + si_ports[0] = DEFAULT_SMIC_IO_PORT; + rv = init_one_smi(0, &(smi_infos[pos])); + } + } + if (rv == 0) + pos++; + + for (i=1; i < SI_MAX_PARMS; i++) { + rv = init_one_smi(i, &(smi_infos[pos])); + if (rv == 0) + pos++; + } + + if (smi_infos[0] == NULL) { + printk("ipmi_smi: Unable to find any SMI interfaces\n"); + return -ENODEV; + } + + return 0; +} +module_init(init_ipmi_si); + +#ifdef MODULE +void __exit cleanup_one_si(struct smi_info *to_clean) +{ + int rv; + unsigned long flags; + + if (! to_clean) + return; + + /* Tell the timer and interrupt handlers that we are shutting + down. */ + spin_lock_irqsave(&(to_clean->si_lock), flags); + spin_lock(&(to_clean->msg_lock)); + + to_clean->stop_operation = 1; + + to_clean->irq_cleanup(to_clean); + + spin_unlock(&(to_clean->msg_lock)); + spin_unlock_irqrestore(&(to_clean->si_lock), flags); + + /* Wait for the timer to stop. This avoids problems with race + conditions removing the timer here. Hopefully this will be + long enough to avoid problems with interrupts still + running. */ + schedule_timeout(2); + while (!to_clean->timer_stopped) { + schedule_timeout(1); + } + + rv = ipmi_unregister_smi(to_clean->intf); + if (rv) { + printk(KERN_ERR + "ipmi_smi: Unable to unregister device: errno=%d\n", + rv); + } + + to_clean->handlers->cleanup(to_clean->si_sm); + + kfree(to_clean->si_sm); + + to_clean->io_cleanup(to_clean); +} + +static __exit void cleanup_ipmi_si(void) +{ + int i; + + if (!initialized) + return; + + for (i=0; i + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* This is defined by the state machines themselves, it is an opaque + data type for them to use. */ +struct si_sm_data; + +/* The structure for doing I/O in the state machine. The state + machine doesn't have the actual I/O routines, they are done through + this interface. */ +struct si_sm_io +{ + unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); + void (*outputb)(struct si_sm_io *io, + unsigned int offset, + unsigned char b); + + /* Generic info used by the actual handling routines, the + state machine shouldn't touch these. */ + void *info; + void *addr; +}; + +/* Results of SMI events. 
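+   Returned by the event() handler so the caller knows whether to call again immediately, delay first, or treat the interface as idle, hosed, or asserting attention.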
*/ +enum si_sm_result +{ + SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ + SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ + SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ + SI_SM_IDLE, /* The SM is in idle state. */ + SI_SM_HOSED, /* The hardware violated the state machine. */ + SI_SM_ATTN /* The hardware is asserting attn and the + state machine is idle. */ +}; + +/* Handlers for the SMI state machine. */ +struct si_sm_handlers +{ + /* Put the version number of the state machine here so the + upper layer can print it. */ + char *version; + + /* Initialize the data and return the amount of I/O space to + reserve for the space. */ + unsigned int (*init_data)(struct si_sm_data *smi, + struct si_sm_io *io); + + /* Start a new transaction in the state machine. This will + return -2 if the state machine is not idle, -1 if the size + is invalid (to large or too small), or 0 if the transaction + is successfully completed. */ + int (*start_transaction)(struct si_sm_data *smi, + unsigned char *data, unsigned int size); + + /* Return the results after the transaction. This will return + -1 if the buffer is too small, zero if no transaction is + present, or the actual length of the result data. */ + int (*get_result)(struct si_sm_data *smi, + unsigned char *data, unsigned int length); + + /* Call this periodically (for a polled interface) or upon + receiving an interrupt (for a interrupt-driven interface). + If interrupt driven, you should probably poll this + periodically when not in idle state. This should be called + with the time that passed since the last call, if it is + significant. Time is in microseconds. */ + enum si_sm_result (*event)(struct si_sm_data *smi, long time); + + /* Attempt to detect an SMI. Returns 0 on success or nonzero + on failure. */ + int (*detect)(struct si_sm_data *smi); + + /* The interface is shutting down, so clean it up. */ + void (*cleanup)(struct si_sm_data *smi); + + /* Return the size of the SMI structure in bytes. */ + int (*size)(void); +}; + diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_smic_sm.c linux-2.4.23/drivers/char/ipmi/ipmi_smic_sm.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_smic_sm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_smic_sm.c 2003-12-06 17:46:43.024448478 +0100 @@ -0,0 +1,601 @@ +/* + * ipmi_smic_sm.c + * + * The state-machine driver for an IPMI SMIC driver + * + * It started as a copy of Corey Minyard's driver for the KSC interface + * and the kernel patch "mmcdev-patch-245" by HP + * + * modified by: Hannes Schulz + * ipmi@schwaar.com + * + * + * Corey Minyard's driver for the KSC interface has the following + * copyright notice: + * Copyright 2002 MontaVista Software Inc. + * + * the kernel patch "mmcdev-patch-245" by HP has the following + * copyright notice: + * (c) Copyright 2001 Grant Grundler (c) Copyright + * 2001 Hewlett-Packard Company + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. */ + +#include /* For printk. */ +#include +#include "ipmi_si_sm.h" + +#define IPMI_SMIC_VERSION "v27" + +/* smic_debug is a bit-field + * SMIC_DEBUG_ENABLE - turned on for now + * SMIC_DEBUG_MSG - commands and their responses + * SMIC_DEBUG_STATES - state machine +*/ +#define SMIC_DEBUG_STATES 4 +#define SMIC_DEBUG_MSG 2 +#define SMIC_DEBUG_ENABLE 1 + +static int smic_debug = 1; + +enum smic_states { + SMIC_IDLE, + SMIC_START_OP, + SMIC_OP_OK, + SMIC_WRITE_START, + SMIC_WRITE_NEXT, + SMIC_WRITE_END, + SMIC_WRITE2READ, + SMIC_READ_START, + SMIC_READ_NEXT, + SMIC_READ_END, + SMIC_HOSED +}; + +#define MAX_SMIC_READ_SIZE 80 +#define MAX_SMIC_WRITE_SIZE 80 +#define SMIC_MAX_ERROR_RETRIES 3 + +/* Timeouts in microseconds. */ +#define SMIC_RETRY_TIMEOUT 100000 +#define IPMI_ERR_MSG_TRUNCATED 0xc6 +#define IPMI_ERR_UNSPECIFIED 0xff + + +/* SMIC Flags Register Bits */ +#define SMIC_RX_DATA_READY 0x80 +#define SMIC_TX_DATA_READY 0x40 +#define SMIC_SMI 0x10 +#define SMIC_EVM_DATA_AVAIL 0x08 +#define SMIC_SMS_DATA_AVAIL 0x04 +#define SMIC_FLAG_BSY 0x01 + +/* SMIC Error Codes */ +#define EC_NO_ERROR 0x00 +#define EC_ABORTED 0x01 +#define EC_ILLEGAL_CONTROL 0x02 +#define EC_NO_RESPONSE 0x03 +#define EC_ILLEGAL_COMMAND 0x04 +#define EC_BUFFER_FULL 0x05 + +struct si_sm_data +{ + enum smic_states state; + struct si_sm_io *io; + unsigned char write_data[MAX_SMIC_WRITE_SIZE]; + int write_pos; + int write_count; + int orig_write_count; + unsigned char read_data[MAX_SMIC_READ_SIZE]; + int read_pos; + int truncated; + unsigned int error_retries; + long smic_timeout; +}; + +static unsigned int init_smic_data (struct si_sm_data *smic, + struct si_sm_io *io) +{ + smic->state = SMIC_IDLE; + smic->io = io; + smic->write_pos = 0; + smic->write_count = 0; + smic->orig_write_count = 0; + smic->read_pos = 0; + smic->error_retries = 0; + smic->truncated = 0; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + + /* We use 3 bytes of I/O. 
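+	   offset 0 is the data register, offset 1 control/status, and offset 2 the flags register (see the register access helpers below).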
*/ + return 3; +} + +static int start_smic_transaction(struct si_sm_data *smic, + unsigned char *data, unsigned int size) +{ + unsigned int i; + + if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) { + return -1; + } + if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) { + return -2; + } + if (smic_debug & SMIC_DEBUG_MSG) { + printk(KERN_INFO "start_smic_transaction -"); + for (i = 0; i < size; i ++) { + printk (" %02x", (unsigned char) (data [i])); + } + printk ("\n"); + } + smic->error_retries = 0; + memcpy(smic->write_data, data, size); + smic->write_count = size; + smic->orig_write_count = size; + smic->write_pos = 0; + smic->read_pos = 0; + smic->state = SMIC_START_OP; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + return 0; +} + +static int smic_get_result(struct si_sm_data *smic, + unsigned char *data, unsigned int length) +{ + int i; + + if (smic_debug & SMIC_DEBUG_MSG) { + printk (KERN_INFO "smic_get result -"); + for (i = 0; i < smic->read_pos; i ++) { + printk (" %02x", (smic->read_data [i])); + } + printk ("\n"); + } + if (length < smic->read_pos) { + smic->read_pos = length; + smic->truncated = 1; + } + memcpy(data, smic->read_data, smic->read_pos); + + if ((length >= 3) && (smic->read_pos < 3)) { + data[2] = IPMI_ERR_UNSPECIFIED; + smic->read_pos = 3; + } + if (smic->truncated) { + data[2] = IPMI_ERR_MSG_TRUNCATED; + smic->truncated = 0; + } + return smic->read_pos; +} + +static inline unsigned char read_smic_flags(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 2); +} + +static inline unsigned char read_smic_status(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 1); +} + +static inline unsigned char read_smic_data(struct si_sm_data *smic) +{ + return smic->io->inputb(smic->io, 0); +} + +static inline void write_smic_flags(struct si_sm_data *smic, + unsigned char flags) +{ + smic->io->outputb(smic->io, 2, flags); +} + +static inline void write_smic_control(struct si_sm_data *smic, + unsigned char control) +{ + smic->io->outputb(smic->io, 1, control); +} + +static inline void write_si_sm_data (struct si_sm_data *smic, + unsigned char data) +{ + smic->io->outputb(smic->io, 0, data); +} + +static inline void start_error_recovery(struct si_sm_data *smic, char *reason) +{ + (smic->error_retries)++; + if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { + if (smic_debug & SMIC_DEBUG_ENABLE) { + printk(KERN_WARNING + "ipmi_smic_drv: smic hosed: %s\n", reason); + } + smic->state = SMIC_HOSED; + } else { + smic->write_count = smic->orig_write_count; + smic->write_pos = 0; + smic->read_pos = 0; + smic->state = SMIC_START_OP; + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + } +} + +static inline void write_next_byte(struct si_sm_data *smic) +{ + write_si_sm_data(smic, smic->write_data[smic->write_pos]); + (smic->write_pos)++; + (smic->write_count)--; +} + +static inline void read_next_byte (struct si_sm_data *smic) +{ + if (smic->read_pos >= MAX_SMIC_READ_SIZE) { + read_smic_data (smic); + smic->truncated = 1; + } else { + smic->read_data[smic->read_pos] = read_smic_data(smic); + (smic->read_pos)++; + } +} + +/* SMIC Control/Status Code Components */ +#define SMIC_GET_STATUS 0x00 /* Control form's name */ +#define SMIC_READY 0x00 /* Status form's name */ +#define SMIC_WR_START 0x01 /* Unified Control/Status names... 
*/ +#define SMIC_WR_NEXT 0x02 +#define SMIC_WR_END 0x03 +#define SMIC_RD_START 0x04 +#define SMIC_RD_NEXT 0x05 +#define SMIC_RD_END 0x06 +#define SMIC_CODE_MASK 0x0f + +#define SMIC_CONTROL 0x00 +#define SMIC_STATUS 0x80 +#define SMIC_CS_MASK 0x80 + +#define SMIC_SMS 0x40 +#define SMIC_SMM 0x60 +#define SMIC_STREAM_MASK 0x60 + +/* SMIC Control Codes */ +#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS) +#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START) +#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT) +#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END) +#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START) +#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT) +#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END) + +#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS) +#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START) +#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT) +#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END) +#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START) +#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT) +#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END) + +/* SMIC Status Codes */ +#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY) +#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START) +#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT) +#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END) +#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START) +#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT) +#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END) + +#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY) +#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START) +#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT) +#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END) +#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START) +#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT) +#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END) + +/* these are the control/status codes we actually use + SMIC_CC_SMS_GET_STATUS 0x40 + SMIC_CC_SMS_WR_START 0x41 + SMIC_CC_SMS_WR_NEXT 0x42 + SMIC_CC_SMS_WR_END 0x43 + SMIC_CC_SMS_RD_START 0x44 + SMIC_CC_SMS_RD_NEXT 0x45 + SMIC_CC_SMS_RD_END 0x46 + + SMIC_SC_SMS_READY 0xC0 + SMIC_SC_SMS_WR_START 0xC1 + SMIC_SC_SMS_WR_NEXT 0xC2 + SMIC_SC_SMS_WR_END 0xC3 + SMIC_SC_SMS_RD_START 0xC4 + SMIC_SC_SMS_RD_NEXT 0xC5 + SMIC_SC_SMS_RD_END 0xC6 +*/ + +static enum si_sm_result smic_event (struct si_sm_data *smic, long time) +{ + unsigned char status; + unsigned char flags; + unsigned char data; + + if (smic->state == SMIC_HOSED) { + init_smic_data(smic, smic->io); + return SI_SM_HOSED; + } + if (smic->state != SMIC_IDLE) { + if (smic_debug & SMIC_DEBUG_STATES) { + printk(KERN_INFO + "smic_event - smic->smic_timeout = %ld," + " time = %ld\n", + smic->smic_timeout, time); + } +/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ + if (time < SMIC_RETRY_TIMEOUT) { + smic->smic_timeout -= time; + if (smic->smic_timeout < 0) { + start_error_recovery(smic, "smic timed out."); + return SI_SM_CALL_WITH_DELAY; + } + } + } + flags = read_smic_flags(smic); + if (flags & SMIC_FLAG_BSY) + return SI_SM_CALL_WITH_DELAY; + + status = read_smic_status (smic); + if (smic_debug & SMIC_DEBUG_STATES) + printk(KERN_INFO 
+ "smic_event - state = %d, flags = 0x%02x," + " status = 0x%02x\n", + smic->state, flags, status); + + switch (smic->state) { + case SMIC_IDLE: + /* in IDLE we check for available messages */ + if (flags & (SMIC_SMI | + SMIC_EVM_DATA_AVAIL | SMIC_SMS_DATA_AVAIL)) + { + return SI_SM_ATTN; + } + return SI_SM_IDLE; + + case SMIC_START_OP: + /* sanity check whether smic is really idle */ + write_smic_control(smic, SMIC_CC_SMS_GET_STATUS); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_OP_OK; + break; + + case SMIC_OP_OK: + if (status != SMIC_SC_SMS_READY) { + /* this should not happen */ + start_error_recovery(smic, + "state = SMIC_OP_OK," + " status != SMIC_SC_SMS_READY"); + return SI_SM_CALL_WITH_DELAY; + } + /* OK so far; smic is idle let us start ... */ + write_smic_control(smic, SMIC_CC_SMS_WR_START); + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_WRITE_START; + break; + + case SMIC_WRITE_START: + if (status != SMIC_SC_SMS_WR_START) { + start_error_recovery(smic, + "state = SMIC_WRITE_START, " + "status != SMIC_SC_SMS_WR_START"); + return SI_SM_CALL_WITH_DELAY; + } + /* we must not issue WR_(NEXT|END) unless + TX_DATA_READY is set */ + if (flags & SMIC_TX_DATA_READY) { + if (smic->write_count == 1) { + /* last byte */ + write_smic_control(smic, SMIC_CC_SMS_WR_END); + smic->state = SMIC_WRITE_END; + } else { + write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); + smic->state = SMIC_WRITE_NEXT; + } + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + } + else { + return SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_WRITE_NEXT: + if (status != SMIC_SC_SMS_WR_NEXT) { + start_error_recovery(smic, + "state = SMIC_WRITE_NEXT, " + "status != SMIC_SC_SMS_WR_NEXT"); + return SI_SM_CALL_WITH_DELAY; + } + /* this is the same code as in SMIC_WRITE_START */ + if (flags & SMIC_TX_DATA_READY) { + if (smic->write_count == 1) { + write_smic_control(smic, SMIC_CC_SMS_WR_END); + smic->state = SMIC_WRITE_END; + } + else { + write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); + smic->state = SMIC_WRITE_NEXT; + } + write_next_byte(smic); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + } + else { + return SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_WRITE_END: + if (status != SMIC_SC_SMS_WR_END) { + start_error_recovery (smic, + "state = SMIC_WRITE_END, " + "status != SMIC_SC_SMS_WR_END"); + return SI_SM_CALL_WITH_DELAY; + } + /* data register holds an error code */ + data = read_smic_data(smic); + if (data != 0) { + if (smic_debug & SMIC_DEBUG_ENABLE) { + printk(KERN_INFO + "SMIC_WRITE_END: data = %02x\n", data); + } + start_error_recovery(smic, + "state = SMIC_WRITE_END, " + "data != SUCCESS"); + return SI_SM_CALL_WITH_DELAY; + } else { + smic->state = SMIC_WRITE2READ; + } + break; + + case SMIC_WRITE2READ: + /* we must wait for RX_DATA_READY to be set before we + can continue */ + if (flags & SMIC_RX_DATA_READY) { + write_smic_control(smic, SMIC_CC_SMS_RD_START); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_START; + } else { + return SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_READ_START: + if (status != SMIC_SC_SMS_RD_START) { + start_error_recovery(smic, + "state = SMIC_READ_START, " + "status != SMIC_SC_SMS_RD_START"); + return SI_SM_CALL_WITH_DELAY; + } + if (flags & SMIC_RX_DATA_READY) { + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_NEXT; + } else { + return 
SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_READ_NEXT: + switch (status) { + /* smic tells us that this is the last byte to be read + --> clean up */ + case SMIC_SC_SMS_RD_END: + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_END); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_END; + break; + case SMIC_SC_SMS_RD_NEXT: + if (flags & SMIC_RX_DATA_READY) { + read_next_byte(smic); + write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); + write_smic_flags(smic, flags | SMIC_FLAG_BSY); + smic->state = SMIC_READ_NEXT; + } else { + return SI_SM_CALL_WITH_DELAY; + } + break; + default: + start_error_recovery( + smic, + "state = SMIC_READ_NEXT, " + "status != SMIC_SC_SMS_RD_(NEXT|END)"); + return SI_SM_CALL_WITH_DELAY; + } + break; + + case SMIC_READ_END: + if (status != SMIC_SC_SMS_READY) { + start_error_recovery(smic, + "state = SMIC_READ_END, " + "status != SMIC_SC_SMS_READY"); + return SI_SM_CALL_WITH_DELAY; + } + data = read_smic_data(smic); + /* data register holds an error code */ + if (data != 0) { + if (smic_debug & SMIC_DEBUG_ENABLE) { + printk(KERN_INFO + "SMIC_READ_END: data = %02x\n", data); + } + start_error_recovery(smic, + "state = SMIC_READ_END, " + "data != SUCCESS"); + return SI_SM_CALL_WITH_DELAY; + } else { + smic->state = SMIC_IDLE; + return SI_SM_TRANSACTION_COMPLETE; + } + + case SMIC_HOSED: + init_smic_data(smic, smic->io); + return SI_SM_HOSED; + + default: + if (smic_debug & SMIC_DEBUG_ENABLE) { + printk(KERN_WARNING "smic->state = %d\n", smic->state); + start_error_recovery(smic, "state = UNKNOWN"); + return SI_SM_CALL_WITH_DELAY; + } + } + smic->smic_timeout = SMIC_RETRY_TIMEOUT; + return SI_SM_CALL_WITHOUT_DELAY; +} + +static int smic_detect(struct si_sm_data *smic) +{ + /* It's impossible for the SMIC fnags register to be all 1's, + (assuming a properly functioning, self-initialized BMC) + but that's what you get from reading a bogus address, so we + test that first. */ + if (read_smic_flags(smic) == 0xff) + return 1; + + return 0; +} + +static void smic_cleanup(struct si_sm_data *kcs) +{ +} + +static int smic_size(void) +{ + return sizeof(struct si_sm_data); +} + +struct si_sm_handlers smic_smi_handlers = +{ + .version = IPMI_SMIC_VERSION, + .init_data = init_smic_data, + .start_transaction = start_smic_transaction, + .get_result = smic_get_result, + .event = smic_event, + .detect = smic_detect, + .cleanup = smic_cleanup, + .size = smic_size, +}; diff -urN linux-2.4.23.org/drivers/char/ipmi/ipmi_watchdog.c linux-2.4.23/drivers/char/ipmi/ipmi_watchdog.c --- linux-2.4.23.org/drivers/char/ipmi/ipmi_watchdog.c 2003-12-06 12:53:23.003503549 +0100 +++ linux-2.4.23/drivers/char/ipmi/ipmi_watchdog.c 2003-12-06 17:46:43.041444912 +0100 @@ -50,6 +50,8 @@ #include #endif +#define IPMI_WATCHDOG_VERSION "v27" + /* * The IPMI command/response information for the watchdog timer. */ @@ -153,10 +155,18 @@ static char pretimeout_since_last_heartbeat = 0; MODULE_PARM(timeout, "i"); +MODULE_PARM_DESC(timeout, "Timeout value in seconds."); MODULE_PARM(pretimeout, "i"); +MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); MODULE_PARM(action, "s"); +MODULE_PARM_DESC(action, "Timeout action. One of: " + "reset, none, power_cycle, power_off."); MODULE_PARM(preaction, "s"); +MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " + "pre_none, pre_smi, pre_nmi, pre_int."); MODULE_PARM(preop, "s"); +MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " + "preop_none, preop_panic, preop_give_data."); /* Default state of the timer. 
*/ static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; @@ -899,6 +909,7 @@ static struct ipmi_smi_watcher smi_watcher = { + .owner = THIS_MODULE, .new_smi = ipmi_new_smi, .smi_gone = ipmi_smi_gone }; @@ -907,6 +918,9 @@ { int rv; + printk(KERN_INFO "IPMI watchdog driver version " + IPMI_WATCHDOG_VERSION "\n"); + if (strcmp(action, "reset") == 0) { action_val = WDOG_TIMEOUT_RESET; } else if (strcmp(action, "none") == 0) { @@ -995,9 +1009,6 @@ register_reboot_notifier(&wdog_reboot_notifier); notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); - printk(KERN_INFO "IPMI watchdog by " - "Corey Minyard (minyard@mvista.com)\n"); - return 0; } diff -urN linux-2.4.23.org/drivers/char/ipmi/Makefile linux-2.4.23/drivers/char/ipmi/Makefile --- linux-2.4.23.org/drivers/char/ipmi/Makefile 2003-12-06 12:53:23.043495221 +0100 +++ linux-2.4.23/drivers/char/ipmi/Makefile 2003-12-06 17:46:43.064440089 +0100 @@ -6,11 +6,13 @@ export-objs := ipmi_msghandler.o ipmi_watchdog.o -list-multi := ipmi_kcs_drv.o +list-multi := ipmi_kcs_drv.o ipmi_si_drv.o ipmi_kcs_drv-objs := ipmi_kcs_sm.o ipmi_kcs_intf.o +ipmi_si_drv-objs := ipmi_si.o ipmi_kcs_sm.o ipmi_smic_sm.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o +obj-$(CONFIG_IPMI_SI) += ipmi_si_drv.o obj-$(CONFIG_IPMI_KCS) += ipmi_kcs_drv.o obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o @@ -18,3 +20,6 @@ ipmi_kcs_drv.o: $(ipmi_kcs_drv-objs) $(LD) -r -o $@ $(ipmi_kcs_drv-objs) + +ipmi_si_drv.o: $(ipmi_si_drv-objs) + $(LD) -r -o $@ $(ipmi_si_drv-objs) diff -urN linux-2.4.23.org/include/linux/ipmi.h linux-2.4.23/include/linux/ipmi.h --- linux-2.4.23.org/include/linux/ipmi.h 2003-12-06 12:52:27.330097221 +0100 +++ linux-2.4.23/include/linux/ipmi.h 2003-12-06 17:46:43.115429392 +0100 @@ -160,6 +160,7 @@ * The in-kernel interface. */ #include +#include /* Opaque type for a IPMI message user. One of these is needed to send and receive messages. */ @@ -221,7 +222,12 @@ void *handler_data, ipmi_user_t *user); -/* Destroy the given user of the IPMI layer. */ +/* Destroy the given user of the IPMI layer. Note that after this + function returns, the system is guaranteed to not call any + callbacks for the user. Thus as long as you destroy all the users + before you unload a module, you will be safe. And if you destroy + the users before you destroy the callback structures, it should be + safe, too. */ int ipmi_destroy_user(ipmi_user_t user); /* Get the IPMI version of the BMC we are talking to. */ @@ -261,6 +267,27 @@ int priority); /* + * Like ipmi_request, but lets you specify the number of retries and + * the retry time. The retries is the number of times the message + * will be resent if no reply is received. If set to -1, the default + * value will be used. The retry time is the time in milliseconds + * between retries. If set to zero, the default value will be + * used. + * + * Don't use this unless you *really* have to. It's primarily for the + * IPMI over LAN converter; since the LAN stuff does its own retries, + * it makes no sense to do it here. However, this can be used if you + * have unusual requirements. + */ +int ipmi_request_settime(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct ipmi_msg *msg, + int priority, + int max_retries, + unsigned int retry_time_ms); + +/* * Like ipmi_request, but lets you specify the slave return address. 
*/ int ipmi_request_with_source(ipmi_user_t user, @@ -331,6 +358,10 @@ { struct list_head link; + /* You must set the owner to the current module, if you are in + a module (generally just set it to "THIS_MODULE"). */ + struct module *owner; + /* These two are called with read locks held for the interface the watcher list. So you can add and remove users from the IPMI interface, send messages, etc., but you cannot add @@ -422,6 +453,29 @@ #define IPMICTL_SEND_COMMAND _IOR(IPMI_IOC_MAGIC, 13, \ struct ipmi_req) +/* Messages sent to the interface with timing parameters are this + format. */ +struct ipmi_req_settime +{ + struct ipmi_req req; + + /* See ipmi_request_settime() above for details on these + values. */ + int retries; + unsigned int retry_time_ms; +}; +/* + * Send a message to the interfaces with timing parameters. error values + * are: + * - EFAULT - an address supplied was invalid. + * - EINVAL - The address supplied was not valid, or the command + * was not allowed. + * - EMSGSIZE - The message to was too large. + * - ENOMEM - Buffers could not be allocated for the command. + */ +#define IPMICTL_SEND_COMMAND_SETTIME _IOR(IPMI_IOC_MAGIC, 21, \ + struct ipmi_req_settime) + /* Messages received from the interface are this format. */ struct ipmi_recv { @@ -513,4 +567,18 @@ #define IPMICTL_SET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 19, unsigned int) #define IPMICTL_GET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 20, unsigned int) +/* + * Get/set the default timing values for an interface. You shouldn't + * generally mess with these. + */ +struct ipmi_timing_parms +{ + int retries; + unsigned int retry_time_ms; +}; +#define IPMICTL_SET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 22, \ + struct ipmi_timing_parms) +#define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \ + struct ipmi_timing_parms) + #endif /* __LINUX_IPMI_H */ diff -urN linux-2.4.23.org/include/linux/ipmi_msgdefs.h linux-2.4.23/include/linux/ipmi_msgdefs.h --- linux-2.4.23.org/include/linux/ipmi_msgdefs.h 2003-12-06 12:52:27.323098678 +0100 +++ linux-2.4.23/include/linux/ipmi_msgdefs.h 2003-12-06 17:46:43.148422471 +0100 @@ -37,22 +37,34 @@ /* Various definitions for IPMI messages used by almost everything in the IPMI stack. */ -#define IPMI_NETFN_APP_REQUEST 0x06 -#define IPMI_NETFN_APP_RESPONSE 0x07 - -#define IPMI_BMC_SLAVE_ADDR 0x20 +/* NetFNs and commands used inside the IPMI stack. 
*/
+
+#define IPMI_NETFN_SENSOR_EVENT_REQUEST 0x04
+#define IPMI_NETFN_SENSOR_EVENT_RESPONSE 0x05
+#define IPMI_GET_EVENT_RECEIVER_CMD 0x01
+#define IPMI_NETFN_APP_REQUEST 0x06
+#define IPMI_NETFN_APP_RESPONSE 0x07
 #define IPMI_GET_DEVICE_ID_CMD 0x01
-
 #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
 #define IPMI_GET_MSG_FLAGS_CMD 0x31
 #define IPMI_SEND_MSG_CMD 0x34
 #define IPMI_GET_MSG_CMD 0x33
-
 #define IPMI_SET_BMC_GLOBAL_ENABLES_CMD 0x2e
 #define IPMI_GET_BMC_GLOBAL_ENABLES_CMD 0x2f
 #define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
+#define IPMI_NETFN_STORAGE_REQUEST 0x0a
+#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
+#define IPMI_ADD_SEL_ENTRY_CMD 0x44
+
+/* The default slave address */
+#define IPMI_BMC_SLAVE_ADDR 0x20
+
 #define IPMI_MAX_MSG_LENGTH 80
+#define IPMI_CC_NO_ERROR 0
+#define IPMI_NODE_BUSY_ERR 0xc0
+#define IPMI_LOST_ARBITRATION_ERR 0x81
+
 #endif /* __LINUX_IPMI_MSGDEFS_H */
diff -urN linux-2.4.23.org/include/linux/ipmi_smi.h linux-2.4.23/include/linux/ipmi_smi.h
--- linux-2.4.23.org/include/linux/ipmi_smi.h 2003-12-06 12:52:27.322098886 +0100
+++ linux-2.4.23/include/linux/ipmi_smi.h 2003-12-06 17:46:43.162419535 +0100
@@ -35,6 +35,8 @@
 #define __LINUX_IPMI_SMI_H
 
 #include
+#include
+#include
 
 /* This files describes the interface for IPMI system management
    interface drivers to bind into the IPMI message handler. */
@@ -141,4 +143,11 @@
 	msg->done(msg);
 }
 
+/* Allow the lower layer to add things to the proc filesystem
+   directory for this interface.  Note that the entry will
+   automatically be destroyed when the interface is destroyed. */
+int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
+			    read_proc_t *read_proc, write_proc_t *write_proc,
+			    void *data, struct module *owner);
+
 #endif /* __LINUX_IPMI_SMI_H */
diff -urN linux-2.4.23.org/include/linux/net.h linux-2.4.23/include/linux/net.h
--- linux-2.4.23.org/include/linux/net.h 2003-12-06 12:52:26.841199031 +0100
+++ linux-2.4.23/include/linux/net.h 2003-12-06 17:46:43.181415550 +0100
@@ -24,7 +24,7 @@
 
 struct poll_table_struct;
 
-#define NPROTO 32 /* should be enough for now.. */
+#define NPROTO 64 /* should be enough for now.. */
 
 #define SYS_SOCKET 1 /* sys_socket(2) */
diff -urN linux-2.4.23.org/include/linux/socket.h linux-2.4.23/include/linux/socket.h
--- linux-2.4.23.org/include/linux/socket.h 2003-12-06 12:52:26.722223807 +0100
+++ linux-2.4.23/include/linux/socket.h 2003-12-06 17:46:43.201411356 +0100
@@ -175,7 +175,8 @@
 #define AF_WANPIPE 25 /* Wanpipe API Sockets */
 #define AF_LLC 26 /* Linux LLC */
 #define AF_BLUETOOTH 31 /* Bluetooth sockets */
-#define AF_MAX 32 /* For now.. */
+#define AF_IPMI 32 /* IPMI sockets */
+#define AF_MAX 33 /* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC AF_UNSPEC
@@ -207,6 +208,7 @@
 #define PF_WANPIPE AF_WANPIPE
 #define PF_LLC AF_LLC
 #define PF_BLUETOOTH AF_BLUETOOTH
+#define PF_IPMI AF_IPMI
 #define PF_MAX AF_MAX
 
 /* Maximum queue length specifiable by listen.
*/ diff -urN linux-2.4.23.org/include/net/af_ipmi.h linux-2.4.23/include/net/af_ipmi.h --- linux-2.4.23.org/include/net/af_ipmi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/include/net/af_ipmi.h 2003-12-06 17:46:43.228405693 +0100 @@ -0,0 +1,66 @@ +#ifndef _NET_IPMI_H +#define _NET_IPMI_H + +#include + +/* + * This is ipmi address for socket + */ +struct sockaddr_ipmi { + sa_family_t sipmi_family; /* AF_IPMI */ + int if_num; /* IPMI interface number */ + struct ipmi_addr ipmi_addr; +}; +#define SOCKADDR_IPMI_OVERHEAD (sizeof(struct sockaddr_ipmi) \ + - sizeof(struct ipmi_addr)) + +/* A msg_control item, this takes a 'struct ipmi_timing_parms' */ +#define IPMI_CMSG_TIMING_PARMS 0x01 + +/* + * This is ipmi message for socket + */ +struct ipmi_sock_msg { + int recv_type; + long msgid; + + unsigned char netfn; + unsigned char cmd; + int data_len; + unsigned char data[0]; +}; + +#define IPMI_MAX_SOCK_MSG_LENGTH (sizeof(struct ipmi_sock_msg)+IPMI_MAX_MSG_LENGTH) + +/* Register/unregister to receive specific commands. Uses struct + ipmi_cmdspec from linux/ipmi.h */ +#define SIOCIPMIREGCMD (SIOCPROTOPRIVATE + 0) +#define SIOCIPMIUNREGCMD (SIOCPROTOPRIVATE + 1) + +/* Register to receive events. Takes an integer */ +#define SIOCIPMIGETEVENT (SIOCPROTOPRIVATE + 2) + +/* Set the default timing parameters for the socket. Takes a struct + ipmi_timing_parms from linux/ipmi.h */ +#define SIOCIPMISETTIMING (SIOCPROTOPRIVATE + 3) +#define SIOCIPMIGETTIMING (SIOCPROTOPRIVATE + 4) + +/* Set/Get the IPMB address of the MC we are connected to, takes an + unsigned int. */ +#define SIOCIPMISETADDR (SIOCPROTOPRIVATE + 5) +#define SIOCIPMIGETADDR (SIOCPROTOPRIVATE + 6) + +/* Socket information for IPMI for protinfo. */ +struct ipmi_sock { + ipmi_user_t user; + struct sockaddr_ipmi addr; + struct list_head msg_list; + + wait_queue_head_t wait; + spinlock_t lock; + + int default_retries; + unsigned int default_retry_time_ms; +}; + +#endif/*_NET_IPMI_H*/ diff -urN linux-2.4.23.org/include/net/sock.h linux-2.4.23/include/net/sock.h --- linux-2.4.23.org/include/net/sock.h 2003-12-06 12:52:36.719142002 +0100 +++ linux-2.4.23/include/net/sock.h 2003-12-06 17:46:43.247401708 +0100 @@ -98,6 +98,10 @@ #include #endif +#if defined(CONFIG_IPMI_SOCKET) || defined(CONFIG_IPMI_SOCKET_MODULE) +#include +#endif + #if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE) struct atm_vcc; #endif @@ -673,6 +677,9 @@ #if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE) struct wanpipe_opt *af_wanpipe; #endif +#if defined(CONFIG_IPMI_SOCKET) || defined(CONFIG_IPMI_SOCKET_MODULE) + struct ipmi_sock af_ipmi; +#endif } protinfo; diff -urN linux-2.4.23.org/LOG linux-2.4.23/LOG --- linux-2.4.23.org/LOG 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/LOG 2003-12-06 17:46:43.371375702 +0100 @@ -0,0 +1,38 @@ +patching file Documentation/Configure.help +Hunk #1 succeeded at 4226 (offset 974 lines). +Hunk #2 succeeded at 29341 (offset 1959 lines). +patching file Documentation/IPMI.txt +patching file drivers/char/Config.in +Hunk #1 succeeded at 213 (offset 9 lines). 
+patching file drivers/char/ipmi/ipmi_devintf.c +patching file drivers/char/ipmi/ipmi_kcs_intf.c +patching file drivers/char/ipmi/ipmi_kcs_sm.c +patching file drivers/char/ipmi/ipmi_msghandler.c +patching file drivers/char/ipmi/ipmi_si.c +patching file drivers/char/ipmi/ipmi_si_sm.h +patching file drivers/char/ipmi/ipmi_smic_sm.c +patching file drivers/char/ipmi/ipmi_watchdog.c +patching file drivers/char/ipmi/Makefile +patching file include/linux/ipmi.h +patching file include/linux/ipmi_msgdefs.h +patching file include/linux/ipmi_smi.h +patching file include/linux/net.h +patching file include/linux/socket.h +Hunk #1 succeeded at 175 (offset 2 lines). +Hunk #2 succeeded at 208 (offset 2 lines). +patching file include/net/af_ipmi.h +patching file include/net/sock.h +Hunk #1 succeeded at 98 (offset 3 lines). +Hunk #2 succeeded at 677 (offset 6 lines). +patching file kernel/panic.c +Reversed (or previously applied) patch detected! Assume -R? [n] +Apply anyway? [n] +Skipping patch. +1 out of 1 hunk ignored -- saving rejects to file kernel/panic.c.rej +patching file net/Config.in +patching file net/ipmi/af_ipmi.c +patching file net/ipmi/Makefile +patching file net/Makefile +Hunk #1 FAILED at 7. +Hunk #2 succeeded at 47 with fuzz 2 (offset 2 lines). +1 out of 2 hunks FAILED -- saving rejects to file net/Makefile.rej diff -urN linux-2.4.23.org/net/Config.in linux-2.4.23/net/Config.in --- linux-2.4.23.org/net/Config.in 2003-12-06 12:52:51.579047508 +0100 +++ linux-2.4.23/net/Config.in 2003-12-06 17:46:43.327384930 +0100 @@ -16,6 +16,7 @@ fi bool 'Socket Filtering' CONFIG_FILTER tristate 'Unix domain sockets' CONFIG_UNIX +tristate 'IPMI sockets' CONFIG_IPMI_SOCKET bool 'TCP/IP networking' CONFIG_INET if [ "$CONFIG_INET" = "y" ]; then source net/ipv4/Config.in diff -urN linux-2.4.23.org/net/ipmi/af_ipmi.c linux-2.4.23/net/ipmi/af_ipmi.c --- linux-2.4.23.org/net/ipmi/af_ipmi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/net/ipmi/af_ipmi.c 2003-12-06 17:46:43.330384301 +0100 @@ -0,0 +1,593 @@ +/* + * IPMI Socket Glue + * + * Author: Louis Zhuang + * Copyright by Intel Corp., 2003 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IPMI_SOCKINTF_VERSION "v25" + +#ifdef CONFIG_DEBUG_KERNEL +static int debug = 0; +#define dbg(format, arg...) \ + do { \ + if(debug) \ + printk (KERN_DEBUG "%s: " format "\n", \ + __FUNCTION__, ## arg); \ + } while(0) +#else +#define dbg(format, arg...) +#endif /* CONFIG_DEBUG_KERNEL */ + +#define err(format, arg...) \ + printk(KERN_ERR "%s: " format "\n", \ + __FUNCTION__ , ## arg) +#define info(format, arg...) \ + printk(KERN_INFO "%s: " format "\n", \ + __FUNCTION__ , ## arg) +#define warn(format, arg...) \ + printk(KERN_WARNING "%s: " format "\n", \ + __FUNCTION__ , ## arg) +#define trace(format, arg...) 
\ + printk(KERN_INFO "%s(" format ")\n", \ + __FUNCTION__ , ## arg) + +static kmem_cache_t *ipmi_sk_cachep = NULL; + +static atomic_t ipmi_nr_socks = ATOMIC_INIT(0); + + + +/* + * utility functions + */ +static inline struct ipmi_sock *to_ipmi_sock(struct sock *sk) +{ + return &sk->protinfo.af_ipmi; +} + +static inline void ipmi_release_sock(struct sock *sk, int embrion) +{ + struct ipmi_sock *i = to_ipmi_sock(sk); + struct sk_buff *skb; + + if (i->user) { + ipmi_destroy_user(i->user); + i->user = NULL; + } + + sock_orphan(sk); + sk->shutdown = SHUTDOWN_MASK; + sk->state = TCP_CLOSE; + + while((skb=skb_dequeue(&sk->receive_queue))!=NULL) + kfree_skb(skb); + + sock_put(sk); +} + +static inline long ipmi_wait_for_queue(struct ipmi_sock *i, long timeo) +{ + + DECLARE_WAITQUEUE(wait, current); + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue_exclusive(&i->wait, &wait); + timeo = schedule_timeout(timeo); + set_current_state(TASK_RUNNING); + remove_wait_queue(&i->wait, &wait); + return timeo; +} + +/* + * IPMI operation functions + */ +static void sock_receive_handler(struct ipmi_recv_msg *msg, + void *handler_data) +{ + struct ipmi_sock *i = (struct ipmi_sock *)handler_data; + unsigned long flags; + + spin_lock_irqsave(&i->lock, flags); + list_add_tail(&msg->link, &i->msg_list); + spin_unlock_irqrestore(&i->lock, flags); + + wake_up_interruptible(&i->wait); +} + +/* + * protocol operation functions + */ +static int ipmi_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + sock->sk=NULL; + ipmi_release_sock(sk, 0); + return 0; +} + +static struct ipmi_user_hndl ipmi_hnd = { + .ipmi_recv_hndl = sock_receive_handler +}; + +static int ipmi_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +{ + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + struct sockaddr_ipmi *addr = (struct sockaddr_ipmi *)uaddr; + int err = -EINVAL; + + if (i->user != NULL) { + dbg("Cannot bind twice: %p", i->user); + return -EINVAL; + } + + err = ipmi_create_user(addr->if_num, &ipmi_hnd, i, &i->user); + if (err) { + dbg("Cannot create user for the socket: %p", i->user); + return err; + } + + memcpy(&i->addr, addr, sizeof(i->addr)); + return 0; +} + +static int ipmi_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) +{ + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + memcpy(uaddr, &i->addr, sizeof(i->addr)); + return 0; +} + +static unsigned int ipmi_poll(struct file * file, struct socket *sock, poll_table *wait) +{ + unsigned int has_msg = 0; + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + unsigned long flags; + + poll_wait(file, &i->wait, wait); + spin_lock_irqsave(&i->lock, flags); + if (!list_empty(&i->msg_list)) + has_msg = 1; + spin_unlock_irqrestore(&i->lock, flags); + + if (has_msg) + return POLLIN | POLLRDNORM; + return 0; +} + +static int ipmi_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + struct ipmi_cmdspec val; + int ival; + unsigned int uival; + int err; + + dbg("cmd=%#x, arg=%#lx", cmd, arg); + switch(cmd) { + case SIOCIPMIREGCMD: + err = copy_from_user((void *)&val, (void *)arg, + sizeof(cmd)); + if (err) { + err = -EFAULT; + break; + } + + err = ipmi_register_for_cmd(i->user, val.netfn, + val.cmd); + break; + + case SIOCIPMIUNREGCMD: + err = copy_from_user((void *)&val, (void *)arg, + sizeof(cmd)); + if (err) { + err = -EFAULT; + break; + } + + err = ipmi_unregister_for_cmd(i->user, val.netfn, + val.cmd); + break; + + case SIOCIPMIGETEVENT: + err = 
copy_from_user((void *)&ival, (void *)arg, + sizeof(ival)); + if (err) { + err = -EFAULT; + break; + } + + err = ipmi_set_gets_events(i->user, ival); + break; + + case SIOCIPMISETADDR: + err = copy_from_user((void *)&uival, (void *)arg, + sizeof(uival)); + if (err) { + err = -EFAULT; + break; + } + + ipmi_set_my_address(i->user, uival); + break; + + case SIOCIPMIGETADDR: + uival = ipmi_get_my_address(i->user); + + if (copy_to_user((void *) arg, &uival, sizeof(uival))) { + err = -EFAULT; + break; + } + err = 0; + break; + + case SIOCIPMISETTIMING: + { + struct ipmi_timing_parms parms; + + if (copy_from_user(&parms, (void *) arg, sizeof(parms))) { + err = -EFAULT; + break; + } + + i->default_retries = parms.retries; + i->default_retry_time_ms = parms.retry_time_ms; + err = 0; + break; + } + + case SIOCIPMIGETTIMING: + { + struct ipmi_timing_parms parms; + + parms.retries = i->default_retries; + parms.retry_time_ms = i->default_retry_time_ms; + + if (copy_to_user((void *) arg, &parms, sizeof(parms))) { + err = -EFAULT; + break; + } + + err = 0; + break; + } + + default: + err = dev_ioctl(cmd, (void *)arg); + break; + } + + return err; +} + +static int ipmi_recvmsg(struct socket *sock, struct msghdr *msg, int size, + int rflags, struct scm_cookie *scm) +{ + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + long timeo; + struct ipmi_recv_msg *rcvmsg; + struct sockaddr_ipmi addr; + char buf[IPMI_MAX_SOCK_MSG_LENGTH]; + struct ipmi_sock_msg *smsg = (struct ipmi_sock_msg *)buf; + int err; + unsigned long flags; + + timeo = sock_rcvtimeo(sock->sk, rflags & MSG_DONTWAIT); + + while (1) { + spin_lock_irqsave(&i->lock, flags); + if (!list_empty(&i->msg_list)) + break; + spin_unlock_irqrestore(&i->lock, flags); + if (!timeo) { + return -EAGAIN; + } else if (signal_pending (current)) { + dbg("Signal pending: %d", 1); + return -EINTR; + } + + timeo = ipmi_wait_for_queue(i, timeo); + } + + rcvmsg = list_entry(i->msg_list.next, struct ipmi_recv_msg, link); + list_del(&rcvmsg->link); + spin_unlock_irqrestore(&i->lock, flags); + + memcpy(&addr.ipmi_addr, &rcvmsg->addr, sizeof(addr.ipmi_addr)); + addr.if_num = i->addr.if_num; + addr.sipmi_family = i->addr.sipmi_family; + memcpy(msg->msg_name, &addr, sizeof(addr)); + msg->msg_namelen = (SOCKADDR_IPMI_OVERHEAD + + ipmi_addr_length(rcvmsg->addr.addr_type)); + + smsg->recv_type = rcvmsg->recv_type; + smsg->msgid = rcvmsg->msgid; + smsg->netfn = rcvmsg->msg.netfn; + smsg->cmd = rcvmsg->msg.cmd; + smsg->data_len = rcvmsg->msg.data_len; + memcpy(smsg->data, rcvmsg->msg.data, smsg->data_len); + + ipmi_free_recv_msg(rcvmsg); + + err = memcpy_toiovec(msg->msg_iov, (void *)smsg, + sizeof(struct ipmi_sock_msg) + smsg->data_len); + if (err) { + dbg("Cannot copy data to user: %p", i->user); + return err; + } + + dbg("user=%p", i->user); + dbg("addr_type=%x, channel=%x", + addr.ipmi_addr.addr_type, addr.ipmi_addr.channel); + dbg("netfn=%#02x, cmd=%#02x, data=%p, data_len=%x", + smsg->netfn, smsg->cmd, smsg->data, smsg->data_len); + + return (sizeof(struct ipmi_sock_msg) + smsg->data_len); +} + +static int ipmi_sendmsg(struct socket *sock, struct msghdr *msg, int len, + struct scm_cookie *scm) +{ + struct ipmi_sock *i = to_ipmi_sock(sock->sk); + struct sockaddr_ipmi *addr = (struct sockaddr_ipmi *)msg->msg_name; + struct ipmi_msg imsg; + unsigned char buf[IPMI_MAX_SOCK_MSG_LENGTH]; + struct ipmi_sock_msg *smsg = (struct ipmi_sock_msg *)buf; + int err; + struct ipmi_timing_parms tparms; + struct cmsghdr *cmsg; + + err = ipmi_validate_addr(&addr->ipmi_addr, + msg->msg_namelen - 
SOCKADDR_IPMI_OVERHEAD); + if (err) { + dbg("Invalid IPMI address: %p", i->user); + goto err; + } + + if (len > IPMI_MAX_SOCK_MSG_LENGTH) { + err = -EINVAL; + dbg("Message too long: %p", i->user); + goto err; + } + + if (len < sizeof(struct ipmi_sock_msg)) { + err = -EINVAL; + dbg("Msg data too small for header: %p", i->user); + goto err; + } + + err = memcpy_fromiovec((void *)smsg, msg->msg_iov, len); + if (err) { + dbg("Cannot copy data to kernel: %p", i->user); + goto err; + } + + if (len < smsg->data_len+sizeof(struct ipmi_sock_msg)) { + err = -EINVAL; + dbg("Msg data is out of bound: %p", i->user); + goto err; + } + + /* Set defaults. */ + tparms.retries = i->default_retries; + tparms.retry_time_ms = i->default_retry_time_ms; + + for (cmsg=CMSG_FIRSTHDR(msg); + cmsg; + cmsg = CMSG_NXTHDR(msg, cmsg)) + { + if (cmsg->cmsg_len < sizeof(struct cmsghdr)) { + err = -EINVAL; + dbg("cmsg length too short: %p", i->user); + goto err; + } + + if (cmsg->cmsg_level != SOL_SOCKET) + continue; + + if (cmsg->cmsg_type == IPMI_CMSG_TIMING_PARMS) { + struct ipmi_timing_parms *pparms; + + if (cmsg->cmsg_len != CMSG_LEN(sizeof(*pparms))) { + err = -EINVAL; + dbg("timing parms cmsg not right size: %p", + i->user); + goto err; + } + pparms = (struct ipmi_timing_parms *) CMSG_DATA(cmsg); + tparms.retries = pparms->retries; + tparms.retry_time_ms = pparms->retry_time_ms; + } + } + + imsg.netfn = smsg->netfn; + imsg.cmd = smsg->cmd; + imsg.data = smsg->data; + imsg.data_len = smsg->data_len; + + dbg("user=%p", i->user); + dbg("addr_type=%x, channel=%x", + addr->ipmi_addr.addr_type, addr->ipmi_addr.channel); + dbg("netfn=%#02x, cmd=%#02x, data=%p, data_len=%x", + imsg.netfn, imsg.cmd, imsg.data, imsg.data_len); + err = ipmi_request_settime(i->user, &addr->ipmi_addr, + smsg->msgid, &imsg, 0, + tparms.retries, tparms.retry_time_ms); + if (err) { + dbg("Cannot send message: %p", i->user); + goto err; + } + +err: + return err; +} + +static struct proto_ops ipmi_ops = { + .family = PF_IPMI, + .release = ipmi_release, + .bind = ipmi_bind, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = ipmi_getname, + .poll = ipmi_poll, + .ioctl = ipmi_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = ipmi_sendmsg, + .recvmsg = ipmi_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage +}; + + +static void ipmi_sock_destructor(struct sock *sk) +{ + skb_queue_purge(&sk->receive_queue); + + BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0); + BUG_TRAP(sk->socket==NULL); + if (sk->dead==0) { + printk("Attempt to release alive ipmi socket: %p\n", sk); + return; + } + + atomic_dec(&ipmi_nr_socks); + MOD_DEC_USE_COUNT; +} + +/* + * net protocol functions + */ +static struct sock *ipmi_socket_create1(struct socket *sock) +{ + struct sock *sk; + + if (atomic_read(&ipmi_nr_socks) >= 2*files_stat.max_files) + return NULL; + + MOD_INC_USE_COUNT; + + sk = sk_alloc(PF_IPMI, GFP_KERNEL, 1); + if (!sk) { + MOD_DEC_USE_COUNT; + return NULL; + } + + sock_init_data(sock, sk); + sock->sk->rcvtimeo = 5*HZ; + sock->sk->destruct = ipmi_sock_destructor; + spin_lock_init(&sk->protinfo.af_ipmi.lock); + INIT_LIST_HEAD(&sk->protinfo.af_ipmi.msg_list); + init_waitqueue_head(&sk->protinfo.af_ipmi.wait); + + /* Set to use default values. 
*/ + sk->protinfo.af_ipmi.default_retries = -1; + sk->protinfo.af_ipmi.default_retry_time_ms = 0; + + atomic_inc(&ipmi_nr_socks); + return sk; +} + +static int ipmi_socket_create(struct socket *sock, int protocol) +{ + if (!capable(CAP_NET_RAW)) + return -EPERM; + if (protocol && protocol != PF_IPMI) + return -EPROTONOSUPPORT; + + sock->state = SS_UNCONNECTED; + + switch (sock->type) { + case SOCK_RAW: + sock->type=SOCK_DGRAM; + case SOCK_DGRAM: + sock->ops = &ipmi_ops; + break; + default: + return -EPROTONOSUPPORT; + } + + return ipmi_socket_create1(sock)? 0 : -ENOMEM; +} + +static struct net_proto_family ipmi_family_ops = { + .family = PF_IPMI, + .create = ipmi_socket_create, +}; + + +/* + * init/exit functions + */ +static int __init ipmi_socket_init(void) +{ + + int err=0; + + printk(KERN_INFO "ipmi socket interface version " + IPMI_SOCKINTF_VERSION "\n"); + + ipmi_sk_cachep = kmem_cache_create("ipmi_sock", + sizeof(struct ipmi_sock), 0, + SLAB_HWCACHE_ALIGN, 0, 0); + if (!ipmi_sk_cachep) { + printk(KERN_CRIT "%s: Unable to create ipmi_sock SLAB cache\n", __func__); + err = -ENOMEM; + goto out; + } + + err = sock_register(&ipmi_family_ops); + if (err) + kmem_cache_destroy(ipmi_sk_cachep); +out: + return err; +} + +static void __exit ipmi_socket_exit(void) +{ + sock_unregister(PF_IPMI); + kmem_cache_destroy(ipmi_sk_cachep); +} + +#ifdef CONFIG_DEBUG_KERNEL +MODULE_PARM(debug, "i"); +#endif +module_init(ipmi_socket_init); +module_exit(ipmi_socket_exit); + +MODULE_LICENSE("GPL"); diff -urN linux-2.4.23.org/net/ipmi/Makefile linux-2.4.23/net/ipmi/Makefile --- linux-2.4.23.org/net/ipmi/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.23/net/ipmi/Makefile 2003-12-06 17:46:43.353379477 +0100 @@ -0,0 +1,7 @@ + +O_TARGET = ipmi.o + +obj-$(CONFIG_IPMI_SOCKET) = af_ipmi.o + +include $(TOPDIR)/Rules.make + diff -urN linux-2.4.23.org/net/Makefile linux-2.4.23/net/Makefile --- linux-2.4.23.org/net/Makefile 2003-12-06 12:52:51.596043969 +0100 +++ linux-2.4.23/net/Makefile 2003-12-06 17:48:04.493359735 +0100 @@ -8,7 +8,7 @@ O_TARGET := network.o mod-subdirs := ipv4/netfilter ipv6/netfilter bridge/netfilter ipx irda \ - bluetooth atm netlink sched core sctp + bluetooth atm netlink sched core sctp ipmi export-objs := netsyms.o subdir-y := core ethernet @@ -47,6 +47,7 @@ subdir-$(CONFIG_DECNET) += decnet subdir-$(CONFIG_ECONET) += econet subdir-$(CONFIG_VLAN_8021Q) += 8021q +subdir-$(CONFIG_IPMI_SOCKET) += ipmi ifeq ($(CONFIG_NETFILTER),y) mod-subdirs += ipv4/ipvs
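
Below is a minimal, untested userland sketch (not part of the patch) of how the AF_IPMI socket interface added above might be used. It assumes the patched linux/ipmi.h, linux/ipmi_msgdefs.h and net/af_ipmi.h headers are visible to userspace, that AF_IPMI is 32 as defined in the socket.h hunk, and that the system-interface address type and BMC channel constants from the existing linux/ipmi.h are available. It binds to IPMI interface 0 and sends a Get Device ID request to the BMC:

/* af_ipmi_example.c - illustrative sketch only, untested.
 * Sends an IPMI Get Device ID request over an AF_IPMI socket and
 * prints the response. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/ipmi.h>
#include <linux/ipmi_msgdefs.h>
#include <net/af_ipmi.h>

#ifndef AF_IPMI
#define AF_IPMI 32	/* mirrors the socket.h change above */
#endif

int main(void)
{
	struct sockaddr_ipmi addr;
	struct ipmi_system_interface_addr *si;
	unsigned char buf[IPMI_MAX_SOCK_MSG_LENGTH];
	struct ipmi_sock_msg *msg = (struct ipmi_sock_msg *)buf;
	struct msghdr mh;
	struct iovec iov;
	int fd;

	fd = socket(AF_IPMI, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket(AF_IPMI)");
		return 1;
	}

	/* Bind to IPMI interface 0; this creates the in-kernel IPMI user. */
	memset(&addr, 0, sizeof(addr));
	addr.sipmi_family = AF_IPMI;
	addr.if_num = 0;
	si = (struct ipmi_system_interface_addr *)&addr.ipmi_addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* Build a Get Device ID request (no request data). */
	memset(msg, 0, sizeof(*msg));
	msg->msgid = 1;
	msg->netfn = IPMI_NETFN_APP_REQUEST;
	msg->cmd = IPMI_GET_DEVICE_ID_CMD;
	msg->data_len = 0;

	iov.iov_base = msg;
	iov.iov_len = sizeof(struct ipmi_sock_msg) + msg->data_len;
	memset(&mh, 0, sizeof(mh));
	mh.msg_name = &addr;
	mh.msg_namelen = sizeof(addr);
	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	if (sendmsg(fd, &mh, 0) < 0) {
		perror("sendmsg");
		return 1;
	}

	/* Wait for the reply; the receive timeout defaults to 5*HZ in
	   ipmi_socket_create1() above. */
	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	if (recvmsg(fd, &mh, 0) < 0) {
		perror("recvmsg");
		return 1;
	}
	printf("netfn %#x cmd %#x completion code %#x (%d data bytes)\n",
	       msg->netfn, msg->cmd,
	       msg->data_len > 0 ? msg->data[0] : 0xff, msg->data_len);

	close(fd);
	return 0;
}

Per-message retry behaviour can be overridden by attaching a SOL_SOCKET control message of type IPMI_CMSG_TIMING_PARMS carrying a struct ipmi_timing_parms, or set per socket with the SIOCIPMISETTIMING ioctl.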