[packages/kernel.git] / linux-2.4.20-ipmi.patch
2c4f4c6a 1diff -urNp linux-5010/Documentation/Configure.help linux-5020/Documentation/Configure.help
2--- linux-5010/Documentation/Configure.help
3+++ linux-5020/Documentation/Configure.help
4@@ -26617,6 +26617,31 @@ CONFIG_TULIP_MWI
5
6 If unsure, say N.
7
8+IPMI top-level message handler
9+CONFIG_IPMI_HANDLER
10+ This enables the central IPMI message handler, required for IPMI
11+ to work. Note that you must have this enabled to do any other IPMI
12+ things. See IPMI.txt for more details.
13+
14+Generate a panic event to all BMCs on a panic
15+CONFIG_IPMI_PANIC_EVENT
16+ When a panic occurs, this will cause the IPMI message handler to
17+ generate an IPMI event describing the panic to each interface
18+ registered with the message handler.
19+
20+Device interface for IPMI
21+CONFIG_IPMI_DEVICE_INTERFACE
22+ This provides an IOCTL interface to the IPMI message handler so
23+ userland processes may use IPMI. It supports poll() and select().
24+
25+IPMI KCS handler
26+CONFIG_IPMI_KCS
27+ Provides a driver for a KCS-style interface to a BMC.
28+
29+IPMI Watchdog Timer
30+CONFIG_IPMI_WATCHDOG
31+ This enables the IPMI watchdog timer.
32+
33 #
34 # A couple of things I keep forgetting:
35 # capitalize: AppleTalk, Ethernet, DOS, DMA, FAT, FTP, Internet,
36diff -urNp linux-5010/Documentation/IPMI.txt linux-5020/Documentation/IPMI.txt
37--- linux-5010/Documentation/IPMI.txt 1970-01-01 01:00:00.000000000 +0100
38+++ linux-5020/Documentation/IPMI.txt
39@@ -0,0 +1,352 @@
40+
41+ The Linux IPMI Driver
42+ ---------------------
43+ Corey Minyard
44+ <minyard@mvista.com>
45+ <minyard@acm.org>
46+
47+This document describes how to use the IPMI driver for Linux. If you
48+are not familiar with IPMI itself, see the web site at
49+http://www.intel.com/design/servers/ipmi/index.htm. IPMI is a big
50+subject and I can't cover it all here!
51+
52+Basic Design
53+------------
54+
55+The Linux IPMI driver is designed to be very modular and flexible; you
56+only need to take the pieces you need, and you can use it in many
57+different ways. Because of that, it's broken into many chunks of
58+code. These chunks are:
59+
60+ipmi_msghandler - This is the central piece of software for the IPMI
61+system. It handles all messages, message timing, and responses. The
62+IPMI users tie into this, and the IPMI physical interfaces (called
63+System Management Interfaces, or SMIs) also tie in here. This
64+provides the kernelland interface for IPMI, but does not provide an
65+interface for use by application processes.
66+
67+ipmi_devintf - This provides a userland IOCTL interface for the IPMI
68+driver; each open file for this device ties in to the message handler
69+as an IPMI user.
70+
71+ipmi_kcs_drv - A driver for the KCS SMI. Most systems have a KCS
72+interface for IPMI.
73+
74+
75+Much documentation for the interface is in the include files. The
76+IPMI include files are:
77+
78+ipmi.h - Contains the user interface and IOCTL interface for IPMI.
79+
80+ipmi_smi.h - Contains the interface for SMI drivers to use.
81+
82+ipmi_msgdefs.h - General definitions for base IPMI messaging.
83+
84+
85+Addressing
86+----------
87+
88+IPMI addressing works much like IP addressing; you have an overlay
89+to handle the different address types. The overlay is:
90+
91+ struct ipmi_addr
92+ {
93+ int addr_type;
94+ short channel;
95+ char data[IPMI_MAX_ADDR_SIZE];
96+ };
97+
98+The addr_type determines what the address really is. The driver
99+currently understands two different types of addresses.
100+
101+"System Interface" addresses are defined as:
102+
103+ struct ipmi_system_interface_addr
104+ {
105+ int addr_type;
106+ short channel;
107+ };
108+
109+and the type is IPMI_SYSTEM_INTERFACE_ADDR_TYPE. This is used for talking
110+straight to the BMC on the current card. The channel must be
111+IPMI_BMC_CHANNEL.
112+
113+Messages that are destined to go out on the IPMB bus use the
114+IPMI_IPMB_ADDR_TYPE address type. The format is
115+
116+ struct ipmi_ipmb_addr
117+ {
118+ int addr_type;
119+ short channel;
120+ unsigned char slave_addr;
121+ unsigned char lun;
122+ };
123+
124+The "channel" here is generally zero, but some devices support more
125+than one channel; it corresponds to the channel as defined in the IPMI
126+spec.
127+
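For illustration, here is a minimal sketch of filling in both address
types (the slave address, LUN, and channel values are made up for the
example); either structure is cast to a struct ipmi_addr * when it is
handed to the driver:

    struct ipmi_system_interface_addr bmc_addr;
    struct ipmi_ipmb_addr             slave_addr;

    /* Talk straight to the BMC on this system. */
    bmc_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    bmc_addr.channel   = IPMI_BMC_CHANNEL;

    /* Talk to IPMB slave address 0x42, LUN 0, on channel 0
       (example values only). */
    slave_addr.addr_type  = IPMI_IPMB_ADDR_TYPE;
    slave_addr.channel    = 0;
    slave_addr.slave_addr = 0x42;
    slave_addr.lun        = 0;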
128+
129+Messages
130+--------
131+
132+Messages are defined as:
133+
134+struct ipmi_msg
135+{
136+ unsigned char netfn;
137+ unsigned char lun;
138+ unsigned char cmd;
139+ unsigned char *data;
140+ int data_len;
141+};
142+
143+The driver takes care of adding/stripping the header information. The
144+data portion is just the data to be sent (do NOT put addressing info
145+here) or the response. Note that the completion code of a response is
146+the first item in "data"; it is not stripped out because that is how
147+all the messages are defined in the spec (and thus makes counting the
148+offsets a little easier :-).
149+
150+When using the IOCTL interface from userland, you must provide a block
151+of data for "data", fill it, and set data_len to the length of the
152+block of data, even when receiving messages. Otherwise the driver
153+will have no place to put the message.
154+
155+Messages coming up from the message handler in kernelland will come in
156+as:
157+
158+ struct ipmi_recv_msg
159+ {
160+ struct list_head link;
161+
162+ /* The type of message as defined in the "Receive Types"
163+ defines above. */
164+ int recv_type;
165+
166+ ipmi_user_t *user;
167+ struct ipmi_addr addr;
168+ long msgid;
169+ struct ipmi_msg msg;
170+
171+ /* Call this when done with the message. It will presumably free
172+ the message and do any other necessary cleanup. */
173+ void (*done)(struct ipmi_recv_msg *msg);
174+
175+ /* Place-holder for the data, don't make any assumptions about
176+ the size or existence of this, since it may change. */
177+ unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
178+ };
179+
180+You should look at the receive type and handle the message
181+appropriately.
182+
183+
184+The Upper Layer Interface (Message Handler)
185+-------------------------------------------
186+
187+The upper layer of the interface provides the users with a consistent
188+view of the IPMI interfaces. It allows multiple SMI interfaces to be
189+addressed (because some boards actually have multiple BMCs on them)
190+and the user should not have to care what type of SMI is below them.
191+
192+
193+Creating the User
194+
195+To use the message handler, you must first create a user using
196+ipmi_create_user. The interface number specifies which SMI you want
197+to connect to, and you must supply callback functions to be called
198+when data comes in. The callback function can run at interrupt level,
199+so be careful using the callbacks. This also allows you to pass in a
200+piece of data, the handler_data, that will be passed back to you on
201+all calls.
202+
203+Once you are done, call ipmi_destroy_user() to get rid of the user.
204+
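As a rough kernel-side sketch of the above (the function and variable
names here are made up; the handler layout and calls are the ones
described in this document and in ipmi.h):

    static void my_recv_handler(struct ipmi_recv_msg *msg,
                                void                 *handler_data)
    {
            /* May be called at interrupt level; keep it short and
               always free the message when you are done with it. */
            ipmi_free_recv_msg(msg);
    }

    static struct ipmi_user_hndl my_hndlrs =
    {
            ipmi_recv_hndl : my_recv_handler
    };

    static ipmi_user_t my_user;

    /* In your init code: */
    int rv = ipmi_create_user(0,           /* interface number */
                              &my_hndlrs,
                              NULL,        /* handler_data */
                              &my_user);
    if (rv)
            return rv;

    /* ... and when you are done: */
    ipmi_destroy_user(my_user);
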
205+From userland, opening the device automatically creates a user, and
206+closing the device automatically destroys the user.
207+
208+
209+Messaging
210+
211+To send a message from kernel-land, the ipmi_request() call does
212+pretty much all message handling. Most of the parameters are
213+self-explanatory. However, it takes a "msgid" parameter. This is NOT
214+the sequence number of messages. It is simply a long value that is
215+passed back when the response for the message is returned. You may
216+use it for anything you like.
217+
218+Responses come back in the function pointed to by the ipmi_recv_hndl
219+field of the "handler" that you passed in to ipmi_create_user().
220+Remember again, these may be running at interrupt level. Remember to
221+look at the receive type, too.
222+
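Continuing the sketch above, sending a Get Device ID to the local BMC
and letting the response come back through the receive handler might
look like this (a sketch only; error handling is omitted, the msgid
value is arbitrary, and the constants are the ones used elsewhere in
this patch):

    struct ipmi_system_interface_addr addr;
    struct ipmi_msg                   msg;
    int                               rv;

    addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    addr.channel   = IPMI_BMC_CHANNEL;

    msg.netfn    = IPMI_NETFN_APP_REQUEST;
    msg.lun      = 0;
    msg.cmd      = IPMI_GET_DEVICE_ID_CMD;
    msg.data     = NULL;        /* this request carries no data */
    msg.data_len = 0;

    rv = ipmi_request(my_user,
                      (struct ipmi_addr *) &addr,
                      0x1234,   /* msgid, echoed back with the response */
                      &msg,
                      0);       /* normal priority */
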
223+From userland, you fill out an ipmi_req_t structure and use the
224+IPMICTL_SEND_COMMAND ioctl. For incoming stuff, you can use select()
225+or poll() to wait for messages to come in. However, you cannot use
226+read() to get them; you must call the IPMICTL_RECEIVE_MSG ioctl with the
227+ipmi_recv_t structure to actually get the message. Remember that you
228+must supply a pointer to a block of data in the msg.data field, and
229+you must fill in the msg.data_len field with the size of the data.
230+This gives the receiver a place to actually put the message.
231+
232+If the message cannot fit into the data you provide, you will get an
233+EMSGSIZE error and the driver will leave the data in the receive
234+queue. If you want to get it and have it truncate the message, use
235+the IPMICTL_RECEIVE_MSG_TRUNC ioctl.
236+
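Putting the userland pieces together, a minimal sketch that sends a
Get Device ID to the BMC and waits for the response might look like
the following (error checking is omitted, the devfs name
/dev/ipmidev/0 is assumed, and the 0x06/0x01 netfn/cmd pair is the
standard Get Device ID command):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/ioctl.h>
    #include <linux/ipmi.h>

    struct ipmi_system_interface_addr bmc_addr;
    struct ipmi_addr                  rsp_addr;
    struct ipmi_req                   req;
    struct ipmi_recv                  recv;
    unsigned char                     data[256];  /* bigger than any KCS message */
    fd_set                            rset;
    int                               fd;

    fd = open("/dev/ipmidev/0", O_RDWR);

    bmc_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    bmc_addr.channel   = IPMI_BMC_CHANNEL;

    req.addr         = (unsigned char *) &bmc_addr;
    req.addr_len     = sizeof(bmc_addr);
    req.msgid        = 1;
    req.msg.netfn    = 0x06;    /* application netfn */
    req.msg.cmd      = 0x01;    /* Get Device ID */
    req.msg.data     = NULL;
    req.msg.data_len = 0;
    ioctl(fd, IPMICTL_SEND_COMMAND, &req);

    FD_ZERO(&rset);
    FD_SET(fd, &rset);
    select(fd + 1, &rset, NULL, NULL, NULL);

    /* You must supply the buffers the driver copies into. */
    recv.addr         = (unsigned char *) &rsp_addr;
    recv.addr_len     = sizeof(rsp_addr);
    recv.msg.data     = data;
    recv.msg.data_len = sizeof(data);
    ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);

    /* recv.msg.data[0] is the completion code of the response. */
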
237+When you send a command (which is defined by the lowest-order bit of
238+the netfn per the IPMI spec) on the IPMB bus, the driver will
239+automatically assign the sequence number to the command and save the
240+command. If the response is not received in the IPMI-specified 5
241+seconds, it will generate a response automatically saying the command
242+timed out. If an unsolicited response comes in (after the 5 seconds,
243+for instance), that response will be ignored.
244+
245+In kernelland, after you receive a message and are done with it, you
246+MUST call ipmi_free_recv_msg() on it, or you will leak messages. Note
247+that you should NEVER mess with the "done" field of a message; it is
248+required to properly clean up the message.
249+
250+Note that when sending, there is an ipmi_request_supply_msgs() call
251+that lets you supply the SMI message and receive message buffers. This is useful for
252+pieces of code that need to work even if the system is out of buffers
253+(the watchdog timer uses this, for instance). You supply your own
254+buffers and your own free routines. This is not recommended for normal use,
255+though, since it is tricky to manage your own buffers.
256+
257+
258+Events and Incoming Commands
259+
260+The driver takes care of polling for IPMI events and receiving
261+commands (commands are messages that are not responses; they are
262+commands that other things on the IPMB bus have sent you). To receive
263+these, you must register for them; they will not automatically be sent
264+to you.
265+
266+To receive events, you must call ipmi_set_gets_events() and set the
267+"val" to non-zero. Any events that have been received by the driver
268+since startup will immediately be delivered to the first user that
269+registers for events. After that, if multiple users are registered
270+for events, they will all receive all events that come in.
271+
272+For receiving commands, you have to individually register commands you
273+want to receive. Call ipmi_register_for_cmd() and supply the netfn
274+and command name for each command you want to receive. Only one user
275+may be registered for each netfn/cmd, but different users may register
276+for different commands.
277+
278+From userland, equivalent IOCTLs are provided to do these functions.
279+
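A short sketch of both sides, using the kernel user and the open
device fd from the earlier sketches (the 0x2e/0x01 netfn/cmd pair is
just an example OEM command):

    /* In the kernel: */
    ipmi_set_gets_events(my_user, 1);             /* deliver events to me */
    ipmi_register_for_cmd(my_user, 0x2e, 0x01);   /* deliver this command to me */
    /* ... and later ... */
    ipmi_unregister_for_cmd(my_user, 0x2e, 0x01);

    /* The userland equivalent, on an open device fd: */
    struct ipmi_cmdspec cmdspec;
    int                 gets_events = 1;

    ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &gets_events);
    cmdspec.netfn = 0x2e;
    cmdspec.cmd   = 0x01;
    ioctl(fd, IPMICTL_REGISTER_FOR_CMD, &cmdspec);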
280+
281+The Lower Layer (SMI) Interface
282+-------------------------------
283+
284+As mentioned before, multiple SMI interfaces may be registered to the
285+message handler; each of these is assigned an interface number when
286+it registers with the message handler. They are generally assigned
287+in the order they register, although if an SMI unregisters and then
288+another one registers, all bets are off.
289+
290+The file ipmi_smi.h defines the interface for SMIs; see it for more
291+details.
292+
293+
294+The KCS Driver
295+--------------
296+
297+The KCS driver allows up to 4 KCS interfaces to be configured in the
298+system. By default, the driver will register one KCS interface at the
299+spec-specified I/O port 0xca2 without interrupts. You can change this
300+at module load time (for a module) with:
301+
302+ insmod ipmi_kcs_drv.o kcs_ports=<port1>,<port2>... kcs_addrs=<addr1>,<addr2>
303+ kcs_irqs=<irq1>,<irq2>... kcs_trydefaults=[0|1]
304+
305+The KCS driver supports two types of interfaces: ports (for I/O port
306+based KCS interfaces) and memory addresses (for KCS interfaces in
307+memory). The driver supports both of them simultaneously; setting
308+the port to zero (or just not specifying it) will allow the memory
309+address to be used. The port will override the memory address if it
310+is specified and non-zero. kcs_trydefaults sets whether the standard
311+IPMI interface at 0xca2 and any interfaces specified by ACPI are
312+tried. By default, the driver tries them; set this value to zero to
313+turn this off.
314+
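For example (the port, address, and IRQ values here are made up), one
port-based interface at 0xca2 using IRQ 11 plus one memory-mapped
interface, with the default probing turned off, could be loaded with:

   insmod ipmi_kcs_drv.o kcs_ports=0xca2,0 kcs_addrs=0,0xf9827341 kcs_irqs=11,0 kcs_trydefaults=0
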
315+When compiled into the kernel, the addresses can be specified on the
316+kernel command line as:
317+
318+ ipmi_kcs=<bmc1>:<irq1>,<bmc2>:<irq2>....,[nodefault]
319+
320+Each <bmcx> value is either "p<port>" or "m<addr>" for port or memory
321+addresses. So for instance, a KCS interface at port 0xca2 using
322+interrupt 9 and a memory interface at address 0xf9827341 with no
323+interrupt would be specified as "ipmi_kcs=p0xca2:9,m0xf9827341".
324+If you specify zero for an irq or don't specify it, the driver will
325+run polled unless the software can detect the interrupt to use in the
326+ACPI tables.
327+
328+By default, the driver will attempt to detect a KCS device at the
329+spec-specified 0xca2 address and any address specified by ACPI. If
330+you want to turn this off, use the "nodefault" option.
331+
332+If you have high-res timers compiled into the kernel, the driver will
333+use them to provide much better performance. Note that if you do not
334+have high-res timers enabled in the kernel and you don't have
335+interrupts enabled, the driver will run VERY slowly. Don't blame me,
336+the KCS interface sucks.
337+
338+
339+Other Pieces
340+------------
341+
342+Watchdog
343+
344+A watchdog timer is provided that implements the Linux-standard
345+watchdog timer interface. It has five module parameters that can be
346+used to control it:
347+
348+ insmod ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
349+ preaction=<preaction type> preop=<preop type>
350+
351+The timeout is the number of seconds to the action, and the pretimeout
352+is the number of seconds before the reset that the pre-timeout panic will
353+occur (if pretimeout is zero, then pretimeout will not be enabled).
354+
355+The action may be "reset", "power_cycle", or "power_off"; it
356+specifies what to do when the timer times out and defaults to
357+"reset".
358+
359+The preaction may be "pre_smi" for an indication through the SMI
360+interface, "pre_int" for an indication through the SMI with an
361+interrupt, and "pre_nmi" for an NMI on a pretimeout. This is how
362+the driver is informed of the pretimeout.
363+
364+The preop may be set to "preop_none" for no operation on a pretimeout,
365+"preop_panic" to set the preoperation to panic, or "preop_give_data"
366+to provide data to read from the watchdog device when the pretimeout
367+occurs. A "pre_nmi" setting CANNOT be used with "preop_give_data"
368+because you can't do data operations from an NMI.
369+
370+When preop is set to "preop_give_data", one byte becomes ready to read
371+on the device when the pretimeout occurs. Select and fasync work on
372+the device, as well.
373+
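For instance, a userland daemon might wait for that byte like this (a
sketch only; it assumes the watchdog device is /dev/watchdog and
omits error handling):

    fd_set rset;
    char   dummy;
    int    wd_fd = open("/dev/watchdog", O_RDWR);

    FD_ZERO(&rset);
    FD_SET(wd_fd, &rset);
    if (select(wd_fd + 1, &rset, NULL, NULL, NULL) > 0) {
            /* The pretimeout has fired; consume the byte and do any
               pre-reset work before the action goes off. */
            read(wd_fd, &dummy, 1);
    }
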
374+When compiled into the kernel, the kernel command line is available
375+for configuring the watchdog:
376+
377+ ipmi_wdog=<timeout>[,<pretimeout>[,<option>[,<options>....]]]
378+
379+The options are the actions and preaction above (if an option
380+controlling the same thing is specified twice, the last is taken). An
381+options "start_now" is also there, if included, the watchdog will
382+start running immediately when all the drivers are ready, it doesn't
383+have to have a user hooked up to start it.
384+
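For example (values made up), a 60 second timeout with a 10 second
NMI pretimeout that starts as soon as the drivers are ready would be:

   ipmi_wdog=60,10,reset,pre_nmi,start_now
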
385+The watchdog will panic and start a 120 second reset timeout if it
386+gets a pre-action. During a panic or a reboot, the watchdog will
387+start a 120 second timer if it is running to make sure the reboot occurs.
388+
389+Note that if you use the NMI preaction for the watchdog, you MUST
390+NOT use nmi watchdog mode 1. If you use the NMI watchdog, you
391+must use mode 2.
392diff -urNp linux-5010/drivers/char/Config.in linux-5020/drivers/char/Config.in
393--- linux-5010/drivers/char/Config.in
394+++ linux-5020/drivers/char/Config.in
395@@ -199,6 +199,12 @@ if [ "$CONFIG_QIC02_TAPE" != "n" ]; then
396 fi
397 fi
398
399+tristate 'IPMI top-level message handler' CONFIG_IPMI_HANDLER
400+dep_mbool ' Generate a panic event to all BMCs on a panic' CONFIG_IPMI_PANIC_EVENT $CONFIG_IPMI_HANDLER
401+dep_tristate ' Device interface for IPMI' CONFIG_IPMI_DEVICE_INTERFACE $CONFIG_IPMI_HANDLER
402+dep_tristate ' IPMI KCS handler' CONFIG_IPMI_KCS $CONFIG_IPMI_HANDLER
403+dep_tristate ' IPMI Watchdog Timer' CONFIG_IPMI_WATCHDOG $CONFIG_IPMI_HANDLER
404+
405 mainmenu_option next_comment
406 comment 'Watchdog Cards'
407 bool 'Watchdog Timer Support' CONFIG_WATCHDOG
408diff -urNp linux-5010/drivers/char/ipmi/ipmi_devintf.c linux-5020/drivers/char/ipmi/ipmi_devintf.c
409--- linux-5010/drivers/char/ipmi/ipmi_devintf.c 1970-01-01 01:00:00.000000000 +0100
410+++ linux-5020/drivers/char/ipmi/ipmi_devintf.c
411@@ -0,0 +1,532 @@
412+/*
413+ * ipmi_devintf.c
414+ *
415+ * Linux device interface for the IPMI message handler.
416+ *
417+ * Author: MontaVista Software, Inc.
418+ * Corey Minyard <minyard@mvista.com>
419+ * source@mvista.com
420+ *
421+ * Copyright 2002 MontaVista Software Inc.
422+ *
423+ * This program is free software; you can redistribute it and/or modify it
424+ * under the terms of the GNU General Public License as published by the
425+ * Free Software Foundation; either version 2 of the License, or (at your
426+ * option) any later version.
427+ *
428+ *
429+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
430+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
431+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
432+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
433+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
434+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
435+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
436+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
437+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
438+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
439+ *
440+ * You should have received a copy of the GNU General Public License along
441+ * with this program; if not, write to the Free Software Foundation, Inc.,
442+ * 675 Mass Ave, Cambridge, MA 02139, USA.
443+ */
444+
445+#include <linux/config.h>
446+#include <linux/module.h>
447+#include <linux/errno.h>
448+#include <asm/system.h>
449+#include <linux/sched.h>
450+#include <linux/poll.h>
451+#include <linux/spinlock.h>
452+#include <linux/slab.h>
453+#include <linux/devfs_fs_kernel.h>
454+#include <linux/ipmi.h>
455+#include <asm/semaphore.h>
456+#include <linux/init.h>
457+
458+struct ipmi_file_private
459+{
460+ ipmi_user_t user;
461+ spinlock_t recv_msg_lock;
462+ struct list_head recv_msgs;
463+ struct file *file;
464+ struct fasync_struct *fasync_queue;
465+ wait_queue_head_t wait;
466+ struct semaphore recv_sem;
467+};
468+
469+static void file_receive_handler(struct ipmi_recv_msg *msg,
470+ void *handler_data)
471+{
472+ struct ipmi_file_private *priv = handler_data;
473+ int was_empty;
474+ unsigned long flags;
475+
476+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
477+
478+ was_empty = list_empty(&(priv->recv_msgs));
479+ list_add_tail(&(msg->link), &(priv->recv_msgs));
480+
481+ if (was_empty) {
482+ wake_up_interruptible(&priv->wait);
483+ kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
484+ }
485+
486+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
487+}
488+
489+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
490+{
491+ struct ipmi_file_private *priv = file->private_data;
492+ unsigned int mask = 0;
493+ unsigned long flags;
494+
495+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
496+
497+ poll_wait(file, &priv->wait, wait);
498+
499+ if (! list_empty(&(priv->recv_msgs)))
500+ mask |= (POLLIN | POLLRDNORM);
501+
502+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
503+
504+ return mask;
505+}
506+
507+static int ipmi_fasync(int fd, struct file *file, int on)
508+{
509+ struct ipmi_file_private *priv = file->private_data;
510+ int result;
511+
512+ result = fasync_helper(fd, file, on, &priv->fasync_queue);
513+
514+ return (result);
515+}
516+
517+static struct ipmi_user_hndl ipmi_hndlrs =
518+{
519+ ipmi_recv_hndl : file_receive_handler
520+};
521+
522+static int ipmi_open(struct inode *inode, struct file *file)
523+{
524+ int if_num = minor(inode->i_rdev);
525+ int rv;
526+ struct ipmi_file_private *priv;
527+
528+
529+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
530+ if (!priv)
531+ return -ENOMEM;
532+
533+ priv->file = file;
534+
535+ rv = ipmi_create_user(if_num,
536+ &ipmi_hndlrs,
537+ priv,
538+ &(priv->user));
539+ if (rv) {
540+ kfree(priv);
541+ return rv;
542+ }
543+
544+ file->private_data = priv;
545+
546+ spin_lock_init(&(priv->recv_msg_lock));
547+ INIT_LIST_HEAD(&(priv->recv_msgs));
548+ init_waitqueue_head(&priv->wait);
549+ priv->fasync_queue = NULL;
550+ sema_init(&(priv->recv_sem), 1);
551+
552+ return 0;
553+}
554+
555+static int ipmi_release(struct inode *inode, struct file *file)
556+{
557+ struct ipmi_file_private *priv = file->private_data;
558+ int rv;
559+
560+ rv = ipmi_destroy_user(priv->user);
561+ if (rv)
562+ return rv;
563+
564+ ipmi_fasync (-1, file, 0);
565+
566+ /* FIXME - free the messages in the list. */
567+ kfree(priv);
568+
569+ return 0;
570+}
571+
572+static int ipmi_ioctl(struct inode *inode,
573+ struct file *file,
574+ unsigned int cmd,
575+ unsigned long data)
576+{
577+ int rv = -EINVAL;
578+ struct ipmi_file_private *priv = file->private_data;
579+
580+ switch (cmd)
581+ {
582+ case IPMICTL_SEND_COMMAND:
583+ {
584+ struct ipmi_req req;
585+ struct ipmi_addr addr;
586+ unsigned char msgdata[IPMI_MAX_MSG_LENGTH];
587+
588+ if (copy_from_user(&req, (void *) data, sizeof(req))) {
589+ rv = -EFAULT;
590+ break;
591+ }
592+
593+ if (req.addr_len > sizeof(struct ipmi_addr))
594+ {
595+ rv = -EINVAL;
596+ break;
597+ }
598+
599+ if (copy_from_user(&addr, req.addr, req.addr_len)) {
600+ rv = -EFAULT;
601+ break;
602+ }
603+
604+ rv = ipmi_validate_addr(&addr, req.addr_len);
605+ if (rv)
606+ break;
607+
608+ if (req.msg.data != NULL) {
609+ if (req.msg.data_len > IPMI_MAX_MSG_LENGTH) {
610+ rv = -EMSGSIZE;
611+ break;
612+ }
613+
614+ if (copy_from_user(&msgdata,
615+ req.msg.data,
616+ req.msg.data_len))
617+ {
618+ rv = -EFAULT;
619+ break;
620+ }
621+ } else {
622+ req.msg.data_len = 0;
623+ }
624+
625+ req.msg.data = msgdata;
626+
627+ rv = ipmi_request(priv->user,
628+ &addr,
629+ req.msgid,
630+ &(req.msg),
631+ 0);
632+ break;
633+ }
634+
635+ case IPMICTL_RECEIVE_MSG:
636+ case IPMICTL_RECEIVE_MSG_TRUNC:
637+ {
638+ struct ipmi_recv rsp;
639+ int addr_len;
640+ struct list_head *entry;
641+ struct ipmi_recv_msg *msg;
642+ unsigned long flags;
643+
644+
645+ rv = 0;
646+ if (copy_from_user(&rsp, (void *) data, sizeof(rsp))) {
647+ rv = -EFAULT;
648+ break;
649+ }
650+
651+ /* We claim a semaphore because we don't want two
652+ users getting something from the queue at a time.
653+ Since we have to release the spinlock before we can
654+ copy the data to the user, it's possible another
655+ user will grab something from the queue, too. Then
656+ the messages might get out of order if something
657+ fails and the message gets put back onto the
658+ queue. This semaphore prevents that problem. */
659+ down(&(priv->recv_sem));
660+
661+ /* Grab the message off the list. */
662+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
663+ if (list_empty(&(priv->recv_msgs))) {
664+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
665+ rv = -EAGAIN;
666+ goto recv_err;
667+ }
668+ entry = priv->recv_msgs.next;
669+ msg = list_entry(entry, struct ipmi_recv_msg, link);
670+ list_del(entry);
671+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
672+
673+ addr_len = ipmi_addr_length(msg->addr.addr_type);
674+ if (rsp.addr_len < addr_len)
675+ {
676+ rv = -EINVAL;
677+ goto recv_putback_on_err;
678+ }
679+
680+ if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
681+ rv = -EFAULT;
682+ goto recv_putback_on_err;
683+ }
684+ rsp.addr_len = addr_len;
685+
686+ rsp.recv_type = msg->recv_type;
687+ rsp.msgid = msg->msgid;
688+ rsp.msg.netfn = msg->msg.netfn;
689+ rsp.msg.cmd = msg->msg.cmd;
690+
691+ if (msg->msg.data_len > 0) {
692+ if (rsp.msg.data_len < msg->msg.data_len) {
693+ rv = -EMSGSIZE;
694+ if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
695+ msg->msg.data_len = rsp.msg.data_len;
696+ } else {
697+ goto recv_putback_on_err;
698+ }
699+ }
700+
701+ if (copy_to_user(rsp.msg.data,
702+ msg->msg.data,
703+ msg->msg.data_len))
704+ {
705+ rv = -EFAULT;
706+ goto recv_putback_on_err;
707+ }
708+ rsp.msg.data_len = msg->msg.data_len;
709+ } else {
710+ rsp.msg.data_len = 0;
711+ }
712+
713+ if (copy_to_user((void *) data, &rsp, sizeof(rsp))) {
714+ rv = -EFAULT;
715+ goto recv_putback_on_err;
716+ }
717+
718+ up(&(priv->recv_sem));
719+ ipmi_free_recv_msg(msg);
720+ break;
721+
722+ recv_putback_on_err:
723+ /* If we got an error, put the message back onto
724+ the head of the queue. */
725+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
726+ list_add(entry, &(priv->recv_msgs));
727+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
728+ up(&(priv->recv_sem));
729+ break;
730+
731+ recv_err:
732+ up(&(priv->recv_sem));
733+ break;
734+ }
735+
736+ case IPMICTL_REGISTER_FOR_CMD:
737+ {
738+ struct ipmi_cmdspec val;
739+
740+ if (copy_from_user(&val, (void *) data, sizeof(val))) {
741+ rv = -EFAULT;
742+ break;
743+ }
744+
745+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
746+ break;
747+ }
748+
749+ case IPMICTL_UNREGISTER_FOR_CMD:
750+ {
751+ struct ipmi_cmdspec val;
752+
753+ if (copy_from_user(&val, (void *) data, sizeof(val))) {
754+ rv = -EFAULT;
755+ break;
756+ }
757+
758+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
759+ break;
760+ }
761+
762+ case IPMICTL_SET_GETS_EVENTS_CMD:
763+ {
764+ int val;
765+
766+ if (copy_from_user(&val, (void *) data, sizeof(val))) {
767+ rv = -EFAULT;
768+ break;
769+ }
770+
771+ rv = ipmi_set_gets_events(priv->user, val);
772+ break;
773+ }
774+
775+ case IPMICTL_SET_MY_ADDRESS_CMD:
776+ {
777+ unsigned int val;
778+
779+ if (copy_from_user(&val, (void *) data, sizeof(val))) {
780+ rv = -EFAULT;
781+ break;
782+ }
783+
784+ ipmi_set_my_address(priv->user, val);
785+ rv = 0;
786+ break;
787+ }
788+
789+ case IPMICTL_GET_MY_ADDRESS_CMD:
790+ {
791+ unsigned int val;
792+
793+ val = ipmi_get_my_address(priv->user);
794+
795+ if (copy_to_user((void *) data, &val, sizeof(val))) {
796+ rv = -EFAULT;
797+ break;
798+ }
799+ rv = 0;
800+ break;
801+ }
802+
803+ case IPMICTL_SET_MY_LUN_CMD:
804+ {
805+ unsigned int val;
806+
807+ if (copy_from_user(&val, (void *) data, sizeof(val))) {
808+ rv = -EFAULT;
809+ break;
810+ }
811+
812+ ipmi_set_my_LUN(priv->user, val);
813+ rv = 0;
814+ break;
815+ }
816+
817+ case IPMICTL_GET_MY_LUN_CMD:
818+ {
819+ unsigned int val;
820+
821+ val = ipmi_get_my_LUN(priv->user);
822+
823+ if (copy_to_user((void *) data, &val, sizeof(val))) {
824+ rv = -EFAULT;
825+ break;
826+ }
827+ rv = 0;
828+ break;
829+ }
830+
831+ }
832+
833+ return rv;
834+}
835+
836+
837+static struct file_operations ipmi_fops = {
838+ owner: THIS_MODULE,
839+ ioctl: ipmi_ioctl,
840+ open: ipmi_open,
841+ release: ipmi_release,
842+ fasync: ipmi_fasync,
843+ poll: ipmi_poll
844+};
845+
846+#define DEVICE_NAME "ipmidev"
847+
848+static int ipmi_major = 0;
849+MODULE_PARM(ipmi_major, "i");
850+
851+static devfs_handle_t devfs_handle;
852+
853+#define MAX_DEVICES 10
854+static devfs_handle_t handles[MAX_DEVICES];
855+
856+static void ipmi_new_smi(int if_num)
857+{
858+ char name[2];
859+
860+ if (if_num >= MAX_DEVICES)
861+ return;
862+
863+ name[0] = if_num + '0';
864+ name[1] = '\0';
865+
866+ handles[if_num] = devfs_register(devfs_handle, name, DEVFS_FL_NONE,
867+ ipmi_major, if_num,
868+ S_IFCHR | S_IRUSR | S_IWUSR,
869+ &ipmi_fops, NULL);
870+}
871+
872+static void ipmi_smi_gone(int if_num)
873+{
874+ if (if_num >= MAX_DEVICES)
875+ return;
876+
877+ devfs_unregister(handles[if_num]);
878+}
879+
880+static struct ipmi_smi_watcher smi_watcher =
881+{
882+ new_smi : ipmi_new_smi,
883+ smi_gone : ipmi_smi_gone
884+};
885+
886+static __init int init_ipmi_devintf(void)
887+{
888+ int rv;
889+
890+ if (ipmi_major < 0)
891+ return -EINVAL;
892+
893+ rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
894+ if (rv < 0) {
895+ printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
896+ return rv;
897+ }
898+
899+ if (ipmi_major == 0) {
900+ ipmi_major = rv;
901+ }
902+
903+ devfs_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
904+
905+ rv = ipmi_smi_watcher_register(&smi_watcher);
906+ if (rv) {
907+ unregister_chrdev(ipmi_major, DEVICE_NAME);
908+ printk(KERN_WARNING "ipmi: can't register smi watcher\n");
909+ return rv;
910+ }
911+
912+ printk(KERN_INFO "ipmi: device interface at char major %d\n",
913+ ipmi_major);
914+
915+ return 0;
916+}
917+module_init(init_ipmi_devintf);
918+
919+static __exit void cleanup_ipmi(void)
920+{
921+ ipmi_smi_watcher_unregister(&smi_watcher);
922+ devfs_unregister(devfs_handle);
923+ unregister_chrdev(ipmi_major, DEVICE_NAME);
924+}
925+module_exit(cleanup_ipmi);
926+#ifndef MODULE
927+static __init int ipmi_setup (char *str)
928+{
929+ int x;
930+
931+ if (get_option (&str, &x)) {
932+ /* ipmi=x sets the major number to x. */
933+ ipmi_major = x;
934+ } else if (!strcmp(str, "off")) {
935+ ipmi_major = -1;
936+ }
937+
938+ return 1;
939+}
940+#endif
941+
942+__setup("ipmi=", ipmi_setup);
943+MODULE_LICENSE("GPL");
944diff -urNp linux-5010/drivers/char/ipmi/ipmi_kcs_intf.c linux-5020/drivers/char/ipmi/ipmi_kcs_intf.c
945--- linux-5010/drivers/char/ipmi/ipmi_kcs_intf.c 1970-01-01 01:00:00.000000000 +0100
946+++ linux-5020/drivers/char/ipmi/ipmi_kcs_intf.c
947@@ -0,0 +1,1243 @@
948+/*
949+ * ipmi_kcs_intf.c
950+ *
951+ * The interface to the IPMI driver for the KCS.
952+ *
953+ * Author: MontaVista Software, Inc.
954+ * Corey Minyard <minyard@mvista.com>
955+ * source@mvista.com
956+ *
957+ * Copyright 2002 MontaVista Software Inc.
958+ *
959+ * This program is free software; you can redistribute it and/or modify it
960+ * under the terms of the GNU General Public License as published by the
961+ * Free Software Foundation; either version 2 of the License, or (at your
962+ * option) any later version.
963+ *
964+ *
965+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
966+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
967+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
968+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
969+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
970+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
971+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
972+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
973+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
974+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
975+ *
976+ * You should have received a copy of the GNU General Public License along
977+ * with this program; if not, write to the Free Software Foundation, Inc.,
978+ * 675 Mass Ave, Cambridge, MA 02139, USA.
979+ */
980+
981+/*
982+ * This file holds the "policy" for the interface to the KCS state
983+ * machine. It does the configuration, handles timers and interrupts,
984+ * and drives the real KCS state machine.
985+ */
986+
987+#include <linux/config.h>
988+#include <linux/module.h>
989+#include <asm/system.h>
990+#include <linux/sched.h>
991+#include <linux/timer.h>
992+#include <linux/errno.h>
993+#include <linux/spinlock.h>
994+#include <linux/slab.h>
995+#include <linux/delay.h>
996+#include <linux/list.h>
997+#include <linux/ioport.h>
998+#ifdef CONFIG_HIGH_RES_TIMERS
999+#include <linux/hrtime.h>
1000+#endif
1001+#include <linux/interrupt.h>
1002+#include <linux/ipmi_smi.h>
1003+#include <asm/io.h>
1004+#include "ipmi_kcs_sm.h"
1005+#include <linux/init.h>
1006+
1007+/* Measure times between events in the driver. */
1008+#undef DEBUG_TIMING
1009+
1010+#ifdef CONFIG_IPMI_KCS
1011+/* This forces a dependency to the config file for this option. */
1012+#endif
1013+
1014+enum kcs_intf_state {
1015+ KCS_NORMAL,
1016+ KCS_GETTING_FLAGS,
1017+ KCS_GETTING_EVENTS,
1018+ KCS_CLEARING_FLAGS,
1019+ KCS_CLEARING_FLAGS_THEN_SET_IRQ,
1020+ KCS_GETTING_MESSAGES,
1021+ KCS_ENABLE_INTERRUPTS1,
1022+ KCS_ENABLE_INTERRUPTS2
1023+ /* FIXME - add watchdog stuff. */
1024+};
1025+
1026+struct kcs_info
1027+{
1028+ ipmi_smi_t intf;
1029+ struct kcs_data *kcs_sm;
1030+ spinlock_t kcs_lock;
1031+ spinlock_t msg_lock;
1032+ struct list_head xmit_msgs;
1033+ struct list_head hp_xmit_msgs;
1034+ struct ipmi_smi_msg *curr_msg;
1035+ enum kcs_intf_state kcs_state;
1036+
1037+ /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
1038+ is set to hold the flags until we are done handling everything
1039+ from the flags. */
1040+#define RECEIVE_MSG_AVAIL 0x01
1041+#define EVENT_MSG_BUFFER_FULL 0x02
1042+#define WDT_PRE_TIMEOUT_INT 0x08
1043+ unsigned char msg_flags;
1044+
1045+ /* If set to true, this will request events the next time the
1046+ state machine is idle. */
1047+ atomic_t req_events;
1048+
1049+ /* If true, run the state machine to completion on every send
1050+ call. Generally used after a panic to make sure stuff goes
1051+ out. */
1052+ int run_to_completion;
1053+
1054+ /* The I/O port of a KCS interface. */
1055+ int port;
1056+
1057+ /* zero if no irq; */
1058+ int irq;
1059+
1060+ /* The physical and remapped memory addresses of a KCS interface. */
1061+ unsigned long physaddr;
1062+ unsigned char *addr;
1063+
1064+ /* The timer for this kcs. */
1065+ struct timer_list kcs_timer;
1066+
1067+ /* The time (in jiffies) the last timeout occurred at. */
1068+ unsigned long last_timeout_jiffies;
1069+
1070+ /* Used to gracefully stop the timer without race conditions. */
1071+ volatile int stop_operation;
1072+ volatile int timer_stopped;
1073+
1074+ /* The driver will disable interrupts when it gets into a
1075+ situation where it cannot handle messages due to lack of
1076+ memory. Once that situation clears up, it will re-enable
1077+ interrupts. */
1078+ int interrupt_disabled;
1079+};
1080+
1081+static void deliver_recv_msg(struct kcs_info *kcs_info, struct ipmi_smi_msg *msg)
1082+{
1083+ /* Deliver the message to the upper layer with the lock
1084+ released. */
1085+ spin_unlock(&(kcs_info->kcs_lock));
1086+ ipmi_smi_msg_received(kcs_info->intf, msg);
1087+ spin_lock(&(kcs_info->kcs_lock));
1088+}
1089+
1090+static void return_hosed_msg(struct kcs_info *kcs_info)
1091+{
1092+ struct ipmi_smi_msg *msg = kcs_info->curr_msg;
1093+
1094+ /* Make it a response */
1095+ msg->rsp[0] = msg->data[0] | 4;
1096+ msg->rsp[1] = msg->data[1];
1097+ msg->rsp[2] = 0xFF; /* Unknown error. */
1098+ msg->rsp_size = 3;
1099+
1100+ kcs_info->curr_msg = NULL;
1101+ deliver_recv_msg(kcs_info, msg);
1102+}
1103+
1104+static enum kcs_result start_next_msg(struct kcs_info *kcs_info)
1105+{
1106+ int rv;
1107+ struct list_head *entry = NULL;
1108+#ifdef DEBUG_TIMING
1109+ struct timeval t;
1110+#endif
1111+
1112+ /* No need to save flags, we already have interrupts off and we
1113+ already hold the KCS lock. */
1114+ spin_lock(&(kcs_info->msg_lock));
1115+
1116+ /* Pick the high priority queue first. */
1117+ if (! list_empty(&(kcs_info->hp_xmit_msgs))) {
1118+ entry = kcs_info->hp_xmit_msgs.next;
1119+ } else if (! list_empty(&(kcs_info->xmit_msgs))) {
1120+ entry = kcs_info->xmit_msgs.next;
1121+ }
1122+
1123+ if (!entry) {
1124+ kcs_info->curr_msg = NULL;
1125+ rv = KCS_SM_IDLE;
1126+ } else {
1127+ int err;
1128+
1129+ list_del(entry);
1130+ kcs_info->curr_msg = list_entry(entry,
1131+ struct ipmi_smi_msg,
1132+ link);
1133+#ifdef DEBUG_TIMING
1134+ do_gettimeofday(&t);
1135+ printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1136+#endif
1137+ err = start_kcs_transaction(kcs_info->kcs_sm,
1138+ kcs_info->curr_msg->data,
1139+ kcs_info->curr_msg->data_size);
1140+ if (err) {
1141+ return_hosed_msg(kcs_info);
1142+ }
1143+
1144+ rv = KCS_CALL_WITHOUT_DELAY;
1145+ }
1146+ spin_unlock(&(kcs_info->msg_lock));
1147+
1148+ return rv;
1149+}
1150+
1151+static void start_enable_irq(struct kcs_info *kcs_info)
1152+{
1153+ unsigned char msg[2];
1154+
1155+ /* If we are enabling interrupts, we have to tell the
1156+ BMC to use them. */
1157+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1158+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1159+
1160+ start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
1161+ kcs_info->kcs_state = KCS_ENABLE_INTERRUPTS1;
1162+}
1163+
1164+static void start_clear_flags(struct kcs_info *kcs_info)
1165+{
1166+ unsigned char msg[3];
1167+
1168+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
1169+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1170+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
1171+ msg[2] = WDT_PRE_TIMEOUT_INT;
1172+
1173+ start_kcs_transaction(kcs_info->kcs_sm, msg, 3);
1174+ kcs_info->kcs_state = KCS_CLEARING_FLAGS;
1175+}
1176+
1177+/* When we have a situation where we run out of memory and cannot
1178+ allocate messages, we just leave them in the BMC and run the system
1179+ polled until we can allocate some memory. Once we have some
1180+ memory, we will re-enable the interrupt. */
1181+static inline void disable_kcs_irq(struct kcs_info *kcs_info)
1182+{
1183+ if ((kcs_info->irq) && (!kcs_info->interrupt_disabled)) {
1184+ disable_irq_nosync(kcs_info->irq);
1185+ kcs_info->interrupt_disabled = 1;
1186+ }
1187+}
1188+
1189+static inline void enable_kcs_irq(struct kcs_info *kcs_info)
1190+{
1191+ if ((kcs_info->irq) && (kcs_info->interrupt_disabled)) {
1192+ enable_irq(kcs_info->irq);
1193+ kcs_info->interrupt_disabled = 0;
1194+ }
1195+}
1196+
1197+static void handle_flags(struct kcs_info *kcs_info)
1198+{
1199+ if (kcs_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
1200+ /* Watchdog pre-timeout */
1201+ start_clear_flags(kcs_info);
1202+ kcs_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
1203+ spin_unlock(&(kcs_info->kcs_lock));
1204+ ipmi_smi_watchdog_pretimeout(kcs_info->intf);
1205+ spin_lock(&(kcs_info->kcs_lock));
1206+ } else if (kcs_info->msg_flags & RECEIVE_MSG_AVAIL) {
1207+ /* Messages available. */
1208+ kcs_info->curr_msg = ipmi_alloc_smi_msg();
1209+ if (!kcs_info->curr_msg) {
1210+ disable_kcs_irq(kcs_info);
1211+ kcs_info->kcs_state = KCS_NORMAL;
1212+ return;
1213+ }
1214+ enable_kcs_irq(kcs_info);
1215+
1216+ kcs_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1217+ kcs_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
1218+ kcs_info->curr_msg->data_size = 2;
1219+
1220+ start_kcs_transaction(kcs_info->kcs_sm,
1221+ kcs_info->curr_msg->data,
1222+ kcs_info->curr_msg->data_size);
1223+ kcs_info->kcs_state = KCS_GETTING_MESSAGES;
1224+ } else if (kcs_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
1225+ /* Events available. */
1226+ kcs_info->curr_msg = ipmi_alloc_smi_msg();
1227+ if (!kcs_info->curr_msg) {
1228+ disable_kcs_irq(kcs_info);
1229+ kcs_info->kcs_state = KCS_NORMAL;
1230+ return;
1231+ }
1232+ enable_kcs_irq(kcs_info);
1233+
1234+ kcs_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1235+ kcs_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
1236+ kcs_info->curr_msg->data_size = 2;
1237+
1238+ start_kcs_transaction(kcs_info->kcs_sm,
1239+ kcs_info->curr_msg->data,
1240+ kcs_info->curr_msg->data_size);
1241+ kcs_info->kcs_state = KCS_GETTING_EVENTS;
1242+ } else {
1243+ kcs_info->kcs_state = KCS_NORMAL;
1244+ }
1245+}
1246+
1247+static void handle_transaction_done(struct kcs_info *kcs_info)
1248+{
1249+ struct ipmi_smi_msg *msg;
1250+#ifdef DEBUG_TIMING
1251+ struct timeval t;
1252+
1253+ do_gettimeofday(&t);
1254+ printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1255+#endif
1256+ switch (kcs_info->kcs_state) {
1257+ case KCS_NORMAL:
1258+ kcs_info->curr_msg->rsp_size
1259+ = kcs_get_result(kcs_info->kcs_sm,
1260+ kcs_info->curr_msg->rsp,
1261+ IPMI_MAX_MSG_LENGTH);
1262+
1263+ /* Do this here because deliver_recv_msg() releases the
1264+ lock, and a new message can be put in during the
1265+ time the lock is released. */
1266+ msg = kcs_info->curr_msg;
1267+ kcs_info->curr_msg = NULL;
1268+ deliver_recv_msg(kcs_info, msg);
1269+ break;
1270+
1271+ case KCS_GETTING_FLAGS:
1272+ {
1273+ unsigned char msg[4];
1274+ unsigned int len;
1275+
1276+ /* We got the flags from the KCS, now handle them. */
1277+ len = kcs_get_result(kcs_info->kcs_sm, msg, 4);
1278+ if (msg[2] != 0) {
1279+ /* Error fetching flags, just give up for
1280+ now. */
1281+ kcs_info->kcs_state = KCS_NORMAL;
1282+ } else if (len < 3) {
1283+ /* Hmm, no flags. That's technically illegal, but
1284+ don't use uninitialized data. */
1285+ kcs_info->kcs_state = KCS_NORMAL;
1286+ } else {
1287+ kcs_info->msg_flags = msg[3];
1288+ handle_flags(kcs_info);
1289+ }
1290+ break;
1291+ }
1292+
1293+ case KCS_CLEARING_FLAGS:
1294+ case KCS_CLEARING_FLAGS_THEN_SET_IRQ:
1295+ {
1296+ unsigned char msg[3];
1297+
1298+ /* We cleared the flags. */
1299+ kcs_get_result(kcs_info->kcs_sm, msg, 3);
1300+ if (msg[2] != 0) {
1301+ /* Error clearing flags */
1302+ printk(KERN_WARNING
1303+ "ipmi_kcs: Error clearing flags: %2.2x\n",
1304+ msg[2]);
1305+ }
1306+ if (kcs_info->kcs_state == KCS_CLEARING_FLAGS_THEN_SET_IRQ)
1307+ start_enable_irq(kcs_info);
1308+ else
1309+ kcs_info->kcs_state = KCS_NORMAL;
1310+ break;
1311+ }
1312+
1313+ case KCS_GETTING_EVENTS:
1314+ {
1315+ kcs_info->curr_msg->rsp_size
1316+ = kcs_get_result(kcs_info->kcs_sm,
1317+ kcs_info->curr_msg->rsp,
1318+ IPMI_MAX_MSG_LENGTH);
1319+
1320+ /* Do this here because deliver_recv_msg() releases the
1321+ lock, and a new message can be put in during the
1322+ time the lock is released. */
1323+ msg = kcs_info->curr_msg;
1324+ kcs_info->curr_msg = NULL;
1325+ if (msg->rsp[2] != 0) {
1326+ /* Error getting event, probably done. */
1327+ msg->done(msg);
1328+
1329+ /* Take off the event flag. */
1330+ kcs_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
1331+ } else {
1332+ deliver_recv_msg(kcs_info, msg);
1333+ }
1334+ handle_flags(kcs_info);
1335+ break;
1336+ }
1337+
1338+ case KCS_GETTING_MESSAGES:
1339+ {
1340+ kcs_info->curr_msg->rsp_size
1341+ = kcs_get_result(kcs_info->kcs_sm,
1342+ kcs_info->curr_msg->rsp,
1343+ IPMI_MAX_MSG_LENGTH);
1344+
1345+ /* Do this here because deliver_recv_msg() releases the
1346+ lock, and a new message can be put in during the
1347+ time the lock is released. */
1348+ msg = kcs_info->curr_msg;
1349+ kcs_info->curr_msg = NULL;
1350+ if (msg->rsp[2] != 0) {
1351+ /* Error getting message, probably done. */
1352+ msg->done(msg);
1353+
1354+ /* Take off the msg flag. */
1355+ kcs_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
1356+ } else {
1357+ deliver_recv_msg(kcs_info, msg);
1358+ }
1359+ handle_flags(kcs_info);
1360+ break;
1361+ }
1362+
1363+ case KCS_ENABLE_INTERRUPTS1:
1364+ {
1365+ unsigned char msg[4];
1366+
1367+ /* We got the flags from the KCS, now handle them. */
1368+ kcs_get_result(kcs_info->kcs_sm, msg, 4);
1369+ if (msg[2] != 0) {
1370+ printk(KERN_WARNING
1371+ "ipmi_kcs: Could not enable interrupts"
1372+ ", failed get, using polled mode.\n");
1373+ kcs_info->kcs_state = KCS_NORMAL;
1374+ } else {
1375+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1376+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1377+ msg[2] = msg[3] | 1; /* enable msg queue int */
1378+ start_kcs_transaction(kcs_info->kcs_sm, msg,3);
1379+ kcs_info->kcs_state = KCS_ENABLE_INTERRUPTS2;
1380+ }
1381+ break;
1382+ }
1383+
1384+ case KCS_ENABLE_INTERRUPTS2:
1385+ {
1386+ unsigned char msg[4];
1387+
1388+ /* We got the flags from the KCS, now handle them. */
1389+ kcs_get_result(kcs_info->kcs_sm, msg, 4);
1390+ if (msg[2] != 0) {
1391+ printk(KERN_WARNING
1392+ "ipmi_kcs: Could not enable interrupts"
1393+ ", failed set, using polled mode.\n");
1394+ }
1395+ kcs_info->kcs_state = KCS_NORMAL;
1396+ break;
1397+ }
1398+ }
1399+}
1400+
1401+/* Called on timeouts and events. Timeouts should pass the elapsed
1402+ time, interrupts should pass in zero. */
1403+static enum kcs_result kcs_event_handler(struct kcs_info *kcs_info, int time)
1404+{
1405+ enum kcs_result kcs_result;
1406+
1407+ restart:
1408+ /* There used to be a loop here that waited a little while
1409+ (around 25us) before giving up. That turned out to be
1410+ pointless, the minimum delays I was seeing were in the 300us
1411+ range, which is far too long to wait in an interrupt. So
1412+ we just run until the state machine tells us something
1413+ happened or it needs a delay. */
1414+ kcs_result = kcs_event(kcs_info->kcs_sm, time);
1415+ time = 0;
1416+ while (kcs_result == KCS_CALL_WITHOUT_DELAY)
1417+ {
1418+ kcs_result = kcs_event(kcs_info->kcs_sm, 0);
1419+ }
1420+
1421+ if (kcs_result == KCS_TRANSACTION_COMPLETE)
1422+ {
1423+ handle_transaction_done(kcs_info);
1424+ kcs_result = kcs_event(kcs_info->kcs_sm, 0);
1425+ }
1426+ else if (kcs_result == KCS_SM_HOSED)
1427+ {
1428+ if (kcs_info->curr_msg != NULL) {
1429+ /* If we were handling a user message, format
1430+ a response to send to the upper layer to
1431+ tell it about the error. */
1432+ return_hosed_msg(kcs_info);
1433+ }
1434+ kcs_result = kcs_event(kcs_info->kcs_sm, 0);
1435+ kcs_info->kcs_state = KCS_NORMAL;
1436+ }
1437+
1438+ /* We prefer handling attn over new messages. */
1439+ if (kcs_result == KCS_ATTN)
1440+ {
1441+ unsigned char msg[2];
1442+
1443+ /* Got an attn, send down a get message flags to see
1444+ what's causing it. It would be better to handle
1445+ this in the upper layer, but due to the way
1446+ interrupts work with the KCS, that's not really
1447+ possible. */
1448+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1449+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
1450+
1451+ start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
1452+ kcs_info->kcs_state = KCS_GETTING_FLAGS;
1453+ goto restart;
1454+ }
1455+
1456+ /* If we are currently idle, try to start the next message. */
1457+ if (kcs_result == KCS_SM_IDLE) {
1458+ kcs_result = start_next_msg(kcs_info);
1459+ if (kcs_result != KCS_SM_IDLE)
1460+ goto restart;
1461+ }
1462+
1463+ if ((kcs_result == KCS_SM_IDLE)
1464+ && (atomic_read(&kcs_info->req_events)))
1465+ {
1466+ /* We are idle and the upper layer requested that I fetch
1467+ events, so do so. */
1468+ unsigned char msg[2];
1469+
1470+ atomic_set(&kcs_info->req_events, 0);
1471+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1472+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
1473+
1474+ start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
1475+ kcs_info->kcs_state = KCS_GETTING_FLAGS;
1476+ goto restart;
1477+ }
1478+
1479+ return kcs_result;
1480+}
1481+
1482+static void sender(void *send_info,
1483+ struct ipmi_smi_msg *msg,
1484+ int priority)
1485+{
1486+ struct kcs_info *kcs_info = (struct kcs_info *) send_info;
1487+ enum kcs_result result;
1488+ unsigned long flags;
1489+#ifdef DEBUG_TIMING
1490+ struct timeval t;
1491+#endif
1492+
1493+ spin_lock_irqsave(&(kcs_info->msg_lock), flags);
1494+#ifdef DEBUG_TIMING
1495+ do_gettimeofday(&t);
1496+ printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1497+#endif
1498+
1499+ if (kcs_info->run_to_completion) {
1500+ /* If we are running to completion, then throw it in
1501+ the list and run transactions until everything is
1502+ clear. Priority doesn't matter here. */
1503+ list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));
1504+
1505+ /* We have to release the msg lock and claim the kcs
1506+ lock in this case, because of race conditions. */
1507+ spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);
1508+
1509+ spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
1510+ result = kcs_event_handler(kcs_info, 0);
1511+ while (result != KCS_SM_IDLE) {
1512+ udelay(500);
1513+ result = kcs_event_handler(kcs_info, 500);
1514+ }
1515+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
1516+ return;
1517+ } else {
1518+ if (priority > 0) {
1519+ list_add_tail(&(msg->link), &(kcs_info->hp_xmit_msgs));
1520+ } else {
1521+ list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));
1522+ }
1523+ }
1524+ spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);
1525+
1526+ spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
1527+ if ((kcs_info->kcs_state == KCS_NORMAL)
1528+ && (kcs_info->curr_msg == NULL))
1529+ {
1530+ start_next_msg(kcs_info);
1531+ }
1532+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
1533+}
1534+
1535+static void set_run_to_completion(void *send_info, int i_run_to_completion)
1536+{
1537+ struct kcs_info *kcs_info = (struct kcs_info *) send_info;
1538+ enum kcs_result result;
1539+ unsigned long flags;
1540+
1541+ spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
1542+
1543+ kcs_info->run_to_completion = i_run_to_completion;
1544+ if (i_run_to_completion) {
1545+ result = kcs_event_handler(kcs_info, 0);
1546+ while (result != KCS_SM_IDLE) {
1547+ udelay(500);
1548+ result = kcs_event_handler(kcs_info, 500);
1549+ }
1550+ }
1551+
1552+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
1553+}
1554+
1555+static void request_events(void *send_info)
1556+{
1557+ struct kcs_info *kcs_info = (struct kcs_info *) send_info;
1558+
1559+ atomic_set(&kcs_info->req_events, 1);
1560+}
1561+
1562+static int new_user(void *send_info)
1563+{
1564+ if (!try_inc_mod_count(THIS_MODULE))
1565+ return -EBUSY;
1566+ return 0;
1567+}
1568+
1569+static void user_left(void *send_info)
1570+{
1571+ MOD_DEC_USE_COUNT;
1572+}
1573+
1574+/* Call every 10 ms. */
1575+#define KCS_TIMEOUT_TIME_USEC 10000
1576+#define KCS_USEC_PER_JIFFY (1000000/HZ)
1577+#define KCS_TIMEOUT_JIFFIES (KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
1578+#define KCS_SHORT_TIMEOUT_USEC 500 /* .5ms when the SM requests a
1579+ short timeout */
1580+static int initialized = 0;
1581+
1582+static void kcs_timeout(unsigned long data)
1583+{
1584+ struct kcs_info *kcs_info = (struct kcs_info *) data;
1585+ enum kcs_result kcs_result;
1586+ unsigned long flags;
1587+ unsigned long jiffies_now;
1588+ unsigned long time_diff;
1589+#ifdef DEBUG_TIMING
1590+ struct timeval t;
1591+#endif
1592+
1593+ if (kcs_info->stop_operation) {
1594+ kcs_info->timer_stopped = 1;
1595+ return;
1596+ }
1597+
1598+ spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
1599+#ifdef DEBUG_TIMING
1600+ do_gettimeofday(&t);
1601+ printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1602+#endif
1603+ jiffies_now = jiffies;
1604+ time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
1605+ * KCS_USEC_PER_JIFFY);
1606+ kcs_result = kcs_event_handler(kcs_info, time_diff);
1607+
1608+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
1609+
1610+ kcs_info->last_timeout_jiffies = jiffies_now;
1611+
1612+ if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) {
1613+ /* Running with interrupts, only do long timeouts. */
1614+ kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
1615+ goto do_add_timer;
1616+ }
1617+
1618+ /* If the state machine asks for a short delay, then shorten
1619+ the timer timeout. */
1620+#ifdef CONFIG_HIGH_RES_TIMERS
1621+ if (kcs_result == KCS_CALL_WITH_DELAY) {
1622+ kcs_info->kcs_timer.sub_expires
1623+ += usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
1624+ while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
1625+ kcs_info->kcs_timer.expires++;
1626+ kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
1627+ }
1628+ } else {
1629+ kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
1630+ }
1631+#else
1632+ /* If requested, take the shortest delay possible */
1633+ if (kcs_result == KCS_CALL_WITH_DELAY) {
1634+ kcs_info->kcs_timer.expires = jiffies + 1;
1635+ } else {
1636+ kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
1637+ }
1638+#endif
1639+
1640+ do_add_timer:
1641+ add_timer(&(kcs_info->kcs_timer));
1642+}
1643+
1644+static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
1645+{
1646+ struct kcs_info *kcs_info = (struct kcs_info *) data;
1647+ unsigned long flags;
1648+#ifdef DEBUG_TIMING
1649+ struct timeval t;
1650+#endif
1651+
1652+ spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
1653+ if (kcs_info->stop_operation)
1654+ goto out;
1655+
1656+#ifdef DEBUG_TIMING
1657+ do_gettimeofday(&t);
1658+ printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1659+#endif
1660+ kcs_event_handler(kcs_info, 0);
1661+ out:
1662+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
1663+}
1664+
1665+static struct ipmi_smi_handlers handlers =
1666+{
1667+ sender: sender,
1668+ request_events: request_events,
1669+ new_user: new_user,
1670+ user_left: user_left,
1671+ set_run_to_completion: set_run_to_completion
1672+};
1673+
1674+static unsigned char ipmi_kcs_dev_rev;
1675+static unsigned char ipmi_kcs_fw_rev_major;
1676+static unsigned char ipmi_kcs_fw_rev_minor;
1677+static unsigned char ipmi_version_major;
1678+static unsigned char ipmi_version_minor;
1679+
1680+extern int kcs_dbg;
1681+static int ipmi_kcs_detect_hardware(unsigned int port,
1682+ unsigned char *addr,
1683+ struct kcs_data *data)
1684+{
1685+ unsigned char msg[2];
1686+ unsigned char resp[IPMI_MAX_MSG_LENGTH];
1687+ unsigned long resp_len;
1688+ enum kcs_result kcs_result;
1689+
1690+ /* It's impossible for the KCS status register to be all 1's,
1691+ (assuming a properly functioning, self-initialized BMC)
1692+ but that's what you get from reading a bogus address, so we
1693+ test that first. */
1694+
1695+ if (port) {
1696+ if (inb(port+1) == 0xff) return -ENODEV;
1697+ } else {
1698+ if (readb(addr+1) == 0xff) return -ENODEV;
1699+ }
1700+
1701+ /* Do a Get Device ID command, since it comes back with some
1702+ useful info. */
1703+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1704+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
1705+ start_kcs_transaction(data, msg, 2);
1706+
1707+ kcs_result = kcs_event(data, 0);
1708+ for (;;)
1709+ {
1710+ if (kcs_result == KCS_CALL_WITH_DELAY) {
1711+ udelay(100);
1712+ kcs_result = kcs_event(data, 100);
1713+ }
1714+ else if (kcs_result == KCS_CALL_WITHOUT_DELAY)
1715+ {
1716+ kcs_result = kcs_event(data, 0);
1717+ }
1718+ else
1719+ break;
1720+ }
1721+ if (kcs_result == KCS_SM_HOSED) {
1722+ /* We couldn't get the state machine to run, so whatever's at
1723+ the port is probably not an IPMI KCS interface. */
1724+ return -ENODEV;
1725+ }
1726+ /* Otherwise, we got some data. */
1727+ resp_len = kcs_get_result(data, resp, IPMI_MAX_MSG_LENGTH);
1728+ if (resp_len < 6)
1729+ /* That's odd, it should be longer. */
1730+ return -EINVAL;
1731+
1732+ if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0))
1733+ /* That's odd, it shouldn't be able to fail. */
1734+ return -EINVAL;
1735+
1736+ ipmi_kcs_dev_rev = resp[4] & 0xf;
1737+ ipmi_kcs_fw_rev_major = resp[5] & 0x7f;
1738+ ipmi_kcs_fw_rev_minor = resp[6];
1739+ ipmi_version_major = resp[7] & 0xf;
1740+ ipmi_version_minor = resp[7] >> 4;
1741+
1742+ return 0;
1743+}
1744+
1745+/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1746+ a default IO port, and 1 ACPI/SPMI address. That sets KCS_MAX_DRIVERS */
1747+
1748+#define KCS_MAX_PARMS 4
1749+#define KCS_MAX_DRIVERS ((KCS_MAX_PARMS * 2) + 2)
1750+static struct kcs_info *kcs_infos[KCS_MAX_DRIVERS] =
1751+{ NULL, NULL, NULL, NULL };
1752+
1753+#define DEVICE_NAME "ipmi_kcs"
1754+
1755+#define DEFAULT_IO_PORT 0xca2
1756+
1757+static int kcs_trydefaults = 1;
1758+static unsigned long kcs_addrs[KCS_MAX_PARMS] = { 0, 0, 0, 0 };
1759+static int kcs_ports[KCS_MAX_PARMS] = { 0, 0, 0, 0 };
1760+static int kcs_irqs[KCS_MAX_PARMS] = { 0, 0, 0, 0 };
1761+
1762+MODULE_PARM(kcs_trydefaults, "i");
1763+MODULE_PARM(kcs_addrs, "1-4l");
1764+MODULE_PARM(kcs_irqs, "1-4i");
1765+MODULE_PARM(kcs_ports, "1-4i");
1766+
1767+/* Returns 0 if initialized, or negative on an error. */
1768+static int init_one_kcs(int kcs_port,
1769+ int irq,
1770+ unsigned long kcs_physaddr,
1771+ struct kcs_info **kcs)
1772+{
1773+ int rv;
1774+ struct kcs_info *new_kcs;
1775+
1776+ /* Did anything get passed in at all? Both == zero disables the
1777+ driver. */
1778+
1779+ if (!(kcs_port || kcs_physaddr))
1780+ return -ENODEV;
1781+
1782+ /* Only initialize a port OR a physical address on this call.
1783+ Also, IRQs can go with either ports or addresses. */
1784+
1785+ if (kcs_port && kcs_physaddr)
1786+ return -EINVAL;
1787+
1788+ new_kcs = kmalloc(kcs_size(), GFP_KERNEL);
1789+ if (!new_kcs) {
1790+ printk(KERN_ERR "ipmi_kcs: out of memory\n");
1791+ return -ENOMEM;
1792+ }
1793+
1794+ /* So we know not to free it unless we have allocated one. */
1795+ new_kcs->kcs_sm = NULL;
1796+
1797+ new_kcs->addr = NULL;
1798+ new_kcs->physaddr = kcs_physaddr;
1799+ new_kcs->port = kcs_port;
1800+
1801+ if (kcs_port) {
1802+ if (request_region(kcs_port, 2, DEVICE_NAME) == NULL) {
1803+ kfree(new_kcs);
1804+ printk(KERN_ERR
1805+ "ipmi_kcs: can't reserve port @ 0x%4.4x\n",
1806+ kcs_port);
1807+ return -EIO;
1808+ }
1809+ } else {
1810+ if (request_mem_region(kcs_physaddr, 2, DEVICE_NAME) == NULL) {
1811+ kfree(new_kcs);
1812+ printk(KERN_ERR
1813+ "ipmi_kcs: can't reserve memory @ 0x%lx\n",
1814+ kcs_physaddr);
1815+ return -EIO;
1816+ }
1817+		if ((new_kcs->addr = ioremap(kcs_physaddr, 2)) == NULL) {
1818+			release_mem_region(kcs_physaddr, 2);
1819+			kfree(new_kcs);
1820+			printk(KERN_ERR
1821+			       "ipmi_kcs: can't remap memory at 0x%lx\n", kcs_physaddr);
1822+			return -EIO;
1823+		}
1824+ }
1825+
1826+ new_kcs->kcs_sm = kmalloc(kcs_size(), GFP_KERNEL);
1827+ if (!new_kcs->kcs_sm) {
1828+ printk(KERN_ERR "ipmi_kcs: out of memory\n");
1829+ rv = -ENOMEM;
1830+ goto out_err;
1831+ }
1832+ init_kcs_data(new_kcs->kcs_sm, kcs_port, new_kcs->addr);
1833+ spin_lock_init(&(new_kcs->kcs_lock));
1834+ spin_lock_init(&(new_kcs->msg_lock));
1835+
1836+ rv = ipmi_kcs_detect_hardware(kcs_port, new_kcs->addr, new_kcs->kcs_sm);
1837+ if (rv) {
1838+ if (kcs_port)
1839+ printk(KERN_ERR
1840+ "ipmi_kcs: No KCS @ port 0x%4.4x\n",
1841+ kcs_port);
1842+ else
1843+ printk(KERN_ERR
1844+ "ipmi_kcs: No KCS @ addr 0x%lx\n",
1845+ kcs_physaddr);
1846+ goto out_err;
1847+ }
1848+
1849+ if (irq != 0) {
1850+ rv = request_irq(irq,
1851+ kcs_irq_handler,
1852+ SA_INTERRUPT,
1853+ DEVICE_NAME,
1854+ new_kcs);
1855+ if (rv) {
1856+ printk(KERN_WARNING
1857+ "ipmi_kcs: %s unable to claim interrupt %d,"
1858+ " running polled\n",
1859+ DEVICE_NAME, irq);
1860+ irq = 0;
1861+ }
1862+ }
1863+ new_kcs->irq = irq;
1864+
1865+ INIT_LIST_HEAD(&(new_kcs->xmit_msgs));
1866+ INIT_LIST_HEAD(&(new_kcs->hp_xmit_msgs));
1867+ new_kcs->curr_msg = NULL;
1868+ atomic_set(&new_kcs->req_events, 0);
1869+ new_kcs->run_to_completion = 0;
1870+
1871+ start_clear_flags(new_kcs);
1872+
1873+ if (irq) {
1874+ new_kcs->kcs_state = KCS_CLEARING_FLAGS_THEN_SET_IRQ;
1875+
1876+ printk(KERN_INFO
1877+ "ipmi_kcs: Acquiring BMC @ port=0x%x irq=%d\n",
1878+ kcs_port, irq);
1879+
1880+ } else {
1881+ if (kcs_port)
1882+ printk(KERN_INFO
1883+ "ipmi_kcs: Acquiring BMC @ port=0x%x\n",
1884+ kcs_port);
1885+ else
1886+ printk(KERN_INFO
1887+ "ipmi_kcs: Acquiring BMC @ addr=0x%lx\n",
1888+ kcs_physaddr);
1889+ }
1890+
1891+ rv = ipmi_register_smi(&handlers,
1892+ new_kcs,
1893+ ipmi_version_major,
1894+ ipmi_version_minor,
1895+ &(new_kcs->intf));
1896+ if (rv) {
1897+		if (irq) free_irq(irq, new_kcs);
1898+ printk(KERN_ERR
1899+ "ipmi_kcs: Unable to register device: error %d\n",
1900+ rv);
1901+ goto out_err;
1902+ }
1903+
1904+ new_kcs->interrupt_disabled = 0;
1905+ new_kcs->timer_stopped = 0;
1906+ new_kcs->stop_operation = 0;
1907+
1908+ init_timer(&(new_kcs->kcs_timer));
1909+ new_kcs->kcs_timer.data = (long) new_kcs;
1910+ new_kcs->kcs_timer.function = kcs_timeout;
1911+ new_kcs->last_timeout_jiffies = jiffies;
1912+ new_kcs->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
1913+ add_timer(&(new_kcs->kcs_timer));
1914+
1915+ *kcs = new_kcs;
1916+
1917+ return 0;
1918+
1919+ out_err:
1920+ if (kcs_port)
1921+ release_region (kcs_port, 2);
1922+ if (new_kcs->addr)
1923+ iounmap(new_kcs->addr);
1924+ if (kcs_physaddr)
1925+ release_mem_region(kcs_physaddr, 2);
1926+ if (new_kcs->kcs_sm)
1927+ kfree(new_kcs->kcs_sm);
1928+ kfree(new_kcs);
1929+ return rv;
1930+}
1931+
1932+#ifdef CONFIG_ACPI
1933+
1934+/* Retrieve the base physical address from ACPI tables. Originally
1935+ from Hewlett-Packard simple bmc.c, a GPL KCS driver. */
1936+
1937+#include <linux/acpi.h>
1938+/* A real hack, but everything's not there yet in 2.4. */
1939+#define COMPILER_DEPENDENT_UINT64 unsigned long
1940+#include <../drivers/acpi/include/acpi.h>
1941+#include <../drivers/acpi/include/actypes.h>
1942+
1943+struct SPMITable {
1944+ s8 Signature[4];
1945+ u32 Length;
1946+ u8 Revision;
1947+ u8 Checksum;
1948+ s8 OEMID[6];
1949+ s8 OEMTableID[8];
1950+ s8 OEMRevision[4];
1951+ s8 CreatorID[4];
1952+ s8 CreatorRevision[4];
1953+ s16 InterfaceType;
1954+ s16 SpecificationRevision;
1955+ u8 InterruptType;
1956+ u8 GPE;
1957+ s16 Reserved;
1958+ u64 GlobalSystemInterrupt;
1959+ u8 BaseAddress[12];
1960+ u8 UID[4];
1961+} __attribute__ ((packed));
1962+
1963+static unsigned long acpi_find_bmc(void)
1964+{
1965+ acpi_status status;
1966+ acpi_table_header *spmi;
1967+ static unsigned long io_base = 0;
1968+
1969+ if (io_base != 0)
1970+ return io_base;
1971+
1972+ status = acpi_get_firmware_table("SPMI", 1,
1973+ ACPI_LOGICAL_ADDRESSING, &spmi);
1974+
1975+ if (status != AE_OK) {
1976+ printk(KERN_ERR "ipmi_kcs: SPMI table not found.\n");
1977+ return 0;
1978+ }
1979+
1980+ memcpy(&io_base, ((struct SPMITable *)spmi)->BaseAddress,
1981+ sizeof(io_base));
1982+
1983+ return io_base;
1984+}
1985+#endif
1986+
1987+static __init int init_ipmi_kcs(void)
1988+{
1989+ int rv = 0;
1990+ int pos = 0;
1991+ int i = 0;
1992+#ifdef CONFIG_ACPI
1993+ unsigned long physaddr = 0;
1994+#endif
1995+
1996+ if (initialized)
1997+ return 0;
1998+ initialized = 1;
1999+
2000+ /* First do the "command-line" parameters */
2001+
2002+ for (i=0; i < KCS_MAX_PARMS; i++) {
2003+ rv = init_one_kcs(kcs_ports[i],
2004+ kcs_irqs[i],
2005+ 0,
2006+ &(kcs_infos[pos]));
2007+ if (rv == 0)
2008+ pos++;
2009+
2010+ rv = init_one_kcs(0,
2011+ kcs_irqs[i],
2012+ kcs_addrs[i],
2013+ &(kcs_infos[pos]));
2014+ if (rv == 0)
2015+ pos++;
2016+ }
2017+
2018+ /* Only try the defaults if enabled and resources are available
2019+ (because they weren't already specified above). */
2020+
2021+ if (kcs_trydefaults) {
2022+#ifdef CONFIG_ACPI
2023+ if ((physaddr = acpi_find_bmc())) {
2024+ if (!check_mem_region(physaddr, 2)) {
2025+ rv = init_one_kcs(0,
2026+ 0,
2027+ physaddr,
2028+ &(kcs_infos[pos]));
2029+ if (rv == 0)
2030+ pos++;
2031+ }
2032+ }
2033+#endif
2034+ if (!check_region(DEFAULT_IO_PORT, 2)) {
2035+ rv = init_one_kcs(DEFAULT_IO_PORT,
2036+ 0,
2037+ 0,
2038+ &(kcs_infos[pos]));
2039+ if (rv == 0)
2040+ pos++;
2041+ }
2042+ }
2043+
2044+ if (kcs_infos[0] == NULL) {
2045+		printk(KERN_ERR "ipmi_kcs: Unable to find any KCS interfaces\n");
2046+ return -ENODEV;
2047+ }
2048+
2049+ return 0;
2050+}
2051+module_init(init_ipmi_kcs);
2052+
2053+#ifdef MODULE
2054+void __exit cleanup_one_kcs(struct kcs_info *to_clean)
2055+{
2056+ int rv;
2057+ unsigned long flags;
2058+
2059+ if (! to_clean)
2060+ return;
2061+
2062+ /* Tell the timer and interrupt handlers that we are shutting
2063+ down. */
2064+ spin_lock_irqsave(&(to_clean->kcs_lock), flags);
2065+ spin_lock(&(to_clean->msg_lock));
2066+
2067+ to_clean->stop_operation = 1;
2068+
2069+ if (to_clean->irq != 0)
2070+ free_irq(to_clean->irq, to_clean);
2071+ if (to_clean->port) {
2072+ printk(KERN_INFO
2073+ "ipmi_kcs: Releasing BMC @ port=0x%x\n",
2074+ to_clean->port);
2075+ release_region (to_clean->port, 2);
2076+ }
2077+ if (to_clean->addr) {
2078+ printk(KERN_INFO
2079+ "ipmi_kcs: Releasing BMC @ addr=0x%lx\n",
2080+ to_clean->physaddr);
2081+ iounmap(to_clean->addr);
2082+ release_mem_region(to_clean->physaddr, 2);
2083+ }
2084+
2085+ spin_unlock(&(to_clean->msg_lock));
2086+ spin_unlock_irqrestore(&(to_clean->kcs_lock), flags);
2087+
2088+ /* Wait for the timer to stop. This avoids problems with race
2089+ conditions removing the timer here. Hopefully this will be
2090+ long enough to avoid problems with interrupts still
2091+ running. */
2092+ schedule_timeout(2);
2093+ while (!to_clean->timer_stopped) {
2094+ schedule_timeout(1);
2095+ }
2096+
2097+ rv = ipmi_unregister_smi(to_clean->intf);
2098+ if (rv) {
2099+ printk(KERN_ERR
2100+ "ipmi_kcs: Unable to unregister device: errno=%d\n",
2101+ rv);
2102+ }
2103+
2104+ initialized = 0;
2105+
2106+ kfree(to_clean->kcs_sm);
2107+ kfree(to_clean);
2108+}
2109+
2110+static __exit void cleanup_ipmi_kcs(void)
2111+{
2112+ int i;
2113+
2114+ if (!initialized)
2115+ return;
2116+
2117+ for (i=0; i<KCS_MAX_DRIVERS; i++) {
2118+ cleanup_one_kcs(kcs_infos[i]);
2119+ }
2120+}
2121+module_exit(cleanup_ipmi_kcs);
2122+#else
2123+
2124+/* Unfortunately, get_options() (lib/cmdline.c) only returns integers,
2125+   not longs.  Since we need ulongs (64-bit physical addresses), parse
2126+   the comma-separated list manually.  Arguments can take these forms:
2127+ m0xaabbccddeeff A physical memory address without an IRQ
2128+ m0xaabbccddeeff:cc A physical memory address with an IRQ
2129+ p0xaabb An IO port without an IRQ
2130+ p0xaabb:cc An IO port with an IRQ
2131+ nodefaults Suppress trying the default IO port or ACPI address
2132+
2133+ For example, to pass one IO port with an IRQ, one address, and
2134+ suppress the use of the default IO port and ACPI address,
2135+ use this option string: ipmi_kcs=p0xCA2:5,m0xFF5B0022,nodefaults
2136+
2137+ Remember, ipmi_kcs_setup() is passed the string after the equal sign. */
2138+
2139+static int __init ipmi_kcs_setup(char *str)
2140+{
2141+ unsigned long val;
2142+ char *cur, *colon;
2143+ int pos;
2144+
2145+ pos = 0;
2146+
2147+ cur = strsep(&str, ",");
2148+ while ((cur) && (*cur) && (pos < KCS_MAX_PARMS)) {
2149+ switch (*cur) {
2150+ case 'n':
2151+ if (strcmp(cur, "nodefaults") == 0)
2152+ kcs_trydefaults = 0;
2153+ else
2154+ printk(KERN_INFO
2155+ "ipmi_kcs: bad parameter value %s\n",
2156+ cur);
2157+ break;
2158+
2159+ case 'm':
2160+ case 'p':
2161+ val = simple_strtoul(cur + 1,
2162+ &colon,
2163+ 0);
2164+ if (*cur == 'p')
2165+ kcs_ports[pos] = val;
2166+ else
2167+ kcs_addrs[pos] = val;
2168+ if (*colon == ':') {
2169+ val = simple_strtoul(colon + 1,
2170+ &colon,
2171+ 0);
2172+ kcs_irqs[pos] = val;
2173+ }
2174+ pos++;
2175+ break;
2176+
2177+ default:
2178+ printk(KERN_INFO
2179+ "ipmi_kcs: bad parameter value %s\n",
2180+ cur);
2181+ }
2182+ cur = strsep(&str, ",");
2183+ }
2184+
2185+ return 1;
2186+}
2187+__setup("ipmi_kcs=", ipmi_kcs_setup);
2188+#endif
2189+
2190+MODULE_LICENSE("GPL");
2191diff -urNp linux-5010/drivers/char/ipmi/ipmi_kcs_sm.c linux-5020/drivers/char/ipmi/ipmi_kcs_sm.c
2192--- linux-5010/drivers/char/ipmi/ipmi_kcs_sm.c 1970-01-01 01:00:00.000000000 +0100
2193+++ linux-5020/drivers/char/ipmi/ipmi_kcs_sm.c
2194@@ -0,0 +1,474 @@
2195+/*
2196+ * ipmi_kcs_sm.c
2197+ *
2198+ * State machine for handling IPMI KCS interfaces.
2199+ *
2200+ * Author: MontaVista Software, Inc.
2201+ * Corey Minyard <minyard@mvista.com>
2202+ * source@mvista.com
2203+ *
2204+ * Copyright 2002 MontaVista Software Inc.
2205+ *
2206+ * This program is free software; you can redistribute it and/or modify it
2207+ * under the terms of the GNU General Public License as published by the
2208+ * Free Software Foundation; either version 2 of the License, or (at your
2209+ * option) any later version.
2210+ *
2211+ *
2212+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
2213+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
2214+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2215+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2216+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2217+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2218+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2219+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
2220+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
2221+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2222+ *
2223+ * You should have received a copy of the GNU General Public License along
2224+ * with this program; if not, write to the Free Software Foundation, Inc.,
2225+ * 675 Mass Ave, Cambridge, MA 02139, USA.
2226+ */
2227+
2228+/*
2229+ * This state machine is taken from the state machine in the IPMI spec,
2230+ * pretty much verbatim. If you have questions about the states, see
2231+ * that document.
2232+ */
2233+
2234+#include <asm/io.h>
2235+#include <asm/string.h> /* Gets rid of memcpy warning */
2236+
2237+#include "ipmi_kcs_sm.h"
2238+
2239+/* Set this if you want a printout of why the state machine was hosed
2240+ when it gets hosed. */
2241+#define DEBUG_HOSED_REASON
2242+
2243+/* Print the state machine state on entry every time. */
2244+#undef DEBUG_STATE
2245+
2246+/* The states the KCS driver may be in. */
2247+enum kcs_states {
2248+ KCS_IDLE, /* The KCS interface is currently
2249+ doing nothing. */
2250+ KCS_START_OP, /* We are starting an operation. The
2251+ data is in the output buffer, but
2252+ nothing has been done to the
2253+ interface yet. This was added to
2254+ the state machine in the spec to
2255+ wait for the initial IBF. */
2256+ KCS_WAIT_WRITE_START, /* We have written a write cmd to the
2257+ interface. */
2258+ KCS_WAIT_WRITE, /* We are writing bytes to the
2259+ interface. */
2260+ KCS_WAIT_WRITE_END, /* We have written the write end cmd
2261+ to the interface, and still need to
2262+ write the last byte. */
2263+ KCS_WAIT_READ, /* We are waiting to read data from
2264+ the interface. */
2265+ KCS_ERROR0, /* State to transition to the error
2266+ handler, this was added to the
2267+ state machine in the spec to be
2268+ sure IBF was there. */
2269+ KCS_ERROR1, /* First stage error handler, wait for
2270+ the interface to respond. */
2271+ KCS_ERROR2, /* The abort cmd has been written,
2272+ wait for the interface to
2273+ respond. */
2274+ KCS_ERROR3, /* We wrote some data to the
2275+ interface, wait for it to switch to
2276+ read mode. */
2277+ KCS_HOSED /* The hardware failed to follow the
2278+ state machine. */
2279+};
2280+
2281+#define MAX_KCS_READ_SIZE 80
2282+#define MAX_KCS_WRITE_SIZE 80
2283+
2284+/* Timeouts in microseconds. */
2285+#define IBF_RETRY_TIMEOUT 1000000
2286+#define OBF_RETRY_TIMEOUT 1000000
2287+#define MAX_ERROR_RETRIES 10
2288+
2289+#define IPMI_ERR_MSG_TRUNCATED 0xc6
2290+#define IPMI_ERR_UNSPECIFIED 0xff
2291+
2292+struct kcs_data
2293+{
2294+ enum kcs_states state;
2295+ unsigned int port;
2296+ unsigned char *addr;
2297+ unsigned char write_data[MAX_KCS_WRITE_SIZE];
2298+ int write_pos;
2299+ int write_count;
2300+ int orig_write_count;
2301+ unsigned char read_data[MAX_KCS_READ_SIZE];
2302+ int read_pos;
2303+ int truncated;
2304+
2305+ unsigned int error_retries;
2306+ long ibf_timeout;
2307+ long obf_timeout;
2308+};
2309+
2310+void init_kcs_data(struct kcs_data *kcs, unsigned int port, unsigned char *addr)
2311+{
2312+ kcs->state = KCS_IDLE;
2313+ kcs->port = port;
2314+ kcs->addr = addr;
2315+ kcs->write_pos = 0;
2316+ kcs->write_count = 0;
2317+ kcs->orig_write_count = 0;
2318+ kcs->read_pos = 0;
2319+ kcs->error_retries = 0;
2320+ kcs->truncated = 0;
2321+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
2322+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
2323+}
2324+
2325+/* Remember, init_one_kcs() ensured port and addr can't both be set. */
2326+
2327+static inline unsigned char read_status(struct kcs_data *kcs)
2328+{
2329+ if (kcs->port)
2330+ return inb(kcs->port + 1);
2331+ else
2332+ return readb(kcs->addr + 1);
2333+}
2334+
2335+static inline unsigned char read_data(struct kcs_data *kcs)
2336+{
2337+ if (kcs->port)
2338+ return inb(kcs->port + 0);
2339+ else
2340+ return readb(kcs->addr + 0);
2341+}
2342+
2343+static inline void write_cmd(struct kcs_data *kcs, unsigned char data)
2344+{
2345+ if (kcs->port)
2346+ outb(data, kcs->port + 1);
2347+ else
2348+ writeb(data, kcs->addr + 1);
2349+}
2350+
2351+static inline void write_data(struct kcs_data *kcs, unsigned char data)
2352+{
2353+ if (kcs->port)
2354+ outb(data, kcs->port + 0);
2355+ else
2356+ writeb(data, kcs->addr + 0);
2357+}
2358+
2359+/* Control codes. */
2360+#define KCS_GET_STATUS_ABORT 0x60
2361+#define KCS_WRITE_START 0x61
2362+#define KCS_WRITE_END 0x62
2363+#define KCS_READ_BYTE 0x68
2364+
2365+/* Status bits. */
2366+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
2367+#define KCS_IDLE_STATE 0
2368+#define KCS_READ_STATE 1
2369+#define KCS_WRITE_STATE 2
2370+#define KCS_ERROR_STATE 3
2371+#define GET_STATUS_ATN(status) ((status) & 0x04)
2372+#define GET_STATUS_IBF(status) ((status) & 0x02)
2373+#define GET_STATUS_OBF(status) ((status) & 0x01)
2374+
2375+
2376+static inline void write_next_byte(struct kcs_data *kcs)
2377+{
2378+ write_data(kcs, kcs->write_data[kcs->write_pos]);
2379+ (kcs->write_pos)++;
2380+ (kcs->write_count)--;
2381+}
2382+
2383+static inline void start_error_recovery(struct kcs_data *kcs, char *reason)
2384+{
2385+ (kcs->error_retries)++;
2386+ if (kcs->error_retries > MAX_ERROR_RETRIES) {
2387+#ifdef DEBUG_HOSED_REASON
2388+ printk("ipmi_kcs_sm: kcs hosed: %s\n", reason);
2389+#endif
2390+ kcs->state = KCS_HOSED;
2391+ } else {
2392+ kcs->state = KCS_ERROR0;
2393+ }
2394+}
2395+
2396+static inline void read_next_byte(struct kcs_data *kcs)
2397+{
2398+ if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
2399+ /* Throw the data away and mark it truncated. */
2400+ read_data(kcs);
2401+ kcs->truncated = 1;
2402+ } else {
2403+ kcs->read_data[kcs->read_pos] = read_data(kcs);
2404+ (kcs->read_pos)++;
2405+ }
2406+ write_data(kcs, KCS_READ_BYTE);
2407+}
2408+
2409+static inline int check_ibf(struct kcs_data *kcs,
2410+ unsigned char status,
2411+ long time)
2412+{
2413+ if (GET_STATUS_IBF(status)) {
2414+ kcs->ibf_timeout -= time;
2415+ if (kcs->ibf_timeout < 0) {
2416+ start_error_recovery(kcs, "IBF not ready in time");
2417+ return 1;
2418+ }
2419+ return 0;
2420+ }
2421+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
2422+ return 1;
2423+}
2424+
2425+static inline int check_obf(struct kcs_data *kcs,
2426+ unsigned char status,
2427+ long time)
2428+{
2429+ if (! GET_STATUS_OBF(status)) {
2430+ kcs->obf_timeout -= time;
2431+ if (kcs->obf_timeout < 0) {
2432+ start_error_recovery(kcs, "OBF not ready in time");
2433+ return 1;
2434+ }
2435+ return 0;
2436+ }
2437+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
2438+ return 1;
2439+}
2440+
2441+static void clear_obf(struct kcs_data *kcs, unsigned char status)
2442+{
2443+ if (GET_STATUS_OBF(status))
2444+ read_data(kcs);
2445+}
2446+
2447+static void restart_kcs_transaction(struct kcs_data *kcs)
2448+{
2449+ kcs->write_count = kcs->orig_write_count;
2450+ kcs->write_pos = 0;
2451+ kcs->read_pos = 0;
2452+ kcs->state = KCS_WAIT_WRITE_START;
2453+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
2454+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
2455+ write_cmd(kcs, KCS_WRITE_START);
2456+}
2457+
2458+int start_kcs_transaction(struct kcs_data *kcs, char *data, unsigned int size)
2459+{
2460+ if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
2461+ return -1;
2462+ }
2463+
2464+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
2465+ return -2;
2466+ }
2467+
2468+ kcs->error_retries = 0;
2469+ memcpy(kcs->write_data, data, size);
2470+ kcs->write_count = size;
2471+ kcs->orig_write_count = size;
2472+ kcs->write_pos = 0;
2473+ kcs->read_pos = 0;
2474+ kcs->state = KCS_START_OP;
2475+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
2476+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
2477+ return 0;
2478+}
2479+
2480+int kcs_get_result(struct kcs_data *kcs, unsigned char *data, int length)
2481+{
2482+ if (length < kcs->read_pos) {
2483+ kcs->read_pos = length;
2484+ kcs->truncated = 1;
2485+ }
2486+
2487+ memcpy(data, kcs->read_data, kcs->read_pos);
2488+
2489+ if ((length >= 3) && (kcs->read_pos < 3)) {
2490+ /* Guarantee that we return at least 3 bytes, with an
2491+ error in the third byte if it is too short. */
2492+ data[2] = IPMI_ERR_UNSPECIFIED;
2493+ kcs->read_pos = 3;
2494+ }
2495+ if (kcs->truncated) {
2496+ /* Report a truncated error. We might overwrite
2497+ another error, but that's too bad, the user needs
2498+ to know it was truncated. */
2499+ data[2] = IPMI_ERR_MSG_TRUNCATED;
2500+ kcs->truncated = 0;
2501+ }
2502+
2503+ return kcs->read_pos;
2504+}
2505+
2506+/* This implements the state machine defined in the IPMI manual, see
2507+ that for details on how this works. Divide that flowchart into
2508+ sections delimited by "Wait for IBF" and this will become clear. */
2509+enum kcs_result kcs_event(struct kcs_data *kcs, long time)
2510+{
2511+ unsigned char status;
2512+ unsigned char state;
2513+
2514+ status = read_status(kcs);
2515+
2516+#ifdef DEBUG_STATE
2517+ printk(" State = %d, %x\n", kcs->state, status);
2518+#endif
2519+ /* All states wait for ibf, so just do it here. */
2520+ if (!check_ibf(kcs, status, time))
2521+ return KCS_CALL_WITH_DELAY;
2522+
2523+ /* Just about everything looks at the KCS state, so grab that, too. */
2524+ state = GET_STATUS_STATE(status);
2525+
2526+ switch (kcs->state) {
2527+ case KCS_IDLE:
2528+ if (GET_STATUS_ATN(status))
2529+ return KCS_ATTN;
2530+ else
2531+ return KCS_SM_IDLE;
2532+
2533+ case KCS_START_OP:
2534+ if (state != KCS_IDLE) {
2535+ start_error_recovery(kcs,
2536+ "State machine not idle at start");
2537+ break;
2538+ }
2539+
2540+ clear_obf(kcs, status);
2541+ write_cmd(kcs, KCS_WRITE_START);
2542+ kcs->state = KCS_WAIT_WRITE_START;
2543+ break;
2544+
2545+ case KCS_WAIT_WRITE_START:
2546+ if (state != KCS_WRITE_STATE) {
2547+ start_error_recovery(
2548+ kcs,
2549+ "Not in write state at write start");
2550+ break;
2551+ }
2552+ read_data(kcs);
2553+ if (kcs->write_count == 1) {
2554+ write_cmd(kcs, KCS_WRITE_END);
2555+ kcs->state = KCS_WAIT_WRITE_END;
2556+ } else {
2557+ write_next_byte(kcs);
2558+ kcs->state = KCS_WAIT_WRITE;
2559+ }
2560+ break;
2561+
2562+ case KCS_WAIT_WRITE:
2563+ if (state != KCS_WRITE_STATE) {
2564+ start_error_recovery(kcs,
2565+ "Not in write state for write");
2566+ break;
2567+ }
2568+ clear_obf(kcs, status);
2569+ if (kcs->write_count == 1) {
2570+ write_cmd(kcs, KCS_WRITE_END);
2571+ kcs->state = KCS_WAIT_WRITE_END;
2572+ } else {
2573+ write_next_byte(kcs);
2574+ }
2575+ break;
2576+
2577+ case KCS_WAIT_WRITE_END:
2578+ if (state != KCS_WRITE_STATE) {
2579+ start_error_recovery(kcs,
2580+ "Not in write state for write end");
2581+ break;
2582+ }
2583+ clear_obf(kcs, status);
2584+ write_next_byte(kcs);
2585+ kcs->state = KCS_WAIT_READ;
2586+ break;
2587+
2588+ case KCS_WAIT_READ:
2589+ if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
2590+ start_error_recovery(
2591+ kcs,
2592+ "Not in read or idle in read state");
2593+ break;
2594+ }
2595+ if (! check_obf(kcs, status, time))
2596+ return KCS_CALL_WITH_DELAY;
2597+
2598+ if (state == KCS_READ_STATE) {
2599+ read_next_byte(kcs);
2600+ } else {
2601+ read_data(kcs);
2602+ kcs->orig_write_count = 0;
2603+ kcs->state = KCS_IDLE;
2604+ return KCS_TRANSACTION_COMPLETE;
2605+ }
2606+ break;
2607+
2608+ case KCS_ERROR0:
2609+ clear_obf(kcs, status);
2610+ write_cmd(kcs, KCS_GET_STATUS_ABORT);
2611+ kcs->state = KCS_ERROR1;
2612+ break;
2613+
2614+ case KCS_ERROR1:
2615+ clear_obf(kcs, status);
2616+ write_data(kcs, 0);
2617+ kcs->state = KCS_ERROR2;
2618+ break;
2619+
2620+ case KCS_ERROR2:
2621+ if (state != KCS_READ_STATE) {
2622+ start_error_recovery(kcs,
2623+ "Not in read state for error2");
2624+ break;
2625+ }
2626+ if (! check_obf(kcs, status, time))
2627+ return KCS_CALL_WITH_DELAY;
2628+
2629+ clear_obf(kcs, status);
2630+ write_data(kcs, KCS_READ_BYTE);
2631+ kcs->state = KCS_ERROR3;
2632+ break;
2633+
2634+ case KCS_ERROR3:
2635+ if (state != KCS_IDLE_STATE) {
2636+ start_error_recovery(kcs,
2637+ "Not in idle state for error3");
2638+ break;
2639+ }
2640+
2641+ if (! check_obf(kcs, status, time))
2642+ return KCS_CALL_WITH_DELAY;
2643+
2644+ clear_obf(kcs, status);
2645+ if (kcs->orig_write_count) {
2646+ restart_kcs_transaction(kcs);
2647+ } else {
2648+ kcs->state = KCS_IDLE;
2649+ return KCS_TRANSACTION_COMPLETE;
2650+ }
2651+ break;
2652+
2653+ case KCS_HOSED:
2654+ return KCS_SM_HOSED;
2655+ }
2656+
2657+ if (kcs->state == KCS_HOSED) {
2658+ init_kcs_data(kcs, kcs->port, kcs->addr);
2659+ return KCS_SM_HOSED;
2660+ }
2661+
2662+ return KCS_CALL_WITHOUT_DELAY;
2663+}
2664+
2665+int kcs_size(void)
2666+{
2667+ return sizeof(struct kcs_data);
2668+}
2669diff -urNp linux-5010/drivers/char/ipmi/ipmi_kcs_sm.h linux-5020/drivers/char/ipmi/ipmi_kcs_sm.h
2670--- linux-5010/drivers/char/ipmi/ipmi_kcs_sm.h 1970-01-01 01:00:00.000000000 +0100
2671+++ linux-5020/drivers/char/ipmi/ipmi_kcs_sm.h
2672@@ -0,0 +1,70 @@
2673+/*
2674+ * ipmi_kcs_sm.h
2675+ *
2676+ * State machine for handling IPMI KCS interfaces.
2677+ *
2678+ * Author: MontaVista Software, Inc.
2679+ * Corey Minyard <minyard@mvista.com>
2680+ * source@mvista.com
2681+ *
2682+ * Copyright 2002 MontaVista Software Inc.
2683+ *
2684+ * This program is free software; you can redistribute it and/or modify it
2685+ * under the terms of the GNU General Public License as published by the
2686+ * Free Software Foundation; either version 2 of the License, or (at your
2687+ * option) any later version.
2688+ *
2689+ *
2690+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
2691+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
2692+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2693+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2694+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2695+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2696+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2697+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
2698+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
2699+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2700+ *
2701+ * You should have received a copy of the GNU General Public License along
2702+ * with this program; if not, write to the Free Software Foundation, Inc.,
2703+ * 675 Mass Ave, Cambridge, MA 02139, USA.
2704+ */
2705+
2706+struct kcs_data;
2707+
2708+void init_kcs_data(struct kcs_data *kcs,
2709+ unsigned int port,
2710+ unsigned char *addr);
2711+
2712+/* Start a new transaction in the state machine.  This will return -2
2713+   if the state machine is not idle, -1 if the size is invalid (too
2714+   large or too small), or 0 if the transaction was successfully
2715+   started. */
2716+int start_kcs_transaction(struct kcs_data *kcs, char *data, unsigned int size);
2717+
2718+/* Copy the result of the last transaction into the provided buffer.
2719+   If the buffer is too small the data is truncated (and flagged as
2720+   such); the return value is the number of bytes copied. */
2721+int kcs_get_result(struct kcs_data *kcs, unsigned char *data, int length);
2722+
2723+enum kcs_result
2724+{
2725+ KCS_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
2726+ KCS_CALL_WITH_DELAY, /* Delay some before calling again. */
2727+ KCS_TRANSACTION_COMPLETE, /* A transaction is finished. */
2728+ KCS_SM_IDLE, /* The SM is in idle state. */
2729+ KCS_SM_HOSED, /* The hardware violated the state machine. */
2730+ KCS_ATTN /* The hardware is asserting attn and the
2731+ state machine is idle. */
2732+};
2733+
2734+/* Call this periodically (for a polled interface) or upon receiving
2735+   an interrupt (for an interrupt-driven interface).  If interrupt
2736+ driven, you should probably poll this periodically when not in idle
2737+ state. This should be called with the time that passed since the
2738+ last call, if it is significant. Time is in microseconds. */
2739+enum kcs_result kcs_event(struct kcs_data *kcs, long time);
2740+
2741+/* Return the size of the KCS structure in bytes. */
2742+int kcs_size(void);
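Taken together, these calls form a simple polled transaction loop; ipmi_kcs_detect_hardware() earlier in this patch follows the same pattern. A minimal sketch of such a caller, assuming <linux/delay.h> for udelay() and an already-initialized struct kcs_data (an illustration, not part of the patch):

	static int kcs_do_polled_cmd(struct kcs_data *kcs,
				     char *req, unsigned int req_len,
				     unsigned char *rsp, int rsp_len)
	{
		enum kcs_result res;

		if (start_kcs_transaction(kcs, req, req_len))
			return -EBUSY;		/* bad size, or SM not idle */

		res = kcs_event(kcs, 0);
		while ((res == KCS_CALL_WITH_DELAY)
		       || (res == KCS_CALL_WITHOUT_DELAY)) {
			if (res == KCS_CALL_WITH_DELAY) {
				udelay(100);	/* give the BMC time to respond */
				res = kcs_event(kcs, 100);
			} else {
				res = kcs_event(kcs, 0);
			}
		}
		if (res != KCS_TRANSACTION_COMPLETE)
			return -EIO;		/* state machine hosed */

		return kcs_get_result(kcs, rsp, rsp_len);	/* bytes copied */
	}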
2743diff -urNp linux-5010/drivers/char/ipmi/ipmi_msghandler.c linux-5020/drivers/char/ipmi/ipmi_msghandler.c
2744--- linux-5010/drivers/char/ipmi/ipmi_msghandler.c 1970-01-01 01:00:00.000000000 +0100
2745+++ linux-5020/drivers/char/ipmi/ipmi_msghandler.c
2746@@ -0,0 +1,1811 @@
2747+/*
2748+ * ipmi_msghandler.c
2749+ *
2750+ * Incoming and outgoing message routing for an IPMI interface.
2751+ *
2752+ * Author: MontaVista Software, Inc.
2753+ * Corey Minyard <minyard@mvista.com>
2754+ * source@mvista.com
2755+ *
2756+ * Copyright 2002 MontaVista Software Inc.
2757+ *
2758+ * This program is free software; you can redistribute it and/or modify it
2759+ * under the terms of the GNU General Public License as published by the
2760+ * Free Software Foundation; either version 2 of the License, or (at your
2761+ * option) any later version.
2762+ *
2763+ *
2764+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
2765+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
2766+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2767+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2768+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
2769+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
2770+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2771+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
2772+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
2773+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2774+ *
2775+ * You should have received a copy of the GNU General Public License along
2776+ * with this program; if not, write to the Free Software Foundation, Inc.,
2777+ * 675 Mass Ave, Cambridge, MA 02139, USA.
2778+ */
2779+
2780+#include <linux/config.h>
2781+#include <linux/module.h>
2782+#include <linux/errno.h>
2783+#include <asm/system.h>
2784+#include <linux/sched.h>
2785+#include <linux/poll.h>
2786+#include <linux/spinlock.h>
2787+#include <linux/rwsem.h>
2788+#include <linux/slab.h>
2789+#include <linux/ipmi.h>
2790+#include <linux/ipmi_smi.h>
2791+#include <linux/notifier.h>
2792+#include <linux/init.h>
2793+
2794+struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
2795+static int ipmi_init_msghandler(void);
2796+
2797+static int initialized = 0;
2798+
2799+#define MAX_EVENTS_IN_QUEUE 25
2800+
2801+struct ipmi_user
2802+{
2803+ struct list_head link;
2804+
2805+ /* The upper layer that handles receive messages. */
2806+ struct ipmi_user_hndl *handler;
2807+ void *handler_data;
2808+
2809+ /* The interface this user is bound to. */
2810+ ipmi_smi_t intf;
2811+
2812+ /* Does this interface receive IPMI events? */
2813+ int gets_events;
2814+};
2815+
2816+struct cmd_rcvr
2817+{
2818+ struct list_head link;
2819+
2820+ ipmi_user_t user;
2821+ unsigned char netfn;
2822+ unsigned char cmd;
2823+};
2824+
2825+#define IPMI_IPMB_NUM_SEQ 64
2826+struct ipmi_smi
2827+{
2828+ /* The list of upper layers that are using me. We read-lock
2829+ this when delivering messages to the upper layer to keep
2830+ the user from going away while we are processing the
2831+ message. This means that you cannot add or delete a user
2832+ from the receive callback. */
2833+ rwlock_t users_lock;
2834+ struct list_head users;
2835+
2836+ /* The IPMI version of the BMC on the other end. */
2837+ unsigned char version_major;
2838+ unsigned char version_minor;
2839+
2840+ /* This is the lower-layer's sender routine. */
2841+ struct ipmi_smi_handlers *handlers;
2842+ void *send_info;
2843+
2844+ /* A table of sequence numbers for this interface. We use the
2845+ sequence numbers for IPMB messages that go out of the
2846+ interface to match them up with their responses. A routine
2847+ is called periodically to time the items in this list. */
2848+ spinlock_t seq_lock;
2849+ struct {
2850+ unsigned long timeout;
2851+ int inuse;
2852+ struct ipmi_recv_msg *recv_msg;
2853+ } seq_table[IPMI_IPMB_NUM_SEQ];
2854+ int curr_seq;
2855+
2856+ /* Messages that were delayed for some reason (out of memory,
2857+ for instance), will go in here to be processed later in a
2858+ periodic timer interrupt. */
2859+ spinlock_t waiting_msgs_lock;
2860+ struct list_head waiting_msgs;
2861+
2862+ /* The list of command receivers that are registered for commands
2863+ on this interface. */
2864+ rwlock_t cmd_rcvr_lock;
2865+ struct list_head cmd_rcvrs;
2866+
2867+	/* Events that were queued because no one was there to receive
2868+ them. */
2869+ spinlock_t events_lock; /* For dealing with event stuff. */
2870+ struct list_head waiting_events;
2871+ unsigned int waiting_events_count; /* How many events in queue? */
2872+
2873+ /* This will be non-null if someone registers to receive all
2874+ IPMI commands (this is for interface emulation). There
2875+	   must not be any entries in the cmd_rcvrs list above when
2876+	   this is registered. */
2877+ ipmi_user_t all_cmd_rcvr;
2878+
2879+ /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
2880+ but may be changed by the user. */
2881+ unsigned char my_address;
2882+
2883+ /* My LUN. This should generally stay the SMS LUN, but just in
2884+ case... */
2885+ unsigned char my_lun;
2886+};
2887+
2888+int
2889+ipmi_register_all_cmd_rcvr(ipmi_user_t user)
2890+{
2891+	unsigned long flags;
2892+ int rv = -EBUSY;
2893+
2894+ write_lock_irqsave(&(user->intf->users_lock), flags);
2895+ write_lock(&(user->intf->cmd_rcvr_lock));
2896+ if ((user->intf->all_cmd_rcvr == NULL)
2897+ && (list_empty(&(user->intf->cmd_rcvrs))))
2898+ {
2899+ user->intf->all_cmd_rcvr = user;
2900+ rv = 0;
2901+ }
2902+ write_unlock(&(user->intf->cmd_rcvr_lock));
2903+ write_unlock_irqrestore(&(user->intf->users_lock), flags);
2904+ return rv;
2905+}
2906+
2907+int
2908+ipmi_unregister_all_cmd_rcvr(ipmi_user_t user)
2909+{
2910+	unsigned long flags;
2911+ int rv = -EINVAL;
2912+
2913+ write_lock_irqsave(&(user->intf->users_lock), flags);
2914+ write_lock(&(user->intf->cmd_rcvr_lock));
2915+ if (user->intf->all_cmd_rcvr == user)
2916+ {
2917+ user->intf->all_cmd_rcvr = NULL;
2918+ rv = 0;
2919+ }
2920+ write_unlock(&(user->intf->cmd_rcvr_lock));
2921+ write_unlock_irqrestore(&(user->intf->users_lock), flags);
2922+ return rv;
2923+}
2924+
2925+
2926+#define MAX_IPMI_INTERFACES 4
2927+static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
2928+
2929+/* Used to keep interfaces from going away while operations are in
2930+   progress on them.  Grab the read lock if you are not modifying the
2931+   interfaces, the write lock if you are. */
2932+static DECLARE_RWSEM(interfaces_sem);
2933+
2934+/* Directly protects the ipmi_interfaces data structure. This is
2935+ claimed in the timer interrupt. */
2936+static spinlock_t interfaces_lock = SPIN_LOCK_UNLOCKED;
2937+
2938+/* List of watchers that want to know when smi's are added and
2939+ deleted. */
2940+static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
2941+static DECLARE_RWSEM(smi_watchers_sem);
2942+
2943+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
2944+{
2945+ int i;
2946+
2947+ down_read(&interfaces_sem);
2948+ down_write(&smi_watchers_sem);
2949+ list_add(&(watcher->link), &smi_watchers);
2950+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2951+ if (ipmi_interfaces[i] != NULL) {
2952+ watcher->new_smi(i);
2953+ }
2954+ }
2955+ up_write(&smi_watchers_sem);
2956+ up_read(&interfaces_sem);
2957+ return 0;
2958+}
2959+
2960+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
2961+{
2962+ down_write(&smi_watchers_sem);
2963+ list_del(&(watcher->link));
2964+ up_write(&smi_watchers_sem);
2965+ return 0;
2966+}
2967+
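For illustration, a client of this notification interface might look like the sketch below. The callback names are hypothetical; struct ipmi_smi_watcher itself is declared in the IPMI include files added by this patch:

	/* Hypothetical watcher that just logs interfaces coming and going. */
	static void example_new_smi(int if_num)
	{
		printk(KERN_INFO "example: IPMI interface %d registered\n", if_num);
	}

	static void example_smi_gone(int if_num)
	{
		printk(KERN_INFO "example: IPMI interface %d removed\n", if_num);
	}

	static struct ipmi_smi_watcher example_watcher;

	static int __init example_init(void)
	{
		example_watcher.new_smi  = example_new_smi;
		example_watcher.smi_gone = example_smi_gone;
		return ipmi_smi_watcher_register(&example_watcher);
	}

	static void __exit example_exit(void)
	{
		ipmi_smi_watcher_unregister(&example_watcher);
	}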
2968+int
2969+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
2970+{
2971+ if (addr1->addr_type != addr2->addr_type)
2972+ return 0;
2973+
2974+ if (addr1->channel != addr2->channel)
2975+ return 0;
2976+
2977+ if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2978+ struct ipmi_system_interface_addr *smi_addr1
2979+ = (struct ipmi_system_interface_addr *) addr1;
2980+ struct ipmi_system_interface_addr *smi_addr2
2981+ = (struct ipmi_system_interface_addr *) addr2;
2982+ return (smi_addr1->lun == smi_addr2->lun);
2983+ }
2984+
2985+ if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
2986+ || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
2987+ {
2988+ struct ipmi_ipmb_addr *ipmb_addr1
2989+ = (struct ipmi_ipmb_addr *) addr1;
2990+ struct ipmi_ipmb_addr *ipmb_addr2
2991+ = (struct ipmi_ipmb_addr *) addr2;
2992+
2993+ return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
2994+ && (ipmb_addr1->lun == ipmb_addr2->lun));
2995+ }
2996+
2997+ return 1;
2998+}
2999+
3000+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
3001+{
3002+ if (len < sizeof(struct ipmi_system_interface_addr)) {
3003+ return -EINVAL;
3004+ }
3005+
3006+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
3007+ if (addr->channel != IPMI_BMC_CHANNEL)
3008+ return -EINVAL;
3009+ return 0;
3010+ }
3011+
3012+ if ((addr->channel == IPMI_BMC_CHANNEL)
3013+ || (addr->channel >= IPMI_NUM_CHANNELS)
3014+ || (addr->channel < 0))
3015+ return -EINVAL;
3016+
3017+ if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
3018+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
3019+ {
3020+ if (len < sizeof(struct ipmi_ipmb_addr)) {
3021+ return -EINVAL;
3022+ }
3023+ return 0;
3024+ }
3025+
3026+ return -EINVAL;
3027+}
3028+
3029+unsigned int ipmi_addr_length(int addr_type)
3030+{
3031+ if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3032+ return sizeof(struct ipmi_system_interface_addr);
3033+
3034+ if ((addr_type == IPMI_IPMB_ADDR_TYPE)
3035+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
3036+ {
3037+ return sizeof(struct ipmi_ipmb_addr);
3038+ }
3039+
3040+ return 0;
3041+}
3042+
3043+static void deliver_response(struct ipmi_recv_msg *msg)
3044+{
3045+ msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
3046+}
3047+
3048+/* Find the next sequence number not being used and add the given
3049+ message with the given timeout to the sequence table. */
3050+static int intf_next_seq(ipmi_smi_t intf,
3051+ struct ipmi_recv_msg *recv_msg,
3052+ unsigned long timeout,
3053+ unsigned char *seq)
3054+{
3055+ int rv = 0;
3056+ unsigned long flags;
3057+ unsigned int i;
3058+
3059+ spin_lock_irqsave(&(intf->seq_lock), flags);
3060+	for (i=intf->curr_seq;
3061+	     i!=(intf->curr_seq+IPMI_IPMB_NUM_SEQ-1)%IPMI_IPMB_NUM_SEQ;
3062+	     i=(i+1)%IPMI_IPMB_NUM_SEQ)
3063+ {
3064+ if (! intf->seq_table[i].inuse)
3065+ break;
3066+ }
3067+
3068+ if (! intf->seq_table[i].inuse) {
3069+ intf->seq_table[i].recv_msg = recv_msg;
3070+ intf->seq_table[i].timeout = timeout;
3071+ intf->seq_table[i].inuse = 1;
3072+ *seq = i;
3073+ intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
3074+ } else {
3075+ rv = -EAGAIN;
3076+ }
3077+
3078+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
3079+
3080+ return rv;
3081+}
3082+
3083+/* Return the receive message for the given sequence number and
3084+ release the sequence number so it can be reused. Some other data
3085+ is passed in to be sure the message matches up correctly (to help
3086+ guard against message coming in after their timeout and the
3087+   guard against messages coming in after their timeout and the
3088+static int intf_find_seq(ipmi_smi_t intf,
3089+ unsigned char seq,
3090+ short channel,
3091+ unsigned char cmd,
3092+ unsigned char netfn,
3093+ struct ipmi_addr *addr,
3094+ struct ipmi_recv_msg **recv_msg)
3095+{
3096+ int rv = -ENODEV;
3097+ unsigned long flags;
3098+
3099+ if (seq >= IPMI_IPMB_NUM_SEQ)
3100+ return -EINVAL;
3101+
3102+ spin_lock_irqsave(&(intf->seq_lock), flags);
3103+ if (intf->seq_table[seq].inuse) {
3104+ struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
3105+
3106+ if ((msg->addr.channel == channel)
3107+ && (msg->msg.cmd == cmd)
3108+ && (msg->msg.netfn == netfn)
3109+ && (ipmi_addr_equal(addr, &(msg->addr))))
3110+ {
3111+ *recv_msg = msg;
3112+ intf->seq_table[seq].inuse = 0;
3113+ rv = 0;
3114+ }
3115+ }
3116+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
3117+
3118+ return rv;
3119+}
3120+
3121+
3122+int ipmi_create_user(unsigned int if_num,
3123+ struct ipmi_user_hndl *handler,
3124+ void *handler_data,
3125+ ipmi_user_t *user)
3126+{
3127+ unsigned long flags;
3128+ ipmi_user_t new_user;
3129+ int rv = 0;
3130+
3131+ /* There is no module usecount here, because it's not
3132+ required. Since this can only be used by and called from
3133+ other modules, they will implicitly use this module, and
3134+ thus this can't be removed unless the other modules are
3135+ removed. */
3136+
3137+ if (handler == NULL)
3138+ return -EINVAL;
3139+
3140+ /* Make sure the driver is actually initialized, this handles
3141+ problems with initialization order. */
3142+ if (!initialized) {
3143+ rv = ipmi_init_msghandler();
3144+ if (rv)
3145+ return rv;
3146+
3147+ /* The init code doesn't return an error if it was turned
3148+ off, but it won't initialize. Check that. */
3149+ if (!initialized)
3150+ return -ENODEV;
3151+ }
3152+
3153+ new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
3154+ if (! new_user)
3155+ return -ENOMEM;
3156+
3157+ down_read(&interfaces_sem);
3158+	if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
3159+ {
3160+ rv = -EINVAL;
3161+ goto out_unlock;
3162+ }
3163+
3164+ new_user->handler = handler;
3165+ new_user->handler_data = handler_data;
3166+ new_user->intf = ipmi_interfaces[if_num];
3167+ new_user->gets_events = 0;
3168+
3169+ rv = new_user->intf->handlers->new_user(new_user->intf->send_info);
3170+ if (rv)
3171+ goto out_unlock;
3172+
3173+ write_lock_irqsave(&(new_user->intf->users_lock), flags);
3174+ list_add_tail(&(new_user->link), &(new_user->intf->users));
3175+ write_unlock_irqrestore(&(new_user->intf->users_lock), flags);
3176+
3177+ out_unlock:
3178+ if (rv) {
3179+ kfree(new_user);
3180+ } else {
3181+ *user = new_user;
3182+ }
3183+
3184+ up_read(&interfaces_sem);
3185+ return rv;
3186+}
3187+
3188+static int ipmi_destroy_user_nolock(ipmi_user_t user)
3189+{
3190+ int rv = -ENODEV;
3191+ ipmi_user_t t_user;
3192+ struct list_head *entry, *entry2;
3193+ int i;
3194+ unsigned long flags;
3195+
3196+ /* Find the user and delete them from the list. */
3197+ list_for_each(entry, &(user->intf->users)) {
3198+ t_user = list_entry(entry, struct ipmi_user, link);
3199+ if (t_user == user) {
3200+ list_del(entry);
3201+ rv = 0;
3202+ break;
3203+ }
3204+ }
3205+
3206+ if (rv) {
3207+ goto out_unlock;
3208+ }
3209+
3210+ /* Remove the user from the interfaces sequence table. */
3211+ spin_lock_irqsave(&(user->intf->seq_lock), flags);
3212+ for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
3213+ if (user->intf->seq_table[i].inuse
3214+ && (user->intf->seq_table[i].recv_msg->user == user))
3215+ {
3216+ user->intf->seq_table[i].inuse = 0;
3217+ }
3218+ }
3219+ spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
3220+
3221+ /* Remove the user from the command receiver's table. */
3222+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
3223+ list_for_each_safe(entry, entry2, &(user->intf->cmd_rcvrs)) {
3224+ struct cmd_rcvr *rcvr;
3225+ rcvr = list_entry(entry, struct cmd_rcvr, link);
3226+ if (rcvr->user == user) {
3227+ list_del(entry);
3228+ kfree(rcvr);
3229+ }
3230+ }
3231+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
3232+
3233+ kfree(user);
3234+
3235+ out_unlock:
3236+
3237+ return rv;
3238+}
3239+
3240+int ipmi_destroy_user(ipmi_user_t user)
3241+{
3242+ int rv;
3243+ ipmi_smi_t intf = user->intf;
3244+ unsigned long flags;
3245+
3246+ down_read(&interfaces_sem);
3247+ write_lock_irqsave(&(intf->users_lock), flags);
3248+ rv = ipmi_destroy_user_nolock(user);
3249+ if (!rv)
3250+ intf->handlers->user_left(intf->send_info);
3251+
3252+ write_unlock_irqrestore(&(intf->users_lock), flags);
3253+ up_read(&interfaces_sem);
3254+ return rv;
3255+}
3256+
3257+void ipmi_get_version(ipmi_user_t user,
3258+ unsigned char *major,
3259+ unsigned char *minor)
3260+{
3261+ *major = user->intf->version_major;
3262+ *minor = user->intf->version_minor;
3263+}
3264+
3265+void ipmi_set_my_address(ipmi_user_t user,
3266+ unsigned char address)
3267+{
3268+ user->intf->my_address = address;
3269+}
3270+
3271+unsigned char ipmi_get_my_address(ipmi_user_t user)
3272+{
3273+ return user->intf->my_address;
3274+}
3275+
3276+void ipmi_set_my_LUN(ipmi_user_t user,
3277+ unsigned char LUN)
3278+{
3279+ user->intf->my_lun = LUN & 0x3;
3280+}
3281+
3282+unsigned char ipmi_get_my_LUN(ipmi_user_t user)
3283+{
3284+ return user->intf->my_lun;
3285+}
3286+
3287+int ipmi_set_gets_events(ipmi_user_t user, int val)
3288+{
3289+ unsigned long flags;
3290+ struct list_head *e, *e2;
3291+ struct ipmi_recv_msg *msg;
3292+
3293+ read_lock(&(user->intf->users_lock));
3294+ spin_lock_irqsave(&(user->intf->events_lock), flags);
3295+ user->gets_events = val;
3296+
3297+ if (val) {
3298+ /* Deliver any queued events. */
3299+ list_for_each_safe(e, e2, &(user->intf->waiting_events)) {
3300+ msg = list_entry(e, struct ipmi_recv_msg, link);
3301+ list_del(e);
3302+ msg->user = user;
3303+ deliver_response(msg);
3304+ }
3305+ }
3306+
3307+ spin_unlock_irqrestore(&(user->intf->events_lock), flags);
3308+ read_unlock(&(user->intf->users_lock));
3309+
3310+ return 0;
3311+}
3312+
3313+int ipmi_register_for_cmd(ipmi_user_t user,
3314+ unsigned char netfn,
3315+ unsigned char cmd)
3316+{
3317+ struct list_head *entry;
3318+ unsigned long flags;
3319+ struct cmd_rcvr *rcvr;
3320+ int rv = 0;
3321+
3322+
3323+ rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
3324+ if (! rcvr)
3325+ return -ENOMEM;
3326+
3327+ read_lock(&(user->intf->users_lock));
3328+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
3329+ if (user->intf->all_cmd_rcvr != NULL) {
3330+ rv = -EBUSY;
3331+ goto out_unlock;
3332+ }
3333+
3334+ /* Make sure the command/netfn is not already registered. */
3335+ list_for_each(entry, &(user->intf->cmd_rcvrs)) {
3336+ struct cmd_rcvr *cmp;
3337+ cmp = list_entry(entry, struct cmd_rcvr, link);
3338+ if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
3339+ rv = -EBUSY;
3340+ break;
3341+ }
3342+ }
3343+
3344+ if (! rv) {
3345+ rcvr->cmd = cmd;
3346+ rcvr->netfn = netfn;
3347+ rcvr->user = user;
3348+ list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
3349+ }
3350+ out_unlock:
3351+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
3352+ read_unlock(&(user->intf->users_lock));
3353+
3354+ if (rv)
3355+ kfree(rcvr);
3356+
3357+ return rv;
3358+}
3359+
3360+int ipmi_unregister_for_cmd(ipmi_user_t user,
3361+ unsigned char netfn,
3362+ unsigned char cmd)
3363+{
3364+ struct list_head *entry;
3365+ unsigned long flags;
3366+ struct cmd_rcvr *rcvr;
3367+ int rv = -ENOENT;
3368+
3369+ read_lock(&(user->intf->users_lock));
3370+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
3371+ /* Make sure the command/netfn is not already registered. */
3372+ list_for_each(entry, &(user->intf->cmd_rcvrs)) {
3373+ rcvr = list_entry(entry, struct cmd_rcvr, link);
3374+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
3375+ rv = 0;
3376+ list_del(entry);
3377+ kfree(rcvr);
3378+ break;
3379+ }
3380+ }
3381+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
3382+ read_unlock(&(user->intf->users_lock));
3383+
3384+ return rv;
3385+}
3386+
3387+static unsigned char
3388+ipmb_checksum(unsigned char *data, int size)
3389+{
3390+ unsigned char csum = 0;
3391+
3392+ for (; size > 0; size--, data++)
3393+ csum += *data;
3394+
3395+ return -csum;
3396+}
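The value returned is the two's complement of the byte sum, so the protected bytes plus their trailing checksum always sum to zero modulo 256. That makes verification on the receive side a one-liner; a sketch with a hypothetical helper name:

	/* A block is intact iff its bytes, including the trailing checksum,
	   sum to zero (mod 256). */
	static inline int ipmb_checksum_ok(unsigned char *data, int size)
	{
		return ipmb_checksum(data, size) == 0;
	}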
3397+
3398+/* Separate from ipmi_request so that the user does not have to be
3399+ supplied in certain circumstances (mainly at panic time). If
3400+ messages are supplied, they will be freed, even if an error
3401+ occurs. */
3402+static inline int i_ipmi_request(ipmi_user_t user,
3403+ ipmi_smi_t intf,
3404+ struct ipmi_addr *addr,
3405+ long msgid,
3406+ struct ipmi_msg *msg,
3407+ void *supplied_smi,
3408+ struct ipmi_recv_msg *supplied_recv,
3409+ int priority,
3410+ unsigned char source_address,
3411+ unsigned char source_lun)
3412+{
3413+ int rv = 0;
3414+ struct ipmi_smi_msg *smi_msg;
3415+ struct ipmi_recv_msg *recv_msg;
3416+
3417+
3418+ if (supplied_recv) {
3419+ recv_msg = supplied_recv;
3420+ } else {
3421+ recv_msg = ipmi_alloc_recv_msg();
3422+ if (recv_msg == NULL) {
3423+ return -ENOMEM;
3424+ }
3425+ }
3426+
3427+ if (supplied_smi) {
3428+ smi_msg = (struct ipmi_smi_msg *) supplied_smi;
3429+ } else {
3430+ smi_msg = ipmi_alloc_smi_msg();
3431+ if (smi_msg == NULL) {
3432+ ipmi_free_recv_msg(recv_msg);
3433+ return -ENOMEM;
3434+ }
3435+ }
3436+
3437+	if (addr->channel >= IPMI_NUM_CHANNELS) {
3438+ rv = -EINVAL;
3439+ goto out_err;
3440+ }
3441+
3442+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
3443+ struct ipmi_system_interface_addr *smi_addr;
3444+
3445+ smi_addr = (struct ipmi_system_interface_addr *) addr;
3446+		if (smi_addr->lun > 3)
3447+			{ rv = -EINVAL; goto out_err; } /* don't leak the messages */
3448+
3449+ if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
3450+ && ((msg->cmd == IPMI_SEND_MSG_CMD)
3451+ || (msg->cmd == IPMI_GET_MSG_CMD)
3452+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
3453+ {
3454+ /* We don't let the user do these, since we manage
3455+ the sequence numbers. */
3456+ rv = -EINVAL;
3457+ goto out_err;
3458+ }
3459+
3460+ if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
3461+ rv = -EMSGSIZE;
3462+ goto out_err;
3463+ }
3464+
3465+ recv_msg->user = user;
3466+ recv_msg->addr = *addr;
3467+ recv_msg->msgid = msgid;
3468+ recv_msg->msg = *msg;
3469+
3470+ smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
3471+ smi_msg->data[1] = msg->cmd;
3472+ smi_msg->msgid = msgid;
3473+ smi_msg->user_data = recv_msg;
3474+ if (msg->data_len > 0)
3475+ memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
3476+ smi_msg->data_size = msg->data_len + 2;
3477+ } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
3478+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
3479+ {
3480+ struct ipmi_ipmb_addr *ipmb_addr;
3481+ unsigned char ipmb_seq;
3482+ int i;
3483+
3484+ if (addr == NULL) {
3485+ rv = -EINVAL;
3486+ goto out_err;
3487+ }
3488+
3489+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
3490+			/* A broadcast adds a zero at the beginning of the
3491+			   message, but is otherwise the same as an IPMB
3492+			   address. */
3493+ smi_msg->data[3] = 0;
3494+ addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3495+ i = 1;
3496+ } else {
3497+ i = 0;
3498+ }
3499+
3500+ /* 9 for the header and 1 for the checksum, plus
3501+ possibly one for the broadcast. */
3502+ if ((msg->data_len + 10 + i) > IPMI_MAX_MSG_LENGTH) {
3503+ rv = -EMSGSIZE;
3504+ goto out_err;
3505+ }
3506+
3507+ ipmb_addr = (struct ipmi_ipmb_addr *) addr;
3508+		if (ipmb_addr->lun > 3)
3509+			{ rv = -EINVAL; goto out_err; } /* don't leak the messages */
3510+
3511+ memcpy(&(recv_msg->addr), ipmb_addr, sizeof(*ipmb_addr));
3512+
3513+ recv_msg->user = user;
3514+ recv_msg->msgid = msgid;
3515+ recv_msg->msg = *msg;
3516+
3517+ if (recv_msg->msg.netfn & 0x1) {
3518+ /* It's a response, so use the user's sequence. */
3519+ ipmb_seq = msgid;
3520+ } else {
3521+ /* It's a command, so get a sequence for it. */
3522+ /* Create a sequence number with a 5 second timeout. */
3523+ /* FIXME - magic number for the timeout. */
3524+ rv = intf_next_seq(intf,
3525+ recv_msg,
3526+ 5000,
3527+ &ipmb_seq);
3528+ if (rv) {
3529+ /* We have used up all the sequence numbers,
3530+ probably, so abort. */
3531+ ipmi_free_recv_msg(recv_msg);
3532+ smi_msg->done(smi_msg);
3533+ goto out_err;
3534+ }
3535+ }
3536+
3537+ /* Format the IPMB header data. */
3538+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3539+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
3540+ smi_msg->data[2] = addr->channel;
3541+ smi_msg->data[i+3] = ipmb_addr->slave_addr;
3542+ smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
3543+ smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
3544+ smi_msg->data[i+6] = source_address;
3545+ smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
3546+ smi_msg->data[i+8] = msg->cmd;
3547+
3548+ /* Now tack on the data to the message. */
3549+ if (msg->data_len > 0)
3550+ memcpy(&(smi_msg->data[i+9]), msg->data, msg->data_len);
3551+ smi_msg->data_size = msg->data_len + 9;
3552+
3553+ /* Now calculate the checksum and tack it on. */
3554+ smi_msg->data[i+smi_msg->data_size]
3555+ = ipmb_checksum(&(smi_msg->data[i+6]), smi_msg->data_size-6);
3556+
3557+ /* Add on the checksum size and the offset from the
3558+ broadcast. */
3559+ smi_msg->data_size += 1 + i;
3560+
3561+ smi_msg->msgid = msgid;
3562+ } else {
3563+ /* Unknown address type. */
3564+ rv = -EINVAL;
3565+ goto out_err;
3566+ }
3567+
3568+#if DEBUG_MSGING
3569+ {
3570+ int m;
3571+ for (m=0; m<smi_msg->data_size; m++)
3572+ printk(" %2.2x", smi_msg->data[m]);
3573+ printk("\n");
3574+ }
3575+#endif
3576+ intf->handlers->sender(intf->send_info, smi_msg, priority);
3577+
3578+ return 0;
3579+
3580+ out_err:
3581+ smi_msg->done(smi_msg);
3582+ recv_msg->done(recv_msg);
3583+ return rv;
3584+}
3585+
3586+int ipmi_request(ipmi_user_t user,
3587+ struct ipmi_addr *addr,
3588+ long msgid,
3589+ struct ipmi_msg *msg,
3590+ int priority)
3591+{
3592+ return i_ipmi_request(user,
3593+ user->intf,
3594+ addr,
3595+ msgid,
3596+ msg,
3597+ NULL, NULL,
3598+ priority,
3599+ user->intf->my_address,
3600+ user->intf->my_lun);
3601+}
3602+
3603+int ipmi_request_supply_msgs(ipmi_user_t user,
3604+ struct ipmi_addr *addr,
3605+ long msgid,
3606+ struct ipmi_msg *msg,
3607+ void *supplied_smi,
3608+ struct ipmi_recv_msg *supplied_recv,
3609+ int priority)
3610+{
3611+ return i_ipmi_request(user,
3612+ user->intf,
3613+ addr,
3614+ msgid,
3615+ msg,
3616+ supplied_smi,
3617+ supplied_recv,
3618+ priority,
3619+ user->intf->my_address,
3620+ user->intf->my_lun);
3621+}
3622+
3623+int ipmi_request_with_source(ipmi_user_t user,
3624+ struct ipmi_addr *addr,
3625+ long msgid,
3626+ struct ipmi_msg *msg,
3627+ int priority,
3628+ unsigned char source_address,
3629+ unsigned char source_lun)
3630+{
3631+ return i_ipmi_request(user,
3632+ user->intf,
3633+ addr,
3634+ msgid,
3635+ msg,
3636+ NULL, NULL,
3637+ priority,
3638+ source_address,
3639+ source_lun);
3640+}
3641+
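As a rough illustration of how an in-kernel client uses the request interface (the names here are hypothetical, error handling is trimmed, and the structures come from the IPMI include files added by this patch), a user registers a receive handler and then sends commands to the BMC through the system interface address:

	static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
	{
		/* Look at msg->msg.netfn, msg->msg.cmd and msg->msg.data here,
		   then hand the message back. */
		ipmi_free_recv_msg(msg);
	}

	static struct ipmi_user_hndl example_hndl =
	{
		ipmi_recv_hndl: example_recv,	/* old-style gcc initializer */
	};

	static int example_get_device_id(ipmi_user_t user)
	{
		struct ipmi_system_interface_addr addr;
		struct ipmi_msg                   msg;

		addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		addr.channel   = IPMI_BMC_CHANNEL;
		addr.lun       = 0;

		msg.netfn    = IPMI_NETFN_APP_REQUEST;
		msg.cmd      = IPMI_GET_DEVICE_ID_CMD;
		msg.data     = NULL;
		msg.data_len = 0;

		return ipmi_request(user, (struct ipmi_addr *) &addr, 0, &msg, 0);
	}

	/* At init time:  ipmi_create_user(0, &example_hndl, NULL, &user);
	   the response arrives later via example_recv(). */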
3642+int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
3643+ void *send_info,
3644+ unsigned char version_major,
3645+ unsigned char version_minor,
3646+ ipmi_smi_t *intf)
3647+{
3648+ int i, j;
3649+ int rv;
3650+ ipmi_smi_t new_intf;
3651+ struct list_head *entry;
3652+	unsigned long flags;
3653+
3654+
3655+ /* Make sure the driver is actually initialized, this handles
3656+ problems with initialization order. */
3657+ if (!initialized) {
3658+ rv = ipmi_init_msghandler();
3659+ if (rv)
3660+ return rv;
3661+ /* The init code doesn't return an error if it was turned
3662+ off, but it won't initialize. Check that. */
3663+ if (!initialized)
3664+ return -ENODEV;
3665+ }
3666+
3667+ new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
3668+ if (!new_intf)
3669+ return -ENOMEM;
3670+
3671+ rv = -ENOMEM;
3672+
3673+ down_write(&interfaces_sem);
3674+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
3675+ if (ipmi_interfaces[i] == NULL) {
3676+ new_intf->version_major = version_major;
3677+ new_intf->version_minor = version_minor;
3678+ new_intf->my_address = IPMI_BMC_SLAVE_ADDR;
3679+ new_intf->my_lun = 2; /* the SMS LUN. */
3680+ rwlock_init(&(new_intf->users_lock));
3681+ INIT_LIST_HEAD(&(new_intf->users));
3682+ new_intf->handlers = handlers;
3683+ new_intf->send_info = send_info;
3684+ spin_lock_init(&(new_intf->seq_lock));
3685+ for (j=0; j<IPMI_IPMB_NUM_SEQ; j++)
3686+ new_intf->seq_table[j].inuse = 0;
3687+ new_intf->curr_seq = 0;
3688+ spin_lock_init(&(new_intf->waiting_msgs_lock));
3689+ INIT_LIST_HEAD(&(new_intf->waiting_msgs));
3690+ spin_lock_init(&(new_intf->events_lock));
3691+ INIT_LIST_HEAD(&(new_intf->waiting_events));
3692+ new_intf->waiting_events_count = 0;
3693+ rwlock_init(&(new_intf->cmd_rcvr_lock));
3694+ INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
3695+ new_intf->all_cmd_rcvr = NULL;
3696+
3697+ spin_lock_irqsave(&interfaces_lock, flags);
3698+ ipmi_interfaces[i] = new_intf;
3699+ spin_unlock_irqrestore(&interfaces_lock, flags);
3700+
3701+ rv = 0;
3702+ *intf = new_intf;
3703+ break;
3704+ }
3705+ }
3706+
3707+ /* We convert to a read semaphore here. It's possible the
3708+	   interface was removed between the calls, so we have to recheck
3709+ afterwards. */
3710+ up_write(&interfaces_sem);
3711+ down_read(&interfaces_sem);
3712+
3713+	if (rv || (ipmi_interfaces[i] != new_intf))
3714+		/* No free slot, or it went away.  Just return. */
3715+		goto out;
3716+
3717+ if (rv == 0) {
3718+ /* Call all the watcher interfaces to tell them that a
3719+ new interface is available. */
3720+ down_read(&smi_watchers_sem);
3721+ list_for_each(entry, &smi_watchers) {
3722+ struct ipmi_smi_watcher *w;
3723+ w = list_entry(entry, struct ipmi_smi_watcher, link);
3724+ w->new_smi(i);
3725+ }
3726+ up_read(&smi_watchers_sem);
3727+ }
3728+
3729+ out:
3730+ up_read(&interfaces_sem);
3731+
3732+ if (rv)
3733+ kfree(new_intf);
3734+
3735+ return rv;
3736+}
3737+
3738+static void free_recv_msg_list(struct list_head *q)
3739+{
3740+ struct list_head *entry, *entry2;
3741+ struct ipmi_recv_msg *msg;
3742+
3743+ list_for_each_safe(entry, entry2, q) {
3744+ msg = list_entry(entry, struct ipmi_recv_msg, link);
3745+ list_del(entry);
3746+ ipmi_free_recv_msg(msg);
3747+ }
3748+}
3749+
3750+static void free_cmd_rcvr_list(struct list_head *q)
3751+{
3752+ struct list_head *entry, *entry2;
3753+ struct cmd_rcvr *rcvr;
3754+
3755+ list_for_each_safe(entry, entry2, q) {
3756+ rcvr = list_entry(entry, struct cmd_rcvr, link);
3757+ list_del(entry);
3758+ kfree(rcvr);
3759+ }
3760+}
3761+
3762+static void clean_up_interface_data(ipmi_smi_t intf)
3763+{
3764+ int i;
3765+
3766+ free_recv_msg_list(&(intf->waiting_msgs));
3767+ free_recv_msg_list(&(intf->waiting_events));
3768+ free_cmd_rcvr_list(&(intf->cmd_rcvrs));
3769+
3770+ for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
3771+ if ((intf->seq_table[i].inuse)
3772+ && (intf->seq_table[i].recv_msg))
3773+ {
3774+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
3775+ }
3776+ }
3777+}
3778+
3779+int ipmi_unregister_smi(ipmi_smi_t intf)
3780+{
3781+ int rv = -ENODEV;
3782+ int i;
3783+ struct list_head *entry;
3784+ unsigned int flags;
3785+
3786+ down_write(&interfaces_sem);
3787+ if (list_empty(&(intf->users)))
3788+ {
3789+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
3790+ if (ipmi_interfaces[i] == intf) {
3791+ spin_lock_irqsave(&interfaces_lock, flags);
3792+ ipmi_interfaces[i] = NULL;
3793+ clean_up_interface_data(intf);
3794+ spin_unlock_irqrestore(&interfaces_lock,flags);
3795+ kfree(intf);
3796+ rv = 0;
3797+ goto out_call_watcher;
3798+ }
3799+ }
3800+ } else {
3801+ rv = -EBUSY;
3802+ }
3803+ up_write(&interfaces_sem);
3804+
3805+ return rv;
3806+
3807+ out_call_watcher:
3808+ /* Convert to a read semaphore so callbacks don't bite us. */
3809+ up_write(&interfaces_sem);
3810+ down_read(&interfaces_sem);
3811+
3812+ /* Call all the watcher interfaces to tell them that
3813+ an interface is gone. */
3814+ down_read(&smi_watchers_sem);
3815+ list_for_each(entry, &smi_watchers) {
3816+ struct ipmi_smi_watcher *w;
3817+ w = list_entry(entry,
3818+ struct ipmi_smi_watcher,
3819+ link);
3820+ w->smi_gone(i);
3821+ }
3822+ up_read(&smi_watchers_sem);
3823+ up_read(&interfaces_sem);
3824+ return 0;
3825+}
3826+
3827+static int handle_get_msg_rsp(ipmi_smi_t intf,
3828+ struct ipmi_smi_msg *msg)
3829+{
3830+ struct ipmi_ipmb_addr ipmb_addr;
3831+ struct ipmi_recv_msg *recv_msg;
3832+
3833+
3834+ if (msg->rsp_size < 11)
3835+ /* Message not big enough, just ignore it. */
3836+ return 0;
3837+
3838+ if (msg->rsp[2] != 0)
3839+ /* An error getting the response, just ignore it. */
3840+ return 0;
3841+
3842+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3843+ ipmb_addr.slave_addr = msg->rsp[6];
3844+ ipmb_addr.channel = msg->rsp[3] & 0x0f;
3845+ ipmb_addr.lun = msg->rsp[7] & 3;
3846+
3847+ /* It's a response from a remote entity. Look up the sequence
3848+ number and handle the response. */
3849+ if (intf_find_seq(intf,
3850+ msg->rsp[7] >> 2,
3851+ msg->rsp[3] & 0x0f,
3852+ msg->rsp[8],
3853+ (msg->rsp[4] >> 2) & (~1),
3854+ (struct ipmi_addr *) &(ipmb_addr),
3855+ &recv_msg))
3856+ {
3857+ /* We were unable to find the sequence number,
3858+ so just nuke the message. */
3859+ return 0;
3860+ }
3861+
3862+ memcpy(recv_msg->msg_data,
3863+ &(msg->rsp[9]),
3864+ msg->rsp_size - 9);
3865+	/* The other fields matched, so no need to set them, except
3866+ for netfn, which needs to be the response that was
3867+ returned, not the request value. */
3868+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
3869+ recv_msg->msg.data = recv_msg->msg_data;
3870+ recv_msg->msg.data_len = msg->rsp_size - 10;
3871+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3872+ deliver_response(recv_msg);
3873+
3874+ return 0;
3875+}
3876+
3877+static int handle_get_msg_cmd(ipmi_smi_t intf,
3878+ struct ipmi_smi_msg *msg)
3879+{
3880+ struct list_head *entry;
3881+ struct cmd_rcvr *rcvr;
3882+ int rv = 0;
3883+ unsigned char netfn;
3884+ unsigned char cmd;
3885+ ipmi_user_t user = NULL;
3886+ struct ipmi_ipmb_addr *ipmb_addr;
3887+ struct ipmi_recv_msg *recv_msg;
3888+
3889+ if (msg->rsp_size < 10)
3890+ /* Message not big enough, just ignore it. */
3891+ return 0;
3892+
3893+ if (msg->rsp[2] != 0) {
3894+ /* An error getting the response, just ignore it. */
3895+ return 0;
3896+ }
3897+
3898+ netfn = msg->rsp[4] >> 2;
3899+ cmd = msg->rsp[8];
3900+
3901+ read_lock(&(intf->cmd_rcvr_lock));
3902+
3903+ if (intf->all_cmd_rcvr) {
3904+ user = intf->all_cmd_rcvr;
3905+ } else {
3906+ /* Find the command/netfn. */
3907+ list_for_each(entry, &(intf->cmd_rcvrs)) {
3908+ rcvr = list_entry(entry, struct cmd_rcvr, link);
3909+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
3910+ user = rcvr->user;
3911+ break;
3912+ }
3913+ }
3914+ }
3915+ read_unlock(&(intf->cmd_rcvr_lock));
3916+
3917+ if (user == NULL) {
3918+ /* We didn't find a user, deliver an error response. */
3919+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3920+ msg->data[1] = IPMI_SEND_MSG_CMD;
3921+ msg->data[2] = msg->rsp[3];
3922+ msg->data[3] = msg->rsp[6];
3923+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3924+ msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
3925+ msg->data[6] = intf->my_address;
3926+ /* rqseq/lun */
3927+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3928+ msg->data[8] = msg->rsp[8]; /* cmd */
3929+ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3930+ msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
3931+ msg->data_size = 11;
3932+
3933+ intf->handlers->sender(intf->send_info, msg, 0);
3934+
3935+ rv = -1; /* We used the message, so return the value that
3936+ causes it to not be freed or queued. */
3937+ } else {
3938+ /* Deliver the message to the user. */
3939+ recv_msg = ipmi_alloc_recv_msg();
3940+ if (! recv_msg) {
3941+ /* We couldn't allocate memory for the
3942+ message, so requeue it for handling
3943+ later. */
3944+ rv = 1;
3945+ } else {
3946+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3947+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3948+ ipmb_addr->slave_addr = msg->rsp[6];
3949+ ipmb_addr->lun = msg->rsp[7] & 3;
3950+ ipmb_addr->channel = msg->rsp[3];
3951+
3952+ recv_msg->user = user;
3953+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3954+ recv_msg->msgid = msg->rsp[7] >> 2;
3955+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
3956+ recv_msg->msg.cmd = msg->rsp[8];
3957+ recv_msg->msg.data = recv_msg->msg_data;
3958+ recv_msg->msg.data_len = msg->rsp_size - 10;
3959+ memcpy(recv_msg->msg_data,
3960+ &(msg->rsp[9]),
3961+ msg->rsp_size - 10);
3962+ deliver_response(recv_msg);
3963+ }
3964+ }
3965+
3966+ return rv;
3967+}
3968+
3969+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3970+ struct ipmi_smi_msg *msg)
3971+{
3972+ struct ipmi_system_interface_addr *smi_addr;
3973+
3974+ recv_msg->msgid = 0;
3975+ smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3976+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3977+ smi_addr->channel = IPMI_BMC_CHANNEL;
3978+ smi_addr->lun = msg->rsp[0] & 3;
3979+ recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3980+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
3981+ recv_msg->msg.cmd = msg->rsp[1];
3982+ memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3983+ recv_msg->msg.data = recv_msg->msg_data;
3984+ recv_msg->msg.data_len = msg->rsp_size - 3;
3985+}
3986+
3987+/* This will be called with the intf->users_lock read-locked, so no need
3988+ to do that here. */
3989+static int handle_read_event_rsp(ipmi_smi_t intf,
3990+ struct ipmi_smi_msg *msg)
3991+{
3992+ struct ipmi_recv_msg *recv_msg;
3993+ struct list_head msgs;
3994+ struct list_head *entry, *entry2;
3995+ ipmi_user_t user;
3996+ int rv = 0;
3997+ int deliver_count = 0;
3998+ unsigned long flags;
3999+
4000+ if (msg->rsp_size < 19) {
4001+ /* Message is too small to be an IPMB event. */
4002+ return 0;
4003+ }
4004+
4005+ if (msg->rsp[2] != 0) {
4006+ /* An error getting the event, just ignore it. */
4007+ return 0;
4008+ }
4009+
4010+ INIT_LIST_HEAD(&msgs);
4011+
4012+ spin_lock_irqsave(&(intf->events_lock), flags);
4013+
4014+ /* Allocate and fill in one message for every user that is getting
4015+ events. */
4016+ list_for_each(entry, &(intf->users)) {
4017+ user = list_entry(entry, struct ipmi_user, link);
4018+
4019+ if (! user->gets_events)
4020+ continue;
4021+
4022+ recv_msg = ipmi_alloc_recv_msg();
4023+ if (! recv_msg) {
4024+ list_for_each_safe(entry, entry2, &msgs) {
4025+ recv_msg = list_entry(entry,
4026+ struct ipmi_recv_msg,
4027+ link);
4028+ list_del(entry);
4029+ ipmi_free_recv_msg(recv_msg);
4030+ }
4031+ /* We couldn't allocate memory for the
4032+ message, so requeue it for handling
4033+ later. */
4034+ rv = 1;
4035+ goto out;
4036+ }
4037+
4038+ deliver_count++;
4039+
4040+ copy_event_into_recv_msg(recv_msg, msg);
4041+ recv_msg->user = user;
4042+ list_add_tail(&(recv_msg->link), &msgs);
4043+ }
4044+
4045+ if (deliver_count) {
4046+ /* Now deliver all the messages. */
4047+ list_for_each_safe(entry, entry2, &msgs) {
4048+ recv_msg = list_entry(entry,
4049+ struct ipmi_recv_msg,
4050+ link);
4051+ list_del(entry);
4052+ deliver_response(recv_msg);
4053+ }
4054+ } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4055+		/* No one is waiting for the message, so put it in the queue
4056+		   if there aren't already too many things in it. */
4057+ recv_msg = ipmi_alloc_recv_msg();
4058+ if (! recv_msg) {
4059+ /* We couldn't allocate memory for the
4060+ message, so requeue it for handling
4061+ later. */
4062+ rv = 1;
4063+ goto out;
4064+ }
4065+
4066+ copy_event_into_recv_msg(recv_msg, msg);
4067+ list_add_tail(&(recv_msg->link), &(intf->waiting_events));
4068+ } else {
4069+		/* There are too many things in the queue, so discard this
4070+ message. */
4071+ printk(KERN_WARNING "ipmi: Event queue full, discarding an"
4072+ " incoming event\n");
4073+ }
4074+
4075+ out:
4076+ spin_unlock_irqrestore(&(intf->events_lock), flags);
4077+
4078+ return rv;
4079+}
4080+
4081+static int handle_bmc_rsp(ipmi_smi_t intf,
4082+ struct ipmi_smi_msg *msg)
4083+{
4084+ struct ipmi_recv_msg *recv_msg;
4085+ int found = 0;
4086+ struct list_head *entry;
4087+
4088+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4089+
4090+ /* Make sure the user still exists. */
4091+ list_for_each(entry, &(intf->users)) {
4092+ if (list_entry(entry, struct ipmi_user, link)
4093+ == recv_msg->user)
4094+ {
4095+ /* Found it, so we can deliver it */
4096+ found = 1;
4097+ break;
4098+ }
4099+ }
4100+
4101+ if (!found) {
4102+ /* The user for the message went away, so give up. */
4103+ ipmi_free_recv_msg(recv_msg);
4104+ } else {
4105+ struct ipmi_system_interface_addr *smi_addr;
4106+
4107+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4108+ recv_msg->msgid = msg->msgid;
4109+ smi_addr = ((struct ipmi_system_interface_addr *)
4110+ &(recv_msg->addr));
4111+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4112+ smi_addr->channel = IPMI_BMC_CHANNEL;
4113+ smi_addr->lun = msg->rsp[0] & 3;
4114+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
4115+ recv_msg->msg.cmd = msg->rsp[1];
4116+ memcpy(recv_msg->msg_data,
4117+ &(msg->rsp[2]),
4118+ msg->rsp_size - 2);
4119+ recv_msg->msg.data = recv_msg->msg_data;
4120+ recv_msg->msg.data_len = msg->rsp_size - 2;
4121+ deliver_response(recv_msg);
4122+ }
4123+
4124+ return 0;
4125+}
4126+
4127+/* Handle a new message. Return 1 if the message should be requeued,
4128+ 0 if the message should be freed, or -1 if the message should not
4129+ be freed or requeued. */
4130+static int handle_new_recv_msg(ipmi_smi_t intf,
4131+ struct ipmi_smi_msg *msg)
4132+{
4133+ int requeue;
4134+
4135+ if (msg->rsp_size < 2) {
4136+ /* Message is too small to be correct. */
4137+ requeue = 0;
4138+ } else if (msg->rsp[1] == IPMI_GET_MSG_CMD) {
4139+#if DEBUG_MSGING
4140+ int m;
4141+ printk("Response:");
4142+ for (m=0; m<msg->rsp_size; m++)
4143+ printk(" %2.2x", msg->rsp[m]);
4144+ printk("\n");
4145+#endif
4146+ /* It's from the receive queue. */
4147+ if (msg->rsp[4] & 0x04) {
4148+ /* It's a response, so find the
4149+ requesting message and send it up. */
4150+ requeue = handle_get_msg_rsp(intf, msg);
4151+ } else {
4152+ /* It's a command to the SMS from some other
4153+ entity. Handle that. */
4154+ requeue = handle_get_msg_cmd(intf, msg);
4155+ }
4156+ } else if (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD) {
4157+		/* It's an asynchronous event. */
4158+ requeue = handle_read_event_rsp(intf, msg);
4159+ } else {
4160+ /* It's a response from the local BMC. */
4161+ requeue = handle_bmc_rsp(intf, msg);
4162+ }
4163+
4164+ return requeue;
4165+}
4166+
4167+/* Handle a new message from the lower layer. */
4168+void ipmi_smi_msg_received(ipmi_smi_t intf,
4169+ struct ipmi_smi_msg *msg)
4170+{
4171+ unsigned long flags;
4172+ int rv;
4173+
4174+
4175+ if ((msg->data_size >= 2) && (msg->data[1] == IPMI_SEND_MSG_CMD)) {
4176+ /* This is the local response to a send, we just
4177+ ignore these. */
4178+ msg->done(msg);
4179+ return;
4180+ }
4181+
4182+ /* Lock the user lock so the user can't go away while we are
4183+ working on it. */
4184+ read_lock(&(intf->users_lock));
4185+
4186+ /* To preserve message order, if the list is not empty, we
4187+ tack this message onto the end of the list. */
4188+ spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
4189+ if (!list_empty(&(intf->waiting_msgs))) {
4190+ list_add_tail(&(msg->link), &(intf->waiting_msgs));
4191+		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
4192+ return;
4193+ }
4194+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
4195+
4196+ rv = handle_new_recv_msg(intf, msg);
4197+ if (rv > 0) {
4198+ /* Could not handle the message now, just add it to a
4199+ list to handle later. */
4200+ spin_lock(&(intf->waiting_msgs_lock));
4201+ list_add_tail(&(msg->link), &(intf->waiting_msgs));
4202+ spin_unlock(&(intf->waiting_msgs_lock));
4203+ } else if (rv == 0) {
4204+ msg->done(msg);
4205+ }
4206+
4207+ read_unlock(&(intf->users_lock));
4208+}
4209+
4210+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
4211+{
4212+ struct list_head *entry;
4213+ ipmi_user_t user;
4214+
4215+ read_lock(&(intf->users_lock));
4216+ list_for_each(entry, &(intf->users)) {
4217+ user = list_entry(entry, struct ipmi_user, link);
4218+
4219+ if (! user->handler->ipmi_watchdog_pretimeout)
4220+ continue;
4221+
4222+ user->handler->ipmi_watchdog_pretimeout(user->handler_data);
4223+ }
4224+ read_unlock(&(intf->users_lock));
4225+}
4226+
4227+static void
4228+handle_msg_timeout(struct ipmi_recv_msg *msg)
4229+{
4230+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4231+ msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
4232+ msg->msg.netfn |= 1; /* Convert to a response. */
4233+ msg->msg.data_len = 1;
4234+ msg->msg.data = msg->msg_data;
4235+ deliver_response(msg);
4236+}
4237+
4238+static void
4239+ipmi_timeout_handler(long timeout_period)
4240+{
4241+ ipmi_smi_t intf;
4242+ struct list_head timeouts;
4243+ struct ipmi_recv_msg *msg;
4244+ struct ipmi_smi_msg *smi_msg;
4245+ unsigned long flags;
4246+ struct list_head *entry, *entry2;
4247+ int i, j;
4248+
4249+ INIT_LIST_HEAD(&timeouts);
4250+
4251+ spin_lock(&interfaces_lock);
4252+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
4253+ intf = ipmi_interfaces[i];
4254+ if (intf == NULL)
4255+ continue;
4256+
4257+ read_lock(&(intf->users_lock));
4258+
4259+ /* See if any waiting messages need to be processed. */
4260+ spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
4261+ list_for_each_safe(entry, entry2, &(intf->waiting_msgs)) {
4262+ smi_msg = list_entry(entry, struct ipmi_smi_msg, link);
4263+ if (! handle_new_recv_msg(intf, smi_msg)) {
4264+ list_del(entry);
4265+ smi_msg->done(smi_msg);
4266+ } else {
4267+ /* To preserve message order, quit if we
4268+ can't handle a message. */
4269+ break;
4270+ }
4271+ }
4272+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
4273+
4274+ /* Go through the seq table and find any messages that
4275+ have timed out, putting them in the timeouts
4276+ list. */
4277+ spin_lock_irqsave(&(intf->seq_lock), flags);
4278+ for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
4279+ if (intf->seq_table[j].inuse) {
4280+ intf->seq_table[j].timeout -= timeout_period;
4281+ if (intf->seq_table[j].timeout <= 0) {
4282+ intf->seq_table[j].inuse = 0;
4283+ msg = intf->seq_table[j].recv_msg;
4284+ list_add_tail(&(msg->link), &timeouts);
4285+ }
4286+ }
4287+ }
4288+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
4289+
4290+ list_for_each_safe(entry, entry2, &timeouts) {
4291+ msg = list_entry(entry, struct ipmi_recv_msg, link);
4292+ handle_msg_timeout(msg);
4293+ }
4294+
4295+ read_unlock(&(intf->users_lock));
4296+ }
4297+ spin_unlock(&interfaces_lock);
4298+}
4299+
4300+static void ipmi_request_event(void)
4301+{
4302+ ipmi_smi_t intf;
4303+ int i;
4304+
4305+ spin_lock(&interfaces_lock);
4306+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
4307+ intf = ipmi_interfaces[i];
4308+ if (intf == NULL)
4309+ continue;
4310+
4311+ intf->handlers->request_events(intf->send_info);
4312+ }
4313+ spin_unlock(&interfaces_lock);
4314+}
4315+
4316+static struct timer_list ipmi_timer;
4317+
4318+/* Call every 100 ms. */
4319+#define IPMI_TIMEOUT_TIME 100
4320+#define IPMI_TIMEOUT_JIFFIES (IPMI_TIMEOUT_TIME/(1000/HZ))
4321+
4322+/* Request events from the queue every second. Hopefully, in the
4323+ future, IPMI will add a way to know immediately if an event is
4324+ in the queue. */
4325+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
4326+
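As a worked example, with HZ set to 100 (a common 2.4-era x86 value) IPMI_TIMEOUT_JIFFIES is 100 / (1000/100) = 10 jiffies, so the timer below fires every 100 ms, and IPMI_REQUEST_EV_TIME is 1000/100 = 10 ticks, so the pending-event request goes out roughly once a second.
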
4327+static volatile int stop_operation = 0;
4328+static volatile int timer_stopped = 0;
4329+static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4330+
4331+static void ipmi_timeout(unsigned long data)
4332+{
4333+ if (stop_operation) {
4334+ timer_stopped = 1;
4335+ return;
4336+ }
4337+
4338+ ticks_to_req_ev--;
4339+ if (ticks_to_req_ev == 0) {
4340+ ipmi_request_event();
4341+ ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4342+ }
4343+
4344+ ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
4345+
4346+ ipmi_timer.expires += IPMI_TIMEOUT_JIFFIES;
4347+ add_timer(&ipmi_timer);
4348+}
4349+
4350+
4351+/* FIXME - convert these to slabs. */
4352+static void free_smi_msg(struct ipmi_smi_msg *msg)
4353+{
4354+ kfree(msg);
4355+}
4356+
4357+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4358+{
4359+ struct ipmi_smi_msg *rv;
4360+ rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4361+ if (rv)
4362+ rv->done = free_smi_msg;
4363+ return rv;
4364+}
4365+
4366+static void free_recv_msg(struct ipmi_recv_msg *msg)
4367+{
4368+ kfree(msg);
4369+}
4370+
4371+struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4372+{
4373+ struct ipmi_recv_msg *rv;
4374+
4375+ rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4376+ if (rv)
4377+ rv->done = free_recv_msg;
4378+ return rv;
4379+}
4380+
4381+#ifdef CONFIG_IPMI_PANIC_EVENT
4382+
4383+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4384+{
4385+}
4386+
4387+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4388+{
4389+}
4390+
4391+static void send_panic_events(void)
4392+{
4393+ struct ipmi_msg msg;
4394+ ipmi_smi_t intf;
4395+ unsigned char data[8];
4396+ int i;
4397+ struct ipmi_system_interface_addr addr;
4398+ struct ipmi_smi_msg smi_msg;
4399+ struct ipmi_recv_msg recv_msg;
4400+
4401+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4402+ addr.channel = IPMI_BMC_CHANNEL;
4403+
4404+ /* Fill in an event telling that we have failed. */
4405+ msg.netfn = 0x04; /* Sensor or Event. */
4406+ msg.cmd = 2; /* Platform event command. */
4407+ msg.data = data;
4408+ msg.data_len = 8;
4409+ data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
4410+ data[1] = 0x03; /* This is for IPMI 1.0. */
4411+ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4412+ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4413+ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4414+
4415+ /* These used to have the first three bytes of the panic string,
4416+ but not only is that not terribly useful, it's not available
4417+ any more. */
4418+ data[3] = 0;
4419+ data[6] = 0;
4420+ data[7] = 0;
4421+
4422+ smi_msg.done = dummy_smi_done_handler;
4423+ recv_msg.done = dummy_recv_done_handler;
4424+
4425+ /* For every registered interface, send the event. */
4426+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
4427+ intf = ipmi_interfaces[i];
4428+ if (intf == NULL)
4429+ continue;
4430+
4431+ intf->handlers->set_run_to_completion(intf->send_info, 1);
4432+ i_ipmi_request(NULL,
4433+ intf,
4434+ (struct ipmi_addr *) &addr,
4435+ 0,
4436+ &msg,
4437+ &smi_msg,
4438+ &recv_msg,
4439+ 0,
4440+ intf->my_address,
4441+ intf->my_lun);
4442+ }
4443+}
4444+#endif /* CONFIG_IPMI_PANIC_EVENT */
4445+
4446+static int has_paniced = 0;
4447+
4448+static int panic_event(struct notifier_block *this,
4449+ unsigned long event,
4450+ void *ptr)
4451+{
4452+ int i;
4453+ ipmi_smi_t intf;
4454+
4455+ if (has_paniced)
4456+ return NOTIFY_DONE;
4457+ has_paniced = 1;
4458+
4459+ /* For every registered interface, set it to run to completion. */
4460+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
4461+ intf = ipmi_interfaces[i];
4462+ if (intf == NULL)
4463+ continue;
4464+
4465+ intf->handlers->set_run_to_completion(intf->send_info, 1);
4466+ }
4467+
4468+#ifdef CONFIG_IPMI_PANIC_EVENT
4469+ send_panic_events();
4470+#endif
4471+
4472+ return NOTIFY_DONE;
4473+}
4474+
4475+static struct notifier_block panic_block = {
4476+ panic_event,
4477+ NULL,
4478+ 200 /* priority: INT_MAX >= x >= 0 */
4479+};
4480+
4481+
4482+static __init int ipmi_init_msghandler(void)
4483+{
4484+ int i;
4485+
4486+ if (initialized)
4487+ return 0;
4488+
4489+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
4490+ ipmi_interfaces[i] = NULL;
4491+ }
4492+
4493+ init_timer(&ipmi_timer);
4494+ ipmi_timer.data = 0;
4495+ ipmi_timer.function = ipmi_timeout;
4496+ ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
4497+ add_timer(&ipmi_timer);
4498+
4499+ notifier_chain_register(&panic_notifier_list, &panic_block);
4500+
4501+ initialized = 1;
4502+
4503+ printk(KERN_INFO "ipmi: message handler initialized\n");
4504+
4505+ return 0;
4506+}
4507+
4508+static __exit void cleanup_ipmi(void)
4509+{
4510+ if (!initialized)
4511+ return;
4512+
4513+ notifier_chain_unregister(&panic_notifier_list, &panic_block);
4514+
4515+ /* This can't be called if any interfaces exist, so no worry about
4516+ shutting down the interfaces. */
4517+
4518+ /* Tell the timer to stop, then wait for it to stop. This avoids
4519+ problems with race conditions removing the timer here. */
4520+ stop_operation = 1;
4521+ while (!timer_stopped) {
4522+ schedule_timeout(1);
4523+ }
4524+
4525+ initialized = 0;
4526+}
4527+module_exit(cleanup_ipmi);
4528+
4529+module_init(ipmi_init_msghandler);
4530+MODULE_LICENSE("GPL");
4531+
4532+EXPORT_SYMBOL_GPL(ipmi_alloc_recv_msg);
4533+EXPORT_SYMBOL_GPL(ipmi_create_user);
4534+EXPORT_SYMBOL_GPL(ipmi_destroy_user);
4535+EXPORT_SYMBOL_GPL(ipmi_get_version);
4536+EXPORT_SYMBOL_GPL(ipmi_request);
4537+EXPORT_SYMBOL_GPL(ipmi_request_supply_msgs);
4538+EXPORT_SYMBOL_GPL(ipmi_request_with_source);
4539+EXPORT_SYMBOL_GPL(ipmi_register_smi);
4540+EXPORT_SYMBOL_GPL(ipmi_unregister_smi);
4541+EXPORT_SYMBOL_GPL(ipmi_register_for_cmd);
4542+EXPORT_SYMBOL_GPL(ipmi_unregister_for_cmd);
4543+EXPORT_SYMBOL_GPL(ipmi_smi_msg_received);
4544+EXPORT_SYMBOL_GPL(ipmi_smi_watchdog_pretimeout);
4545+EXPORT_SYMBOL_GPL(ipmi_alloc_smi_msg);
4546+EXPORT_SYMBOL_GPL(ipmi_register_all_cmd_rcvr);
4547+EXPORT_SYMBOL_GPL(ipmi_unregister_all_cmd_rcvr);
4548+EXPORT_SYMBOL_GPL(ipmi_addr_length);
4549+EXPORT_SYMBOL_GPL(ipmi_validate_addr);
4550+EXPORT_SYMBOL_GPL(ipmi_set_gets_events);
4551+EXPORT_SYMBOL_GPL(ipmi_addr_equal);
4552+EXPORT_SYMBOL_GPL(ipmi_smi_watcher_register);
4553+EXPORT_SYMBOL_GPL(ipmi_smi_watcher_unregister);
4554+EXPORT_SYMBOL_GPL(ipmi_set_my_address);
4555+EXPORT_SYMBOL_GPL(ipmi_get_my_address);
4556+EXPORT_SYMBOL_GPL(ipmi_set_my_LUN);
4557+EXPORT_SYMBOL_GPL(ipmi_get_my_LUN);
4558diff -urNp linux-5010/drivers/char/ipmi/ipmi_watchdog.c linux-5020/drivers/char/ipmi/ipmi_watchdog.c
4559--- linux-5010/drivers/char/ipmi/ipmi_watchdog.c 1970-01-01 01:00:00.000000000 +0100
4560+++ linux-5020/drivers/char/ipmi/ipmi_watchdog.c
4561@@ -0,0 +1,1113 @@
4562+/*
4563+ * ipmi_watchdog.c
4564+ *
4565+ * A watchdog timer based upon the IPMI interface.
4566+ *
4567+ * Author: MontaVista Software, Inc.
4568+ * Corey Minyard <minyard@mvista.com>
4569+ * source@mvista.com
4570+ *
4571+ * Copyright 2002 MontaVista Software Inc.
4572+ *
4573+ * This program is free software; you can redistribute it and/or modify it
4574+ * under the terms of the GNU General Public License as published by the
4575+ * Free Software Foundation; either version 2 of the License, or (at your
4576+ * option) any later version.
4577+ *
4578+ *
4579+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
4580+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
4581+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
4582+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
4583+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
4584+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
4585+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4586+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
4587+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
4588+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4589+ *
4590+ * You should have received a copy of the GNU General Public License along
4591+ * with this program; if not, write to the Free Software Foundation, Inc.,
4592+ * 675 Mass Ave, Cambridge, MA 02139, USA.
4593+ */
4594+
4595+#include <linux/config.h>
4596+#include <linux/module.h>
4597+#include <linux/ipmi.h>
4598+#include <linux/ipmi_smi.h>
4599+#include <linux/watchdog.h>
4600+#include <linux/miscdevice.h>
4601+#include <linux/init.h>
4602+#include <linux/rwsem.h>
4603+#include <linux/errno.h>
4604+#include <asm/uaccess.h>
4605+#include <linux/notifier.h>
4606+#include <linux/nmi.h>
4607+#include <linux/reboot.h>
4608+#include <linux/wait.h>
4609+#include <linux/poll.h>
4610+#ifdef CONFIG_X86_LOCAL_APIC
4611+#include <asm/apic.h>
4612+#endif
4613+
4614+/*
4615+ * The IPMI command/response information for the watchdog timer.
4616+ */
4617+
4618+/* values for byte 1 of the set command, byte 2 of the get response. */
4619+#define WDOG_DONT_LOG (1 << 7)
4620+#define WDOG_DONT_STOP_ON_SET (1 << 6)
4621+#define WDOG_SET_TIMER_USE(byte, use) \
4622+ byte = ((byte) & 0xf8) | ((use) & 0x7)
4623+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
4624+#define WDOG_TIMER_USE_BIOS_FRB2 1
4625+#define WDOG_TIMER_USE_BIOS_POST 2
4626+#define WDOG_TIMER_USE_OS_LOAD 3
4627+#define WDOG_TIMER_USE_SMS_OS 4
4628+#define WDOG_TIMER_USE_OEM 5
4629+
4630+/* values for byte 2 of the set command, byte 3 of the get response. */
4631+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
4632+ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
4633+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
4634+#define WDOG_PRETIMEOUT_NONE 0
4635+#define WDOG_PRETIMEOUT_SMI 1
4636+#define WDOG_PRETIMEOUT_NMI 2
4637+#define WDOG_PRETIMEOUT_MSG_INT 3
4638+
4639+/* Operations that can be performed on a pretimeout. */
4640+#define WDOG_PREOP_NONE 0
4641+#define WDOG_PREOP_PANIC 1
4642+#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to
4643+ read. Doesn't work in NMI
4644+ mode. */
4645+
4646+/* Actions to perform on a full timeout. */
4647+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
4648+ byte = ((byte) & 0xf8) | ((use) & 0x7)
4649+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
4650+#define WDOG_TIMEOUT_NONE 0
4651+#define WDOG_TIMEOUT_RESET 1
4652+#define WDOG_TIMEOUT_POWER_DOWN 2
4653+#define WDOG_TIMEOUT_POWER_CYCLE 3
4654+
4655+/* Byte 3 of the set command, byte 4 of the get response is the
4656+ pre-timeout in seconds. */
4657+
4658+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
4659+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
4660+#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
4661+#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
4662+#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
4663+#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
4664+
4665+/* Setting/getting the watchdog timer value. This is for bytes 5 and
4666+ 6 (the timeout time) of the set command, and bytes 6 and 7 (the
4667+ timeout time) and 8 and 9 (the current countdown value) of the
4668+ response. The timeout value is given in seconds (in the command it
4669+ is 100ms intervals). */
4670+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
4671+ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
4672+#define WDOG_GET_TIMEOUT(byte1, byte2) \
4673+ (((byte1) | ((byte2) << 8)) / 10)
4674+
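For example, a timeout of 300 seconds is sent as 3000 hundred-millisecond intervals (0x0BB8): WDOG_SET_TIMEOUT puts 0xB8 in the first byte and 0x0B in the second, and WDOG_GET_TIMEOUT recovers (0xB8 | (0x0B << 8)) / 10 = 300.
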
4675+#define IPMI_WDOG_RESET_TIMER 0x22
4676+#define IPMI_WDOG_SET_TIMER 0x24
4677+#define IPMI_WDOG_GET_TIMER 0x25
4678+
4679+/* These are here until the real ones get into the watchdog.h interface. */
4680+#ifndef WDIOC_GETTIMEOUT
4681+#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
4682+#endif
4683+#ifndef WDIOC_SET_PRETIMEOUT
4684+#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
4685+#endif
4686+#ifndef WDIOC_GET_PRETIMEOUT
4687+#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
4688+#endif
4689+
4690+static ipmi_user_t watchdog_user = NULL;
4691+
4692+/* Default the timeout to 10 seconds. */
4693+static int timeout = 10;
4694+
4695+/* The pre-timeout is disabled by default. */
4696+static int pretimeout = 0;
4697+
4698+/* Default action is to reset the board on a timeout. */
4699+static unsigned char action_val = WDOG_TIMEOUT_RESET;
4700+
4701+static char *action = "reset";
4702+
4703+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
4704+
4705+static char *preaction = "pre_none";
4706+
4707+static unsigned char preop_val = WDOG_PREOP_NONE;
4708+
4709+static char *preop = "preop_none";
4710+static spinlock_t ipmi_read_lock = SPIN_LOCK_UNLOCKED;
4711+static char data_to_read = 0;
4712+static DECLARE_WAIT_QUEUE_HEAD(read_q);
4713+static struct fasync_struct *fasync_q = NULL;
4714+static char pretimeout_since_last_heartbeat = 0;
4715+
4716+MODULE_PARM(timeout, "i");
4717+MODULE_PARM(pretimeout, "i");
4718+MODULE_PARM(action, "s");
4719+MODULE_PARM(preaction, "s");
4720+MODULE_PARM(preop, "s");
4721+
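Loaded as a module, these parameters might be given as, for example, insmod ipmi_watchdog timeout=30 pretimeout=5 action=power_cycle preaction=pre_smi preop=preop_give_data; the accepted strings are exactly the ones tested in ipmi_wdog_init() further down.
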
4722+/* Default state of the timer. */
4723+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
4724+
4725+/* If shutting down via IPMI, we ignore the heartbeat. */
4726+static int ipmi_ignore_heartbeat = 0;
4727+
4728+/* Is someone using the watchdog? Only one user is allowed. */
4729+static int ipmi_wdog_open = 0;
4730+
4731+/* If true, the driver will start running as soon as it is configured
4732+ and ready. */
4733+static int start_now = 0;
4734+
4735+/* If set to 1, the heartbeat command will set the state to reset and
4736+ start the timer. The timer doesn't normally run when the driver is
4737+   first opened; it only starts on the first heartbeat, and this
4738+   variable is used to accomplish that. */
4739+static int ipmi_start_timer_on_heartbeat = 0;
4740+
4741+/* IPMI version of the BMC. */
4742+static unsigned char ipmi_version_major;
4743+static unsigned char ipmi_version_minor;
4744+
4745+
4746+static int ipmi_heartbeat(void);
4747+static void panic_halt_ipmi_heartbeat(void);
4748+
4749+
4750+/* We use a semaphore to make sure that only one thing can send a set
4751+ timeout at one time, because we only have one copy of the data.
4752+ The semaphore is claimed when the set_timeout is sent and freed
4753+ when both messages are free. */
4754+static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
4755+static DECLARE_MUTEX(set_timeout_lock);
4756+static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
4757+{
4758+ if (atomic_dec_and_test(&set_timeout_tofree))
4759+ up(&set_timeout_lock);
4760+}
4761+static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
4762+{
4763+ if (atomic_dec_and_test(&set_timeout_tofree))
4764+ up(&set_timeout_lock);
4765+}
4766+static struct ipmi_smi_msg set_timeout_smi_msg =
4767+{
4768+ .done = set_timeout_free_smi
4769+};
4770+static struct ipmi_recv_msg set_timeout_recv_msg =
4771+{
4772+ .done = set_timeout_free_recv
4773+};
4774+
4775+static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
4776+ struct ipmi_recv_msg *recv_msg,
4777+ int *send_heartbeat_now)
4778+{
4779+ struct ipmi_msg msg;
4780+ unsigned char data[6];
4781+ int rv;
4782+ struct ipmi_system_interface_addr addr;
4783+
4784+
4785+ *send_heartbeat_now = 0;
4786+ data[0] = 0;
4787+ WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
4788+
4789+ if ((ipmi_version_major > 1)
4790+ || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
4791+ {
4792+ /* This is an IPMI 1.5-only feature. */
4793+ data[0] |= WDOG_DONT_STOP_ON_SET;
4794+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
4795+		/* In IPMI 1.0, setting the timer stops the watchdog, so we
4796+ need to start it back up again. */
4797+ *send_heartbeat_now = 1;
4798+ }
4799+
4800+ data[1] = 0;
4801+ WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
4802+ if (pretimeout > 0) {
4803+ WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
4804+ data[2] = pretimeout;
4805+ } else {
4806+ WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
4807+ data[2] = 0; /* No pretimeout. */
4808+ }
4809+ data[3] = 0;
4810+ WDOG_SET_TIMEOUT(data[4], data[5], timeout);
4811+
4812+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4813+ addr.channel = IPMI_BMC_CHANNEL;
4814+ addr.lun = 0;
4815+
4816+ msg.netfn = 0x06;
4817+ msg.cmd = IPMI_WDOG_SET_TIMER;
4818+ msg.data = data;
4819+ msg.data_len = sizeof(data);
4820+ rv = ipmi_request_supply_msgs(watchdog_user,
4821+ (struct ipmi_addr *) &addr,
4822+ 0,
4823+ &msg,
4824+ smi_msg,
4825+ recv_msg,
4826+ 1);
4827+ if (rv) {
4828+ printk(KERN_WARNING "IPMI Watchdog, set timeout error: %d\n",
4829+ rv);
4830+ }
4831+
4832+ return rv;
4833+}
4834+
4835+static int ipmi_set_timeout(void)
4836+{
4837+ int send_heartbeat_now;
4838+ int rv;
4839+
4840+
4841+ /* We can only send one of these at a time. */
4842+ down(&set_timeout_lock);
4843+
4844+ atomic_set(&set_timeout_tofree, 2);
4845+
4846+ rv = i_ipmi_set_timeout(&set_timeout_smi_msg,
4847+ &set_timeout_recv_msg,
4848+ &send_heartbeat_now);
4849+ if (rv) {
4850+ up(&set_timeout_lock);
4851+ } else {
4852+ if (send_heartbeat_now)
4853+ rv = ipmi_heartbeat();
4854+ }
4855+
4856+ return rv;
4857+}
4858+
4859+static void dummy_smi_free(struct ipmi_smi_msg *msg)
4860+{
4861+}
4862+static void dummy_recv_free(struct ipmi_recv_msg *msg)
4863+{
4864+}
4865+static struct ipmi_smi_msg panic_halt_smi_msg =
4866+{
4867+ .done = dummy_smi_free
4868+};
4869+static struct ipmi_recv_msg panic_halt_recv_msg =
4870+{
4871+ .done = dummy_recv_free
4872+};
4873+
4874+/* Special call, doesn't claim any locks. This is only to be called
4875+ at panic or halt time, in run-to-completion mode, when the caller
4876+   is the only CPU and the only thing that will be doing IPMI
4877+ calls. */
4878+static void panic_halt_ipmi_set_timeout(void)
4879+{
4880+ int send_heartbeat_now;
4881+ int rv;
4882+
4883+ rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
4884+ &panic_halt_recv_msg,
4885+ &send_heartbeat_now);
4886+ if (!rv) {
4887+ if (send_heartbeat_now)
4888+ panic_halt_ipmi_heartbeat();
4889+ }
4890+}
4891+
4892+/* Do a delayed shutdown, with the delay in milliseconds. If power_off is
4893+ false, do a reset. If power_off is true, do a power down. This is
4894+ primarily for the IMB code's shutdown. */
4895+void ipmi_delayed_shutdown(long delay, int power_off)
4896+{
4897+ ipmi_ignore_heartbeat = 1;
4898+ if (power_off)
4899+ ipmi_watchdog_state = WDOG_TIMEOUT_POWER_DOWN;
4900+ else
4901+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
4902+ timeout = delay;
4903+ ipmi_set_timeout();
4904+}
4905+
4906+/* We use a semaphore to make sure that only one thing can send a
4907+ heartbeat at one time, because we only have one copy of the data.
4908+   The semaphore is claimed when the heartbeat is sent and freed
4909+ when both messages are free. */
4910+static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
4911+static DECLARE_MUTEX(heartbeat_lock);
4912+static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
4913+static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
4914+{
4915+ if (atomic_dec_and_test(&heartbeat_tofree))
4916+ up(&heartbeat_wait_lock);
4917+}
4918+static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
4919+{
4920+ if (atomic_dec_and_test(&heartbeat_tofree))
4921+ up(&heartbeat_wait_lock);
4922+}
4923+static struct ipmi_smi_msg heartbeat_smi_msg =
4924+{
4925+ .done = heartbeat_free_smi
4926+};
4927+static struct ipmi_recv_msg heartbeat_recv_msg =
4928+{
4929+ .done = heartbeat_free_recv
4930+};
4931+
4932+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
4933+{
4934+ .done = dummy_smi_free
4935+};
4936+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
4937+{
4938+ .done = dummy_recv_free
4939+};
4940+
4941+static int ipmi_heartbeat(void)
4942+{
4943+ struct ipmi_msg msg;
4944+ int rv;
4945+ struct ipmi_system_interface_addr addr;
4946+
4947+ if (ipmi_ignore_heartbeat) {
4948+ return 0;
4949+ }
4950+
4951+ if (ipmi_start_timer_on_heartbeat) {
4952+ ipmi_start_timer_on_heartbeat = 0;
4953+ ipmi_watchdog_state = action_val;
4954+ return ipmi_set_timeout();
4955+ }
4956+
4957+ if (pretimeout_since_last_heartbeat) {
4958+ /* A pretimeout occurred, make sure we set the timeout.
4959+		   We don't want to set the action, though; we want to
4960+		   leave that alone (thus it can't be combined with the
4961+		   above operation). */
4962+ pretimeout_since_last_heartbeat = 0;
4963+ return ipmi_set_timeout();
4964+ }
4965+
4966+ down(&heartbeat_lock);
4967+
4968+ atomic_set(&heartbeat_tofree, 2);
4969+
4970+ /* Don't reset the timer if we have the timer turned off, that
4971+ re-enables the watchdog. */
4972+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
4973+ up(&heartbeat_lock);
4974+ return 0;
4975+ }
4976+
4977+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4978+ addr.channel = IPMI_BMC_CHANNEL;
4979+ addr.lun = 0;
4980+
4981+ msg.netfn = 0x06;
4982+ msg.cmd = IPMI_WDOG_RESET_TIMER;
4983+ msg.data = NULL;
4984+ msg.data_len = 0;
4985+ rv = ipmi_request_supply_msgs(watchdog_user,
4986+ (struct ipmi_addr *) &addr,
4987+ 0,
4988+ &msg,
4989+ &heartbeat_smi_msg,
4990+ &heartbeat_recv_msg,
4991+ 1);
4992+ if (rv) {
4993+ up(&heartbeat_lock);
4994+ printk(KERN_WARNING "IPMI Watchdog, heartbeat failure: %d\n",
4995+ rv);
4996+ return rv;
4997+ }
4998+
4999+ /* Wait for the heartbeat to be sent. */
5000+ down(&heartbeat_wait_lock);
5001+
5002+ if (heartbeat_recv_msg.msg.data[0] != 0) {
5003+ /* Got an error in the heartbeat response. It was already
5004+ reported in ipmi_wdog_msg_handler, but we should return
5005+ an error here. */
5006+ rv = -EINVAL;
5007+ }
5008+
5009+ up(&heartbeat_lock);
5010+
5011+ return rv;
5012+}
5013+
5014+static void panic_halt_ipmi_heartbeat(void)
5015+{
5016+ struct ipmi_msg msg;
5017+ struct ipmi_system_interface_addr addr;
5018+
5019+
5020+ /* Don't reset the timer if we have the timer turned off, that
5021+ re-enables the watchdog. */
5022+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
5023+ return;
5024+
5025+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5026+ addr.channel = IPMI_BMC_CHANNEL;
5027+ addr.lun = 0;
5028+
5029+ msg.netfn = 0x06;
5030+ msg.cmd = IPMI_WDOG_RESET_TIMER;
5031+ msg.data = NULL;
5032+ msg.data_len = 0;
5033+ ipmi_request_supply_msgs(watchdog_user,
5034+ (struct ipmi_addr *) &addr,
5035+ 0,
5036+ &msg,
5037+ &panic_halt_heartbeat_smi_msg,
5038+ &panic_halt_heartbeat_recv_msg,
5039+ 1);
5040+}
5041+
5042+static struct watchdog_info ident=
5043+{
5044+ 0, /* WDIOF_SETTIMEOUT, */
5045+ 1,
5046+ "IPMI"
5047+};
5048+
5049+static int ipmi_ioctl(struct inode *inode, struct file *file,
5050+ unsigned int cmd, unsigned long arg)
5051+{
5052+ int i;
5053+ int val;
5054+
5055+ switch(cmd) {
5056+ case WDIOC_GETSUPPORT:
5057+ i = copy_to_user((void*)arg, &ident, sizeof(ident));
5058+ return i ? -EFAULT : 0;
5059+
5060+ case WDIOC_SETTIMEOUT:
5061+ i = copy_from_user(&val, (void *) arg, sizeof(int));
5062+ if (i)
5063+ return -EFAULT;
5064+ timeout = val;
5065+ return ipmi_set_timeout();
5066+
5067+ case WDIOC_GETTIMEOUT:
5068+ i = copy_to_user((void *) arg,
5069+ &timeout,
5070+ sizeof(timeout));
5071+ if (i)
5072+ return -EFAULT;
5073+ return 0;
5074+
5075+ case WDIOC_SET_PRETIMEOUT:
5076+ i = copy_from_user(&val, (void *) arg, sizeof(int));
5077+ if (i)
5078+ return -EFAULT;
5079+ pretimeout = val;
5080+ return ipmi_set_timeout();
5081+
5082+ case WDIOC_GET_PRETIMEOUT:
5083+ i = copy_to_user((void *) arg,
5084+ &pretimeout,
5085+ sizeof(pretimeout));
5086+ if (i)
5087+ return -EFAULT;
5088+ return 0;
5089+
5090+ case WDIOC_KEEPALIVE:
5091+ return ipmi_heartbeat();
5092+
5093+ case WDIOC_SETOPTIONS:
5094+ i = copy_from_user(&val, (void *) arg, sizeof(int));
5095+ if (i)
5096+ return -EFAULT;
5097+ if (val & WDIOS_DISABLECARD)
5098+ {
5099+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
5100+ ipmi_set_timeout();
5101+ ipmi_start_timer_on_heartbeat = 0;
5102+ }
5103+
5104+ if (val & WDIOS_ENABLECARD)
5105+ {
5106+ ipmi_watchdog_state = action_val;
5107+ ipmi_set_timeout();
5108+ }
5109+ return 0;
5110+
5111+ case WDIOC_GETSTATUS:
5112+ val = 0;
5113+ return copy_to_user((void *) arg, &val, sizeof(val));
5114+
5115+ default:
5116+ return -ENOIOCTLCMD;
5117+ }
5118+}
5119+
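Pulling the pieces above together, a userland daemon might drive the watchdog roughly as follows (a sketch only; it assumes the misc device registered below shows up as /dev/watchdog, uses the standard WDIOC_SETTIMEOUT ioctl from linux/watchdog.h, and treats any non-empty write as the heartbeat):

	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_RDWR);
		int t = 30;

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &t);  /* 30 second timeout */
		for (;;) {
			/* Any non-empty write is a heartbeat; the timer only
			   starts on the first one (see ipmi_open below). */
			write(fd, "\0", 1);
			sleep(10);
		}
	}
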
5120+static ssize_t ipmi_write(struct file *file,
5121+ const char *buf,
5122+ size_t len,
5123+ loff_t *ppos)
5124+{
5125+ int rv;
5126+
5127+ /* Can't seek (pwrite) on this device */
5128+ if (ppos != &file->f_pos)
5129+ return -ESPIPE;
5130+
5131+ if (len) {
5132+ rv = ipmi_heartbeat();
5133+ if (rv)
5134+ return rv;
5135+ return 1;
5136+ }
5137+ return 0;
5138+}
5139+
5140+static ssize_t ipmi_read(struct file *file,
5141+ char *buf,
5142+ size_t count,
5143+ loff_t *ppos)
5144+{
5145+ int rv = 0;
5146+ wait_queue_t wait;
5147+
5148+ /* Can't seek (pread) on this device */
5149+ if (ppos != &file->f_pos)
5150+ return -ESPIPE;
5151+
5152+ if (count <= 0)
5153+ return 0;
5154+
5155+ /* Reading returns if the pretimeout has gone off, and it only does
5156+ it once per pretimeout. */
5157+ spin_lock(&ipmi_read_lock);
5158+ if (!data_to_read) {
5159+ if (file->f_flags & O_NONBLOCK) {
5160+ rv = -EAGAIN;
5161+ goto out;
5162+ }
5163+
5164+ init_waitqueue_entry(&wait, current);
5165+ add_wait_queue(&read_q, &wait);
5166+ while (!data_to_read) {
5167+ set_current_state(TASK_INTERRUPTIBLE);
5168+ spin_unlock(&ipmi_read_lock);
5169+ schedule();
5170+ spin_lock(&ipmi_read_lock);
5171+ }
5172+ remove_wait_queue(&read_q, &wait);
5173+
5174+ if (signal_pending(current)) {
5175+ rv = -ERESTARTSYS;
5176+ goto out;
5177+ }
5178+ }
5179+ data_to_read = 0;
5180+
5181+ out:
5182+ spin_unlock(&ipmi_read_lock);
5183+
5184+ if (rv == 0) {
5185+ if (copy_to_user(buf, &data_to_read, 1))
5186+ rv = -EFAULT;
5187+ else
5188+ rv = 1;
5189+ }
5190+
5191+ return rv;
5192+}
5193+
5194+static int ipmi_open(struct inode *ino, struct file *filep)
5195+{
5196+ switch (minor(ino->i_rdev))
5197+ {
5198+ case WATCHDOG_MINOR:
5199+ if (ipmi_wdog_open)
5200+ return -EBUSY;
5201+
5202+ ipmi_wdog_open = 1;
5203+
5204+ /* Don't start the timer now, let it start on the
5205+ first heartbeat. */
5206+ ipmi_start_timer_on_heartbeat = 1;
5207+ return(0);
5208+
5209+ default:
5210+ return (-ENODEV);
5211+ }
5212+}
5213+
5214+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
5215+{
5216+ unsigned int mask = 0;
5217+
5218+ poll_wait(file, &read_q, wait);
5219+
5220+ spin_lock(&ipmi_read_lock);
5221+ if (data_to_read)
5222+ mask |= (POLLIN | POLLRDNORM);
5223+ spin_unlock(&ipmi_read_lock);
5224+
5225+ return mask;
5226+}
5227+
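A process that chose the preop_give_data behaviour can wait for a pretimeout notification in the same spirit (again only a sketch; one byte becomes readable per pretimeout, as implemented in ipmi_read() above):

	#include <unistd.h>
	#include <poll.h>

	static void wait_for_pretimeout(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		char b;

		poll(&pfd, 1, -1);	/* blocks until the pretimeout fires */
		read(fd, &b, 1);	/* consume the notification */
		/* take emergency action, then resume heartbeating */
	}
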
5228+static int ipmi_fasync(int fd, struct file *file, int on)
5229+{
5230+ int result;
5231+
5232+ result = fasync_helper(fd, file, on, &fasync_q);
5233+
5234+ return (result);
5235+}
5236+
5237+static int ipmi_close(struct inode *ino, struct file *filep)
5238+{
5239+ if (minor(ino->i_rdev)==WATCHDOG_MINOR)
5240+ {
5241+#ifndef CONFIG_WATCHDOG_NOWAYOUT
5242+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
5243+ ipmi_set_timeout();
5244+#endif
5245+ ipmi_wdog_open = 0;
5246+ }
5247+
5248+ ipmi_fasync (-1, filep, 0);
5249+
5250+ return 0;
5251+}
5252+
5253+static struct file_operations ipmi_wdog_fops = {
5254+ .owner = THIS_MODULE,
5255+ .read = ipmi_read,
5256+ .poll = ipmi_poll,
5257+ .write = ipmi_write,
5258+ .ioctl = ipmi_ioctl,
5259+ .open = ipmi_open,
5260+ .release = ipmi_close,
5261+ .fasync = ipmi_fasync,
5262+};
5263+
5264+static struct miscdevice ipmi_wdog_miscdev = {
5265+ WATCHDOG_MINOR,
5266+ "watchdog",
5267+ &ipmi_wdog_fops
5268+};
5269+
5270+static DECLARE_RWSEM(register_sem);
5271+
5272+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
5273+ void *handler_data)
5274+{
5275+ if (msg->msg.data[0] != 0) {
5276+ printk(KERN_ERR "IPMI Watchdog response: Error %x on cmd %x\n",
5277+ msg->msg.data[0],
5278+ msg->msg.cmd);
5279+ }
5280+
5281+ ipmi_free_recv_msg(msg);
5282+}
5283+
5284+static void ipmi_wdog_pretimeout_handler(void *handler_data)
5285+{
5286+ if (preaction_val != WDOG_PRETIMEOUT_NONE) {
5287+ if (preop_val == WDOG_PREOP_PANIC)
5288+ panic("Watchdog pre-timeout");
5289+ else if (preop_val == WDOG_PREOP_GIVE_DATA) {
5290+ spin_lock(&ipmi_read_lock);
5291+ data_to_read = 1;
5292+ wake_up_interruptible(&read_q);
5293+ kill_fasync(&fasync_q, SIGIO, POLL_IN);
5294+
5295+ /* On some machines, the heartbeat will give
5296+ an error and not work unless we re-enable
5297+ the timer. So do so. */
5298+ pretimeout_since_last_heartbeat = 1;
5299+
5300+ spin_unlock(&ipmi_read_lock);
5301+ }
5302+ }
5303+}
5304+
5305+static struct ipmi_user_hndl ipmi_hndlrs =
5306+{
5307+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
5308+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
5309+};
5310+
5311+static void ipmi_register_watchdog(int ipmi_intf)
5312+{
5313+ int rv = -EBUSY;
5314+
5315+	down_write(&register_sem);
5316+ if (watchdog_user)
5317+ goto out;
5318+
5319+ rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
5320+ if (rv < 0) {
5321+ printk("IPMI watchdog: Unable to register with ipmi\n");
5322+ goto out;
5323+ }
5324+
5325+ ipmi_get_version(watchdog_user,
5326+ &ipmi_version_major,
5327+ &ipmi_version_minor);
5328+
5329+ rv = misc_register(&ipmi_wdog_miscdev);
5330+ if (rv < 0) {
5331+ ipmi_destroy_user(watchdog_user);
5332+ watchdog_user = NULL;
5333+ printk("IPMI watchdog: Unable to register misc device\n");
5334+ }
5335+
5336+ out:
5337+ up_write(&register_sem);
5338+
5339+ if ((start_now) && (rv == 0)) {
5340+ /* Run from startup, so start the timer now. */
5341+ start_now = 0; /* Disable this function after first startup. */
5342+ ipmi_watchdog_state = action_val;
5343+ ipmi_set_timeout();
5344+ printk("Starting IPMI Watchdog now!\n");
5345+ }
5346+}
5347+
5348+#ifdef HAVE_NMI_HANDLER
5349+static int
5350+ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
5351+{
5352+ /* If no one else handled the NMI, we assume it was the IPMI
5353+ watchdog. */
5354+ if ((!handled) && (preop_val == WDOG_PREOP_PANIC))
5355+ panic("IPMI watchdog pre-timeout");
5356+ return NOTIFY_DONE;
5357+}
5358+
5359+static struct nmi_handler ipmi_nmi_handler =
5360+{
5361+ .dev_name = "ipmi_watchdog",
5362+ .dev_id = NULL,
5363+ .handler = ipmi_nmi,
5364+ .priority = 0, /* Call us last. */
5365+};
5366+#endif
5367+
5368+static int wdog_reboot_handler(struct notifier_block *this,
5369+ unsigned long code,
5370+ void *unused)
5371+{
5372+ static int reboot_event_handled = 0;
5373+
5374+ if ((watchdog_user) && (!reboot_event_handled)) {
5375+ /* Make sure we only do this once. */
5376+ reboot_event_handled = 1;
5377+
5378+ if (code == SYS_DOWN || code == SYS_HALT) {
5379+ /* Disable the WDT if we are shutting down. */
5380+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
5381+ panic_halt_ipmi_set_timeout();
5382+ } else {
5383+			/* Set a long timer to let the reboot happen, but
5384+ reboot if it hangs. */
5385+ timeout = 120;
5386+ pretimeout = 0;
5387+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
5388+ panic_halt_ipmi_set_timeout();
5389+ }
5390+ }
5391+ return NOTIFY_OK;
5392+}
5393+
5394+static struct notifier_block wdog_reboot_notifier = {
5395+ wdog_reboot_handler,
5396+ NULL,
5397+ 0
5398+};
5399+
5400+extern int panic_timeout; /* Why isn't this defined anywhere? */
5401+
5402+static int wdog_panic_handler(struct notifier_block *this,
5403+ unsigned long event,
5404+ void *unused)
5405+{
5406+ static int panic_event_handled = 0;
5407+
5408+ /* On a panic, if we have a panic timeout, make sure that the thing
5409+ reboots, even if it hangs during that panic. */
5410+ if (watchdog_user && !panic_event_handled && (panic_timeout > 0)) {
5411+ /* Make sure the panic doesn't hang, and make sure we
5412+ do this only once. */
5413+ panic_event_handled = 1;
5414+
5415+ timeout = panic_timeout + 120;
5416+ if (timeout > 255)
5417+ timeout = 255;
5418+ pretimeout = 0;
5419+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
5420+ panic_halt_ipmi_set_timeout();
5421+ }
5422+
5423+ return NOTIFY_OK;
5424+}
5425+
5426+static struct notifier_block wdog_panic_notifier = {
5427+ wdog_panic_handler,
5428+ NULL,
5429+ 150 /* priority: INT_MAX >= x >= 0 */
5430+};
5431+
5432+
5433+static void ipmi_new_smi(int if_num)
5434+{
5435+ ipmi_register_watchdog(if_num);
5436+}
5437+
5438+static void ipmi_smi_gone(int if_num)
5439+{
5440+ /* This can never be called, because once the watchdog is
5441+ registered, the interface can't go away until the watchdog
5442+ is unregistered. */
5443+}
5444+
5445+static struct ipmi_smi_watcher smi_watcher =
5446+{
5447+ .new_smi = ipmi_new_smi,
5448+ .smi_gone = ipmi_smi_gone
5449+};
5450+
5451+static int __init ipmi_wdog_init(void)
5452+{
5453+ int rv;
5454+
5455+ if (strcmp(action, "reset") == 0) {
5456+ action_val = WDOG_TIMEOUT_RESET;
5457+ } else if (strcmp(action, "none") == 0) {
5458+ action_val = WDOG_TIMEOUT_NONE;
5459+ } else if (strcmp(action, "power_cycle") == 0) {
5460+ action_val = WDOG_TIMEOUT_POWER_CYCLE;
5461+ } else if (strcmp(action, "power_off") == 0) {
5462+ action_val = WDOG_TIMEOUT_POWER_DOWN;
5463+ } else {
5464+ action_val = WDOG_TIMEOUT_RESET;
5465+ printk("ipmi_watchdog: Unknown action '%s', defaulting to"
5466+ " reset\n", action);
5467+ }
5468+
5469+ if (strcmp(preaction, "pre_none") == 0) {
5470+ preaction_val = WDOG_PRETIMEOUT_NONE;
5471+ } else if (strcmp(preaction, "pre_smi") == 0) {
5472+ preaction_val = WDOG_PRETIMEOUT_SMI;
5473+#ifdef HAVE_NMI_HANDLER
5474+ } else if (strcmp(preaction, "pre_nmi") == 0) {
5475+ preaction_val = WDOG_PRETIMEOUT_NMI;
5476+#endif
5477+ } else if (strcmp(preaction, "pre_int") == 0) {
5478+ preaction_val = WDOG_PRETIMEOUT_MSG_INT;
5479+ } else {
5480+		preaction_val = WDOG_PRETIMEOUT_NONE;
5481+ printk("ipmi_watchdog: Unknown preaction '%s', defaulting to"
5482+ " none\n", preaction);
5483+ }
5484+
5485+ if (strcmp(preop, "preop_none") == 0) {
5486+ preop_val = WDOG_PREOP_NONE;
5487+ } else if (strcmp(preop, "preop_panic") == 0) {
5488+ preop_val = WDOG_PREOP_PANIC;
5489+ } else if (strcmp(preop, "preop_give_data") == 0) {
5490+ preop_val = WDOG_PREOP_GIVE_DATA;
5491+ } else {
5492+		preop_val = WDOG_PREOP_NONE;
5493+ printk("ipmi_watchdog: Unknown preop '%s', defaulting to"
5494+ " none\n", preop);
5495+ }
5496+
5497+#ifdef HAVE_NMI_HANDLER
5498+ if (preaction_val == WDOG_PRETIMEOUT_NMI) {
5499+ if (preop_val == WDOG_PREOP_GIVE_DATA) {
5500+ printk(KERN_WARNING
5501+ "ipmi_watchdog: Pretimeout op is to give data"
5502+ " but NMI pretimeout is enabled, setting"
5503+ " pretimeout op to none\n");
5504+ preop_val = WDOG_PREOP_NONE;
5505+ }
5506+#ifdef CONFIG_X86_LOCAL_APIC
5507+ if (nmi_watchdog == NMI_IO_APIC) {
5508+ printk(KERN_WARNING
5509+ "ipmi_watchdog: nmi_watchdog is set to IO APIC"
5510+ " mode (value is %d), that is incompatible"
5511+ " with using NMI in the IPMI watchdog."
5512+ " Disabling IPMI nmi pretimeout.\n",
5513+ nmi_watchdog);
5514+ preaction_val = WDOG_PRETIMEOUT_NONE;
5515+ } else {
5516+#endif
5517+ rv = request_nmi(&ipmi_nmi_handler);
5518+ if (rv) {
5519+ printk(KERN_WARNING
5520+ "ipmi_watchdog: Can't register nmi handler\n");
5521+ return rv;
5522+ }
5523+#ifdef CONFIG_X86_LOCAL_APIC
5524+ }
5525+#endif
5526+ }
5527+#endif
5528+
5529+ rv = ipmi_smi_watcher_register(&smi_watcher);
5530+ if (rv) {
5531+#ifdef HAVE_NMI_HANDLER
5532+ if (preaction_val == WDOG_PRETIMEOUT_NMI)
5533+ release_nmi(&ipmi_nmi_handler);
5534+#endif
5535+ printk(KERN_WARNING
5536+ "ipmi_watchdog: can't register smi watcher\n");
5537+ return rv;
5538+ }
5539+
5540+ register_reboot_notifier(&wdog_reboot_notifier);
5541+ notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
5542+
5543+ printk(KERN_INFO "IPMI watchdog by "
5544+ "Corey Minyard (minyard@mvista.com)\n");
5545+
5546+ return 0;
5547+}
5548+
5549+#ifdef MODULE
5550+static void ipmi_unregister_watchdog(void)
5551+{
5552+ int rv;
5553+
5554+ down_write(&register_sem);
5555+
5556+#ifdef HAVE_NMI_HANDLER
5557+ if (preaction_val == WDOG_PRETIMEOUT_NMI)
5558+ release_nmi(&ipmi_nmi_handler);
5559+#endif
5560+
5561+ notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
5562+ unregister_reboot_notifier(&wdog_reboot_notifier);
5563+
5564+ if (! watchdog_user)
5565+ goto out;
5566+
5567+ /* Make sure no one can call us any more. */
5568+ misc_deregister(&ipmi_wdog_miscdev);
5569+
5570+ /* Disable the timer. */
5571+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
5572+ ipmi_set_timeout();
5573+
5574+ /* Wait to make sure the message makes it out. The lower layer has
5575+	   pointers to our buffers, so we want to make sure they are done before
5576+ we release our memory. */
5577+ while (atomic_read(&set_timeout_tofree)) {
5578+ schedule_timeout(1);
5579+ }
5580+
5581+ /* Disconnect from IPMI. */
5582+ rv = ipmi_destroy_user(watchdog_user);
5583+ if (rv) {
5584+ printk(KERN_WARNING
5585+ "IPMI Watchdog, error unlinking from IPMI: %d\n",
5586+ rv);
5587+ }
5588+ watchdog_user = NULL;
5589+
5590+ out:
5591+ up_write(&register_sem);
5592+}
5593+
5594+static void __exit ipmi_wdog_exit(void)
5595+{
5596+ ipmi_smi_watcher_unregister(&smi_watcher);
5597+ ipmi_unregister_watchdog();
5598+}
5599+module_exit(ipmi_wdog_exit);
5600+#else
5601+static int __init ipmi_wdog_setup(char *str)
5602+{
5603+ int val;
5604+ int rv;
5605+ char *option;
5606+
5607+ rv = get_option(&str, &val);
5608+ if (rv == 0)
5609+ return 1;
5610+ if (val > 0)
5611+ timeout = val;
5612+ if (rv == 1)
5613+ return 1;
5614+
5615+ rv = get_option(&str, &val);
5616+ if (rv == 0)
5617+ return 1;
5618+ if (val >= 0)
5619+ pretimeout = val;
5620+ if (rv == 1)
5621+ return 1;
5622+
5623+ while ((option = strsep(&str, ",")) != NULL) {
5624+ if (strcmp(option, "reset") == 0) {
5625+ action = "reset";
5626+ }
5627+ else if (strcmp(option, "none") == 0) {
5628+ action = "none";
5629+ }
5630+ else if (strcmp(option, "power_cycle") == 0) {
5631+ action = "power_cycle";
5632+ }
5633+ else if (strcmp(option, "power_off") == 0) {
5634+ action = "power_off";
5635+ }
5636+ else if (strcmp(option, "pre_none") == 0) {
5637+ preaction = "pre_none";
5638+ }
5639+ else if (strcmp(option, "pre_smi") == 0) {
5640+ preaction = "pre_smi";
5641+ }
5642+#ifdef HAVE_NMI_HANDLER
5643+ else if (strcmp(option, "pre_nmi") == 0) {
5644+ preaction = "pre_nmi";
5645+ }
5646+#endif
5647+ else if (strcmp(option, "pre_int") == 0) {
5648+ preaction = "pre_int";
5649+ }
5650+ else if (strcmp(option, "start_now") == 0) {
5651+ start_now = 1;
5652+ }
5653+ else if (strcmp(option, "preop_none") == 0) {
5654+ preop = "preop_none";
5655+ }
5656+ else if (strcmp(option, "preop_panic") == 0) {
5657+ preop = "preop_panic";
5658+ }
5659+		else if (strcmp(option, "preop_give_data") == 0) {
5660+ preop = "preop_give_data";
5661+ } else {
5662+ printk("Unknown IPMI watchdog option: '%s'\n", option);
5663+ }
5664+ }
5665+
5666+ return 1;
5667+}
5668+__setup("ipmi_wdog=", ipmi_wdog_setup);
5669+#endif
5670+
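When the watchdog is built into the kernel instead of as a module, the same settings come from the ipmi_wdog= boot parameter parsed above, e.g. ipmi_wdog=10,2,reset,pre_smi,preop_give_data,start_now for a 10 second timeout, a 2 second pretimeout and the listed options.
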
5671+EXPORT_SYMBOL_GPL(ipmi_delayed_shutdown);
5672+
5673+module_init(ipmi_wdog_init);
5674+MODULE_LICENSE("GPL");
5675diff -urNp linux-5010/drivers/char/ipmi/Makefile linux-5020/drivers/char/ipmi/Makefile
5676--- linux-5010/drivers/char/ipmi/Makefile 1970-01-01 01:00:00.000000000 +0100
5677+++ linux-5020/drivers/char/ipmi/Makefile
5678@@ -0,0 +1,20 @@
5679+#
5680+# Makefile for the ipmi drivers.
5681+#
5682+
5683+O_TARGET := ipmi.o
5684+
5685+export-objs := ipmi_msghandler.o ipmi_watchdog.o
5686+
5687+list-multi := ipmi_kcs_drv.o
5688+ipmi_kcs_drv-objs := ipmi_kcs_sm.o ipmi_kcs_intf.o
5689+
5690+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
5691+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
5692+obj-$(CONFIG_IPMI_KCS) += ipmi_kcs_drv.o
5693+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
5694+
5695+include $(TOPDIR)/Rules.make
5696+
5697+ipmi_kcs_drv.o: $(ipmi_kcs_drv-objs)
5698+ $(LD) -r -o $@ $(ipmi_kcs_drv-objs)
5699diff -urNp linux-5010/drivers/char/Makefile linux-5020/drivers/char/Makefile
5700--- linux-5010/drivers/char/Makefile
5701+++ linux-5020/drivers/char/Makefile
5702@@ -303,6 +303,11 @@ ifeq ($(CONFIG_MWAVE),y)
5703 obj-y += mwave/mwave.o
5704 endif
5705
5706+subdir-$(CONFIG_IPMI_HANDLER) += ipmi
5707+ifeq ($(CONFIG_IPMI_HANDLER),y)
5708+ obj-y += ipmi/ipmi.o
5709+endif
5710+
5711 include $(TOPDIR)/Rules.make
5712
5713 fastdep:
5714diff -urNp linux-5010/include/linux/ipmi.h linux-5020/include/linux/ipmi.h
5715--- linux-5010/include/linux/ipmi.h 1970-01-01 01:00:00.000000000 +0100
5716+++ linux-5020/include/linux/ipmi.h
5717@@ -0,0 +1,516 @@
5718+/*
5719+ * ipmi.h
5720+ *
5721+ * MontaVista IPMI interface
5722+ *
5723+ * Author: MontaVista Software, Inc.
5724+ * Corey Minyard <minyard@mvista.com>
5725+ * source@mvista.com
5726+ *
5727+ * Copyright 2002 MontaVista Software Inc.
5728+ *
5729+ * This program is free software; you can redistribute it and/or modify it
5730+ * under the terms of the GNU General Public License as published by the
5731+ * Free Software Foundation; either version 2 of the License, or (at your
5732+ * option) any later version.
5733+ *
5734+ *
5735+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
5736+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
5737+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5738+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5739+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
5740+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
5741+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5742+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
5743+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
5744+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5745+ *
5746+ * You should have received a copy of the GNU General Public License along
5747+ * with this program; if not, write to the Free Software Foundation, Inc.,
5748+ * 675 Mass Ave, Cambridge, MA 02139, USA.
5749+ */
5750+
5751+#ifndef __LINUX_IPMI_H
5752+#define __LINUX_IPMI_H
5753+
5754+#include <linux/ipmi_msgdefs.h>
5755+
5756+/*
5757+ * This file describes an interface to an IPMI driver. You have to
5758+ * have a fairly good understanding of IPMI to use this, so go read
5759+ * the specs first before actually trying to do anything.
5760+ *
5761+ * With that said, this driver provides a multi-user interface to the
5762+ * IPMI driver, and it allows multiple IPMI physical interfaces below
5763+ * the driver. The physical interfaces bind as a lower layer on the
5764+ * driver. They appear as interfaces to the application using this
5765+ * interface.
5766+ *
5767+ * Multi-user means that multiple applications may use the driver,
5768+ * send commands, receive responses, etc. The driver keeps track of
5769+ * commands the user sends and tracks the responses. The responses
5770+ * will go back to the application that sent the command. If the
5771+ * response doesn't come back in time, the driver will return a
5772+ * timeout error response to the application. Asynchronous events
5773+ * from the BMC event queue will go to all users bound to the driver.
5774+ * The incoming event queue in the BMC will automatically be flushed
5775+ * if it becomes full and it is queried once a second to see if
5776+ * anything is in it. Incoming commands to the driver will get
5777+ * delivered as commands.
5778+ *
5779+ * This driver provides two main interfaces: one for in-kernel
5780+ * applications and another for userland applications. The
5781+ * capabilities are basically the same for both interfaces, although
5782+ * the interfaces are somewhat different. The stuff in the
5783+ * #ifdef __KERNEL__ below is the in-kernel interface. The userland
5784+ * interface is defined later in the file. */
5785+
5786+
5787+
5788+/*
5789+ * This is an overlay for all the address types, so it's easy to
5790+ * determine the actual address type. This is kind of like addresses
5791+ * work for sockets.
5792+ */
5793+#define IPMI_MAX_ADDR_SIZE 32
5794+struct ipmi_addr
5795+{
5796+ /* Try to take these from the "Channel Medium Type" table
5797+ in section 6.5 of the IPMI 1.5 manual. */
5798+ int addr_type;
5799+ short channel;
5800+ char data[IPMI_MAX_ADDR_SIZE];
5801+};
5802+
5803+/*
5804+ * When the address is not used, the type will be set to this value.
5805+ * The channel is the BMC's channel number for the channel (usually
5806+ * 0), or IPMI_BMC_CHANNEL if communicating directly with the BMC.
5807+ */
5808+#define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c
5809+struct ipmi_system_interface_addr
5810+{
5811+ int addr_type;
5812+ short channel;
5813+ unsigned char lun;
5814+};
5815+
5816+/* An IPMB Address. */
5817+#define IPMI_IPMB_ADDR_TYPE 0x01
5818+/* Used for broadcast get device id as described in section 17.9 of the
5819+ IPMI 1.5 manual. */
5820+#define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41
5821+struct ipmi_ipmb_addr
5822+{
5823+ int addr_type;
5824+ short channel;
5825+ unsigned char slave_addr;
5826+ unsigned char lun;
5827+};
5828+
5829+
5830+/*
5831+ * Channel for talking directly with the BMC. This channel is for
5832+ * use with the system interface address type only. FIXME
5833+ * - is this right, or should we use -1?
5834+ */
5835+#define IPMI_BMC_CHANNEL 0xf
5836+#define IPMI_NUM_CHANNELS 0x10
5837+
5838+
5839+/*
5840+ * A raw IPMI message without any addressing. This covers both
5841+ * commands and responses. The completion code is always the first
5842+ * byte of data in the response (as the spec shows the messages laid
5843+ * out).
5844+ */
5845+struct ipmi_msg
5846+{
5847+ unsigned char netfn;
5848+ unsigned char cmd;
5849+ unsigned short data_len;
5850+ unsigned char *data;
5851+};
5852+
5853+/*
5854+ * Various defines that are useful for IPMI applications.
5855+ */
5856+#define IPMI_INVALID_CMD_COMPLETION_CODE 0xC1
5857+#define IPMI_TIMEOUT_COMPLETION_CODE 0xC3
5858+#define IPMI_UNKNOWN_ERR_COMPLETION_CODE 0xff
5859+
5860+
5861+/*
5862+ * Receive types for messages coming from the receive interface. This
5863+ * is used for the receive in-kernel interface and in the receive
5864+ * IOCTL.
5865+ */
5866+#define IPMI_RESPONSE_RECV_TYPE 1 /* A response to a command */
5867+#define IPMI_ASYNC_EVENT_RECV_TYPE 2 /* Something from the event queue */
5868+#define IPMI_CMD_RECV_TYPE 3 /* A command from somewhere else */
5869+/* Note that async events and received commands do not have a completion
5870+ code as the first byte of the incoming data, unlike a response. */
5871+
5872+
5873+
5874+#ifdef __KERNEL__
5875+
5876+/*
5877+ * The in-kernel interface.
5878+ */
5879+#include <linux/list.h>
5880+
5881+/* Opaque type for a IPMI message user. One of these is needed to
5882+ send and receive messages. */
5883+typedef struct ipmi_user *ipmi_user_t;
5884+
5885+/*
5886+ * Stuff coming from the receive interface comes as one of these.
5887+ * They are allocated; the receiver must free them with
5888+ * ipmi_free_recv_msg() when done with the message. The link is not
5889+ * used after the message is delivered, so the upper layer may use the
5890+ * link to build a linked list, if it likes.
5891+ */
5892+struct ipmi_recv_msg
5893+{
5894+ struct list_head link;
5895+
5896+ /* The type of message as defined in the "Receive Types"
5897+ defines above. */
5898+ int recv_type;
5899+
5900+ ipmi_user_t user;
5901+ struct ipmi_addr addr;
5902+ long msgid;
5903+ struct ipmi_msg msg;
5904+
5905+ /* Call this when done with the message. It will presumably free
5906+ the message and do any other necessary cleanup. */
5907+ void (*done)(struct ipmi_recv_msg *msg);
5908+
5909+ /* Place-holder for the data, don't make any assumptions about
5910+ the size or existence of this, since it may change. */
5911+ unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
5912+};
5913+
5914+/* Allocate and free the receive message. */
5915+static inline void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
5916+{
5917+ msg->done(msg);
5918+}
5919+struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
5920+
5921+struct ipmi_user_hndl
5922+{
5923+ /* Routine type to call when a message needs to be routed to
5924+ the upper layer. This will be called with some locks held,
5925+ the only IPMI routines that can be called are ipmi_request
5926+ and the alloc/free operations. */
5927+ void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg,
5928+ void *handler_data);
5929+
5930+ /* Called when the interface detects a watchdog pre-timeout. If
5931+ this is NULL, it will be ignored for the user. */
5932+ void (*ipmi_watchdog_pretimeout)(void *handler_data);
5933+};
5934+
5935+/* Create a new user of the IPMI layer on the given interface number. */
5936+int ipmi_create_user(unsigned int if_num,
5937+ struct ipmi_user_hndl *handler,
5938+ void *handler_data,
5939+ ipmi_user_t *user);
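/*
 * An illustrative sketch, not part of the original header: a minimal
 * in-kernel user of the interface above.  The "my_*" names are
 * hypothetical; the watchdog and device-interface drivers added by this
 * patch are the real in-tree users.
 */
static void my_recv_handler(struct ipmi_recv_msg *msg, void *handler_data)
{
	/* Inspect msg->recv_type, msg->msgid and msg->msg here, then
	   hand the message back to the driver. */
	ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl my_hndl = {
	my_recv_handler,	/* ipmi_recv_hndl */
	NULL			/* ipmi_watchdog_pretimeout (optional) */
};

static ipmi_user_t my_user;

static int my_attach(void)
{
	/* Bind to interface number 0; this fails if no SMI is registered. */
	return ipmi_create_user(0, &my_hndl, NULL, &my_user);
}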
5940+
5941+/* Destroy the given user of the IPMI layer. */
5942+int ipmi_destroy_user(ipmi_user_t user);
5943+
5944+/* Get the IPMI version of the BMC we are talking to. */
5945+void ipmi_get_version(ipmi_user_t user,
5946+ unsigned char *major,
5947+ unsigned char *minor);
5948+
5949+/* Set and get the slave address and LUN that we will use for our
5950+ source messages. Note that this affects the interface, not just
5951+ this user, so it will affect all users of this interface. This is
5952+ so some initialization code can come in and do the OEM-specific
5953+ things it takes to determine your address (if not the BMC) and set
5954+ it for everyone else. */
5955+void ipmi_set_my_address(ipmi_user_t user,
5956+ unsigned char address);
5957+unsigned char ipmi_get_my_address(ipmi_user_t user);
5958+void ipmi_set_my_LUN(ipmi_user_t user,
5959+ unsigned char LUN);
5960+unsigned char ipmi_get_my_LUN(ipmi_user_t user);
5961+
5962+/*
5963+ * Send a command request from the given user. The address is the
5964+ * proper address for the channel type. If this is a command, then
5965+ * when the response comes back, the receive handler for this user
5966+ * will be called with the given msgid value in the recv msg. If this
5967+ * is a response to a command, then the msgid will be used as the
5968+ * sequence number for the response (truncated if necessary), so when
5969+ * sending a response you should use the sequence number you received
5970+ * in the msgid field of the received command. If the priority is >
5971+ * 0, the message will go into a high-priority queue and be sent
5972+ * first. Otherwise, it goes into a normal-priority queue.
5973+ */
5974+int ipmi_request(ipmi_user_t user,
5975+ struct ipmi_addr *addr,
5976+ long msgid,
5977+ struct ipmi_msg *msg,
5978+ int priority);
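/*
 * An illustrative sketch, not part of the original header: sending a
 * Get Device ID request to the BMC with ipmi_request().  "my_user" is
 * assumed to have been created with ipmi_create_user(); the response
 * comes back later through that user's ipmi_recv_hndl with msgid 0x1234.
 */
static void my_send_get_device_id(ipmi_user_t my_user)
{
	struct ipmi_system_interface_addr addr;
	struct ipmi_msg                   msg;

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel   = IPMI_BMC_CHANNEL;
	addr.lun       = 0;

	msg.netfn    = IPMI_NETFN_APP_REQUEST;
	msg.cmd      = IPMI_GET_DEVICE_ID_CMD;
	msg.data     = NULL;
	msg.data_len = 0;

	/* Normal priority; a non-zero return means the request was
	   rejected (bad address, no memory, etc.). */
	ipmi_request(my_user, (struct ipmi_addr *) &addr, 0x1234, &msg, 0);
}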
5979+
5980+/*
5981+ * Like ipmi_request, but lets you specify the slave return address.
5982+ */
5983+int ipmi_request_with_source(ipmi_user_t user,
5984+ struct ipmi_addr *addr,
5985+ long msgid,
5986+ struct ipmi_msg *msg,
5987+ int priority,
5988+ unsigned char source_address,
5989+ unsigned char source_lun);
5990+
5991+/*
5992+ * Like ipmi_request, but with messages supplied. This will not
5993+ * allocate any memory, and the messages may be statically allocated
5994+ * (just make sure to do the "done" handling on them). Note that this
5995+ * is primarily for the watchdog timer, since it should be able to
5996+ * send messages even if no memory is available. This is subject to
5997+ * change as the system changes, so don't use it unless you REALLY
5998+ * have to.
5999+ */
6000+int ipmi_request_supply_msgs(ipmi_user_t user,
6001+ struct ipmi_addr *addr,
6002+ long msgid,
6003+ struct ipmi_msg *msg,
6004+ void *supplied_smi,
6005+ struct ipmi_recv_msg *supplied_recv,
6006+ int priority);
6007+
6008+/*
6009+ * When commands come in to the SMS, the user can register to receive
6010+ * them. Only one user can be listening on a specific netfn/cmd pair
6011+ * at a time; you will get an EBUSY error if the command is already
6012+ * registered. If a command is received that does not have a user
6013+ * registered, the driver will automatically return the proper
6014+ * error.
6015+ */
6016+int ipmi_register_for_cmd(ipmi_user_t user,
6017+ unsigned char netfn,
6018+ unsigned char cmd);
6019+int ipmi_unregister_for_cmd(ipmi_user_t user,
6020+ unsigned char netfn,
6021+ unsigned char cmd);
6022+
6023+/*
6024+ * When the user is created, it will not receive IPMI events by
6025+ * default. The user must set this to TRUE to get incoming events.
6026+ * The first user that sets this to TRUE will receive all events that
6027+ * have been queued while no one was waiting for events.
6028+ */
6029+int ipmi_set_gets_events(ipmi_user_t user, int val);
6030+
6031+/*
6032+ * Register the given user to handle all received IPMI commands. This
6033+ * will fail if anyone is registered as a command receiver or if
6034+ * another is already registered to receive all commands. NOTE THAT
6035+ * THIS IS FOR EMULATION USERS ONLY, DO NOT USE THIS FOR NORMAL
6036+ * STUFF.
6037+ */
6038+int ipmi_register_all_cmd_rcvr(ipmi_user_t user);
6039+int ipmi_unregister_all_cmd_rcvr(ipmi_user_t user);
6040+
6041+
6042+/*
6043+ * Called when a new SMI is registered. This will also be called on
6044+ * every existing interface when a new watcher is registered with
6045+ * ipmi_smi_watcher_register().
6046+ */
6047+struct ipmi_smi_watcher
6048+{
6049+ struct list_head link;
6050+
6051+ /* These two are called with read locks held for the interface
6052+ and the watcher list. So you can add and remove users from the
6053+ IPMI interface, send messages, etc., but you cannot add
6054+ or remove SMI watchers or SMI interfaces. */
6055+ void (*new_smi)(int if_num);
6056+ void (*smi_gone)(int if_num);
6057+};
6058+
6059+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher);
6060+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher);
6061+
6062+/* The following are various helper functions for dealing with IPMI
6063+ addresses. */
6064+
6065+/* Return the maximum length of an IPMI address given its type. */
6066+unsigned int ipmi_addr_length(int addr_type);
6067+
6068+/* Check that the given IPMI address is valid. */
6069+int ipmi_validate_addr(struct ipmi_addr *addr, int len);
6070+
6071+/* Return 1 if the given addresses are equal, 0 if not. */
6072+int ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2);
6073+
6074+#endif /* __KERNEL__ */
6075+
6076+
6077+/*
6078+ * The userland interface
6079+ */
6080+
6081+/*
6082+ * The userland interface for the IPMI driver is a standard character
6083+ * device, with each instance of an interface registered as a minor
6084+ * number under the major character device.
6085+ *
6086+ * The read and write calls do not work; getting messages in and out
6087+ * requires ioctl calls because of the complexity of the data. select
6088+ * and poll do work, so you can wait for input using the file
6089+ * descriptor, you just can't use read to get it.
6090+ *
6091+ * In general, you send a command down to the interface and receive
6092+ * responses back. You can use the msgid value to correlate commands
6093+ * and responses, the driver will take care of figuring out which
6094+ * incoming messages are for which command and find the proper msgid
6095+ * value to report. You will only receive responses for commands you
6096+ * send. Asynchronous events, however, go to all open users, so you
6097+ * must be ready to handle these (or ignore them if you don't care).
6098+ *
6099+ * The address type depends upon the channel type. When talking
6100+ * directly to the BMC (IPMI_BMC_CHANNEL), the address is ignored
6101+ * (IPMI_UNUSED_ADDR_TYPE). When talking to an IPMB channel, you must
6102+ * supply a valid IPMB address with the addr_type set properly.
6103+ *
6104+ * When talking to normal channels, the driver takes care of the
6105+ * details of formatting and sending messages on that channel. You do
6106+ * not, for instance, have to format a send command; you just send
6107+ * whatever command you want to the channel. The driver will create
6108+ * the send command, automatically issue receive and get event
6109+ * commands, and pass those up to the proper user.
6110+ */
6111+
6112+
6113+/* The magic IOCTL value for this interface. */
6114+#define IPMI_IOC_MAGIC 'i'
6115+
6116+
6117+/* Messages sent to the interface are this format. */
6118+struct ipmi_req
6119+{
6120+ unsigned char *addr; /* Address to send the message to. */
6121+ unsigned int addr_len;
6122+
6123+ long msgid; /* The sequence number for the message. This
6124+ exact value will be reported back in the
6125+ response to this request if it is a command.
6126+ If it is a response, this will be used as
6127+ the sequence value for the response. */
6128+
6129+ struct ipmi_msg msg;
6130+};
6131+/*
6132+ * Send a message to the interface. Error values are:
6133+ * - EFAULT - an address supplied was invalid.
6134+ * - EINVAL - The address supplied was not valid, or the command
6135+ * was not allowed.
6136+ * - EMSGSIZE - The message was too large.
6137+ * - ENOMEM - Buffers could not be allocated for the command.
6138+ */
6139+#define IPMICTL_SEND_COMMAND _IOR(IPMI_IOC_MAGIC, 13, \
6140+ struct ipmi_req)
6141+
6142+/* Messages received from the interface are this format. */
6143+struct ipmi_recv
6144+{
6145+ int recv_type; /* Is this a command, response or an
6146+ asynchronous event. */
6147+
6148+ unsigned char *addr; /* Address the message was from is put
6149+ here. The caller must supply the
6150+ memory. */
6151+ unsigned int addr_len; /* The size of the address buffer.
6152+ The caller supplies the full buffer
6153+ length, this value is updated to
6154+ the actual message length when the
6155+ message is received. */
6156+
6157+ long msgid; /* The sequence number specified in the request
6158+ if this is a response. If this is a command,
6159+ this will be the sequence number from the
6160+ command. */
6161+
6162+ struct ipmi_msg msg; /* The data field must point to a buffer.
6163+ The data_size field must be set to the
6164+ size of the message buffer. The
6165+ caller supplies the full buffer
6166+ length, this value is updated to the
6167+ actual message length when the message
6168+ is received. */
6169+};
6170+
6171+/*
6172+ * Receive a message. Error values:
6173+ * - EAGAIN - no messages in the queue.
6174+ * - EFAULT - an address supplied was invalid.
6175+ * - EINVAL - The address supplied was not valid.
6176+ * - EMSGSIZE - The message was too large to fit into the message buffer;
6177+ * the message will be left in the buffer. */
6178+#define IPMICTL_RECEIVE_MSG _IOWR(IPMI_IOC_MAGIC, 12, \
6179+ struct ipmi_recv)
6180+
6181+/*
6182+ * Like RECEIVE_MSG, but if the message won't fit in the buffer, it
6183+ * will truncate the contents instead of leaving the data in the
6184+ * buffer.
6185+ */
6186+#define IPMICTL_RECEIVE_MSG_TRUNC _IOWR(IPMI_IOC_MAGIC, 11, \
6187+ struct ipmi_recv)
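/*
 * An illustrative sketch, not part of the original header: a userland
 * round trip using the two ioctls above.  The device node name
 * ("/dev/ipmi0") is an assumption that depends on how the device
 * interface's misc minor is set up on a given system, and error
 * handling is trimmed for brevity.
 */
#include <sys/ioctl.h>
#include <sys/select.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/ipmi.h>

int example(void)
{
	struct ipmi_system_interface_addr si = {
		IPMI_SYSTEM_INTERFACE_ADDR_TYPE, IPMI_BMC_CHANNEL, 0 };
	unsigned char rsp[IPMI_MAX_MSG_LENGTH];
	struct ipmi_addr raddr;
	struct ipmi_req  req;
	struct ipmi_recv recv;
	fd_set fds;
	int fd = open("/dev/ipmi0", O_RDWR);

	/* Send a Get Device ID command to the BMC. */
	req.addr = (unsigned char *) &si;
	req.addr_len = sizeof(si);
	req.msgid = 1;
	req.msg.netfn = IPMI_NETFN_APP_REQUEST;
	req.msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	req.msg.data = NULL;
	req.msg.data_len = 0;
	ioctl(fd, IPMICTL_SEND_COMMAND, &req);

	/* Wait for the response with select(), then fetch it. */
	FD_ZERO(&fds);
	FD_SET(fd, &fds);
	select(fd + 1, &fds, NULL, NULL, NULL);

	recv.addr = (unsigned char *) &raddr;
	recv.addr_len = sizeof(raddr);
	recv.msg.data = rsp;
	recv.msg.data_len = sizeof(rsp);
	ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);

	/* rsp[0] is the completion code of the response. */
	close(fd);
	return rsp[0];
}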
6188+
6189+/* Register to get commands from other entities on this interface. */
6190+struct ipmi_cmdspec
6191+{
6192+ unsigned char netfn;
6193+ unsigned char cmd;
6194+};
6195+
6196+/*
6197+ * Register to receive a specific command. Error values:
6198+ * - EFAULT - an address supplied was invalid.
6199+ * - EBUSY - The netfn/cmd supplied was already in use.
6200+ * - ENOMEM - could not allocate memory for the entry.
6201+ */
6202+#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \
6203+ struct ipmi_cmdspec)
6204+/*
6205+ * Unregister a registered command. Error values:
6206+ * - EFAULT - an address supplied was invalid.
6207+ * - ENOENT - The netfn/cmd was not found registered for this user.
6208+ */
6209+#define IPMICTL_UNREGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 15, \
6210+ struct ipmi_cmdspec)
6211+
6212+/*
6213+ * Set whether this interface receives events. Note that the first
6214+ * user registered for events will get all pending events for the
6215+ * interface. Error values:
6216+ * - EFAULT - an address supplied was invalid.
6217+ */
6218+#define IPMICTL_SET_GETS_EVENTS_CMD _IOR(IPMI_IOC_MAGIC, 16, int)
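/*
 * An illustrative note, not part of the original header: a process that
 * wants asynchronous events must opt in explicitly, for example
 *
 *	int val = 1;
 *	ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &val);
 *
 * after which events are returned by IPMICTL_RECEIVE_MSG with
 * recv_type set to IPMI_ASYNC_EVENT_RECV_TYPE.
 */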
6219+
6220+/*
6221+ * Set and get the slave address and LUN that we will use for our
6222+ * source messages. Note that this affects the interface, not just
6223+ * this user, so it will affect all users of this interface. This is
6224+ * so some initialization code can come in and do the OEM-specific
6225+ * things it takes to determine your address (if not the BMC) and set
6226+ * it for everyone else. You should probably leave the LUN alone.
6227+ */
6228+#define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int)
6229+#define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int)
6230+#define IPMICTL_SET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 19, unsigned int)
6231+#define IPMICTL_GET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 20, unsigned int)
6232+
6233+#endif /* __LINUX_IPMI_H */
6234diff -urNp linux-5010/include/linux/ipmi_msgdefs.h linux-5020/include/linux/ipmi_msgdefs.h
6235--- linux-5010/include/linux/ipmi_msgdefs.h 1970-01-01 01:00:00.000000000 +0100
6236+++ linux-5020/include/linux/ipmi_msgdefs.h
6237@@ -0,0 +1,58 @@
6238+/*
6239+ * ipmi_msgdefs.h
6240+ *
6241+ * MontaVista IPMI system management interface
6242+ *
6243+ * Author: MontaVista Software, Inc.
6244+ * Corey Minyard <minyard@mvista.com>
6245+ * source@mvista.com
6246+ *
6247+ * Copyright 2002 MontaVista Software Inc.
6248+ *
6249+ * This program is free software; you can redistribute it and/or modify it
6250+ * under the terms of the GNU General Public License as published by the
6251+ * Free Software Foundation; either version 2 of the License, or (at your
6252+ * option) any later version.
6253+ *
6254+ *
6255+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
6256+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
6257+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
6258+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
6259+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
6260+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
6261+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6262+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
6263+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
6264+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6265+ *
6266+ * You should have received a copy of the GNU General Public License along
6267+ * with this program; if not, write to the Free Software Foundation, Inc.,
6268+ * 675 Mass Ave, Cambridge, MA 02139, USA.
6269+ */
6270+
6271+#ifndef __LINUX_IPMI_MSGDEFS_H
6272+#define __LINUX_IPMI_MSGDEFS_H
6273+
6274+/* Various definitions for IPMI messages used by almost everything in
6275+ the IPMI stack. */
6276+
6277+#define IPMI_NETFN_APP_REQUEST 0x06
6278+#define IPMI_NETFN_APP_RESPONSE 0x07
6279+
6280+#define IPMI_BMC_SLAVE_ADDR 0x20
6281+
6282+#define IPMI_GET_DEVICE_ID_CMD 0x01
6283+
6284+#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
6285+#define IPMI_GET_MSG_FLAGS_CMD 0x31
6286+#define IPMI_SEND_MSG_CMD 0x34
6287+#define IPMI_GET_MSG_CMD 0x33
6288+
6289+#define IPMI_SET_BMC_GLOBAL_ENABLES_CMD 0x2e
6290+#define IPMI_GET_BMC_GLOBAL_ENABLES_CMD 0x2f
6291+#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
6292+
6293+#define IPMI_MAX_MSG_LENGTH 80
6294+
6295+#endif /* __LINUX_IPMI_MSGDEFS_H */
6296diff -urNp linux-5010/include/linux/ipmi_smi.h linux-5020/include/linux/ipmi_smi.h
6297--- linux-5010/include/linux/ipmi_smi.h 1970-01-01 01:00:00.000000000 +0100
6298+++ linux-5020/include/linux/ipmi_smi.h
6299@@ -0,0 +1,144 @@
6300+/*
6301+ * ipmi_smi.h
6302+ *
6303+ * MontaVista IPMI system management interface
6304+ *
6305+ * Author: MontaVista Software, Inc.
6306+ * Corey Minyard <minyard@mvista.com>
6307+ * source@mvista.com
6308+ *
6309+ * Copyright 2002 MontaVista Software Inc.
6310+ *
6311+ * This program is free software; you can redistribute it and/or modify it
6312+ * under the terms of the GNU General Public License as published by the
6313+ * Free Software Foundation; either version 2 of the License, or (at your
6314+ * option) any later version.
6315+ *
6316+ *
6317+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
6318+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
6319+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
6320+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
6321+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
6322+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
6323+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6324+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
6325+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
6326+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6327+ *
6328+ * You should have received a copy of the GNU General Public License along
6329+ * with this program; if not, write to the Free Software Foundation, Inc.,
6330+ * 675 Mass Ave, Cambridge, MA 02139, USA.
6331+ */
6332+
6333+#ifndef __LINUX_IPMI_SMI_H
6334+#define __LINUX_IPMI_SMI_H
6335+
6336+#include <linux/ipmi_msgdefs.h>
6337+
6338+/* This file describes the interface for IPMI system management interface
6339+ drivers to bind into the IPMI message handler. */
6340+
6341+/* Structure for the low-level drivers. */
6342+typedef struct ipmi_smi *ipmi_smi_t;
6343+
6344+/*
6345+ * Messages to/from the lower layer. The smi interface will take one
6346+ * of these to send. After the send has occurred and a response has
6347+ * been received, it will report this same data structure back up to
6348+ * the upper layer. If an error occurs, it should fill in the
6349+ * response with an error code in the completion code location. When
6350+ * asynchronous data is received, one of these is allocated, the
6351+ * data_size is set to zero and the response holds the data from the
6352+ * get message or get event command that the interface initiated.
6353+ * Note that it is the interface's responsibility to detect
6354+ * asynchronous data and messages and request them from the
6355+ * interface.
6356+ */
6357+struct ipmi_smi_msg
6358+{
6359+ struct list_head link;
6360+
6361+ long msgid;
6362+ void *user_data;
6363+
6364+ /* If 0, add to the end of the queue. If 1, add to the beginning. */
6365+ int prio;
6366+
6367+ int data_size;
6368+ unsigned char data[IPMI_MAX_MSG_LENGTH];
6369+
6370+ int rsp_size;
6371+ unsigned char rsp[IPMI_MAX_MSG_LENGTH];
6372+
6373+ /* Will be called when the system is done with the message
6374+ (presumably to free it). */
6375+ void (*done)(struct ipmi_smi_msg *msg);
6376+};
6377+
6378+struct ipmi_smi_handlers
6379+{
6380+ /* Called to enqueue an SMI message to be sent. This
6381+ operation is not allowed to fail. If an error occurs, it
6382+ should report back the error in a received message. It may
6383+ do this in the current call context, since no write locks
6384+ are held when this is run. If the priority is > 0, the
6385+ message will go into a high-priority queue and be sent
6386+ first. Otherwise, it goes into a normal-priority queue. */
6387+ void (*sender)(void *send_info,
6388+ struct ipmi_smi_msg *msg,
6389+ int priority);
6390+
6391+ /* Called by the upper layer to request that we try to get
6392+ events from the BMC we are attached to. */
6393+ void (*request_events)(void *send_info);
6394+
6395+ /* Called when someone is using the interface, so the module can
6396+ adjust its use count. Return zero if successful, or an
6397+ errno if not. */
6398+ int (*new_user)(void *send_info);
6399+
6400+ /* Called when someone is no longer using the interface, so the
6401+ module can adjust its use count. */
6402+ void (*user_left)(void *send_info);
6403+
6404+ /* Called when the interface should go into "run to
6405+ completion" mode. If this call sets the value to true, the
6406+ interface should make sure that all messages are flushed
6407+ out and that none are pending, and any new requests are run
6408+ to completion immediately. */
6409+ void (*set_run_to_completion)(void *send_info, int run_to_completion);
6410+};
6411+
6412+/* Add a low-level interface to the IPMI driver. */
6413+int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
6414+ void *send_info,
6415+ unsigned char version_major,
6416+ unsigned char version_minor,
6417+ ipmi_smi_t *intf);
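/*
 * An illustrative sketch, not part of the original header: the shape of
 * a low-level driver binding itself to the message handler.  All "my_*"
 * names are hypothetical stubs; the KCS driver added by this patch is
 * the real in-tree example.
 */
static void my_sender(void *send_info, struct ipmi_smi_msg *msg, int priority)
{
	/* Queue msg for transmission.  On failure, fill in msg->rsp with
	   an error completion code and report it back up with
	   ipmi_smi_msg_received(). */
}

static void my_request_events(void *send_info) { }
static int  my_new_user(void *send_info) { return 0; }
static void my_user_left(void *send_info) { }
static void my_set_run_to_completion(void *send_info, int run_to_completion) { }

static struct ipmi_smi_handlers my_handlers = {
	my_sender,			/* sender */
	my_request_events,		/* request_events */
	my_new_user,			/* new_user */
	my_user_left,			/* user_left */
	my_set_run_to_completion	/* set_run_to_completion */
};

static ipmi_smi_t my_intf;

static int my_smi_attach(void *my_send_info)
{
	/* 1 and 5 stand for IPMI version 1.5, normally taken from the
	   BMC's Get Device ID response. */
	return ipmi_register_smi(&my_handlers, my_send_info, 1, 5, &my_intf);
}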
6418+
6419+/*
6420+ * Remove a low-level interface from the IPMI driver. This will
6421+ * return an error if the interface is still in use by a user.
6422+ */
6423+int ipmi_unregister_smi(ipmi_smi_t intf);
6424+
6425+/*
6426+ * The lower layer reports received messages through this interface.
6427+ * The data_size should be zero if this is an asynchronous message. If
6428+ * the lower layer gets an error sending a message, it should format
6429+ * an error response in the message response.
6430+ */
6431+void ipmi_smi_msg_received(ipmi_smi_t intf,
6432+ struct ipmi_smi_msg *msg);
6433+
6434+/* The lower layer received a watchdog pre-timeout on this interface. */
6435+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf);
6436+
6437+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void);
6438+static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
6439+{
6440+ msg->done(msg);
6441+}
6442+
6443+#endif /* __LINUX_IPMI_SMI_H */
6444diff -urNp linux-5010/kernel/ksyms.c linux-5020/kernel/ksyms.c
6445--- linux-5010/kernel/ksyms.c
6446+++ linux-5020/kernel/ksyms.c
6447@@ -73,6 +73,8 @@ extern struct timezone sys_tz;
6448 extern int request_dma(unsigned int dmanr, char * deviceID);
6449 extern void free_dma(unsigned int dmanr);
6450 extern spinlock_t dma_spin_lock;
6451+extern int panic_timeout;
6452+
6453
6454 #ifdef CONFIG_MODVERSIONS
6455 const struct module_symbol __export_Using_Versions
6456@@ -507,6 +509,8 @@ EXPORT_SYMBOL(nr_running);
6457
6458 /* misc */
6459 EXPORT_SYMBOL(panic);
6460+EXPORT_SYMBOL_GPL(panic_notifier_list);
6461+EXPORT_SYMBOL_GPL(panic_timeout);
6462 EXPORT_SYMBOL(__out_of_line_bug);
6463 EXPORT_SYMBOL(sprintf);
6464 EXPORT_SYMBOL(snprintf);