File: [cvs.NetBSD.org] / othersrc / nastore / volman / srvr / rc / vault / vaultrc.c (download)
Revision 1.1.1.1 (vendor branch), Mon Feb 28 02:18:33 2000 UTC (24 years, 1 month ago) by wrstuden
Branch: NAS, MAIN
CVS Tags: nastore3-beta-20000227, HEAD Changes since 1.1: +0 -0
lines
Import of snapshot of nastore3 code. Includes kernel code for dmfs, dmfs
user utilities, ms66 import and export, vvm, and volman. Also includes
makefile magic to automatically generate .tgz source files from the source.
Solaris support a bit of a question as zoularis is not working at the
moment.
|
#ifndef lint
static char rcsid[] = "$Header: /ftp/pub/NetBSD-cvs/main.mirror/othersrc/nastore/volman/srvr/rc/vault/vaultrc.c,v 1.1.1.1 2000/02/28 02:18:33 wrstuden Exp $";
#endif
/*
* vaultrc.c
*
* NAStore Volume Manager Repository Controller (RC) for tape vault
* and manual tape drives (3420 & 3480)
*
* To turn on rpc to vault db server,
* #define RPC_VERS
* char *server = "server_machine";
*
* The RC connects with the Volume Manager's Request Processor (RP)
* via socket/XDR protocol linked from rl_xdr.o and srv_xdr.o. After
* establishing a connection, the RC opens for business by identifying
* itself as ready to the RP, which may then send it requests.
*
* Mounts: the RC tests the drive to see if mount is done and/or checks
* label & sets up user node on the client host via requests passed thru
* vold to the volman node daemon (volnd). The volnd puts the mount msg
* on any drive billboard, while the RC communicates with operators by
* a blast-to-uid-logins protocol from libvol.a and, if RPC_VERS, via
* msgs on the remote console.
*
* Dismounts: the vold is responsible for removing the user node on the
* client's host; the RC leaves volumes mounted ('cached'), and normally
* only dismounts if a new mount gets allocated the drive, in which case
* the (pre-)dismount flag is added to the new mount request. Abnormally,
* a dismount can occur when making a cached volume available for operator
* access (if the write-ring was found to be in the wrong position or if
* a move request has been made). In this case the vold has no specific
* client host to tie the request to, and the NOSEQ request id is used to
* flag that this is a 'fire-and-forget' command.
*
* Operators & other RCs communicate with the RC by M_RCMSG packets which
* are sent e.g. by the rcerr utility via the RP.
*
* The RC blocks on reading input from the RP, which includes RP commands
* and volnode responses. It receives timer messages from the RP which
* inspire it to examine its queues to see if anything has changed.
*
* Original manual RC by
* Bill Ross, NAS/NASA Ames, 8/18/89
*
* Reworked to include RPC hooks by
* Tom Proett, NAS/NASA Ames, 4/5/93
*
* Reworked again for mount cacheing and multi-host/volnd configuration,
* Bill Ross, NAS/NASA Ames, 2/29/96
*
*/
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <fcntl.h>
#include <dirent.h>
#include <utmp.h>
#include <pwd.h>
#include <setjmp.h>
#include <sys/file.h>
#include <rpc/rpc.h>
#ifdef EXTERN
# undef EXTERN
#endif
#define EXTERN
#include "srv_space.h"
#include "rclib.h"
#include "vault.h" /* from vault.x; must follow srv_space.h */
/* from cl_rc.c */
extern RC_t *cGetRcp(), *cRCInfo();
#define LABELLEN 80
/*
* WARNTIME defines the number of seconds before the operator
* is notified about a pending tape mount.
*/
#define WARNTIME 60
#define IDLE 1 /* totally unused */
#define MOUNT 2 /* mount in progress (via volnd) */
#define CREATENODE 3 /* mounted, recreating node (via volnd) */
#define MOUNTED 4 /* mounted, node checked, vold/client has it */
#define AVAIL 5 /* vold/client done, mount 'cached' */
#define EJECT 6
#define ACCEPT 7
#define NOTALLOC 8
#define SCRATCH 9
extern int errno;
extern struct passwd *getpwnam();
/*
* libvol.a stuff
*/
extern DevEnt_t *DriveArray;
extern int DriveTotal;
/*
* from srv_xdr.c
*/
extern int immortal;
extern jmp_buf jmpbuf_i;
/*
* rpc globals
*/
char *server = NULL;
CLIENT *cl;
/*
* other globals
*/
char rcno[20], *progname = NULL;
time_t Now;
static char **args;
/*
 * usage() - print invocation help on stderr and exit nonzero.
 * Never returns.
 */
usage()
{
	(void) fprintf(stderr, "usage: %s rc [-i]\n", progname);
	exit(1);
}
/*
 * call rpc and reset client structure if needed
 *
 * RPC(xxx) - issue the RPC call 'xxx'; if the client handle 'cl' is
 * NULL or the call returns NULL, (re)create the handle and retry the
 * call once.  Relies on the file globals 'cl' and 'server', and on a
 * local 'char *id' being in scope at every call site (for nLogMsg).
 * NOTE(review): 'xxx' may be evaluated twice (initial test plus the
 * retry after reconnect) - keep side effects limited to the call.
 */
#define RPC(xxx) { \
	if ((cl == NULL) || (xxx) == NULL) { \
		cl = clnt_create(server, VAULTPROG, VAULTVERS, "tcp"); \
		if (cl == NULL) { \
			char *err = clnt_spcreateerror(server); \
			nLogMsg(EL_ERROR1, id, \
				"clnt_create error: %s", err); \
		} \
		else { \
			cl->cl_auth = authunix_create_default(); \
			(xxx); \
		} \
	} \
}
/*
 * VaultErr() - log a vault-database RPC result code and record it in
 * the global VMError.
 *
 * what  - E_* result code returned by the vault server
 * vsn   - volume serial the operation concerned (printed with %.*s,
 *         VSNLEN bytes, need not be NUL-terminated)
 * where - caller tag for the log message
 *
 * E_NOERROR is silently ignored; VMError is left untouched then.
 */
void
VaultErr(int what, char *vsn, char *where)
{
	char *err;

	switch (what) {
	case E_NOERROR:
		return;
	case E_NOTFOUND:
		/* was "not in in vault database" - duplicated word fixed */
		err = "not in vault database";
		break;
	case E_PERM:
		err = "permission denied";
		break;
	default:
		err = "failed";
		break;
	}
	VMError = what;
	nLogMsg(EL_ERROR1|EL_CODE, where, "%.*s: %s", VSNLEN, vsn, err);
}
/*
* repository controller info ptr, usable drive counter & status of rc login
*/
RC_t *rcp;
DevMap_t AvailableDrives;
int config;
int logged;
int activemounts = 0, activemoves = 0;
/*
* a doubly linked list of movnodes is kept for tracking move requests
*/
typedef struct movnode {
	struct movnode *next;	/* forward link (list head is global movq) */
	struct movnode *prev;	/* back link; NULL at list head */
	int seq;		/* request sequence id from the RP */
	vsn_t vsn;		/* volume being moved */
	char rcname[20];	/* name of the "other" RC (dst on eject,
				 * src on accept - see MMMove) */
	short src;		/* source RC number */
	short dst;		/* destination RC number */
	long time;		/* timestamp; set to 0 at creation */
	int status;		/* EJECT (we are src) or ACCEPT (we are dst) */
	short sent;		/* nonzero once request sent via vault RPC */
} movnode_t;
movnode_t *movq;
movnode_t *findmov();
/*
* an array of DriveEntries is kept to track mount requests and drive
* availability - NOTE that it parallels the *DriveArray array from
* libvol.
*/
typedef struct DriveEntry {
	vsn_t vsn, label;	/* requested/mounted volume and its label */
	int flags;		/* VO_* flags from the mount request */
	int uid;		/* requesting user id */
	int host;		/* client host index (-1 = any host) */
	int den;		/* density - not referenced in this file;
				 * presumably set by callers - TODO confirm */
	int seq;		/* request sequence id from the RP */
	int status;		/* IDLE/MOUNT/CREATENODE/MOUNTED/AVAIL/... */
	int ring;		/* write-ring state from volnd (VV_RING) */
	long time;		/* last state change / operator-nag time */
	short sent;		/* nonzero once request sent via vault RPC */
} DriveEntry_t;
DriveEntry_t *DriveArray2 = NULL;
int lastdrv = 0, /* last drive considered for rotating alloc */
mindrv = MAXDEVS,
maxdrv = -1; /* min, max for this rc */
/*
* exit routine: inform RP & die
*/
done()
{
int drv;
movnode_t *mvp;
RPCLGoodbye();
if (cl) {
for (drv=mindrv; drv<maxdrv; drv++)
WipeDrive(drv); /* cancel all mounts */
mvp = movq; /* cancel all moves */
while (mvp != NULL)
remove_mv(mvp);
auth_destroy(cl->cl_auth);
}
nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "Exiting");
sleep(5); /* allow vold to rcv goodbye */
exit(VMError);
}
/*
 * Cleanup() - lose all state in preparation for connection
 * with new vold & loading new config.
 *
 * Frees the whole move queue, cancels (if RPC_VERS) any vault mount
 * requests already sent, then releases the drive-state array and
 * resets the configured-drive-range globals to "nothing configured".
 */
Cleanup()
{
	char *id = "Cleanup";
	movnode_t *mov, *next;
	DriveEntry_t *dp;
	int i;
#ifdef RPC_VERS
	int *req;	/* was used undeclared - RPC_VERS build broke */
#endif

	for (mov=movq; mov!=NULL; mov=next) {
		next = mov->next;	/* remove_mv frees mov */
		remove_mv(mov);
	}
#ifdef RPC_VERS
	dp = &DriveArray2[mindrv];
	for (i=mindrv; i<maxdrv; i++, dp++) {
		if (dp->status == MOUNT && dp->sent) {
			req = NULL;
			RPC(req = vault_cancel_1(&dp->vsn, cl));
			if (req == NULL) {
				nLogMsg(EL_ERROR1, id,
					"%.*s: vault cancel failed",
					VSNLEN, &dp->vsn);
			} else {
				/* was DriveArray2[drv].sent - 'drv'
				 * was never declared here */
				dp->sent = 0;
				VaultErr(*req, &dp->vsn, "vault_cancel");
			}
		}
	}
#endif
	nFree(DriveArray2);
	DriveArray2 = NULL;
	lastdrv = 0;
	mindrv = MAXDEVS;
	maxdrv = -1;
}
/**************
MAIN
***************/
/*
 * main() - parse args (rc number, optional -i "immortal" flag in
 * either position), initialize, then loop forever reading packets
 * from the RP and servicing the mount/move queues.
 */
main(int argc, char *argv[])
{
	char *id = "main";
	vmpHdr_t *hp;
	char *rcarg;

	args = argv;	/* needs global for setjmp/longjmp */
	progname = argv[0];
	/*
	 * check arg & save rcno ("-i" may come before or after it)
	 */
	if (argc < 2 || argc > 3)
		usage();
	if (argc == 2) {
		rcarg = argv[1];
	} else if (!strcmp(argv[1], "-i")) {
		immortal = 1;
		rcarg = argv[2];
	} else if (!strcmp(argv[2], "-i")) {
		immortal = 1;
		rcarg = argv[1];
	} else {
		usage();
		return(1);	/* NOTREACHED - usage() exits */
	}
	/*
	 * bounded copy: rcno is only 20 bytes and the arg is
	 * user-controlled (was an unchecked strcpy)
	 */
	strncpy(rcno, rcarg, sizeof(rcno) - 1);
	rcno[sizeof(rcno) - 1] = '\0';
	Init(argv);
	/*
	 * loop reading packets
	 */
	while (1) {
		nLogMsg(EL_DEBUG3, 0, "WAIT on rcv");
		/*
		 * since RPCLRecv drives the main loop (i.e. no
		 * select()), no need to check the result of
		 * RPCLNextRecord() to see if a pkt is buffered -
		 * will be getting it anyway.
		 */
		hp = RPCLRecv("main loop");
		RPCLNextRecord();
		time(&Now);
		procpkt(hp);
		nFree(hp);
		hp = NULL;
		if (activemounts || activemoves) {
			nLogMsg(EL_DEBUG3, id,
				"active requests = %d mount, %d move",
				activemounts, activemoves);
			if (!loggedin()) {
				nLogMsg(EL_CONSOLE0|EL_ERROR1, 0,
			"Operator acct (%s) not logged in: requests waiting",
					rcp->rcd_login);
			}
			if (activemounts)
				ProcMountQ();
			if (activemoves)
				ProcMoveQ();
		}
	}
}
/*****************
INITIALIZATION
******************/
/*
 * Init() - one-time (and post-longjmp, if immortal) initialization:
 * set up the core/spool directory, logging, signals, the RP
 * connection, RC config, drive arrays, and the optional vault RPC
 * client.  Exits on any fatal setup failure.
 *
 * NOTE(review): called as Init(argv) from main() but declared with no
 * parameters (old-style call; the extra argument is ignored).
 */
Init()
{
	char coredir[80], logtag[20];
	DIR *dirp;
	/* create & chdir to a per-RC spool dir (so core files land there) */
	sprintf(coredir, "%s/%s", vSpoolDir, "vaultrc");
	if ((dirp = opendir(coredir)) == NULL) {
		if (mkdir(coredir, 0755) < 0) { /* create it */
			fprintf(stderr, "can't create %s: mkdir", coredir);
			exit(1);
		}
	} else
		closedir(dirp);
	if (chdir(coredir) < 0) {
		fprintf(stderr, "%s: chdir", coredir);
		exit(1);
	}
	vInitDirNames();
	/* log tag e.g. "VAULT3"; done() is the exit/cleanup hook */
	sprintf(logtag, "VAULT%s", rcno);
	nLogInit(logtag, done);
	nLogInitFile(vLogFile, EL_LEVEL, 0, NULL);
	signal(SIGCHLD, SIG_IGN);
	signal(SIGPIPE, SIG_IGN);
	/*
	** here is where to come back to if immortal & vold goes away
	*/
	if (setjmp(jmpbuf_i)) {
		nLogMsg(EL_STATUS1, 0, "setjmp'd");
		Cleanup();
		sleep(30);	/* pause before reconnect attempt */
	}
	/*
	 * establish RP connection & get drive/RC config
	 */
	RCConnectRP();
	if ((rcp = cGetRcp(rcno)) == NULL) {
		fprintf(stderr, "%s pid %d: can't get RC %s info\n",
			progname, getpid(), rcno);
		exit(1);
	}
	/*
	 * start console (all coded for 1 console=rp_syslog)
	 */
	nLogInitConsole(rcp->rcd_syslog, EL_CONSOLE0, EL_LEVEL);
	InitDriveArrays();
	nLogMsg(EL_STATUS1, 0, "==== %s here: rc=%s pid=%d ====",
		progname, rcp->rcd_id, getpid());
	/* optional direct RPC connection to the vault db server */
	if (server != NULL) {
		cl = clnt_create(server, VAULTPROG, VAULTVERS, "tcp");
		if (cl == NULL) {
			char *err = clnt_spcreateerror(server);
			nLogMsg(EL_ERROR1, 0,
				"clnt_create error: %s", err);
		} else
			cl->cl_auth = authunix_create_default();
	}
	/*
	 * see if rc login exists
	 */
	if (!(logged = loggedin()))
		nLogMsg(EL_CONSOLE0|EL_ERROR1, 0,
			"%s: console %s not logged in",
			rcp->rcd_name, rcp->rcd_login);
	/*
	 * make sure we are ok to run as root
	 */
	CheckId(getuid(), rcp);
	RCReadyRP(rcp);
	nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "Initialized");
}
/*
 * InitDriveArrays() - build the local drive-state array DriveArray2
 * (parallel to libvol's DriveArray) and compute the configured drive
 * range for this RC: mindrv = first drive, maxdrv = last drive + 1
 * (exclusive bound for loops).
 */
InitDriveArrays()
{
	char *id = "InitDriveArrays";
	int drv;

	DM_ZERO(AvailableDrives);
	DriveArray2 = (DriveEntry_t *)
		nAlloc(DriveTotal * sizeof(DriveEntry_t));
	for (drv = 0; drv < DriveTotal; drv++) {
		DriveArray2[drv].ring = 0;
		if (DriveArray[drv].de_rc != rcp->rcd_number) {
			/* belongs to some other RC */
			DriveArray2[drv].status = NOTALLOC;
			continue;
		}
		nLogMsg(EL_DEBUG2, id, "drive %d configured", drv);
		DM_SET(drv, AvailableDrives);
		DriveArray2[drv].status = IDLE;
		if (drv > maxdrv)
			maxdrv = drv;
		if (drv < mindrv)
			mindrv = drv;
		config++;
	}
	if (!config)
		nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "NO DEVICES CONFIGURED");
	else
		nLogMsg(EL_DEBUG2, id, "min/max configured %d %d",
			mindrv, maxdrv);
	maxdrv++;		/* make maxdrv an exclusive bound */
	lastdrv = mindrv - 1;
}
/*******************
PACKET HANDLING
********************/
/*
 * procpkt - handle a packet from the RP, dispatching on message type.
 * Mount/move requests bump the active-request counters checked by the
 * main loop.
 */
procpkt(vmpHdr_t *hp)
{
	char *id = "procpkt";

	switch (hp->vh_type) {
	case M_TIMER:		/* an excuse to check the queues */
	case M_NOP:		/* communications check (not used) */
	case M_VARY:		/* response to RC vary off */
		break;
	case M_VERROR:
		MMError(hp, 0, 0);
		break;
	case M_RCMOUNT:
		activemounts += MMMount(hp);
		break;
	case M_CREATENODE:	/* mount/label check or dismount/rmv node ok */
		MMNode(hp);
		break;
	case M_UNMOUNT:
		MMUnmount(hp);
		break;
	case M_SCRATCH:
		MMScratched(hp);
		break;
	case M_MOVE:
		activemoves += MMMove(hp);
		break;
	case M_RCMSG:
		MMRCMsg(hp);
		break;
	case M_VOLSTAT:
		MMVolstat(hp);
		break;
	case M_NEWLOG:
		/* assumes sharing voldaemon log, so doesn't rename */
		nLogInitFile(vLogFile, EL_LEVEL, 0, NULL);
		break;
	case M_DEBUG:
		MMDebug(hp);
		break;
	case M_SHUTDOWN:
		nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "shutdown rcvd from vold");
		if (immortal) {
			nLogMsg(EL_STATUS1, 0, "immortal - longjmp");
			longjmp(jmpbuf_i, 1);
			/* only reached if longjmp somehow returns */
			nLogMsg(EL_SYS, 0, "longjmp");
		}
		done();
		/*
		 * done() exits; the break keeps a hypothetical return
		 * from falling into the "unexpected packet" default
		 * (the original had no break here)
		 */
		break;
	default:
		nLogMsg(EL_ERROR1, id, "%s: unexpected packet",
			vMsgType(hp->vh_type));
	}
}
/*
 * MMError - handle error; mount err from volnd or rcerr,
 * or vold cancelling a move.
 *
 * rcerr - nonzero if the pkt arrived wrapped in an M_RCMSG
 *         (operator rcerr utility or volnd)
 * flag  - RCM_* flags from the enclosing rcmsg
 */
MMError(vmpHdr_t *hp, int rcerr, int flag)
{
	char *id = "MMError";
	vmpError_t *vep;
	int drv;

	vep = MSGPTR(hp, vmpError_t);
	/* vn_msg is external data - never use it as the format string */
	nLogMsg(EL_ERROR1, 0, "%s", vep->vn_msg);
	/*
	 * see if it's a mount: scan configured drives for the vsn.
	 * NOTE: bounds check must come first - the original tested
	 * SAMEVSN before drv < maxdrv and read one entry past the end.
	 */
	for (drv = mindrv; drv < maxdrv &&
	    !SAMEVSN(&DriveArray2[drv].vsn, &vep->vn_vsn); drv++)
		;
	if (drv >= maxdrv) {	/* >= also covers the unconfigured case */
		/*
		 * no drive request
		 */
		if (moverr(hp)) {
			nLogMsg(EL_CONSOLE0|EL_ERROR1, 0,
				"ACS Enter failed on %.*s",
				VSNLEN, &vep->vn_vsn);
			/*
			 * messages originating from the RP shouldn't
			 * be forwarded back to the RP. Only
			 * messages from the rcerr program (or volnd)
			 * should be forwarded.
			 */
			if (rcerr && !(flag & RCM_NORPFORWARD))
				RPCLSend(hp, 1);
		} else if (vep->vn_state == VS_MOVING)
			nLogMsg(EL_STATUS2, 0, "%.*s: move failed by other RC",
				VSNLEN, &vep->vn_vsn);
		else
			nLogMsg(EL_ERROR1, 0, "Unexpected error pkt");
		return;
	}
	if (hp->vh_seq != DriveArray2[drv].seq) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"%.*s_%d: mismatched seq %d",
			VSNLEN, &vep->vn_vsn, hp->vh_seq,
			DriveArray2[drv].seq);
		return;
	}
	switch (DriveArray2[drv].status) {
	case MOUNT:
		/*
		 * forward error pkt to vold & reset drivearray
		 */
		RPCLSend(hp, 1);
		if (rcerr) {
			/*
			 * cancel the wait-on-mount
			 */
			RCSendVolNode(M_CREATENODE, DriveArray2[drv].seq,
				&vep->vn_vsn, NULL, VO_CANCEL,
				drv, DriveArray2[drv].host, 0);
		}
		DriveArray2[drv].status = IDLE;
		DriveArray2[drv].vsn.v_vsn[0] = 0;
		DriveArray2[drv].time = Now;
		activemounts--;
		break;
	case CREATENODE:
		/*
		 * forward error pkt to vold & reset drivearray
		 */
		RPCLSend(hp, 1);
		if (rcerr) {
			nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
				"%.*s_%d: unexpected rcerr",
				VSNLEN, &vep->vn_vsn, hp->vh_seq);
			return;
		}
		DriveArray2[drv].status = IDLE;
		DriveArray2[drv].vsn.v_vsn[0] = 0;
		break;
	default:
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"%.*s_%d: unexpected status %d",
			VSNLEN, &vep->vn_vsn, hp->vh_seq,
			DriveArray2[drv].status);
		break;
	}
}
/*
 * MMount - handle mount request; return TRUE if need to bug
 * operator (a new physical mount was queued), FALSE if satisfied
 * from cache or rejected with an error to the RP.
 */
int
MMMount(vmpHdr_t *hp)
{
	char *id = "MMMount";
	int drive, seldrive, gotidle, nodeflags;
	time_t oldtime;
	vmpRCMount_t *mp;

	mp = MSGPTR(hp, vmpRCMount_t);
	if (!config) {
		/*
		 * this process will never mount - no drives configured
		 */
		VMError = E_INTERNAL;
		nLogMsg(EL_CODE|EL_ERROR1, id, "no drives configured");
		RCErr2RP(&mp->vm_vsn, hp->vh_seq, M_RCMOUNT, 0);
		return(FALSE);
	}
	nodeflags = mp->vm_flag & VO_VOLNDMASK;
	/*
	 * see if acceptable cached mount (AVAIL)
	 */
	for (drive=mindrv; drive<maxdrv; drive++) {
#ifdef PURIFY
		if (DriveArray2[drive].status != IDLE)
			continue;
		if (!DM_ISSET(drive, mp->vm_devmap))
			continue;
		nLogMsg(EL_STATUS1, id, "%.*s: using 'idle' drive",
			VSNLEN, &mp->vm_vsn);
		memcpy(&DriveArray2[drive].vsn, &mp->vm_vsn, VSNLEN);
		DriveArray2[drive].seq = hp->vh_seq;
		DriveArray2[drive].label = mp->vm_label;
		DriveArray2[drive].flags = mp->vm_flag;
		DriveArray2[drive].uid = mp->vm_uid;
		DriveArray2[drive].host = mp->vm_host;
		DriveArray2[drive].time = Now;
		if (mp->vm_flag & VO_SCRATCH) {
			DriveArray2[drive].status = SCRATCH;
			RCSendVolNode(M_SCRATCH, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		} else {
			DriveArray2[drive].status = CREATENODE;
			RCSendVolNode(M_CREATENODE, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		}
		return(FALSE);
#endif
		if (DriveArray2[drive].status != AVAIL)
			continue;
		if (!SAMEVSN(&mp->vm_vsn, &DriveArray2[drive].vsn))
			continue;
		/*
		 * vsn is mounted & available; error if write ring wrong
		 */
		if (DriveArray2[drive].ring != VV_RING &&
		    mp->vm_flag & VO_WRITE) {
			/*
			 * kick it off for operator access to ring
			 * (host=-1 => any host can dismount)
			 */
			RCSendVolNode(M_DELETENODE, NOSEQ,
				&DriveArray2[drive].vsn, NULL,
				VO_UNMOUNT, drive, -1, 0);
			DriveArray2[drive].status = IDLE;
			VMError = E_USER;
			nLogMsg(EL_CODE|EL_ERROR1, id, "%.*s: %s - %s",
				VSNLEN, &mp->vm_vsn,
				"write ring wrong for write",
				"have operator enable ring");
			RCErr2RP(&mp->vm_vsn, hp->vh_seq, M_RCMOUNT, 0);
			return(FALSE);
		}
		if (DriveArray2[drive].ring == VV_RING &&
		    !(mp->vm_flag & VO_WRITE) &&
		    !(mp->vm_flag & VO_IGNORE_RING)) {
			/*
			 * kick it off for operator access to ring
			 * (host=-1 => any host can dismount)
			 */
			RCSendVolNode(M_DELETENODE, NOSEQ,
				&DriveArray2[drive].vsn, NULL,
				VO_UNMOUNT, drive, -1, 0);
			DriveArray2[drive].status = IDLE;
			VMError = E_USER;
			nLogMsg(EL_CODE|EL_ERROR1, id, "%.*s: %s - %s %s",
				VSNLEN, &mp->vm_vsn,
				"write ring enabled on read request",
				"have operator disable ring",
				"or use 'ignore-ring'");
			RCErr2RP(&mp->vm_vsn, hp->vh_seq, M_RCMOUNT, 0);
			return(FALSE);
		}
		/*
		 * if drive not in drivemap, kick it off current
		 * drive & fall thru to alloc for new mount
		 * (host=-1 => any host can dismount)
		 */
		if (!DM_ISSET(drive, mp->vm_devmap)) {
			RCSendVolNode(M_DELETENODE, NOSEQ,
				&DriveArray2[drive].vsn, NULL,
				VO_UNMOUNT, drive, -1, 0);
			DriveArray2[drive].status = IDLE;
			break;
		}
		/*
		 * cached mount is 100% ok - have volnode
		 * recheck label & make client node
		 */
		nLogMsg(EL_STATUS1, id, "%.*s: using cached mount",
			VSNLEN, &mp->vm_vsn);
		DriveArray2[drive].seq = hp->vh_seq;
		DriveArray2[drive].label = mp->vm_label;
		DriveArray2[drive].flags = mp->vm_flag;
		DriveArray2[drive].uid = mp->vm_uid;
		DriveArray2[drive].host = mp->vm_host;
		DriveArray2[drive].time = Now;
		if (mp->vm_flag & VO_SCRATCH) {
			DriveArray2[drive].status = SCRATCH;
			RCSendVolNode(M_SCRATCH, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		} else {
			DriveArray2[drive].status = CREATENODE;
			RCSendVolNode(M_CREATENODE, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		}
		return(FALSE);
	}
#ifndef PURIFY
	/*
	 * see if an empty or cached drive is available;
	 * choose oldest, preferring empty drive
	 */
	nLogMsg(EL_DEBUG3, id, "drive req: %s", vDevMapStr(&mp->vm_devmap));
	nLogMsg(EL_DEBUG3, id, "drive avail: %s", vDevMapStr(&AvailableDrives));
	nLogMsg(EL_DEBUG3, id, "drive and: %s", vDevMapStr(vDMand(&AvailableDrives,
		&mp->vm_devmap)));
	seldrive = -1;
	oldtime = Now + 1;
	gotidle = 0;
	for (drive=mindrv; drive<maxdrv; drive++) {
		/*
		 * status values are enumerated codes, not bit flags:
		 * the original's (status & (IDLE|AVAIL)) also matched
		 * CREATENODE and MOUNTED, letting an in-use drive be
		 * bumped.  Compare explicitly.
		 */
		if (DriveArray2[drive].status != IDLE &&
		    DriveArray2[drive].status != AVAIL)
			continue;
		if (!DM_ISSET(drive, mp->vm_devmap))
			continue;
		nLogMsg(EL_DEBUG2,0,"drv %d legal",drive);
		if (DriveArray2[drive].status == IDLE) {
			/* first idle always wins over any cached drive */
			if (!gotidle++ || DriveArray2[drive].time < oldtime) {
				seldrive = drive;
				oldtime = DriveArray2[drive].time;
			}
			continue;
		}
		/*
		 * AVAIL (cached) - only a candidate while no idle drive
		 * has been seen, so the "preferring empty drive" policy
		 * (and the log message below) stays truthful; the
		 * original let an older cached drive displace an
		 * already-chosen idle one.
		 */
		if (!gotidle && DriveArray2[drive].time < oldtime) {
			seldrive = drive;
			oldtime = DriveArray2[drive].time;
		}
	}
	if (seldrive != -1) {
		drive = seldrive;
		if (gotidle)
			nLogMsg(EL_STATUS2, id, "%.*s: using idle drive",
				VSNLEN, &mp->vm_vsn);
		else
			nLogMsg(EL_STATUS2, id,
				"%.*s: bumping cached mount (%.*s)",
				VSNLEN, &mp->vm_vsn,
				VSNLEN, &DriveArray2[drive].vsn);
		/*
		 * send nodesetup w/ wait on mount, kicking off
		 * anything mounted 1st (including 'idle')
		 */
		DriveArray2[drive].time = Now;
		DriveArray2[drive].seq = hp->vh_seq;
		DriveArray2[drive].vsn = mp->vm_vsn;
		DriveArray2[drive].label = mp->vm_label;
		DriveArray2[drive].flags = mp->vm_flag;
		DriveArray2[drive].uid = mp->vm_uid;
		DriveArray2[drive].host = mp->vm_host;
		DriveArray2[drive].sent = 0;
		if (mp->vm_flag & VO_SCRATCH) {
			DriveArray2[drive].status = SCRATCH;
			RCSendVolNode(M_SCRATCH, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				VO_WAIT | VO_CLEARFIRST | nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		} else {
			DriveArray2[drive].status = CREATENODE;
			RCSendVolNode(M_CREATENODE, hp->vh_seq,
				&DriveArray2[drive].vsn,
				&DriveArray2[drive].label,
				VO_WAIT | VO_CLEARFIRST | nodeflags, drive,
				DriveArray2[drive].host,
				DriveArray2[drive].uid);
		}
		nLogMsg(EL_DEBUG2, id,
			"setting to mount: %.*s label %.*s on drive %d",
			VSNLEN, &mp->vm_vsn, VSNLEN, &mp->vm_label, drive);
		nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "%.*s: MOUNT%s on drive %s",
			VSNLEN, &DriveArray2[drive].vsn,
			(mp->vm_flag & VO_SCRATCH ? "/SCRATCH" : ""),
			DriveArray[drive].de_name);
		return(TRUE);
	}
#endif
	/*
	 * no drive ok
	 */
	VMError = E_UNAVAIL;
	nLogMsg(EL_CODE|EL_ERROR1, id, "%.*s_%d no drive available",
		VSNLEN, &mp->vm_vsn, hp->vh_seq);
	RCErr2RP(&mp->vm_vsn, hp->vh_seq, M_RCMOUNT, 0);
	return(FALSE);
}
/*
 * MUnmount - appear to unload volume from drive (actually, keep
 * it mounted - cached).  vp->v_flag carries the drive number, or
 * -1 for a cancellation where the drive must be found by vsn.
 */
MMUnmount(vmpHdr_t *hp)
{
	char *id = "MMUnmount";
	int drv;
	vmpVsn_t *vp;
#ifdef RPC_VERS
	int *req;
#endif

	vp = MSGPTR(hp, vmpVsn_t);
	drv = vp->v_flag;
	if (drv == -1) {
		/*
		 * this is a cancellation rather than a dismount;
		 * figure out the drive.  NOTE: bounds check first -
		 * the original tested SAMEVSN before drv < maxdrv
		 * and read one entry past the end.
		 */
		for (drv = mindrv; drv < maxdrv &&
		    !SAMEVSN(&DriveArray2[drv].vsn, &vp->v_vsn); drv++)
			;
		if (drv >= maxdrv) {
			VMError = E_INTERNAL;
			nLogMsg(EL_CODE|EL_ERROR1, id,
				"%.*s_%d not mounted here",
				VSNLEN, &vp->v_vsn, hp->vh_seq);
			RCErr2RP(&vp->v_vsn, hp->vh_seq, M_UNMOUNT, 0);
			return;
		}
		vp->v_flag = drv;
	} else if (!SAMEVSN(&vp->v_vsn, &DriveArray2[drv].vsn)) {
		VMError = E_INTERNAL;
		nLogMsg(EL_CODE|EL_ERROR1, id,
			"%.*s: another vsn (%.*s) in drive %s",
			VSNLEN, &vp->v_vsn,
			VSNLEN, &DriveArray2[drv].vsn,
			DriveArray[drv].de_name);
		RCErr2RP(&vp->v_vsn, hp->vh_seq, M_UNMOUNT, 0);
		return;
	}
	switch (DriveArray2[drv].status) {
	case MOUNT:
		nLogMsg(EL_CONSOLE0|EL_STATUS1, 0,
			"CANCEL MOUNT: %.*s", VSNLEN, &vp->v_vsn);
		/* fall thru */
	case CREATENODE:
		nLogMsg(EL_STATUS1, 0, "Cancelling mount for %.*s_%d",
			VSNLEN, &vp->v_vsn, hp->vh_seq);
#ifdef RPC_VERS
		if (DriveArray2[drv].sent) {
			req = NULL;
			RPC(req = vault_cancel_1(&vp->v_vsn, cl));
			if (req == NULL) {
				nLogMsg(EL_ERROR1, id,
					"%.*s: vault cancel failed",
					VSNLEN, &vp->v_vsn);
			} else {
				DriveArray2[drv].sent = 0;
				VaultErr(*req, &vp->v_vsn, "vault_cancel");
			}
		}
#endif
		RCSendVolNode(M_CREATENODE, DriveArray2[drv].seq,
			&vp->v_vsn, NULL, VO_CANCEL, drv,
			DriveArray2[drv].host, 0);
		if (!(vp->v_flag2 & VF_NORESP))
			RCSendUnmountDone(hp->vh_seq, &vp->v_vsn);
		else
			nLogMsg(EL_STATUS2, id,
				"(%.*s: RP requested no response)",
				VSNLEN, &vp->v_vsn);
		DriveArray2[drv].status = IDLE;
		DriveArray2[drv].vsn.v_vsn[0] = 0;
		DriveArray2[drv].time = Now;
		break;
	case MOUNTED:
		/* leave the volume on the drive, 'cached' */
		nLogMsg(EL_STATUS1, id, "freeing %.*s on drive %s",
			VSNLEN, &vp->v_vsn,
			DriveArray[drv].de_name);
#ifdef PURIFY
		DriveArray2[drv].status = IDLE;
#else
		DriveArray2[drv].status = AVAIL;
#endif
		DriveArray2[drv].time = Now;
		if (!(vp->v_flag2 & VF_NORESP))
			RCSendUnmountDone(hp->vh_seq, &vp->v_vsn);
		else
			nLogMsg(EL_STATUS2, id,
				"(%.*s: RP requested no response)",
				VSNLEN, &vp->v_vsn);
		break;
	case IDLE:
		/*
		 * assuming that a mount failed & the vold is
		 * canceling before learning of the failure
		 * (happens when parallel mounts both fail)
		 */
		nLogMsg(EL_STATUS1, id, "%.*s: ignoring unmount",
			VSNLEN, &vp->v_vsn);
		break;
	default:
		if (vp->v_flag2 & VF_NORESP) {
			nLogMsg(EL_STATUS1, id,
			"ignored: %.*s_%d: %s: unexpected drive status: %d",
				VSNLEN, &vp->v_vsn, hp->vh_seq,
				DriveArray[drv].de_name,
				DriveArray2[drv].status);
			return;
		}
		VMError = E_INTERNAL;
		nLogMsg(EL_CONSOLE0|EL_ERROR1|EL_CODE, id,
			"%.*s_%d: %s: unexpected drive status: %d",
			VSNLEN, &vp->v_vsn, hp->vh_seq,
			DriveArray[drv].de_name,
			DriveArray2[drv].status);
		RCErr2RP(&vp->v_vsn, hp->vh_seq, M_UNMOUNT, 0);
		break;
	}
}
/*
 * MMove - set up move request: queue an EJECT node per vsn when we
 * are the source RC (ACCEPT when we are the destination is handled
 * by the other side - ACS eject completes it).  Returns the number
 * of move requests queued; 0 on header errors (each vsn is NAKed to
 * the RP) or when no operator action is needed here.
 */
int
MMMove(vmpHdr_t *hp)
{
	char *id = "MMMove";
	vmpMove_t *mvp;
	vsnseq_t *vsnseqp;
	movnode_t *mov;
	RC_t *lrcp;
	int i, drv, reqcount = 0, headererr = 0;

	mvp = MSGPTR(hp, vmpMove_t);
	nLogMsg(EL_DEBUG2, 0, "Move from %d to %d",
		mvp->vmv_srcrc,mvp->vmv_dstrc);
	/*
	 * is this a bad move, eject or accept for this RC?
	 */
	if (mvp->vmv_srcrc == mvp->vmv_dstrc) {
		/*
		 * bad move
		 */
		VMError = E_INTERNAL;
		nLogMsg(EL_ERROR1|EL_CODE, id,
			"%s: move to same location (%d)",
			rcp->rcd_id, mvp->vmv_srcrc);
		headererr++;
		goto vsnloop;
	}
	if (rcp->rcd_number != mvp->vmv_srcrc
	    && rcp->rcd_number != mvp->vmv_dstrc) {
		/*
		 * bad move
		 */
		VMError = E_INTERNAL;
		nLogMsg(EL_ERROR1|EL_CODE, id,
			"move doesn't involve %s: %d->%d",
			rcp->rcd_id, mvp->vmv_srcrc, mvp->vmv_dstrc);
		headererr++;
		goto vsnloop;
	}
	if (mvp->vmv_srcrc != rcp->rcd_number) {
		/* TODO - add/check property flag of src */
		/*
		 * ACS eject to CAP will complete move so no
		 * operator action req'd
		 */
		return(0);
	}
	/*
	 * OK.. send off errors or put node(s) at head of movq
	 */
vsnloop:
	vsnseqp = (vsnseq_t *) ((char *)mvp + PadSize(sizeof(vmpMove_t)));
	for (i=0; i<hp->vh_count; i++, vsnseqp++) {
		if (headererr) {
			RCErr2RP(&vsnseqp->r_vsn, vsnseqp->r_seq, M_MOVE, 0);
			continue;
		}
		/*
		 * if it's a cached mount, kick it off drive
		 */
		for (drv=mindrv; drv<maxdrv; drv++) {
			if (DriveArray2[drv].status != AVAIL)
				continue;
			if (SAMEVSN(&vsnseqp->r_vsn, &DriveArray2[drv].vsn)) {
				/*
				 * host=-1 means use any host
				 */
				RCSendVolNode(M_DELETENODE, NOSEQ,
					&DriveArray2[drv].vsn, NULL, 0, drv,
					-1, 0);
				DriveArray2[drv].status = IDLE;
				break;
			}
		}
		/* link a new move node at the head of movq */
		mov = (movnode_t *) nAlloc(sizeof(movnode_t));
		mov->next = movq;
		mov->prev = NULL;
		if (movq != NULL)
			movq->prev = mov;
		movq = mov;
		mov->seq = vsnseqp->r_seq;
		mov->vsn = vsnseqp->r_vsn;
		mov->src = mvp->vmv_srcrc;
		mov->dst = mvp->vmv_dstrc;
		mov->time = 0;
		mov->sent = 0;
		if (mvp->vmv_srcrc == rcp->rcd_number) {	/* eject */
			nLogMsg(EL_STATUS1, id,
				"%.*s eject status", VSNLEN, &vsnseqp->r_vsn);
			if ((lrcp = cRCInfo(mov->dst)) == NULL)
				sprintf(mov->rcname, "RC # %d", mov->dst);
			else {
				/*
				 * bounded copy - rcname is only 20
				 * bytes (was an unchecked strcpy)
				 */
				strncpy(mov->rcname, lrcp->rcd_name,
					sizeof(mov->rcname) - 1);
				mov->rcname[sizeof(mov->rcname) - 1] = '\0';
			}
			mov->status = EJECT;
		} else {
			nLogMsg(EL_STATUS1, id,
				"%.*s accept status", VSNLEN, &vsnseqp->r_vsn);
			if ((lrcp = cRCInfo(mov->src)) == NULL)
				sprintf(mov->rcname, "RC # %d", mov->src);
			else {
				strncpy(mov->rcname, lrcp->rcd_name,
					sizeof(mov->rcname) - 1);
				mov->rcname[sizeof(mov->rcname) - 1] = '\0';
			}
			mov->status = ACCEPT;
		}
		reqcount++;
	}
	return(reqcount);
}
/*
 * MMRCMsg - handle msg from operator: the rcmsg payload is itself
 * a packet (vmpHdr_t) which is dispatched here.
 */
MMRCMsg(vmpHdr_t *hp)
{
	char *id = "MMRCMsg";
	vmpRcMsg_t *rm;
	vmpHdr_t *inner;

	rm = MSGPTR(hp, vmpRcMsg_t);
	inner = (vmpHdr_t *) rm->rm_msg;
	nLogMsg(EL_DEBUG2, 0, "subpkt magic %c type %d",
		inner->vh_magic, inner->vh_type);
	if (inner->vh_type == M_VALLOC) {
		/* arbitrary type for msg from ACS */
		nLogMsg(EL_CONSOLE0|EL_STATUS1,0,
			"CAP FULL in LSM %d: PLEASE EMPTY",inner->vh_seq);
	} else if (inner->vh_type == M_VERROR) {
		MMError(inner, 1, rm->rm_flag);
	} else {
		nLogMsg(EL_CONSOLE0|EL_ERROR1,0, "unsupported RCMSG type %s",
			vMsgType(inner->vh_type));
	}
}
/*
 * MMVolstat - volume status from the RP; a VS_ACCEPTED pkt means the
 * cart arrived at the destination, so drop the matching move request.
 */
MMVolstat(vmpHdr_t *hp)
{
	char *id = "MMVolstat";
	vmpVolStat_t *vp;
	movnode_t *mov;

	vp = MSGPTR(hp, vmpVolStat_t);
	if (vp->vv_state != VS_ACCEPTED) {
		nLogMsg(EL_DEBUG1, id, "unexpected state %d", vp->vv_state);
		return;
	}
	/*
	 * this is a copy of the pkt sent to voldaemon by ACS
	 * so cart has arrived there: just cancel the req
	 */
	mov = findmov((char *)&vp->vv_vsn);
	if (mov == NULL) {
		nLogMsg(EL_ERROR2, id, "No record of move (%.*s)",
			VSNLEN, &vp->vv_vsn);
		return;
	}
	if (mov->src != rcp->rcd_number)
		nLogMsg(EL_ERROR2, id, "not source rc %.*s",
			VSNLEN, &vp->vv_vsn);
	nLogMsg(EL_DEBUG2, 0, "ACS accepted %.*s",
		VSNLEN, &vp->vv_vsn);
	activemoves--;
	remove_mv(mov);
}
/*
 * moverr - if the error pkt refers to a queued move, drop that move
 * (and its active count) and return 1; otherwise return 0.
 */
int
moverr(vmpHdr_t *hp)
{
	char *id = "moverr";
	vmpError_t *ep;
	movnode_t *node;

	ep = MSGPTR(hp, vmpError_t);
	if ((node = findmov((char *)&ep->vn_vsn)) == NULL) {
		nLogMsg(EL_DEBUG3, id, "No record of move (%.*s)",
			VSNLEN, &ep->vn_vsn);
		return(0);
	}
	/*
	 * set up pkt in case it was in an RCMSG & needs to be sent to RP
	 */
	hp->vh_seq = node->seq;
	remove_mv(node);
	activemoves--;
	return(1);
}
/**************************
FOLLOW-UP: QUEUE HANDLING
***************************/
ProcMountQ()
{
int i;
for (i=mindrv; i<maxdrv; i++) {
if (DriveArray2[i].status != MOUNT)
continue;
if (MountMsg(i) != E_NOERROR) { /* request mount */
WipeDrive(i);
RCErr2RP(&DriveArray2[i].vsn, DriveArray2[i].seq,
M_RCMOUNT, 1);
return(TRUE);
}
}
}
/*
 * WipeDrive() - unconditionally clear the drive: cancel a pending
 * mount, or kick a mounted/cached volume off, and mark the drive
 * IDLE.  Other states are left alone.
 */
WipeDrive(int drv)
{
	char *id = "WipeDrive";

	switch (DriveArray2[drv].status) {
	case MOUNT:
		/* cancel the volnd wait-on-mount */
		RCSendVolNode(M_CREATENODE, DriveArray2[drv].seq,
			&DriveArray2[drv].vsn, NULL, VO_CANCEL, drv,
			DriveArray2[drv].host, 0);
		DriveArray2[drv].status = IDLE;
#ifdef RPC_VERS
		if (DriveArray2[drv].sent) {
			RPC(vault_cancel_1(&DriveArray2[drv].vsn, cl));
			DriveArray2[drv].sent = 0;
		}
#endif
		break;
	case MOUNTED:
		/* client still has it - remove the node on its host */
		RCSendVolNode(M_DELETENODE, NOSEQ,
			&DriveArray2[drv].vsn, NULL, VO_UNMOUNT, drv,
			DriveArray2[drv].host, 0);
		DriveArray2[drv].status = IDLE;
		DriveArray2[drv].time = Now;
		break;
	case AVAIL:
		/* host=-1 means use any host */
		RCSendVolNode(M_DELETENODE, NOSEQ,
			&DriveArray2[drv].vsn, NULL, VO_UNMOUNT, drv, -1, 0);
		DriveArray2[drv].status = IDLE;
		break;	/* was a silent fall-through into default */
	default:
		break;
	}
}
/*
 * MMNode - volnd response: mount completed and/or client node
 * (re)created.  Validate drive/seq/vsn, mark the drive MOUNTED,
 * record the write-ring state, and tell the RP.
 */
MMNode(vmpHdr_t *hp)
{
	char *id = "MMNode";
	vmpNode_t *np;
	int drive;
#ifdef RPC_VERS
	vsnloc_t vsnloc;	/* NOTE(review): was used undeclared below;
				 * type name presumed from vault.x - confirm */
#endif

	np = MSGPTR(hp, vmpNode_t);
	drive = np->vs_drive;
	if (drive < mindrv || drive >= maxdrv) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"drive %d out of range (%d..%d)",
			drive, mindrv, maxdrv-1);
		return;
	}
	if (hp->vh_seq != DriveArray2[drive].seq ||
	    !SAMEVSN(&np->vs_vsn, &DriveArray2[drive].vsn)) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"node %.*s_%d doesn't match %.*s_%d",
			VSNLEN, &np->vs_vsn, hp->vh_seq,
			VSNLEN, &DriveArray2[drive].vsn,
			DriveArray2[drive].seq);	/* was &...seq:
						 * pointer passed for %d */
		return;
	}
	switch(DriveArray2[drive].status) {
	case MOUNT:
	case CREATENODE:
		/*
		 * successful conclusion
		 */
		nLogMsg(EL_STATUS1, id, "%s %.*s_%d on %s",
			(DriveArray2[drive].status == MOUNT ?
				"mounted" : "cached mount done" ),
			VSNLEN, &DriveArray2[drive].vsn,
			DriveArray2[drive].seq,
			DriveArray[drive].de_name);
		DriveArray2[drive].status = MOUNTED;
		DriveArray2[drive].ring = np->vs_flag;	/* write-ring state */
		RCSendMountDone(DriveArray2[drive].seq,
			&DriveArray2[drive].vsn, drive, np->vs_flag);
#ifdef RPC_VERS
		if (DriveArray2[drive].sent) {
			RPC(vault_cancel_1(&DriveArray2[drive].vsn, cl));
			DriveArray2[drive].sent = 0;
		}
		/* record the volume's new location in the vault db */
		vsnloc.vsn = DriveArray2[drive].vsn;
		vsnloc.location.when = Now;
		memset(vsnloc.location.where.general, '\0', LOCSIZE);
		vsnloc.location.where.tag = display;
		sprintf(vsnloc.location.where.general,
			"drive %s", DriveArray[drive].de_name);
		RPC(vault_update_1(&vsnloc, cl));
#endif
		break;
	default:
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"drive %d state %d, but got node msg (%.*s_%d %x)",
			drive, DriveArray2[drive].status,
			VSNLEN, &np->vs_vsn, hp->vh_seq, np->vs_flag);
		break;
	}
}
/*
 * MMScratched - volnd response to an M_SCRATCH request: the volume
 * was scratched on the drive.  Validate drive/seq/vsn, free the
 * drive, and tell the RP the unmount is done.
 */
MMScratched(vmpHdr_t *hp)
{
	char *id = "MMScratched";
	vmpNode_t *np;
	int drive;

	np = MSGPTR(hp, vmpNode_t);
	drive = np->vs_drive;
	if (drive < mindrv || drive >= maxdrv) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"drive %d out of range (%d..%d)",
			drive, mindrv, maxdrv-1);
		return;
	}
	if (hp->vh_seq != DriveArray2[drive].seq ||
	    !SAMEVSN(&np->vs_vsn, &DriveArray2[drive].vsn)) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"node %.*s_%d doesn't match %.*s_%d",
			VSNLEN, &np->vs_vsn, hp->vh_seq,
			VSNLEN, &DriveArray2[drive].vsn,
			DriveArray2[drive].seq);	/* was &...seq:
						 * pointer passed for %d */
		return;
	}
	switch(DriveArray2[drive].status) {
	case SCRATCH:
		/*
		 * successful conclusion
		 */
		nLogMsg(EL_STATUS1, id, "SCRATCHED %.*s_%d on %s",
			VSNLEN, &DriveArray2[drive].vsn,
			DriveArray2[drive].seq,
			DriveArray[drive].de_name);
		DriveArray2[drive].status = IDLE;
		RCSendUnmountDone(DriveArray2[drive].seq,
			&DriveArray2[drive].vsn);
#ifdef RPC_VERS
		if (DriveArray2[drive].sent) {
			RPC(vault_cancel_1(&DriveArray2[drive].vsn, cl));
			DriveArray2[drive].sent = 0;
		}
#endif
		break;
	default:
		nLogMsg(EL_CONSOLE0|EL_ERROR1, id,
			"drive %d state %d, but got node msg (%.*s_%d %x)",
			drive, DriveArray2[drive].status,
			VSNLEN, &np->vs_vsn, hp->vh_seq, np->vs_flag);
		break;
	}
}
/*
 * ProcMoveQ() - for every queued move, nag the operator console and
 * (if RPC_VERS) send the vault server one request per move; a failed
 * request is NAKed to the RP and dropped from the queue.
 */
ProcMoveQ()
{
	char *id = "ProcMoveQ";
	movnode_t *mov, *next;
	vsnreq_t vsnreq;
	int *req;

	for (mov=movq; mov!=NULL; mov=next) {
		next = mov->next;	/* mov may be freed below */
		vsnreq.vsn = mov->vsn;
		if (mov->src == rcp->rcd_number)
			sprintf(vsnreq.message, "PUT into ACS");
		else
			sprintf(vsnreq.message, "GET from ACS");
		nLogMsg(EL_CONSOLE0|EL_STATUS1, id, "%.*s: %s",
			VSNLEN, &mov->vsn, vsnreq.message);
#ifdef RPC_VERS
		if (!mov->sent) {
			req = NULL;
			RPC(req = vault_request_1(&vsnreq, cl));
			if (req == NULL) {
				nLogMsg(EL_ERROR1, id,
					"%.*s request failed",
					VSNLEN, &mov->vsn);
			} else if (*req != E_NOERROR) {
				VaultErr(*req, &mov->vsn, "vault_request");
				VMError = E_SYS;
				RCErr2RP(&mov->vsn, mov->seq, M_MOVE, 0);
				/* was missed: keep the active count in
				 * step with the queue (cf. moverr) */
				activemoves--;
				remove_mv(mov);
			} else
				mov->sent = 1;
		}
#endif
	}
}
/**********************
GENERAL UTILITIES
***********************/
movnode_t *
findmov(char *vsn)
{
movnode_t *mov;
for (mov=movq; mov!=NULL; mov=mov->next)
if (SAMEVSN(vsn, &mov->vsn))
return(mov);
return(NULL);
}
/*
 * remove_mv() - cancel any outstanding vault request for a move node,
 * unlink it from the movq list, and free it.
 */
remove_mv(movnode_t *mov)
{
	char *id = "remove_mv";

#ifdef RPC_VERS
	if (mov->sent) {
		RPC(vault_cancel_1(&mov->vsn, cl));
	}
#endif
	/* uniform doubly-linked unlink: head, tail and middle cases */
	if (mov->prev != NULL)
		mov->prev->next = mov->next;
	else
		movq = mov->next;
	if (mov->next != NULL)
		mov->next->prev = mov->prev;
	nFree(mov);
}
/*
 * CheckLogin - record and report whether anyone is logged in under
 * the RC's operator account; complain on the system console if not.
 */
int
CheckLogin()
{
	if (!loggedin()) {
		nLogMsg(EL_CONSOLE0|EL_ERROR1, 0, "operator not logged in");
		logged = FALSE;
		return(FALSE);
	}
	nLogMsg(EL_DEBUG3,0,"RC acct logged in");
	logged = TRUE;
	return(TRUE);
}
/*
 * MountMsg - inform the responsible parties that a mount message is
 * outstanding: send the vault server one request per mount (if
 * RPC_VERS) and re-log the mount on the operator console every
 * WARNTIME seconds.  Returns E_NOERROR, or the vault server's error
 * code on a rejected request.
 */
int
MountMsg(int drv)
{
	char *id = "MountMsg";
	vsnreq_t vsnreq;
	int *req;

#ifdef RPC_VERS
	if (!DriveArray2[drv].sent) {
		/*
		 * the original sent vsnreq completely uninitialized -
		 * fill in the volume and a message first (message text
		 * presumed; cf. ProcMoveQ - confirm operator wording)
		 */
		vsnreq.vsn = DriveArray2[drv].vsn;
		sprintf(vsnreq.message, "MOUNT on drive %s",
			DriveArray[drv].de_name);
		req = NULL;
		RPC(req = vault_request_1(&vsnreq, cl));
		if (req == NULL) {
			nLogMsg(EL_ERROR1, id,
				"%.*s: vault_request_1 failed",
				VSNLEN, &vsnreq.vsn);
		} else if (*req != E_NOERROR) {
			VaultErr(*req, &vsnreq.vsn, "vault_request");
			VMError = *req;
			return(*req);
		} else
			DriveArray2[drv].sent = 1;
	}
#endif
	/*
	 * is it time to complain again?
	 */
	if ( DriveArray2[drv].time <= Now - WARNTIME ) {
		nLogMsg(EL_CONSOLE0|EL_STATUS1, 0, "%.*s: MOUNT on drive %s",
			VSNLEN, &DriveArray2[drv].vsn,
			DriveArray[drv].de_name);
		DriveArray2[drv].time = Now;
	}
	return E_NOERROR;
}
/*
 * loggedin - is anyone logged in under RC console login?
 * Scans utmp via getutent(); returns 1 if an entry matches
 * rcp->rcd_login, else 0.
 */
int
loggedin()
{
	char *id = "loggedin";
	struct utmp *utmp, *getutent();
	void setutent(), endutent();

	setutent();
	while (1) {
		/*
		 * clear errno: getutent() returns NULL at plain
		 * end-of-file too, and a stale errno from an earlier
		 * call caused spurious "getutent" error logs
		 */
		errno = 0;
		utmp = getutent();
		if (utmp == NULL) {
			if (errno != ENOTTY && errno != 0)
				/* enotty if system coming up */
				nLogMsg(EL_ERROR1|EL_SYS, id, "getutent");
			endutent();	/* was leaked open */
			return(0);
		}
#ifdef AMDAHL_UTS
		if (utmp->ut_type == USER_PROCESS)
#endif
		if (!strncmp(utmp->ut_name, rcp->rcd_login,
		    sizeof (utmp->ut_name))) {
			endutent();
			return(1);
		}
	}
}