Preventing FIN_WAIT2 when closing a socket
- by patrickvacek
I have a server program that connects to another program via a given socket, and in certain cases I need to close the connection and almost immediately re-open it on the same port. This by and large works, except that I have to wait exactly one minute for the socket to reset. In the meantime, netstat indicates that the server sees the socket in FIN_WAIT2 and the client sees it as CLOSE_WAIT. I'm already using SO_REUSEADDR, which I thought would prevent the wait, but that isn't doing the trick. Setting SO_LINGER to zero also does not help. What else can I do to resolve this?
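For reference, here is roughly what those two option calls amount to, as a standalone sketch (the helper name apply_socket_options and its error handling are illustrative only; the real calls are in SetUpSocket() below, with the SO_LINGER part currently commented out):

#include <sys/socket.h>

// Sketch only: sock is assumed to be the listening socket from SetUpSocket().
static int apply_socket_options(int sock)
{
    // Allow the local address/port to be reused right after a restart.
    int flag = 1;
    if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) == -1)
    {
        return -1;
    }

    // The "SO_LINGER set to zero" attempt.
    struct linger li;
    li.l_onoff = 1;   // enable linger behaviour on close()
    li.l_linger = 0;  // ...with a zero timeout, i.e. an abortive close (RST)
    if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &li, sizeof(li)) == -1)
    {
        return -1;
    }

    return 0;
}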
Here are the relevant code snippets:
SetUpSocket()
{
    // Set up the socket and listen for a connection from the exelerate client.
    // Open a TCP/IP socket.
    m_baseSock = socket(PF_INET, SOCK_STREAM, IPPROTO_IP);
    if (m_baseSock < 0)
    {
        return XERROR;
    }

    // Set the socket options to reuse local addresses.
    int flag = 1;
    if (setsockopt(m_baseSock, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) == -1)
    {
        return XERROR;
    }

    // Set the socket options to prevent lingering after closing the socket.
    //~ struct linger li = {1, 0};
    //~ if (setsockopt(m_baseSock, SOL_SOCKET, SO_LINGER, &li, sizeof(li)) == -1)
    //~ {
    //~     return XERROR;
    //~ }

    // Bind the socket to the address of the current host and our given port.
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(m_port);
    if (bind(m_baseSock, (struct sockaddr*)&addr, sizeof(addr)) != 0)
    {
        return XERROR;
    }

    // Tell the socket to listen for a connection from the client.
    if (listen(m_baseSock, 4) != 0)
    {
        return XERROR;
    }

    return XSUCCESS;
}
ConnectSocket()
{
    // Add the socket to a file descriptor set.
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(m_baseSock, &readfds);

    // Set timeout to ten seconds. Plenty of time.
    struct timeval timeout;
    timeout.tv_sec = 10;
    timeout.tv_usec = 0;

    // Check to see if the socket is ready for reading.
    int numReady = select(m_baseSock + 1, &readfds, NULL, NULL, &timeout);
    if (numReady > 0)
    {
        int flags = fcntl(m_baseSock, F_GETFL, 0);
        fcntl(m_baseSock, F_SETFL, flags | O_NONBLOCK);

        // Wait for a connection attempt from the client. Do not block - we shouldn't
        // need to since we just selected.
        m_connectedSock = accept(m_baseSock, NULL, NULL);
        if (m_connectedSock > 0)
        {
            m_failedSend = false;
            m_logout = false;

            // Spawn a thread to accept commands from the client.
            CreateThread(&m_controlThread, ControlThread, (void *)&m_connectedSock);
            return XSUCCESS;
        }
    }
    return XERROR;
}
ControlThread(void *arg)
{
    // Get the socket from the argument.
    int sock = *((int*)arg);
    while (true)
    {
        // Add the socket to a file descriptor set.
        fd_set readfds;
        FD_ZERO(&readfds);
        FD_SET(sock, &readfds);

        // Set timeout to ten seconds. Plenty of time.
        struct timeval timeout;
        timeout.tv_sec = 10;
        timeout.tv_usec = 0;

        // Check if there is any readable data on the socket.
        int num_ready = select(sock + 1, &readfds, NULL, NULL, &timeout);
        if (num_ready < 0)
        {
            return NULL;
        }
        // If there is data, read it.
        else if (num_ready > 0)
        {
            // Check the read buffer.
            xuint8 buf[128];
            ssize_t size_read = recv(sock, buf, sizeof(buf), 0);
            if (size_read > 0)
            {
                // Get the message out of the buffer.
                char msg = *buf;
                if (msg == CONNECTED)
                {
                    // Do some things...
                }
                // If we get the log-out message, log out.
                else if (msg == LOGOUT)
                {
                    return NULL;
                }
            }
        }
    } // while
    return NULL;
}
~Server()
{
    // Close the sockets.
    if (m_baseSock != SOCKET_ERROR)
    {
        close(m_baseSock);
        m_baseSock = SOCKET_ERROR;
    }
    if (m_connectedSock != SOCKET_ERROR)
    {
        close(m_connectedSock);
        m_connectedSock = SOCKET_ERROR;
    }
}
SOCKET_ERROR is equal to -1. The server object gets destroyed (at which point the connection should close) and is then recreated, at which point SetUpSocket() and ConnectSocket() are called again.
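To make that lifecycle concrete, the restart path looks roughly like the sketch below (the surrounding function and the Server constructor arguments are illustrative, not the actual code):

// Sketch of the restart path described above (names are illustrative).
void RestartServer(Server*& server, int port)
{
    delete server;              // runs ~Server(), which close()es both sockets
    server = new Server(port);  // hypothetical constructor; stores m_port
    server->SetUpSocket();      // socket() + SO_REUSEADDR + bind() + listen()
    server->ConnectSocket();    // select() + accept() the new client connection
}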
So why do I have to wait a minute for the socket to clear? Any ideas would be appreciated.